From a0c7b765372d949cec54960dafcaadbc04b3204e Mon Sep 17 00:00:00 2001 From: Jeff Davis Date: Fri, 13 Jun 2025 10:02:24 -0700 Subject: [PATCH 001/181] Comment fixups from 626df47ad9. Reported-by: Peter Smith Discussion: https://postgr.es/m/CAHut+PspbHQmRCBL1c-opoJeTUKUaFFfUQJd2rhDZqwUrWCi7w@mail.gmail.com --- src/backend/executor/execGrouping.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/backend/executor/execGrouping.c b/src/backend/executor/execGrouping.c index 255bd795361a2..b540074935386 100644 --- a/src/backend/executor/execGrouping.c +++ b/src/backend/executor/execGrouping.c @@ -144,7 +144,7 @@ execTuplesHashPrepare(int numCols, * hashfunctions: FmgrInfos of datatype-specific hashing functions to use * collations: collations to use in comparisons * nbuckets: initial estimate of hashtable size - * additionalsize: size of data stored in ->additional + * additionalsize: size of data that may be stored along with the hash entry * metacxt: memory context for long-lived allocation, but not per-entry data * tablecxt: memory context in which to store table entries * tempcxt: short-lived context for evaluation hash and comparison functions @@ -288,7 +288,7 @@ ResetTupleHashTable(TupleHashTable hashtable) * * If isnew isn't NULL, then a new entry is created if no existing entry * matches. On return, *isnew is true if the entry is newly created, - * false if it existed already. ->additional_data in the new entry has + * false if it existed already. The additional data in the new entry has * been zeroed. */ TupleHashEntry From c45a1dba0d85c7a44f29f1841afd877ba4f4c683 Mon Sep 17 00:00:00 2001 From: Peter Geoghegan Date: Fri, 13 Jun 2025 19:58:47 -0400 Subject: [PATCH 002/181] nbtree: _bt_readnextpage doesn't affect markPos. _bt_readnextpage expects so->currPos.buf to be InvalidBuffer (and for the position's page to be unlocked) when called. However, it does not expect there to be no pins held on any page. In particular, so->markPos might hold a separate pin, both before and after the call. Fix some comments that seemed to suggest otherwise. Follow-up commit to commit 7c319f54, which made _bt_killitems drop pins it acquired itself. --- src/backend/access/nbtree/nbtsearch.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c index 070f14c8b91f0..36544ecfd5878 100644 --- a/src/backend/access/nbtree/nbtsearch.c +++ b/src/backend/access/nbtree/nbtsearch.c @@ -2282,9 +2282,12 @@ _bt_readfirstpage(IndexScanDesc scan, OffsetNumber offnum, ScanDirection dir) * previously-saved right link or left link. lastcurrblkno is the page that * was current at the point where the blkno link was saved, which we use to * reason about concurrent page splits/page deletions during backwards scans. + * In the common case where seized=false, blkno is either so->currPos.nextPage + * or so->currPos.prevPage, and lastcurrblkno is so->currPos.currPage. * - * On entry, caller shouldn't hold any locks or pins on any page (we work - * directly off of blkno and lastcurrblkno instead). Parallel scan callers + * On entry, so->currPos shouldn't be locked by caller. so->currPos.buf must + * be InvalidBuffer/unpinned as needed by caller (note that lastcurrblkno + * won't need to be read again in almost all cases). Parallel scan callers * that seized the scan before calling here should pass seized=true; such a * caller's blkno and lastcurrblkno arguments come from the seized scan. 
* seized=false callers just pass us the blkno/lastcurrblkno taken from their @@ -2301,8 +2304,8 @@ _bt_readfirstpage(IndexScanDesc scan, OffsetNumber offnum, ScanDirection dir) * success exit (except during so->dropPin index scans, when we drop the pin * eagerly to avoid blocking VACUUM). * - * If there are no more matching records in the given direction, we drop all - * locks and pins, invalidate so->currPos, and return false. + * If there are no more matching records in the given direction, we invalidate + * so->currPos (while ensuring it retains no locks or pins), and return false. * * We always release the scan for a parallel scan caller, regardless of * success or failure; we'll call _bt_parallel_release as soon as possible. From ca307d5cec90a4fde62a50fafc8ab607ff1d8664 Mon Sep 17 00:00:00 2001 From: Alexander Korotkov Date: Sat, 14 Jun 2025 03:36:04 +0300 Subject: [PATCH 003/181] Keep WAL segments by slot's last saved restart LSN The patch fixes the issue with the unexpected removal of old WAL segments after checkpoint, followed by an immediate restart. The issue occurs when a slot is advanced after the start of the checkpoint and before old WAL segments are removed at the end of the checkpoint. The patch introduces a new in-memory state for slots: last_saved_restart_lsn, which is used to calculate the oldest LSN for removing WAL segments. This state is updated every time with the current restart_lsn at the moment when the slot is saved to disk. This fix changes the shared memory layout. It's applied to HEAD only because we don't have to preserve ABI compatibility during the beta stage. Another fix that doesn't affect the ABI is committed to back branches. Discussion: https://postgr.es/m/1d12d2-67235980-35-19a406a0%4063439497 Author: Vitaly Davydov Author: Alexander Korotkov Reviewed-by: Amit Kapila --- src/backend/replication/slot.c | 57 ++++++++++++++++++++++++++++++++++ src/include/replication/slot.h | 8 +++++ 2 files changed, 65 insertions(+) diff --git a/src/backend/replication/slot.c b/src/backend/replication/slot.c index 600b87fa9cb65..c64f020742f8f 100644 --- a/src/backend/replication/slot.c +++ b/src/backend/replication/slot.c @@ -424,6 +424,7 @@ ReplicationSlotCreate(const char *name, bool db_specific, slot->candidate_restart_valid = InvalidXLogRecPtr; slot->candidate_restart_lsn = InvalidXLogRecPtr; slot->last_saved_confirmed_flush = InvalidXLogRecPtr; + slot->last_saved_restart_lsn = InvalidXLogRecPtr; slot->inactive_since = 0; /* @@ -1165,20 +1166,41 @@ ReplicationSlotsComputeRequiredLSN(void) { ReplicationSlot *s = &ReplicationSlotCtl->replication_slots[i]; XLogRecPtr restart_lsn; + XLogRecPtr last_saved_restart_lsn; bool invalidated; + ReplicationSlotPersistency persistency; if (!s->in_use) continue; SpinLockAcquire(&s->mutex); + persistency = s->data.persistency; restart_lsn = s->data.restart_lsn; invalidated = s->data.invalidated != RS_INVAL_NONE; + last_saved_restart_lsn = s->last_saved_restart_lsn; SpinLockRelease(&s->mutex); /* invalidated slots need not apply */ if (invalidated) continue; + /* + * For persistent slot use last_saved_restart_lsn to compute the + * oldest LSN for removal of WAL segments. The segments between + * last_saved_restart_lsn and restart_lsn might be needed by a + * persistent slot in the case of database crash. Non-persistent + * slots can't survive the database crash, so we don't care about + * last_saved_restart_lsn for them. 
+ */ + if (persistency == RS_PERSISTENT) + { + if (last_saved_restart_lsn != InvalidXLogRecPtr && + restart_lsn > last_saved_restart_lsn) + { + restart_lsn = last_saved_restart_lsn; + } + } + if (restart_lsn != InvalidXLogRecPtr && (min_required == InvalidXLogRecPtr || restart_lsn < min_required)) @@ -1216,7 +1238,9 @@ ReplicationSlotsComputeLogicalRestartLSN(void) { ReplicationSlot *s; XLogRecPtr restart_lsn; + XLogRecPtr last_saved_restart_lsn; bool invalidated; + ReplicationSlotPersistency persistency; s = &ReplicationSlotCtl->replication_slots[i]; @@ -1230,14 +1254,33 @@ ReplicationSlotsComputeLogicalRestartLSN(void) /* read once, it's ok if it increases while we're checking */ SpinLockAcquire(&s->mutex); + persistency = s->data.persistency; restart_lsn = s->data.restart_lsn; invalidated = s->data.invalidated != RS_INVAL_NONE; + last_saved_restart_lsn = s->last_saved_restart_lsn; SpinLockRelease(&s->mutex); /* invalidated slots need not apply */ if (invalidated) continue; + /* + * For persistent slot use last_saved_restart_lsn to compute the + * oldest LSN for removal of WAL segments. The segments between + * last_saved_restart_lsn and restart_lsn might be needed by a + * persistent slot in the case of database crash. Non-persistent + * slots can't survive the database crash, so we don't care about + * last_saved_restart_lsn for them. + */ + if (persistency == RS_PERSISTENT) + { + if (last_saved_restart_lsn != InvalidXLogRecPtr && + restart_lsn > last_saved_restart_lsn) + { + restart_lsn = last_saved_restart_lsn; + } + } + if (restart_lsn == InvalidXLogRecPtr) continue; @@ -1455,6 +1498,7 @@ ReplicationSlotReserveWal(void) Assert(slot != NULL); Assert(slot->data.restart_lsn == InvalidXLogRecPtr); + Assert(slot->last_saved_restart_lsn == InvalidXLogRecPtr); /* * The replication slot mechanism is used to prevent removal of required @@ -1766,6 +1810,8 @@ InvalidatePossiblyObsoleteSlot(uint32 possible_causes, */ SpinLockAcquire(&s->mutex); + Assert(s->data.restart_lsn >= s->last_saved_restart_lsn); + restart_lsn = s->data.restart_lsn; /* we do nothing if the slot is already invalid */ @@ -1835,7 +1881,10 @@ InvalidatePossiblyObsoleteSlot(uint32 possible_causes, * just rely on .invalidated. */ if (invalidation_cause == RS_INVAL_WAL_REMOVED) + { s->data.restart_lsn = InvalidXLogRecPtr; + s->last_saved_restart_lsn = InvalidXLogRecPtr; + } /* Let caller know */ *invalidated = true; @@ -2079,6 +2128,12 @@ CheckPointReplicationSlots(bool is_shutdown) SaveSlotToPath(s, path, LOG); } LWLockRelease(ReplicationSlotAllocationLock); + + /* + * Recompute the required LSN as SaveSlotToPath() updated + * last_saved_restart_lsn for slots. 
+ */ + ReplicationSlotsComputeRequiredLSN(); } /* @@ -2354,6 +2409,7 @@ SaveSlotToPath(ReplicationSlot *slot, const char *dir, int elevel) if (!slot->just_dirtied) slot->dirty = false; slot->last_saved_confirmed_flush = cp.slotdata.confirmed_flush; + slot->last_saved_restart_lsn = cp.slotdata.restart_lsn; SpinLockRelease(&slot->mutex); LWLockRelease(&slot->io_in_progress_lock); @@ -2569,6 +2625,7 @@ RestoreSlotFromDisk(const char *name) slot->effective_xmin = cp.slotdata.xmin; slot->effective_catalog_xmin = cp.slotdata.catalog_xmin; slot->last_saved_confirmed_flush = cp.slotdata.confirmed_flush; + slot->last_saved_restart_lsn = cp.slotdata.restart_lsn; slot->candidate_catalog_xmin = InvalidTransactionId; slot->candidate_xmin_lsn = InvalidXLogRecPtr; diff --git a/src/include/replication/slot.h b/src/include/replication/slot.h index eb0b93b11141d..ffacba9d2ae52 100644 --- a/src/include/replication/slot.h +++ b/src/include/replication/slot.h @@ -215,6 +215,14 @@ typedef struct ReplicationSlot * recently stopped. */ TimestampTz inactive_since; + + /* + * Latest restart_lsn that has been flushed to disk. For persistent slots + * the flushed LSN should be taken into account when calculating the + * oldest LSN for WAL segments removal. + */ + XLogRecPtr last_saved_restart_lsn; + } ReplicationSlot; #define SlotIsPhysical(slot) ((slot)->data.database == InvalidOid) From eb124c3d6deb5d0c7a588d847e3840bcc2cd0dcc Mon Sep 17 00:00:00 2001 From: Alexander Korotkov Date: Sat, 14 Jun 2025 03:35:27 +0300 Subject: [PATCH 004/181] Add TAP tests to check replication slot advance during the checkpoint The new tests verify that logical and physical replication slots are still valid after an immediate restart on checkpoint completion when the slot was advanced during the checkpoint. This commit introduces two new injection points to make these tests possible: * checkpoint-before-old-wal-removal - triggered in the checkpointer process just before old WAL segments cleanup; * logical-replication-slot-advance-segment - triggered in LogicalConfirmReceivedLocation() when restart_lsn was changed enough to point to the next WAL segment. Discussion: https://postgr.es/m/flat/1d12d2-67235980-35-19a406a0%4063439497 Author: Vitaly Davydov Author: Tomas Vondra Reviewed-by: Alexander Korotkov Reviewed-by: Amit Kapila Backpatch-through: 17 --- src/backend/access/transam/xlog.c | 4 + src/backend/replication/logical/logical.c | 18 +++ src/test/recovery/meson.build | 2 + .../recovery/t/046_checkpoint_logical_slot.pl | 139 ++++++++++++++++++ .../t/047_checkpoint_physical_slot.pl | 133 +++++++++++++++++ 5 files changed, 296 insertions(+) create mode 100644 src/test/recovery/t/046_checkpoint_logical_slot.pl create mode 100644 src/test/recovery/t/047_checkpoint_physical_slot.pl diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index 1914859b2eed7..47ffc0a230772 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -7498,6 +7498,10 @@ CreateCheckPoint(int flags) if (PriorRedoPtr != InvalidXLogRecPtr) UpdateCheckPointDistanceEstimate(RedoRecPtr - PriorRedoPtr); +#ifdef USE_INJECTION_POINTS + INJECTION_POINT("checkpoint-before-old-wal-removal", NULL); +#endif + /* * Delete old log files, those no longer needed for last checkpoint to * prevent the disk holding the xlog from growing full. 
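The new checkpoint-before-old-wal-removal point makes the checkpointer pause just before the WAL-cleanup step shown above. As a sketch of how the TAP tests added below drive it (this assumes the injection_points test extension from src/test/modules is loaded via shared_preload_libraries):

    -- session 1: arrange for the checkpointer to wait at the new point
    select injection_points_attach('checkpoint-before-old-wal-removal', 'wait');
    checkpoint;  -- blocks right before old WAL segments are removed

    -- session 2: advance slots, inspect state, etc., then release it
    select injection_points_wakeup('checkpoint-before-old-wal-removal');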
diff --git a/src/backend/replication/logical/logical.c b/src/backend/replication/logical/logical.c
index 1d56d0c4ef314..f1eb798f3e97a 100644
--- a/src/backend/replication/logical/logical.c
+++ b/src/backend/replication/logical/logical.c
@@ -29,6 +29,7 @@
 #include "postgres.h"
 
 #include "access/xact.h"
+#include "access/xlog_internal.h"
 #include "access/xlogutils.h"
 #include "fmgr.h"
 #include "miscadmin.h"
@@ -41,6 +42,7 @@
 #include "storage/proc.h"
 #include "storage/procarray.h"
 #include "utils/builtins.h"
+#include "utils/injection_point.h"
 #include "utils/inval.h"
 #include "utils/memutils.h"
 
@@ -1825,9 +1827,13 @@ LogicalConfirmReceivedLocation(XLogRecPtr lsn)
 	{
 		bool		updated_xmin = false;
 		bool		updated_restart = false;
+		XLogRecPtr	restart_lsn pg_attribute_unused();
 
 		SpinLockAcquire(&MyReplicationSlot->mutex);
 
+		/* remember the old restart lsn */
+		restart_lsn = MyReplicationSlot->data.restart_lsn;
+
 		/*
 		 * Prevent moving the confirmed_flush backwards, as this could lead to
 		 * data duplication issues caused by replicating already replicated
@@ -1881,6 +1887,18 @@ LogicalConfirmReceivedLocation(XLogRecPtr lsn)
 		/* first write new xmin to disk, so we know what's up after a crash */
 		if (updated_xmin || updated_restart)
 		{
+#ifdef USE_INJECTION_POINTS
+			XLogSegNo	seg1,
+						seg2;
+
+			XLByteToSeg(restart_lsn, seg1, wal_segment_size);
+			XLByteToSeg(MyReplicationSlot->data.restart_lsn, seg2, wal_segment_size);
+
+			/* trigger injection point, but only if segment changes */
+			if (seg1 != seg2)
+				INJECTION_POINT("logical-replication-slot-advance-segment", NULL);
+#endif
+
 			ReplicationSlotMarkDirty();
 			ReplicationSlotSave();
 			elog(DEBUG1, "updated xmin: %u restart: %u", updated_xmin, updated_restart);
diff --git a/src/test/recovery/meson.build b/src/test/recovery/meson.build
index cb983766c6793..92429d2840257 100644
--- a/src/test/recovery/meson.build
+++ b/src/test/recovery/meson.build
@@ -54,6 +54,8 @@ tests += {
       't/043_no_contrecord_switch.pl',
       't/044_invalidate_inactive_slots.pl',
       't/045_archive_restartpoint.pl',
+      't/046_checkpoint_logical_slot.pl',
+      't/047_checkpoint_physical_slot.pl'
     ],
   },
 }
diff --git a/src/test/recovery/t/046_checkpoint_logical_slot.pl b/src/test/recovery/t/046_checkpoint_logical_slot.pl
new file mode 100644
index 0000000000000..b4265c4a6a53f
--- /dev/null
+++ b/src/test/recovery/t/046_checkpoint_logical_slot.pl
@@ -0,0 +1,139 @@
+# Copyright (c) 2025, PostgreSQL Global Development Group
+#
+# This test verifies the case when the logical slot is advanced during a
+# checkpoint. The test checks that the logical slot's restart_lsn still refers
+# to an existing WAL segment after an immediate restart.
+#
+use strict;
+use warnings FATAL => 'all';
+
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+
+use Test::More;
+
+if ($ENV{enable_injection_points} ne 'yes')
+{
+	plan skip_all => 'Injection points not supported by this build';
+}
+
+my ($node, $result);
+
+$node = PostgreSQL::Test::Cluster->new('mike');
+$node->init;
+$node->append_conf('postgresql.conf',
+	"shared_preload_libraries = 'injection_points'");
+$node->append_conf('postgresql.conf', "wal_level = 'logical'");
+$node->start;
+$node->safe_psql('postgres', q(CREATE EXTENSION injection_points));
+
+# Create a simple table to generate data into.
+$node->safe_psql('postgres',
+	q{create table t (id serial primary key, b text)});
+
+# Create the two slots we'll need.
+$node->safe_psql('postgres',
+	q{select pg_create_logical_replication_slot('slot_logical', 'test_decoding')}
+);
+$node->safe_psql('postgres',
+	q{select pg_create_physical_replication_slot('slot_physical', true)});
+
+# Advance both slots to the current position just to have everything "valid".
+$node->safe_psql('postgres',
+	q{select count(*) from pg_logical_slot_get_changes('slot_logical', null, null)}
+);
+$node->safe_psql('postgres',
+	q{select pg_replication_slot_advance('slot_physical', pg_current_wal_lsn())}
+);
+
+# Run checkpoint to flush current state to disk and set a baseline.
+$node->safe_psql('postgres', q{checkpoint});
+
+# Generate some transactions to get RUNNING_XACTS.
+my $xacts = $node->background_psql('postgres');
+$xacts->query_until(
+	qr/run_xacts/,
+	q(\echo run_xacts
+SELECT 1 \watch 0.1
+\q
+));
+
+# Insert 1M rows; that's about 130MB (~8 segments) worth of WAL.
+$node->safe_psql('postgres',
+	q{insert into t (b) select md5(i::text) from generate_series(1,1000000) s(i)}
+);
+
+# Run another checkpoint to set a new restart LSN.
+$node->safe_psql('postgres', q{checkpoint});
+
+# Another 1M rows; that's about 130MB (~8 segments) worth of WAL.
+$node->safe_psql('postgres',
+	q{insert into t (b) select md5(i::text) from generate_series(1,1000000) s(i)}
+);
+
+# Run another checkpoint, this time in the background, and make it wait
+# on the injection point, so that the checkpoint stops right before
+# removing old WAL segments.
+note('starting checkpoint');
+
+my $checkpoint = $node->background_psql('postgres');
+$checkpoint->query_safe(
+	q(select injection_points_attach('checkpoint-before-old-wal-removal','wait'))
+);
+$checkpoint->query_until(
+	qr/starting_checkpoint/,
+	q(\echo starting_checkpoint
+checkpoint;
+\q
+));
+
+# Wait until the checkpoint stops right before removing WAL segments.
+note('waiting for injection_point');
+$node->wait_for_event('checkpointer', 'checkpoint-before-old-wal-removal');
+note('injection_point is reached');
+
+# Try to advance the logical slot, but make it stop when it moves to the next
+# WAL segment (this has to happen in the background, too).
+my $logical = $node->background_psql('postgres');
+$logical->query_safe(
+	q{select injection_points_attach('logical-replication-slot-advance-segment','wait');}
+);
+$logical->query_until(
+	qr/get_changes/,
+	q(
+\echo get_changes
+select count(*) from pg_logical_slot_get_changes('slot_logical', null, null) \watch 1
+\q
+));
+
+# Wait until the slot's restart_lsn points to the next WAL segment.
+note('waiting for injection_point');
+$node->wait_for_event('client backend',
+	'logical-replication-slot-advance-segment');
+note('injection_point is reached');
+
+# OK, we're in the right situation: time to advance the physical slot, which
+# recalculates the required LSN, and then unblock the checkpoint, which
+# removes the WAL still needed by the logical slot.
+$node->safe_psql('postgres',
+	q{select pg_replication_slot_advance('slot_physical', pg_current_wal_lsn())}
+);
+
+# Continue the checkpoint.
+$node->safe_psql('postgres',
+	q{select injection_points_wakeup('checkpoint-before-old-wal-removal')});
+
+# Abruptly stop the server (1 second should be enough for the checkpoint
+# to finish by now).
+$node->stop('immediate');
+
+$node->start;
+
+eval {
+	$node->safe_psql('postgres',
+		q{select count(*) from pg_logical_slot_get_changes('slot_logical', null, null);}
+	);
+};
+is($@, '', "Logical slot still valid");
+
+done_testing();
diff --git a/src/test/recovery/t/047_checkpoint_physical_slot.pl b/src/test/recovery/t/047_checkpoint_physical_slot.pl
new file mode 100644
index 0000000000000..454e56b9bd2da
--- /dev/null
+++ b/src/test/recovery/t/047_checkpoint_physical_slot.pl
@@ -0,0 +1,133 @@
+# Copyright (c) 2025, PostgreSQL Global Development Group
+#
+# This test verifies the case when the physical slot is advanced during a
+# checkpoint. The test checks that the physical slot's restart_lsn still
+# refers to an existing WAL segment after an immediate restart.
+#
+use strict;
+use warnings FATAL => 'all';
+
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+
+use Test::More;
+
+if ($ENV{enable_injection_points} ne 'yes')
+{
+	plan skip_all => 'Injection points not supported by this build';
+}
+
+my ($node, $result);
+
+$node = PostgreSQL::Test::Cluster->new('mike');
+$node->init;
+$node->append_conf('postgresql.conf',
+	"shared_preload_libraries = 'injection_points'");
+$node->append_conf('postgresql.conf', "wal_level = 'replica'");
+$node->start;
+$node->safe_psql('postgres', q(CREATE EXTENSION injection_points));
+
+# Create a simple table to generate data into.
+$node->safe_psql('postgres',
+	q{create table t (id serial primary key, b text)});
+
+# Create a physical replication slot.
+$node->safe_psql('postgres',
+	q{select pg_create_physical_replication_slot('slot_physical', true)});
+
+# Advance slot to the current position, just to have everything "valid".
+$node->safe_psql('postgres',
+	q{select pg_replication_slot_advance('slot_physical', pg_current_wal_lsn())}
+);
+
+# Run checkpoint to flush current state to disk and set a baseline.
+$node->safe_psql('postgres', q{checkpoint});
+
+# Insert 100k rows; that's about 13MB (~1 segment) worth of WAL.
+$node->safe_psql('postgres',
+	q{insert into t (b) select md5(i::text) from generate_series(1,100000) s(i)}
+);
+
+# Advance slot to the current position, just to have everything "valid".
+$node->safe_psql('postgres',
+	q{select pg_replication_slot_advance('slot_physical', pg_current_wal_lsn())}
+);
+
+# Run another checkpoint to set a new restart LSN.
+$node->safe_psql('postgres', q{checkpoint});
+
+# Another 1M rows; that's about 130MB (~8 segments) worth of WAL.
+$node->safe_psql('postgres',
+	q{insert into t (b) select md5(i::text) from generate_series(1,1000000) s(i)}
+);
+
+my $restart_lsn_init = $node->safe_psql('postgres',
+	q{select restart_lsn from pg_replication_slots where slot_name = 'slot_physical'}
+);
+chomp($restart_lsn_init);
+note("restart lsn before checkpoint: $restart_lsn_init");
+
+# Run another checkpoint, this time in the background, and make it wait
+# on the injection point, so that the checkpoint stops right before
+# removing old WAL segments.
+note('starting checkpoint');
+
+my $checkpoint = $node->background_psql('postgres');
+$checkpoint->query_safe(
+	q{select injection_points_attach('checkpoint-before-old-wal-removal','wait')}
+);
+$checkpoint->query_until(
+	qr/starting_checkpoint/,
+	q(\echo starting_checkpoint
+checkpoint;
+\q
+));
+
+# Wait until the checkpoint stops right before removing WAL segments.
+note('waiting for injection_point');
+$node->wait_for_event('checkpointer', 'checkpoint-before-old-wal-removal');
+note('injection_point is reached');
+
+# OK, we're in the right situation: time to advance the physical slot, which
+# recalculates the required LSN, and then unblock the checkpoint, which
+# removes the WAL still needed by the physical slot.
+$node->safe_psql('postgres',
+	q{select pg_replication_slot_advance('slot_physical', pg_current_wal_lsn())}
+);
+
+# Continue the checkpoint.
+$node->safe_psql('postgres',
+	q{select injection_points_wakeup('checkpoint-before-old-wal-removal')});
+
+my $restart_lsn_old = $node->safe_psql('postgres',
+	q{select restart_lsn from pg_replication_slots where slot_name = 'slot_physical'}
+);
+chomp($restart_lsn_old);
+note("restart lsn before stop: $restart_lsn_old");
+
+# Abruptly stop the server (1 second should be enough for the checkpoint
+# to finish by now).
+$node->stop('immediate');
+
+$node->start;
+
+# Get the restart_lsn of the slot right after restarting.
+my $restart_lsn = $node->safe_psql('postgres',
+	q{select restart_lsn from pg_replication_slots where slot_name = 'slot_physical'}
+);
+chomp($restart_lsn);
+note("restart lsn: $restart_lsn");
+
+# Get the WAL segment name for the slot's restart_lsn.
+my $restart_lsn_segment = $node->safe_psql('postgres',
+	"SELECT pg_walfile_name('$restart_lsn'::pg_lsn)");
+chomp($restart_lsn_segment);
+
+# Check if the required wal segment exists.
+note("required by slot segment name: $restart_lsn_segment");
+my $datadir = $node->data_dir;
+ok( -f "$datadir/pg_wal/$restart_lsn_segment",
+	"WAL segment $restart_lsn_segment for physical slot's restart_lsn $restart_lsn exists"
+);
+
+done_testing();
From be37ac20fc23511c49b9c56567c7c707b73d10b0 Mon Sep 17 00:00:00 2001
From: Fujii Masao
Date: Sat, 14 Jun 2025 10:37:12 +0900
Subject: [PATCH 005/181] psql: Report full protocol version in \conninfo output.

Commit bba2fbc6238 modified \conninfo to display the protocol version
used by the current connection, but it only showed the major version
(e.g., 3). This commit updates \conninfo to display the full protocol
version (e.g., 3.2). Since support for new version 3.2 was added in
v18, and the server supports both 3.0 and 3.2, showing the complete
version helps users understand exactly which protocol version the
current session is using.

Although this is a minor behavior change, it's considered a fix for an
oversight in the original patch and is included in v18.

Author: Fujii Masao
Reviewed-by: David G. Johnston
Discussion: https://postgr.es/m/685961b8-b6ce-40bb-b2d5-c2ff135d3388@oss.nttdata.com
---
 src/bin/psql/command.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/src/bin/psql/command.c b/src/bin/psql/command.c
index 81a5ba844ba0f..1f7635d0c235c 100644
--- a/src/bin/psql/command.c
+++ b/src/bin/psql/command.c
@@ -778,6 +778,7 @@ exec_command_conninfo(PsqlScanState scan_state, bool active_branch)
 	int			ssl_in_use,
 				password_used,
 				gssapi_used;
+	int			version_num;
 	char	   *paramval;
 
 	if (!active_branch)
@@ -793,7 +794,9 @@ exec_command_conninfo(PsqlScanState scan_state, bool active_branch)
 	/* Get values for the parameters */
 	host = PQhost(pset.db);
 	hostaddr = PQhostaddr(pset.db);
-	protocol_version = psprintf("%d", PQprotocolVersion(pset.db));
+	version_num = PQfullProtocolVersion(pset.db);
+	protocol_version = psprintf("%d.%d", version_num / 10000,
+								version_num % 10000);
 	ssl_in_use = PQsslInUse(pset.db);
 	password_used = PQconnectionUsedPassword(pset.db);
 	gssapi_used = PQconnectionUsedGSSAPI(pset.db);
From 0fe50417eca46d6993da350845dc09d7e899c6d6 Mon Sep 17 00:00:00 2001
From: Fujii Masao
Date: Sat, 14 Jun 2025 10:39:26 +0900
Subject: [PATCH 006/181] doc: Add note about "Client User" and "Superuser" fields in \conninfo output.

In the \conninfo psql command, the "Client User" column shows the user
who established the connection, while the "Superuser" column reflects
whether the current user in the current execution context is a
superuser. This means the users referred to in these columns can
differ, for example, if the current user was changed with the SET ROLE
command.

This commit adds a note to the \conninfo documentation to clarify this
behavior and avoid potential confusion.

Author: Fujii Masao
Reviewed-by: Robert Treat
Reviewed-by: David G. Johnston
Discussion: https://postgr.es/m/685961b8-b6ce-40bb-b2d5-c2ff135d3388@oss.nttdata.com
---
 doc/src/sgml/ref/psql-ref.sgml | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/doc/src/sgml/ref/psql-ref.sgml b/doc/src/sgml/ref/psql-ref.sgml
index a7fd4a21d9f92..15e3f3a849226 100644
--- a/doc/src/sgml/ref/psql-ref.sgml
+++ b/doc/src/sgml/ref/psql-ref.sgml
@@ -1103,6 +1103,15 @@ SELECT $1 \parse stmt1
         Outputs information about the current database connection,
         including TLS-related information if TLS is in use.
+
+        Note that the Client User field shows
+        the user at the time of connection, while the
+        Superuser field indicates whether
+        the current user (in the current execution context) has
+        superuser privileges. These users are usually the same, but they can
+        differ, for example, if the current user was changed with the
+        SET ROLE command.
+
From 2f98f967fa78fd36279989ecdd5fbf74ab332fa9 Mon Sep 17 00:00:00 2001
From: David Rowley
Date: Sat, 14 Jun 2025 17:18:31 +1200
Subject: [PATCH 007/181] Improve comments for TidRangeEval

Here we provide a bit more detail on why TidRangeEval() does not return
false when trss_mintid is greater than trss_maxtid.
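For example (an illustrative query, not taken from the patch), a backwards
range such as

    select * from t where ctid >= '(100,0)' and ctid <= '(10,0)';

leaves trss_mintid greater than trss_maxtid. TidRangeEval() still returns
true for that range; it is the scan_set_tidrange() table AM callback
(heap_set_tidrange() in the heap AM) that treats the range as empty, so the
scan returns no tuples.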
Reported-by: Junwang Zhao Author: David Rowley Reviewed-by: Junwang Zhao Discussion: https://postgr.es/m/CAEG8a3KUbUUqQgfK5X8Sj-%2BppPtGNTU%2BZiep0Rxr7SLjoR%2BB6w%40mail.gmail.com --- src/backend/executor/nodeTidrangescan.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/backend/executor/nodeTidrangescan.c b/src/backend/executor/nodeTidrangescan.c index ab2eab9596e42..26f7420b64b0e 100644 --- a/src/backend/executor/nodeTidrangescan.c +++ b/src/backend/executor/nodeTidrangescan.c @@ -128,9 +128,11 @@ TidExprListCreate(TidRangeScanState *tidrangestate) * TidRangeEval * * Compute and set node's block and offset range to scan by evaluating - * the trss_tidexprs. Returns false if we detect the range cannot + * node->trss_tidexprs. Returns false if we detect the range cannot * contain any tuples. Returns true if it's possible for the range to - * contain tuples. + * contain tuples. We don't bother validating that trss_mintid is less + * than or equal to trss_maxtid, as the scan_set_tidrange() table AM + * function will handle that. * ---------------------------------------------------------------- */ static bool From 6d6480066c1a96c7130b97b1139fdada9d484f80 Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Sun, 15 Jun 2025 10:59:30 +0200 Subject: [PATCH 008/181] psql: Change new \conninfo to use SSL instead of TLS Commit bba2fbc6238 introduced a new implementation of the \conninfo command in psql. That new code uses the term "TLS" while the rest of PostgreSQL, including the rest of psql, consistently uses "SSL". This is uselessly confusing. This changes the new code to use "SSL" as well. Reviewed-by: Alvaro Herrera Discussion: https://www.postgresql.org/message-id/f4ff9294-b491-4053-83f5-11c10ab8c999@eisentraut.org --- doc/src/sgml/ref/psql-ref.sgml | 2 +- src/bin/psql/command.c | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/doc/src/sgml/ref/psql-ref.sgml b/doc/src/sgml/ref/psql-ref.sgml index 15e3f3a849226..570ef21d1fce3 100644 --- a/doc/src/sgml/ref/psql-ref.sgml +++ b/doc/src/sgml/ref/psql-ref.sgml @@ -1101,7 +1101,7 @@ SELECT $1 \parse stmt1 Outputs information about the current database connection, - including TLS-related information if TLS is in use. + including SSL-related information if SSL is in use. Note that the Client User field shows diff --git a/src/bin/psql/command.c b/src/bin/psql/command.c index 1f7635d0c235c..e26c010d044ee 100644 --- a/src/bin/psql/command.c +++ b/src/bin/psql/command.c @@ -877,11 +877,11 @@ exec_command_conninfo(PsqlScanState scan_state, bool active_branch) printTableAddCell(&cont, _("Backend PID"), false, false); printTableAddCell(&cont, backend_pid, false, false); - /* TLS Connection */ - printTableAddCell(&cont, _("TLS Connection"), false, false); + /* SSL Connection */ + printTableAddCell(&cont, _("SSL Connection"), false, false); printTableAddCell(&cont, ssl_in_use ? _("true") : _("false"), false, false); - /* TLS Information */ + /* SSL Information */ if (ssl_in_use) { char *library, @@ -898,19 +898,19 @@ exec_command_conninfo(PsqlScanState scan_state, bool active_branch) compression = (char *) PQsslAttribute(pset.db, "compression"); alpn = (char *) PQsslAttribute(pset.db, "alpn"); - printTableAddCell(&cont, _("TLS Library"), false, false); + printTableAddCell(&cont, _("SSL Library"), false, false); printTableAddCell(&cont, library ? 
library : _("unknown"), false, false); - printTableAddCell(&cont, _("TLS Protocol"), false, false); + printTableAddCell(&cont, _("SSL Protocol"), false, false); printTableAddCell(&cont, protocol ? protocol : _("unknown"), false, false); - printTableAddCell(&cont, _("TLS Key Bits"), false, false); + printTableAddCell(&cont, _("SSL Key Bits"), false, false); printTableAddCell(&cont, key_bits ? key_bits : _("unknown"), false, false); - printTableAddCell(&cont, _("TLS Cipher"), false, false); + printTableAddCell(&cont, _("SSL Cipher"), false, false); printTableAddCell(&cont, cipher ? cipher : _("unknown"), false, false); - printTableAddCell(&cont, _("TLS Compression"), false, false); + printTableAddCell(&cont, _("SSL Compression"), false, false); printTableAddCell(&cont, (compression && strcmp(compression, "off") != 0) ? _("true") : _("false"), false, false); From b27644bade0348d0dafd3036c47880a349fe9332 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Sun, 15 Jun 2025 13:04:24 -0400 Subject: [PATCH 009/181] Sync typedefs.list with the buildfarm. Our maintenance of typedefs.list has been a little haphazard (and apparently we can't alphabetize worth a darn). Replace the file with the authoritative list from our buildfarm, and run pgindent using that. I also updated the additions/exclusions lists in pgindent where necessary to keep pgindent from messing things up significantly. Notably, now that regex_t and some related names are macros not real typedefs, we have to whitelist them explicitly. The exclusions list has also drifted noticeably, presumably due to changes of system headers on the buildfarm animals that contribute to the list. Unlike in prior years, I've not manually added typedef names that are missing from the buildfarm's list because they are not used to declare any variables or fields. So there are a few places where the typedef declaration itself is formatted worse than before, e.g. typedef enum IoMethod. I could preserve the names that were manually added to the list previously, but I'd really prefer to find a less manual way of dealing with these cases. A quick grep finds about 75 such symbols, most of which have never gotten any special treatment. Per discussion among pgsql-release, doing this now seems appropriate even though we're still a week or two away from making the v18 branch. 
--- src/backend/utils/adt/mcxtfuncs.c | 2 +- src/include/access/heapam.h | 2 +- src/include/executor/nodeAgg.h | 2 +- src/include/storage/aio.h | 2 +- src/include/storage/copydir.h | 2 +- src/include/storage/sinval.h | 2 +- src/include/tcop/backend_startup.h | 2 +- src/include/utils/elog.h | 2 +- src/include/utils/skipsupport.h | 2 +- src/pl/plpython/plpy_cursorobject.c | 6 +- src/pl/plpython/plpy_planobject.c | 6 +- src/pl/plpython/plpy_resultobject.c | 6 +- src/pl/plpython/plpy_subxactobject.c | 6 +- src/test/modules/test_aio/test_aio.c | 4 +- src/tools/pgindent/pgindent | 9 +- src/tools/pgindent/typedefs.list | 145 ++++++++++++++++----------- 16 files changed, 116 insertions(+), 84 deletions(-) diff --git a/src/backend/utils/adt/mcxtfuncs.c b/src/backend/utils/adt/mcxtfuncs.c index 396c2f223b4e1..fe6dce9cba3ec 100644 --- a/src/backend/utils/adt/mcxtfuncs.c +++ b/src/backend/utils/adt/mcxtfuncs.c @@ -38,7 +38,7 @@ typedef struct MemoryContextId { MemoryContext context; int context_id; -} MemoryContextId; +} MemoryContextId; /* * int_list_to_array diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h index e48fe434cd393..3a9424c19c9ae 100644 --- a/src/include/access/heapam.h +++ b/src/include/access/heapam.h @@ -96,7 +96,7 @@ typedef struct HeapScanDescData uint32 rs_cindex; /* current tuple's index in vistuples */ uint32 rs_ntuples; /* number of visible tuples on page */ OffsetNumber rs_vistuples[MaxHeapTuplesPerPage]; /* their offsets */ -} HeapScanDescData; +} HeapScanDescData; typedef struct HeapScanDescData *HeapScanDesc; typedef struct BitmapHeapScanDescData diff --git a/src/include/executor/nodeAgg.h b/src/include/executor/nodeAgg.h index 34b82d0f5d17d..6c4891bbaeb49 100644 --- a/src/include/executor/nodeAgg.h +++ b/src/include/executor/nodeAgg.h @@ -264,7 +264,7 @@ typedef struct AggStatePerGroupData * NULL and not auto-replace it with a later input value. Only the first * non-NULL input will be auto-substituted. */ -} AggStatePerGroupData; +} AggStatePerGroupData; /* * AggStatePerPhaseData - per-grouping-set-phase state diff --git a/src/include/storage/aio.h b/src/include/storage/aio.h index f3726bc3dc511..e7a0a234b6cf2 100644 --- a/src/include/storage/aio.h +++ b/src/include/storage/aio.h @@ -36,7 +36,7 @@ typedef enum IoMethod #ifdef IOMETHOD_IO_URING_ENABLED IOMETHOD_IO_URING, #endif -} IoMethod; +} IoMethod; /* We'll default to worker based execution. 
*/ #define DEFAULT_IO_METHOD IOMETHOD_WORKER diff --git a/src/include/storage/copydir.h b/src/include/storage/copydir.h index 940d74462d129..f1d7beeed1a3d 100644 --- a/src/include/storage/copydir.h +++ b/src/include/storage/copydir.h @@ -17,7 +17,7 @@ typedef enum FileCopyMethod { FILE_COPY_METHOD_COPY, FILE_COPY_METHOD_CLONE, -} FileCopyMethod; +} FileCopyMethod; /* GUC parameters */ extern PGDLLIMPORT int file_copy_method; diff --git a/src/include/storage/sinval.h b/src/include/storage/sinval.h index 5dc5aafe5c9ff..845a5851b574e 100644 --- a/src/include/storage/sinval.h +++ b/src/include/storage/sinval.h @@ -119,7 +119,7 @@ typedef struct Oid dbId; /* database ID */ Oid relid; /* relation ID, or 0 if whole * RelationSyncCache */ -} SharedInvalRelSyncMsg; +} SharedInvalRelSyncMsg; typedef union { diff --git a/src/include/tcop/backend_startup.h b/src/include/tcop/backend_startup.h index dcb9d056643f2..e8639688c00bc 100644 --- a/src/include/tcop/backend_startup.h +++ b/src/include/tcop/backend_startup.h @@ -86,7 +86,7 @@ typedef enum LogConnectionOption LOG_CONNECTION_AUTHENTICATION | LOG_CONNECTION_AUTHORIZATION | LOG_CONNECTION_SETUP_DURATIONS, -} LogConnectionOption; +} LogConnectionOption; /* * A collection of timings of various stages of connection establishment and diff --git a/src/include/utils/elog.h b/src/include/utils/elog.h index 5eac0e16970c3..675f4f5f4694d 100644 --- a/src/include/utils/elog.h +++ b/src/include/utils/elog.h @@ -485,7 +485,7 @@ typedef enum PGERROR_TERSE, /* single-line error messages */ PGERROR_DEFAULT, /* recommended style */ PGERROR_VERBOSE, /* all the facts, ma'am */ -} PGErrorVerbosity; +} PGErrorVerbosity; extern PGDLLIMPORT int Log_error_verbosity; extern PGDLLIMPORT char *Log_line_prefix; diff --git a/src/include/utils/skipsupport.h b/src/include/utils/skipsupport.h index bc51847cf617a..c42be001fb546 100644 --- a/src/include/utils/skipsupport.h +++ b/src/include/utils/skipsupport.h @@ -90,7 +90,7 @@ typedef struct SkipSupportData */ SkipSupportIncDec decrement; SkipSupportIncDec increment; -} SkipSupportData; +} SkipSupportData; extern SkipSupport PrepareSkipSupportFromOpclass(Oid opfamily, Oid opcintype, bool reverse); diff --git a/src/pl/plpython/plpy_cursorobject.c b/src/pl/plpython/plpy_cursorobject.c index 37d7efca77ce5..cc74c4df6ba67 100644 --- a/src/pl/plpython/plpy_cursorobject.c +++ b/src/pl/plpython/plpy_cursorobject.c @@ -58,9 +58,9 @@ static PyType_Slot PLyCursor_slots[] = static PyType_Spec PLyCursor_spec = { .name = "PLyCursor", - .basicsize = sizeof(PLyCursorObject), - .flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, - .slots = PLyCursor_slots, + .basicsize = sizeof(PLyCursorObject), + .flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, + .slots = PLyCursor_slots, }; static PyTypeObject *PLy_CursorType; diff --git a/src/pl/plpython/plpy_planobject.c b/src/pl/plpython/plpy_planobject.c index 6044893afdd13..edfb76c877020 100644 --- a/src/pl/plpython/plpy_planobject.c +++ b/src/pl/plpython/plpy_planobject.c @@ -45,9 +45,9 @@ static PyType_Slot PLyPlan_slots[] = static PyType_Spec PLyPlan_spec = { .name = "PLyPlan", - .basicsize = sizeof(PLyPlanObject), - .flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, - .slots = PLyPlan_slots, + .basicsize = sizeof(PLyPlanObject), + .flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, + .slots = PLyPlan_slots, }; static PyTypeObject *PLy_PlanType; diff --git a/src/pl/plpython/plpy_resultobject.c b/src/pl/plpython/plpy_resultobject.c index 0d9997cbaa32c..d433929b36039 100644 --- 
a/src/pl/plpython/plpy_resultobject.c +++ b/src/pl/plpython/plpy_resultobject.c @@ -70,9 +70,9 @@ static PyType_Slot PLyResult_slots[] = static PyType_Spec PLyResult_spec = { .name = "PLyResult", - .basicsize = sizeof(PLyResultObject), - .flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, - .slots = PLyResult_slots, + .basicsize = sizeof(PLyResultObject), + .flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, + .slots = PLyResult_slots, }; static PyTypeObject *PLy_ResultType; diff --git a/src/pl/plpython/plpy_subxactobject.c b/src/pl/plpython/plpy_subxactobject.c index c2484a99b4ae3..c225b652ab4a5 100644 --- a/src/pl/plpython/plpy_subxactobject.c +++ b/src/pl/plpython/plpy_subxactobject.c @@ -46,9 +46,9 @@ static PyType_Slot PLySubtransaction_slots[] = static PyType_Spec PLySubtransaction_spec = { .name = "PLySubtransaction", - .basicsize = sizeof(PLySubtransactionObject), - .flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, - .slots = PLySubtransaction_slots, + .basicsize = sizeof(PLySubtransactionObject), + .flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, + .slots = PLySubtransaction_slots, }; static PyTypeObject *PLy_SubtransactionType; diff --git a/src/test/modules/test_aio/test_aio.c b/src/test/modules/test_aio/test_aio.c index 5cdfb89210b28..c55cf6c0aac05 100644 --- a/src/test/modules/test_aio/test_aio.c +++ b/src/test/modules/test_aio/test_aio.c @@ -42,9 +42,9 @@ typedef struct InjIoErrorState bool short_read_result_set; int short_read_result; -} InjIoErrorState; +} InjIoErrorState; -static InjIoErrorState * inj_io_error_state; +static InjIoErrorState *inj_io_error_state; /* Shared memory init callbacks */ static shmem_request_hook_type prev_shmem_request_hook = NULL; diff --git a/src/tools/pgindent/pgindent b/src/tools/pgindent/pgindent index 54e138b598dfe..b7d718089248e 100755 --- a/src/tools/pgindent/pgindent +++ b/src/tools/pgindent/pgindent @@ -73,11 +73,14 @@ if ($sourcedir) # might make them so. For the moment we just hardwire a list of names # to add and a list of names to exclude; eventually this may need to be # easier to configure. Note that the typedefs need trailing newlines. 
-my @additional = ("bool\n"); +my @additional = map { "$_\n" } qw( + bool regex_t regmatch_t regoff +); my %excluded = map { +"$_\n" => 1 } qw( - ANY FD_SET U abs allocfunc boolean date digit ilist interval iterator other - pointer printfunc reference string timestamp type wrap + FD_SET LookupSet boolean date duration + element_type inquiry iterator other + pointer reference rep string timestamp type wrap ); # globals diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list index a8346cda633ac..32d6e718adca0 100644 --- a/src/tools/pgindent/typedefs.list +++ b/src/tools/pgindent/typedefs.list @@ -6,6 +6,7 @@ ASN1_INTEGER ASN1_OBJECT ASN1_OCTET_STRING ASN1_STRING +ATAlterConstraint AV A_ArrayExpr A_Const @@ -47,7 +48,6 @@ AggSplit AggState AggStatePerAgg AggStatePerGroup -AggStatePerGroupData AggStatePerHash AggStatePerPhase AggStatePerTrans @@ -161,7 +161,6 @@ ArrayType AsyncQueueControl AsyncQueueEntry AsyncRequest -ATAlterConstraint AttInMetadata AttStatsSlot AttoptCacheEntry @@ -174,8 +173,8 @@ AttrNumber AttributeOpts AuthRequest AuthToken -AutoPrewarmSharedState AutoPrewarmReadStreamData +AutoPrewarmSharedState AutoVacOpts AutoVacuumShmemStruct AutoVacuumWorkItem @@ -222,7 +221,6 @@ BTScanInsertData BTScanKeyPreproc BTScanOpaque BTScanOpaqueData -BTScanPos BTScanPosData BTScanPosItem BTShared @@ -270,8 +268,8 @@ BitmapAndPath BitmapAndState BitmapHeapPath BitmapHeapScan -BitmapHeapScanInstrumentation BitmapHeapScanDesc +BitmapHeapScanInstrumentation BitmapHeapScanState BitmapIndexScan BitmapIndexScanState @@ -341,8 +339,8 @@ BufFile Buffer BufferAccessStrategy BufferAccessStrategyType -BufferCacheNumaRec BufferCacheNumaContext +BufferCacheNumaRec BufferCachePagesContext BufferCachePagesRec BufferDesc @@ -382,6 +380,9 @@ CTEMaterialize CTESearchClause CURL CURLM +CURLMcode +CURLMsg +CURLcode CURLoption CV CachedExpression @@ -628,6 +629,7 @@ DefElem DefElemAction DefaultACLInfo DefineStmt +DefnDumperPtr DeleteStmt DependencyGenerator DependencyGeneratorData @@ -677,9 +679,8 @@ DumpableObjectType DumpableObjectWithAcl DynamicFileList DynamicZoneAbbrev -EC_KEY -ECDerivesKey ECDerivesEntry +ECDerivesKey EDGE ENGINE EOM_flatten_into_method @@ -761,10 +762,12 @@ ExpandedRange ExpandedRecordFieldInfo ExpandedRecordHeader ExplainDirectModify_function +ExplainExtensionOption ExplainForeignModify_function ExplainForeignScan_function ExplainFormat ExplainOneQuery_hook_type +ExplainOptionHandler ExplainSerializeOption ExplainState ExplainStmt @@ -792,6 +795,7 @@ FDWCollateState FD_SET FILE FILETIME +FPI FSMAddress FSMPage FSMPageData @@ -806,7 +810,6 @@ FieldSelect FieldStore File FileBackupMethod -FileCopyMethod FileFdwExecutionState FileFdwPlanState FileNameMap @@ -1190,6 +1193,7 @@ HeapCheckContext HeapCheckReadStreamData HeapPageFreeze HeapScanDesc +HeapScanDescData HeapTuple HeapTupleData HeapTupleFields @@ -1249,6 +1253,7 @@ IndexClause IndexClauseSet IndexDeleteCounts IndexDeletePrefetchState +IndexDoCheckCallback IndexElem IndexFetchHeapData IndexFetchTableData @@ -1279,13 +1284,14 @@ InheritableSocket InitSampleScan_function InitializeDSMForeignScan_function InitializeWorkerForeignScan_function +InjIoErrorState InjectionPointCacheEntry InjectionPointCallback InjectionPointCondition InjectionPointConditionType InjectionPointEntry -InjectionPointsCtl InjectionPointSharedState +InjectionPointsCtl InlineCodeBlock InsertStmt Instrumentation @@ -1302,7 +1308,6 @@ IntoClause InvalMessageArray InvalidationInfo InvalidationMsgsGroup -IoMethod IoMethodOps IpcMemoryId 
IpcMemoryKey @@ -1492,8 +1497,7 @@ LLVMOrcResourceTrackerRef LLVMOrcSymbolStringPoolRef LLVMOrcThreadSafeContextRef LLVMOrcThreadSafeModuleRef -LLVMPassManagerBuilderRef -LLVMPassManagerRef +LLVMPassBuilderOptionsRef LLVMTargetMachineRef LLVMTargetRef LLVMTypeRef @@ -1563,6 +1567,7 @@ LoadStmt LocalBufferLookupEnt LocalPgBackendStatus LocalTransactionId +Location LocationIndex LocationLen LockAcquireResult @@ -1582,7 +1587,6 @@ LockTupleMode LockViewRecurse_context LockWaitPolicy LockingClause -LogConnectionOption LogOpts LogStmtLevel LogicalDecodeBeginCB @@ -1633,6 +1637,7 @@ LogicalSlotInfo LogicalSlotInfoArr LogicalTape LogicalTapeSet +LookupSet LsnReadQueue LsnReadQueueNextFun LsnReadQueueNextStatus @@ -1657,8 +1662,8 @@ ManyTestResourceKind Material MaterialPath MaterialState -MdfdVec MdPathStr +MdfdVec Memoize MemoizeEntry MemoizeInstrumentation @@ -1672,6 +1677,7 @@ MemoryContextCallback MemoryContextCallbackFunction MemoryContextCounters MemoryContextData +MemoryContextId MemoryContextMethodID MemoryContextMethods MemoryStatsPrintFunc @@ -1765,6 +1771,7 @@ NumericSortSupport NumericSumAccum NumericVar OAuthValidatorCallbacks +OAuthValidatorModuleInit OM_uint32 OP OSAPerGroupState @@ -1834,7 +1841,6 @@ PGCALL2 PGCRYPTO_SHA_t PGChecksummablePage PGContextVisibility -PGErrorVerbosity PGEvent PGEventConnDestroy PGEventConnReset @@ -1904,7 +1910,6 @@ PLpgSQL_exception PLpgSQL_exception_block PLpgSQL_execstate PLpgSQL_expr -PLpgSQL_func_hashkey PLpgSQL_function PLpgSQL_getdiag_kind PLpgSQL_if_elsif @@ -2155,10 +2160,10 @@ PermutationStepBlockerType PgAioBackend PgAioCtl PgAioHandle -PgAioHandleCallbackID -PgAioHandleCallbackStage PgAioHandleCallbackComplete +PgAioHandleCallbackID PgAioHandleCallbackReport +PgAioHandleCallbackStage PgAioHandleCallbacks PgAioHandleCallbacksEntry PgAioHandleFlags @@ -2203,9 +2208,9 @@ PgStatShared_Common PgStatShared_Database PgStatShared_Function PgStatShared_HashEntry +PgStatShared_IO PgStatShared_InjectionPoint PgStatShared_InjectionPointFixed -PgStatShared_IO PgStatShared_Relation PgStatShared_ReplSlot PgStatShared_SLRU @@ -2226,7 +2231,6 @@ PgStat_FunctionCallUsage PgStat_FunctionCounts PgStat_HashKey PgStat_IO -PgStat_Kind PgStat_KindInfo PgStat_LocalState PgStat_PendingDroppedStatsItem @@ -2354,12 +2358,12 @@ PushFilter PushFilterOps PushFunction PyCFunction -PyMappingMethods PyMethodDef PyModuleDef PyObject -PySequenceMethods PyTypeObject +PyType_Slot +PyType_Spec Py_ssize_t QPRS_STATE QTN2QTState @@ -2473,6 +2477,7 @@ RelOptInfo RelOptKind RelPathStr RelStatsInfo +RelSyncCallbackFunction RelToCheck RelToCluster RelabelType @@ -2625,7 +2630,6 @@ SQLDropObject SQLFunctionCache SQLFunctionCachePtr SQLFunctionHashEntry -SQLFunctionLink SQLFunctionParseInfo SQLFunctionParseInfoPtr SQLValueFunction @@ -2637,6 +2641,7 @@ STARTUPINFO STRLEN SV SYNCHRONIZATION_BARRIER +SYSTEM_INFO SampleScan SampleScanGetSampleSize_function SampleScanState @@ -2724,6 +2729,7 @@ SharedIncrementalSortInfo SharedIndexScanInstrumentation SharedInvalCatalogMsg SharedInvalCatcacheMsg +SharedInvalRelSyncMsg SharedInvalRelcacheMsg SharedInvalRelmapMsg SharedInvalSmgrMsg @@ -2763,7 +2769,7 @@ SingleBoundSortItem Size SkipPages SkipSupport -SkipSupportData +SkipSupportIncDec SlabBlock SlabContext SlabSlot @@ -2989,6 +2995,7 @@ TarMethodData TarMethodFile TargetEntry TclExceptionNameMap +Tcl_CmdInfo Tcl_DString Tcl_FileProc Tcl_HashEntry @@ -2996,6 +3003,7 @@ Tcl_HashTable Tcl_Interp Tcl_NotifierProcs Tcl_Obj +Tcl_Size Tcl_Time TempNamespaceStatus TestDSMRegistryStruct @@ -3141,6 
+3149,7 @@ UnicodeNormalizationQC Unique UniquePath UniquePathMethod +UniqueRelInfo UniqueState UnlistenStmt UnresolvedTup @@ -3171,8 +3180,11 @@ VacuumRelation VacuumStmt ValidIOData ValidateIndexState -ValidatorModuleState ValidatorModuleResult +ValidatorModuleState +ValidatorShutdownCB +ValidatorStartupCB +ValidatorValidateCB ValuesScan ValuesScanState Var @@ -3377,10 +3389,9 @@ _resultmap _stringlist access_vector_t acquireLocksOnSubLinks_context -add_nulling_relids_context addFkConstraintSides +add_nulling_relids_context adjust_appendrel_attrs_context -allocfunc amadjustmembers_function ambeginscan_function ambuild_function @@ -3392,6 +3403,7 @@ amcostestimate_function amendscan_function amestimateparallelscan_function amgetbitmap_function +amgettreeheight_function amgettuple_function aminitparallelscan_function aminsert_function @@ -3402,13 +3414,27 @@ amparallelrescan_function amproperty_function amrescan_function amrestrpos_function -amtranslate_strategy_function amtranslatestrategy; -amtranslate_cmptype_function amtranslatecmptype; +amtranslate_cmptype_function +amtranslate_strategy_function amvacuumcleanup_function amvalidate_function array_iter array_unnest_fctx assign_collations_context +astreamer +astreamer_archive_context +astreamer_extractor +astreamer_gzip_decompressor +astreamer_gzip_writer +astreamer_lz4_frame +astreamer_member +astreamer_ops +astreamer_plain_writer +astreamer_recovery_injector +astreamer_tar_archiver +astreamer_tar_parser +astreamer_verify +astreamer_zstd_frame auth_password_hook_typ autovac_table av_relation @@ -3435,20 +3461,6 @@ bbsink_shell bbsink_state bbsink_throttle bbsink_zstd -astreamer -astreamer_archive_context -astreamer_extractor -astreamer_gzip_decompressor -astreamer_gzip_writer -astreamer_lz4_frame -astreamer_member -astreamer_ops -astreamer_plain_writer -astreamer_recovery_injector -astreamer_tar_archiver -astreamer_tar_parser -astreamer_verify -astreamer_zstd_frame bgworker_main_type bh_node_type binaryheap @@ -3488,6 +3500,13 @@ colormaprange compare_context config_handle config_var_value +conn_errorMessage_func +conn_oauth_client_id_func +conn_oauth_client_secret_func +conn_oauth_discovery_uri_func +conn_oauth_issuer_id_func +conn_oauth_scope_func +conn_sasl_state_func contain_aggs_of_level_context contain_placeholder_references_context convert_testexpr_context @@ -3504,6 +3523,9 @@ create_upper_paths_hook_type createdb_failure_params crosstab_HashEnt crosstab_cat_desc +curl_infotype +curl_socket_t +curl_version_info_data datapagemap_iterator_t datapagemap_t dateKEY @@ -3515,9 +3537,8 @@ deparse_columns deparse_context deparse_expr_cxt deparse_namespace -destructor +derives_hash dev_t -digit disassembledLeaf dlist_head dlist_iter @@ -3555,18 +3576,23 @@ dsm_handle dsm_op dsm_segment dsm_segment_detach_callback +duration eLogType ean13 eary ec_matches_callback_type ec_member_foreign_arg ec_member_matches_arg +element_type emit_log_hook_type eval_const_expressions_context exec_thread_arg execution_state exit_function explain_get_index_name_hook_type +explain_per_node_hook_type +explain_per_plan_hook_type +explain_validate_options_hook_type f_smgr fasthash_state fd_set @@ -3649,7 +3675,6 @@ gss_key_value_set_desc gss_name_t gtrgm_consistent_cache gzFile -hashfunc hbaPort heap_page_items_state help_handler @@ -3671,17 +3696,21 @@ init_function inline_cte_walker_context inline_error_callback_arg ino_t +inquiry instr_time int128 int16 int16KEY +int16_t int2vector int32 int32KEY int32_t int64 int64KEY +int64_t int8 +int8_t int8x16_t 
internalPQconninfoOption intptr_t @@ -3713,6 +3742,7 @@ lclContext lclTocEntry leafSegmentInfo leaf_item +libpq_gettext_func libpq_source line_t lineno_t @@ -3769,6 +3799,7 @@ mxact mxtruncinfo needs_fmgr_hook_type network_sortsupport_state +nl_item nodeitem normal_rand_fctx nsphash_hash @@ -3786,6 +3817,7 @@ openssl_tls_init_hook_typ ossl_EVP_cipher_func other output_type +overexplain_options pagetable_hash pagetable_iterator pairingheap @@ -3805,7 +3837,6 @@ pg_atomic_flag pg_atomic_uint32 pg_atomic_uint64 pg_be_sasl_mech -pg_case_map pg_category_range pg_checksum_context pg_checksum_raw_context @@ -3829,7 +3860,6 @@ pg_funcptr_t pg_gssinfo pg_hmac_ctx pg_hmac_errno -pg_int64 pg_local_to_utf_combined pg_locale_t pg_mb_radix_tree @@ -3898,7 +3928,8 @@ plperl_query_entry plpgsql_CastExprHashEntry plpgsql_CastHashEntry plpgsql_CastHashKey -plpgsql_HashEnt +plpgsql_expr_walker_callback +plpgsql_stmt_walker_callback pltcl_call_state pltcl_interp_desc pltcl_proc_desc @@ -3921,7 +3952,6 @@ printTextLineFormat printTextLineWrap printTextRule printXheaderWidthType -printfunc priv_map process_file_callback_t process_sublinks_context @@ -3961,12 +3991,9 @@ reduce_outer_joins_pass1_state reduce_outer_joins_pass2_state reference regex_arc_t -regex_t regexp regexp_matches_ctx registered_buffer -regmatch_t -regoff_t regproc relopt_bool relopt_enum @@ -3985,6 +4012,7 @@ remoteConnHashEnt remoteDep remove_nulling_relids_context rendezvousHashEntry +rep replace_rte_variables_callback replace_rte_variables_context report_error_fn @@ -4003,6 +4031,7 @@ rt_node_class_test_elem rt_radix_tree saophash_hash save_buffer +save_locale_t scram_state scram_state_enum script_error_callback_arg @@ -4010,6 +4039,8 @@ security_class_t sem_t sepgsql_context_info_t sequence_magic +set_conn_altsock_func +set_conn_oauth_token_func set_join_pathlist_hook_type set_rel_pathlist_hook_type shared_ts_iter @@ -4130,6 +4161,7 @@ uint32_t uint32x4_t uint64 uint64_t +uint64x2_t uint8 uint8_t uint8x16_t @@ -4139,7 +4171,6 @@ unicodeStyleColumnFormat unicodeStyleFormat unicodeStyleRowFormat unicode_linestyle -UniqueRelInfo unit_conversion unlogged_relation_entry utf_local_conversion_func @@ -4282,6 +4313,7 @@ xmlGenericErrorFunc xmlNodePtr xmlNodeSetPtr xmlParserCtxtPtr +xmlParserErrors xmlParserInputPtr xmlSaveCtxt xmlSaveCtxtPtr @@ -4302,6 +4334,3 @@ yyscan_t z_stream z_streamp zic_t -ExplainExtensionOption -ExplainOptionHandler -overexplain_options From fd385c4c62d1762c88c0cdb145f354c834875dce Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Sun, 15 Jun 2025 13:11:04 -0400 Subject: [PATCH 010/181] Add commit b27644bad to .git-blame-ignore-revs. --- .git-blame-ignore-revs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index eae70911df8d0..8048afd1a80fa 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -14,6 +14,9 @@ # # $ git log --pretty=format:"%H # %cd%n# %s" $PGINDENTGITHASH -1 --date=iso +b27644bade0348d0dafd3036c47880a349fe9332 # 2025-06-15 13:04:24 -0400 +# Sync typedefs.list with the buildfarm. + 4672b6223910687b2aab075bcd2dd54ce90d5171 # 2025-06-01 14:55:24 -0400 # Run pgindent on the previous commit. From f83f14881c7a09198863cb46033af8959a462d8b Mon Sep 17 00:00:00 2001 From: John Naylor Date: Mon, 16 Jun 2025 09:27:15 +0700 Subject: [PATCH 011/181] Workaround code generation bug in clang At optimization level -O0, builds on recent clang fail to produce the correct CRC32C with our AVX-512 implementation. 
For now, just disable the runtime check for clang at -O0. When this is fixed upstream and we know the extent of the breakage, we can adjust to be version-specific. Reported-by: Soumyadeep Chakraborty Reported-by: Andy Fan Tested-by: Andy Fan Discussion: https://postgr.es/m/CAE-ML%2B-OV6p9uvCFBcSQjZUEh__y0h-KjN%2BBseyGJHt7u8EP%2Bw%40mail.gmail.com Discussion: https://postgr.es/m/87o6uqd3iv.fsf%40163.com --- src/port/pg_crc32c_sse42_choose.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/port/pg_crc32c_sse42_choose.c b/src/port/pg_crc32c_sse42_choose.c index 74d2421ba2be9..802e47788c10c 100644 --- a/src/port/pg_crc32c_sse42_choose.c +++ b/src/port/pg_crc32c_sse42_choose.c @@ -95,7 +95,9 @@ pg_comp_crc32c_choose(pg_crc32c crc, const void *data, size_t len) __cpuidex(exx, 7, 0); #endif -#ifdef USE_AVX512_CRC32C_WITH_RUNTIME_CHECK +#if defined(__clang__) && !defined(__OPTIMIZE__) + /* Some versions of clang are broken at -O0 */ +#elif defined(USE_AVX512_CRC32C_WITH_RUNTIME_CHECK) if (exx[2] & (1 << 10) && /* VPCLMULQDQ */ exx[1] & (1 << 31)) /* AVX512-VL */ pg_comp_crc32c = pg_comp_crc32c_avx512; From a876464abc73eccc04543da6a12eb5b1ffcd6dfd Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Mon, 16 Jun 2025 11:14:39 +0200 Subject: [PATCH 012/181] Message style improvements Some message style improvements in new code, and some small refactorings to make translations easier. --- src/bin/pg_basebackup/pg_createsubscriber.c | 12 +++--- src/bin/pg_basebackup/pg_recvlogical.c | 2 +- .../t/040_pg_createsubscriber.pl | 4 +- src/bin/pg_dump/pg_dump.c | 8 ++-- src/bin/pg_dump/pg_dumpall.c | 8 ++-- src/bin/pg_dump/pg_restore.c | 43 +++++++++++-------- src/bin/pg_dump/t/001_basic.pl | 4 +- src/bin/pg_dump/t/006_pg_dumpall.pl | 2 +- src/bin/pg_upgrade/check.c | 4 +- src/bin/pg_upgrade/relfilenumber.c | 12 +++--- src/bin/pg_upgrade/t/005_char_signedness.pl | 2 +- src/bin/pg_upgrade/task.c | 5 +-- src/bin/psql/command.c | 6 +-- src/bin/psql/describe.c | 5 ++- src/bin/psql/help.c | 6 +-- src/bin/psql/variables.c | 10 ++--- 16 files changed, 70 insertions(+), 63 deletions(-) diff --git a/src/bin/pg_basebackup/pg_createsubscriber.c b/src/bin/pg_basebackup/pg_createsubscriber.c index f65acc7cb1141..c43c0cbbba5a6 100644 --- a/src/bin/pg_basebackup/pg_createsubscriber.c +++ b/src/bin/pg_basebackup/pg_createsubscriber.c @@ -247,14 +247,14 @@ usage(void) printf(_(" %s [OPTION]...\n"), progname); printf(_("\nOptions:\n")); printf(_(" -a, --all create subscriptions for all databases except template\n" - " databases or databases that don't allow connections\n")); + " databases and databases that don't allow connections\n")); printf(_(" -d, --database=DBNAME database in which to create a subscription\n")); printf(_(" -D, --pgdata=DATADIR location for the subscriber data directory\n")); printf(_(" -n, --dry-run dry run, just show what would be done\n")); printf(_(" -p, --subscriber-port=PORT subscriber port number (default %s)\n"), DEFAULT_SUB_PORT); printf(_(" -P, --publisher-server=CONNSTR publisher connection string\n")); printf(_(" -R, --remove=OBJECTTYPE remove all objects of the specified type from specified\n" - " databases on the subscriber; accepts: publications\n")); + " databases on the subscriber; accepts: \"%s\"\n"), "publications"); printf(_(" -s, --socketdir=DIR socket directory to use (default current dir.)\n")); printf(_(" -t, --recovery-timeout=SECS seconds to wait for recovery to end\n")); printf(_(" -T, --enable-two-phase enable two-phase commit for all subscriptions\n")); 
@@ -973,7 +973,7 @@ check_publisher(const struct LogicalRepInfo *dbinfo) pg_log_warning("two_phase option will not be enabled for replication slots"); pg_log_warning_detail("Subscriptions will be created with the two_phase option disabled. " "Prepared transactions will be replicated at COMMIT PREPARED."); - pg_log_warning_hint("You can use --enable-two-phase switch to enable two_phase."); + pg_log_warning_hint("You can use the command-line option --enable-two-phase to enable two_phase."); } /* @@ -2143,7 +2143,7 @@ main(int argc, char **argv) if (!simple_string_list_member(&opt.objecttypes_to_remove, optarg)) simple_string_list_append(&opt.objecttypes_to_remove, optarg); else - pg_fatal("object type \"%s\" is specified more than once for -R/--remove", optarg); + pg_fatal("object type \"%s\" specified more than once for -R/--remove", optarg); break; case 's': opt.socket_dir = pg_strdup(optarg); @@ -2214,7 +2214,7 @@ main(int argc, char **argv) if (bad_switch) { - pg_log_error("%s cannot be used with -a/--all", bad_switch); + pg_log_error("options %s and -a/--all cannot be used together", bad_switch); pg_log_error_hint("Try \"%s --help\" for more information.", progname); exit(1); } @@ -2341,7 +2341,7 @@ main(int argc, char **argv) else { pg_log_error("invalid object type \"%s\" specified for -R/--remove", cell->val); - pg_log_error_hint("The valid option is: \"publications\""); + pg_log_error_hint("The valid value is: \"%s\"", "publications"); exit(1); } } diff --git a/src/bin/pg_basebackup/pg_recvlogical.c b/src/bin/pg_basebackup/pg_recvlogical.c index e6810efe5f0d7..4b4b545917d7d 100644 --- a/src/bin/pg_basebackup/pg_recvlogical.c +++ b/src/bin/pg_basebackup/pg_recvlogical.c @@ -91,7 +91,7 @@ usage(void) printf(_("\nOptions:\n")); printf(_(" -E, --endpos=LSN exit after receiving the specified LSN\n")); printf(_(" --failover enable replication slot synchronization to standby servers when\n" - " creating a slot\n")); + " creating a replication slot\n")); printf(_(" -f, --file=FILE receive log into this file, - for stdout\n")); printf(_(" -F --fsync-interval=SECS\n" " time between fsyncs to the output file (default: %d)\n"), (fsync_interval / 1000)); diff --git a/src/bin/pg_basebackup/t/040_pg_createsubscriber.pl b/src/bin/pg_basebackup/t/040_pg_createsubscriber.pl index 2d532fee567dd..df4924023fdf2 100644 --- a/src/bin/pg_basebackup/t/040_pg_createsubscriber.pl +++ b/src/bin/pg_basebackup/t/040_pg_createsubscriber.pl @@ -399,7 +399,7 @@ sub generate_db '--database' => $db1, '--all', ], - qr/--database cannot be used with -a\/--all/, + qr/options --database and -a\/--all cannot be used together/, 'fail if --database is used with --all'); # run pg_createsubscriber with '--publication' and '--all' and verify @@ -416,7 +416,7 @@ sub generate_db '--all', '--publication' => 'pub1', ], - qr/--publication cannot be used with -a\/--all/, + qr/options --publication and -a\/--all cannot be used together/, 'fail if --publication is used with --all'); # run pg_createsubscriber with '--all' option diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c index 37432e66efd7c..7bc0724cd301f 100644 --- a/src/bin/pg_dump/pg_dump.c +++ b/src/bin/pg_dump/pg_dump.c @@ -6936,7 +6936,7 @@ getRelationStatistics(Archive *fout, DumpableObject *rel, int32 relpages, info->section = SECTION_POST_DATA; break; default: - pg_fatal("cannot dump statistics for relation kind '%c'", + pg_fatal("cannot dump statistics for relation kind \"%c\"", info->relkind); } @@ -9461,7 +9461,7 @@ getTableAttrs(Archive *fout, 
TableInfo *tblinfo, int numTables) int i_consrc; int i_conislocal; - pg_log_info("finding invalid not null constraints"); + pg_log_info("finding invalid not-null constraints"); resetPQExpBuffer(q); appendPQExpBuffer(q, @@ -10855,7 +10855,7 @@ dumpRelationStats_dumper(Archive *fout, const void *userArg, const TocEntry *te) expected_te = expected_te->next; if (te != expected_te) - pg_fatal("stats dumped out of order (current: %d %s %s) (expected: %d %s %s)", + pg_fatal("statistics dumped out of order (current: %d %s %s, expected: %d %s %s)", te->dumpId, te->desc, te->tag, expected_te->dumpId, expected_te->desc, expected_te->tag); @@ -10996,7 +10996,7 @@ dumpRelationStats_dumper(Archive *fout, const void *userArg, const TocEntry *te) appendStringLiteralAH(out, rsinfo->dobj.name, fout); if (PQgetisnull(res, rownum, i_attname)) - pg_fatal("attname cannot be NULL"); + pg_fatal("unexpected null attname"); attname = PQgetvalue(res, rownum, i_attname); /* diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c index 7f9c302b719ec..b1f388cb39160 100644 --- a/src/bin/pg_dump/pg_dumpall.c +++ b/src/bin/pg_dump/pg_dumpall.c @@ -525,7 +525,7 @@ main(int argc, char *argv[]) OPF = fopen(global_path, PG_BINARY_W); if (!OPF) - pg_fatal("could not open \"%s\": %m", global_path); + pg_fatal("could not open file \"%s\": %m", global_path); } else if (filename) { @@ -1659,14 +1659,14 @@ dumpDatabases(PGconn *conn, ArchiveFormat archDumpFormat) /* Create a subdirectory with 'databases' name under main directory. */ if (mkdir(db_subdir, pg_dir_create_mode) != 0) - pg_fatal("could not create subdirectory \"%s\": %m", db_subdir); + pg_fatal("could not create directory \"%s\": %m", db_subdir); snprintf(map_file_path, MAXPGPATH, "%s/map.dat", filename); /* Create a map file (to store dboid and dbname) */ map_file = fopen(map_file_path, PG_BINARY_W); if (!map_file) - pg_fatal("could not open map file: %s", strerror(errno)); + pg_fatal("could not open file \"%s\": %m", map_file_path); } for (i = 0; i < PQntuples(res); i++) @@ -1976,7 +1976,7 @@ parseDumpFormat(const char *format) else if (pg_strcasecmp(format, "tar") == 0) archDumpFormat = archTar; else - pg_fatal("unrecognized archive format \"%s\"; please specify \"c\", \"d\", \"p\", or \"t\"", + pg_fatal("unrecognized output format \"%s\"; please specify \"c\", \"d\", \"p\", or \"t\"", format); return archDumpFormat; diff --git a/src/bin/pg_dump/pg_restore.c b/src/bin/pg_dump/pg_restore.c index c4b6214d618cd..6ef789cb06d63 100644 --- a/src/bin/pg_dump/pg_restore.c +++ b/src/bin/pg_dump/pg_restore.c @@ -523,7 +523,7 @@ main(int argc, char **argv) */ if (!globals_only && opts->createDB != 1) { - pg_log_error("-C/--create option should be specified when restoring an archive created by pg_dumpall"); + pg_log_error("option -C/--create must be specified when restoring an archive created by pg_dumpall"); pg_log_error_hint("Try \"%s --help\" for more information.", progname); pg_log_error_hint("Individual databases can be restored using their specific archives."); exit_nicely(1); @@ -557,7 +557,7 @@ main(int argc, char **argv) if (conn) PQfinish(conn); - pg_log_info("database restoring skipped as -g/--globals-only option was specified"); + pg_log_info("database restoring skipped because option -g/--globals-only was specified"); } else { @@ -725,8 +725,8 @@ usage(const char *progname) printf(_(" --role=ROLENAME do SET ROLE before restore\n")); printf(_("\n" - "The options -I, -n, -N, -P, -t, -T, --section, and --exclude-database can be combined\n" - "and 
specified multiple times to select multiple objects.\n")); + "The options -I, -n, -N, -P, -t, -T, --section, and --exclude-database can be\n" + "combined and specified multiple times to select multiple objects.\n")); printf(_("\nIf no input file name is supplied, then standard input is used.\n\n")); printf(_("Report bugs to <%s>.\n"), PACKAGE_BUGREPORT); printf(_("%s home page: <%s>\n"), PACKAGE_NAME, PACKAGE_URL); @@ -946,7 +946,7 @@ get_dbnames_list_to_restore(PGconn *conn, query = createPQExpBuffer(); if (!conn) - pg_log_info("considering PATTERN as NAME for --exclude-database option as no db connection while doing pg_restore."); + pg_log_info("considering PATTERN as NAME for --exclude-database option as no database connection while doing pg_restore"); /* * Process one by one all dbnames and if specified to skip restoring, then @@ -992,7 +992,7 @@ get_dbnames_list_to_restore(PGconn *conn, if ((PQresultStatus(res) == PGRES_TUPLES_OK) && PQntuples(res)) { skip_db_restore = true; - pg_log_info("database \"%s\" matches exclude pattern: \"%s\"", dbidname->str, pat_cell->val); + pg_log_info("database name \"%s\" matches exclude pattern \"%s\"", dbidname->str, pat_cell->val); } PQclear(res); @@ -1048,7 +1048,7 @@ get_dbname_oid_list_from_mfile(const char *dumpdirpath, SimplePtrList *dbname_oi */ if (!file_exists_in_directory(dumpdirpath, "map.dat")) { - pg_log_info("database restoring is skipped as \"map.dat\" is not present in \"%s\"", dumpdirpath); + pg_log_info("database restoring is skipped because file \"%s\" does not exist in directory \"%s\"", "map.dat", dumpdirpath); return 0; } @@ -1058,7 +1058,7 @@ get_dbname_oid_list_from_mfile(const char *dumpdirpath, SimplePtrList *dbname_oi pfile = fopen(map_file_path, PG_BINARY_R); if (pfile == NULL) - pg_fatal("could not open \"%s\": %m", map_file_path); + pg_fatal("could not open file \"%s\": %m", map_file_path); initStringInfo(&linebuf); @@ -1086,10 +1086,10 @@ get_dbname_oid_list_from_mfile(const char *dumpdirpath, SimplePtrList *dbname_oi /* Report error and exit if the file has any corrupted data. */ if (!OidIsValid(db_oid) || namelen <= 1) - pg_fatal("invalid entry in \"%s\" at line: %d", map_file_path, + pg_fatal("invalid entry in file \"%s\" on line %d", map_file_path, count + 1); - pg_log_info("found database \"%s\" (OID: %u) in \"%s\"", + pg_log_info("found database \"%s\" (OID: %u) in file \"%s\"", dbname, db_oid, map_file_path); dbidname = pg_malloc(offsetof(DbOidName, str) + namelen + 1); @@ -1142,11 +1142,14 @@ restore_all_databases(PGconn *conn, const char *dumpdirpath, if (dbname_oid_list.head == NULL) return process_global_sql_commands(conn, dumpdirpath, opts->filename); - pg_log_info("found %d database names in \"map.dat\"", num_total_db); + pg_log_info(ngettext("found %d database name in \"%s\"", + "found %d database names in \"%s\"", + num_total_db), + num_total_db, "map.dat"); if (!conn) { - pg_log_info("trying to connect database \"postgres\""); + pg_log_info("trying to connect to database \"%s\"", "postgres"); conn = ConnectDatabase("postgres", NULL, opts->cparams.pghost, opts->cparams.pgport, opts->cparams.username, TRI_DEFAULT, @@ -1155,7 +1158,7 @@ restore_all_databases(PGconn *conn, const char *dumpdirpath, /* Try with template1. 
*/ if (!conn) { - pg_log_info("trying to connect database \"template1\""); + pg_log_info("trying to connect to database \"%s\"", "template1"); conn = ConnectDatabase("template1", NULL, opts->cparams.pghost, opts->cparams.pgport, opts->cparams.username, TRI_DEFAULT, @@ -1179,7 +1182,9 @@ restore_all_databases(PGconn *conn, const char *dumpdirpath, /* Exit if no db needs to be restored. */ if (dbname_oid_list.head == NULL || num_db_restore == 0) { - pg_log_info("no database needs to restore out of %d databases", num_total_db); + pg_log_info(ngettext("no database needs restoring out of %d database", + "no database needs restoring out of %d databases", num_total_db), + num_total_db); return n_errors_total; } @@ -1314,7 +1319,7 @@ process_global_sql_commands(PGconn *conn, const char *dumpdirpath, const char *o pfile = fopen(global_file_path, PG_BINARY_R); if (pfile == NULL) - pg_fatal("could not open \"%s\": %m", global_file_path); + pg_fatal("could not open file \"%s\": %m", global_file_path); /* * If outfile is given, then just copy all global.dat file data into @@ -1354,15 +1359,17 @@ process_global_sql_commands(PGconn *conn, const char *dumpdirpath, const char *o break; default: n_errors++; - pg_log_error("could not execute query: \"%s\" \nCommand was: \"%s\"", PQerrorMessage(conn), sqlstatement.data); + pg_log_error("could not execute query: %s", PQerrorMessage(conn)); + pg_log_error_detail("Command was: %s", sqlstatement.data); } PQclear(result); } /* Print a summary of ignored errors during global.dat. */ if (n_errors) - pg_log_warning("ignored %d errors in \"%s\"", n_errors, global_file_path); - + pg_log_warning(ngettext("ignored %d error in file \"%s\"", + "ignored %d errors in file \"%s\"", n_errors), + n_errors, global_file_path); fclose(pfile); return n_errors; diff --git a/src/bin/pg_dump/t/001_basic.pl b/src/bin/pg_dump/t/001_basic.pl index 84ca25e17d636..0be9f6dd538fd 100644 --- a/src/bin/pg_dump/t/001_basic.pl +++ b/src/bin/pg_dump/t/001_basic.pl @@ -261,6 +261,6 @@ command_fails_like( [ 'pg_dumpall', '--format', 'x' ], - qr/\Qpg_dumpall: error: unrecognized archive format "x";\E/, - 'pg_dumpall: unrecognized archive format'); + qr/\Qpg_dumpall: error: unrecognized output format "x";\E/, + 'pg_dumpall: unrecognized output format'); done_testing(); diff --git a/src/bin/pg_dump/t/006_pg_dumpall.pl b/src/bin/pg_dump/t/006_pg_dumpall.pl index 5acd49f1559d2..0ea02a3a4a940 100644 --- a/src/bin/pg_dump/t/006_pg_dumpall.pl +++ b/src/bin/pg_dump/t/006_pg_dumpall.pl @@ -365,7 +365,7 @@ "$tempdir/format_custom", '--format' => 'custom', '--file' => "$tempdir/error_test.sql", ], - qr/\Qpg_restore: error: -C\/--create option should be specified when restoring an archive created by pg_dumpall\E/, + qr/\Qpg_restore: error: option -C\/--create must be specified when restoring an archive created by pg_dumpall\E/, 'When -C is not used in pg_restore with dump of pg_dumpall'); # test case 2: When --list option is used with dump of pg_dumpall diff --git a/src/bin/pg_upgrade/check.c b/src/bin/pg_upgrade/check.c index 940fc77fc2e8c..81865cd3e4859 100644 --- a/src/bin/pg_upgrade/check.c +++ b/src/bin/pg_upgrade/check.c @@ -885,7 +885,7 @@ check_cluster_versions(void) */ if (GET_MAJOR_VERSION(old_cluster.major_version) >= 1800 && user_opts.char_signedness != -1) - pg_fatal("%s option cannot be used to upgrade from PostgreSQL %s and later.", + pg_fatal("The option %s cannot be used for upgrades from PostgreSQL %s and later.", "--set-char-signedness", "18"); check_ok(); @@ -1934,7 +1934,7 @@ 
check_for_unicode_update(ClusterInfo *cluster) { fclose(report.file); report_status(PG_WARNING, "warning"); - pg_log(PG_WARNING, "Your installation contains relations that may be affected by a new version of Unicode.\n" + pg_log(PG_WARNING, "Your installation contains relations that might be affected by a new version of Unicode.\n" "A list of potentially-affected relations is in the file:\n" " %s", report.path); } diff --git a/src/bin/pg_upgrade/relfilenumber.c b/src/bin/pg_upgrade/relfilenumber.c index 2959c07f0b8d1..8d8e816a01fa4 100644 --- a/src/bin/pg_upgrade/relfilenumber.c +++ b/src/bin/pg_upgrade/relfilenumber.c @@ -290,19 +290,19 @@ prepare_for_swap(const char *old_tablespace, Oid db_oid, /* Create directory for stuff that is moved aside. */ if (pg_mkdir_p(moved_tblspc, pg_dir_create_mode) != 0 && errno != EEXIST) - pg_fatal("could not create directory \"%s\"", moved_tblspc); + pg_fatal("could not create directory \"%s\": %m", moved_tblspc); /* Create directory for old catalog files. */ if (pg_mkdir_p(old_catalog_dir, pg_dir_create_mode) != 0) - pg_fatal("could not create directory \"%s\"", old_catalog_dir); + pg_fatal("could not create directory \"%s\": %m", old_catalog_dir); /* Move the new cluster's database directory aside. */ if (rename(new_db_dir, moved_db_dir) != 0) - pg_fatal("could not rename \"%s\" to \"%s\"", new_db_dir, moved_db_dir); + pg_fatal("could not rename directory \"%s\" to \"%s\": %m", new_db_dir, moved_db_dir); /* Move the old cluster's database directory into place. */ if (rename(old_db_dir, new_db_dir) != 0) - pg_fatal("could not rename \"%s\" to \"%s\"", old_db_dir, new_db_dir); + pg_fatal("could not rename directory \"%s\" to \"%s\": %m", old_db_dir, new_db_dir); return true; } @@ -390,7 +390,7 @@ swap_catalog_files(FileNameMap *maps, int size, const char *old_catalog_dir, snprintf(dest, sizeof(dest), "%s/%s", old_catalog_dir, de->d_name); if (rename(path, dest) != 0) - pg_fatal("could not rename \"%s\" to \"%s\": %m", path, dest); + pg_fatal("could not rename file \"%s\" to \"%s\": %m", path, dest); } if (errno) pg_fatal("could not read directory \"%s\": %m", new_db_dir); @@ -417,7 +417,7 @@ swap_catalog_files(FileNameMap *maps, int size, const char *old_catalog_dir, snprintf(dest, sizeof(dest), "%s/%s", new_db_dir, de->d_name); if (rename(path, dest) != 0) - pg_fatal("could not rename \"%s\" to \"%s\": %m", path, dest); + pg_fatal("could not rename file \"%s\" to \"%s\": %m", path, dest); /* * We don't fsync() the database files in the file synchronization diff --git a/src/bin/pg_upgrade/t/005_char_signedness.pl b/src/bin/pg_upgrade/t/005_char_signedness.pl index 17fa0d48b15c1..cd8cff6f5132d 100644 --- a/src/bin/pg_upgrade/t/005_char_signedness.pl +++ b/src/bin/pg_upgrade/t/005_char_signedness.pl @@ -65,7 +65,7 @@ $mode ], 1, - [qr/--set-char-signedness option cannot be used/], + [qr/option --set-char-signedness cannot be used/], [], '--set-char-signedness option cannot be used for upgrading from v18 or later' ); diff --git a/src/bin/pg_upgrade/task.c b/src/bin/pg_upgrade/task.c index a48d56913908d..ee0e245715215 100644 --- a/src/bin/pg_upgrade/task.c +++ b/src/bin/pg_upgrade/task.c @@ -192,8 +192,7 @@ start_conn(const ClusterInfo *cluster, UpgradeTaskSlot *slot) slot->conn = PQconnectStart(conn_opts.data); if (!slot->conn) - pg_fatal("failed to create connection with connection string: \"%s\"", - conn_opts.data); + pg_fatal("out of memory"); termPQExpBuffer(&conn_opts); } @@ -402,7 +401,7 @@ wait_on_slots(UpgradeTaskSlot *slots, int numslots) * If we 
found socket(s) to wait on, wait. */ if (select_loop(maxFd, &input, &output) == -1) - pg_fatal("select() failed: %m"); + pg_fatal("%s() failed: %m", "select"); /* * Mark which sockets appear to be ready. diff --git a/src/bin/psql/command.c b/src/bin/psql/command.c index e26c010d044ee..83e84a778411a 100644 --- a/src/bin/psql/command.c +++ b/src/bin/psql/command.c @@ -1949,7 +1949,7 @@ exec_command_gexec(PsqlScanState scan_state, bool active_branch) { if (PQpipelineStatus(pset.db) != PQ_PIPELINE_OFF) { - pg_log_error("\\gexec not allowed in pipeline mode"); + pg_log_error("\\%s not allowed in pipeline mode", "gexec"); clean_extended_state(); return PSQL_CMD_ERROR; } @@ -1975,7 +1975,7 @@ exec_command_gset(PsqlScanState scan_state, bool active_branch) if (PQpipelineStatus(pset.db) != PQ_PIPELINE_OFF) { - pg_log_error("\\gset not allowed in pipeline mode"); + pg_log_error("\\%s not allowed in pipeline mode", "gset"); clean_extended_state(); return PSQL_CMD_ERROR; } @@ -3287,7 +3287,7 @@ exec_command_watch(PsqlScanState scan_state, bool active_branch, if (PQpipelineStatus(pset.db) != PQ_PIPELINE_OFF) { - pg_log_error("\\watch not allowed in pipeline mode"); + pg_log_error("\\%s not allowed in pipeline mode", "watch"); clean_extended_state(); success = false; } diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c index 24e0100c9f0a8..dd25d2fe7b8a7 100644 --- a/src/bin/psql/describe.c +++ b/src/bin/psql/describe.c @@ -296,6 +296,7 @@ describeFunctions(const char *functypes, const char *func_pattern, char **arg_patterns, int num_arg_patterns, bool verbose, bool showSystem) { + const char *df_options = "anptwSx+"; bool showAggregate = strchr(functypes, 'a') != NULL; bool showNormal = strchr(functypes, 'n') != NULL; bool showProcedure = strchr(functypes, 'p') != NULL; @@ -310,9 +311,9 @@ describeFunctions(const char *functypes, const char *func_pattern, /* No "Parallel" column before 9.6 */ static const bool translate_columns_pre_96[] = {false, false, false, false, true, true, false, true, true, false, false, false, false}; - if (strlen(functypes) != strspn(functypes, "anptwSx+")) + if (strlen(functypes) != strspn(functypes, df_options)) { - pg_log_error("\\df only takes [anptwSx+] as options"); + pg_log_error("\\df only takes [%s] as options", df_options); return true; } diff --git a/src/bin/psql/help.c b/src/bin/psql/help.c index ce05b3a513255..db6adec8b692b 100644 --- a/src/bin/psql/help.c +++ b/src/bin/psql/help.c @@ -252,7 +252,8 @@ slashUsage(unsigned short int pager) HELP0(" \\dO[Sx+] [PATTERN] list collations\n"); HELP0(" \\dp[Sx] [PATTERN] list table, view, and sequence access privileges\n"); HELP0(" \\dP[itnx+] [PATTERN] list [only index/table] partitioned relations [n=nested]\n"); - HELP0(" \\drds[x] [ROLEPTRN [DBPTRN]] list per-database role settings\n"); + HELP0(" \\drds[x] [ROLEPTRN [DBPTRN]]\n" + " list per-database role settings\n"); HELP0(" \\drg[Sx] [PATTERN] list role grants\n"); HELP0(" \\dRp[x+] [PATTERN] list replication publications\n"); HELP0(" \\dRs[x+] [PATTERN] list replication subscriptions\n"); @@ -334,8 +335,7 @@ slashUsage(unsigned short int pager) HELP0(" \\endpipeline exit pipeline mode\n"); HELP0(" \\flush flush output data to the server\n"); HELP0(" \\flushrequest send request to the server to flush its output buffer\n"); - HELP0(" \\getresults [NUM_RES] read NUM_RES pending results. 
All pending results are\n" - " read if no argument is provided\n"); + HELP0(" \\getresults [NUM_RES] read NUM_RES pending results, or all if no argument\n"); HELP0(" \\parse STMT_NAME create a prepared statement\n"); HELP0(" \\sendpipeline send an extended query to an ongoing pipeline\n"); HELP0(" \\startpipeline enter pipeline mode\n"); diff --git a/src/bin/psql/variables.c b/src/bin/psql/variables.c index ae2d0e5ed3f47..6b64302ebca86 100644 --- a/src/bin/psql/variables.c +++ b/src/bin/psql/variables.c @@ -204,7 +204,7 @@ ParseVariableDouble(const char *value, const char *name, double *result, double if ((value == NULL) || (*value == '\0')) { if (name) - pg_log_error("invalid input syntax for \"%s\"", name); + pg_log_error("invalid input syntax for variable \"%s\"", name); return false; } @@ -215,14 +215,14 @@ ParseVariableDouble(const char *value, const char *name, double *result, double if (dblval < min) { if (name) - pg_log_error("invalid value \"%s\" for \"%s\": must be greater than %.2f", + pg_log_error("invalid value \"%s\" for variable \"%s\": must be greater than %.2f", value, name, min); return false; } else if (dblval > max) { if (name) - pg_log_error("invalid value \"%s\" for \"%s\": must be less than %.2f", + pg_log_error("invalid value \"%s\" for variable \"%s\": must be less than %.2f", value, name, max); } *result = dblval; @@ -238,13 +238,13 @@ ParseVariableDouble(const char *value, const char *name, double *result, double (dblval == 0.0 || dblval >= HUGE_VAL || dblval <= -HUGE_VAL)) { if (name) - pg_log_error("\"%s\" is out of range for \"%s\"", value, name); + pg_log_error("value \"%s\" is out of range for variable \"%s\"", value, name); return false; } else { if (name) - pg_log_error("invalid value \"%s\" for \"%s\"", value, name); + pg_log_error("invalid value \"%s\" for variable \"%s\"", value, name); return false; } } From f24fdf985561e7166d7d54459b764daae8efda44 Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Mon, 16 Jun 2025 11:16:52 +0200 Subject: [PATCH 013/181] libpq-oauth: Add exports.list to .gitignore --- src/interfaces/libpq-oauth/.gitignore | 1 + 1 file changed, 1 insertion(+) create mode 100644 src/interfaces/libpq-oauth/.gitignore diff --git a/src/interfaces/libpq-oauth/.gitignore b/src/interfaces/libpq-oauth/.gitignore new file mode 100644 index 0000000000000..a4afe7c1c6858 --- /dev/null +++ b/src/interfaces/libpq-oauth/.gitignore @@ -0,0 +1 @@ +/exports.list From ee685c9baf987984dbd2194d74576422996d95f4 Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Mon, 16 Jun 2025 11:43:52 +0200 Subject: [PATCH 014/181] doc: Clean up title case use --- doc/src/sgml/logical-replication.sgml | 12 ++++++------ doc/src/sgml/protocol.sgml | 4 ++-- doc/src/sgml/textsearch.sgml | 2 +- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/doc/src/sgml/logical-replication.sgml b/doc/src/sgml/logical-replication.sgml index 686dd441d0223..c32e6bc000d4d 100644 --- a/doc/src/sgml/logical-replication.sgml +++ b/doc/src/sgml/logical-replication.sgml @@ -2413,7 +2413,7 @@ CONTEXT: processing remote data for replication origin "pg_16395" during "INSER - Prepare for publisher upgrades + Prepare for Publisher Upgrades pg_upgrade attempts to migrate logical @@ -2485,7 +2485,7 @@ CONTEXT: processing remote data for replication origin "pg_16395" during "INSER - Prepare for subscriber upgrades + Prepare for Subscriber Upgrades Setup the @@ -2535,7 +2535,7 @@ CONTEXT: processing remote data for replication origin "pg_16395" during "INSER - Upgrading logical replication 
clusters + Upgrading Logical Replication Clusters While upgrading a subscriber, write operations can be performed in the @@ -2599,7 +2599,7 @@ CONTEXT: processing remote data for replication origin "pg_16395" during "INSER - Steps to upgrade a two-node logical replication cluster + Steps to Upgrade a Two-node Logical Replication Cluster Let's say publisher is in node1 and subscriber is in node2. The subscriber node2 has @@ -2743,7 +2743,7 @@ pg_ctl -D /opt/PostgreSQL/data2_upgraded start -l logfile - Steps to upgrade a cascaded logical replication cluster + Steps to Upgrade a Cascaded Logical Replication Cluster Let's say we have a cascaded logical replication setup node1->node2->node3. @@ -2972,7 +2972,7 @@ pg_ctl -D /opt/PostgreSQL/data3_upgraded start -l logfile - Steps to upgrade a two-node circular logical replication cluster + Steps to Upgrade a Two-node Circular Logical Replication Cluster Let's say we have a circular logical replication setup node1->node2 and diff --git a/doc/src/sgml/protocol.sgml b/doc/src/sgml/protocol.sgml index c4d3853cbf2c2..137ffc8d0b7eb 100644 --- a/doc/src/sgml/protocol.sgml +++ b/doc/src/sgml/protocol.sgml @@ -189,7 +189,7 @@ - Protocol versions + Protocol Versions The current, latest version of the protocol is version 3.2. However, for @@ -226,7 +226,7 @@ - Protocol versions + Protocol Versions diff --git a/doc/src/sgml/textsearch.sgml b/doc/src/sgml/textsearch.sgml index 908857a54af5f..89928ed182913 100644 --- a/doc/src/sgml/textsearch.sgml +++ b/doc/src/sgml/textsearch.sgml @@ -1355,7 +1355,7 @@ ts_headline( config - Warning: Cross-site scripting (XSS) safety + Warning: Cross-site Scripting (XSS) Safety The output from ts_headline is not guaranteed to be safe for direct inclusion in web pages. When From e9a3615a5224236917af161d9b6a55ba8f129b4d Mon Sep 17 00:00:00 2001 From: Andres Freund Date: Mon, 16 Jun 2025 12:36:01 -0400 Subject: [PATCH 015/181] aio: Add missing memory barrier when waiting for IO handle Previously there was no memory barrier enforcing correct memory ordering when waiting for a free IO handle. However, in the much more common case of waiting for IO to complete, memory barriers already were present. On strongly ordered architectures like x86 this had no negative consequences, but on some armv8 hardware (observed on Apple hardware), it was possible for the update, in the IO worker, to PgAioHandle->state to become visible before ->distilled_result becoming visible, leading to rather confusing assertion failures. The failures were rare enough that the bug sometimes took days to reproduce when running 027_stream_regress in a loop. Once finally debugged, it was easy enough to come up with a much quicker repro: Trigger a lot of very fast IO by limiting io_combine_limit to 1 and ensure that we always have to wait for a free handle by setting io_max_concurrency to 1. Triggering lots of concurrent seqscans in that setup triggers the issue within seconds. One reason this was hard to debug was that the assertion failure most commonly happened in WaitReadBuffers(), rather than in the AIO subsystem itself. The assertions added in this commit make problems like this easier to understand. Also add a comment to the IO worker explaining that we rely on the lwlock acquisition for correct memory ordering. I think it'd be good to add a tap test that stress tests buffer IO, but that's material for a separate patch. Thanks a lot to Alexander and Konstantin for all the debugging help. 
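For illustration only (not part of the committed change): the ordering
contract the fix relies on is the usual release/acquire pairing between
the IO worker publishing a result and a backend consuming it. A minimal
sketch using the pg_write_barrier()/pg_read_barrier() primitives, with
the concrete state value name assumed here for the example:

    /* writer (IO worker): publish the result before the state */
    ioh->distilled_result = result;
    pg_write_barrier();             /* result visible before state */
    ioh->state = PGAIO_HS_COMPLETED_SHARED;    /* assumed state name */

    /* reader (waiting backend): load the state first, then fence */
    state = ioh->state;
    pg_read_barrier();              /* pairs with the write barrier */
    result = ioh->distilled_result; /* at least as new as the state */

On x86, loads are not reordered with other loads, so the missing fence
went unnoticed there; only weakly ordered architectures such as armv8
could observe the stale ->distilled_result described above.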
Reported-by: Tom Lane Reported-by: Alexander Lakhin Investigated-by: Andres Freund Investigated-by: Alexander Lakhin Investigated-by: Konstantin Knizhnik Discussion: https://postgr.es/m/2dkz7azclpeiqcmouamdixyn5xhlzy4rvikxrbovyzvi6rnv5c@pz7o7osv2ahf --- src/backend/storage/aio/aio.c | 17 +++++++++++++++++ src/backend/storage/aio/aio_callback.c | 7 +++++++ src/backend/storage/aio/method_worker.c | 7 ++++++- 3 files changed, 30 insertions(+), 1 deletion(-) diff --git a/src/backend/storage/aio/aio.c b/src/backend/storage/aio/aio.c index 6c6c0a908e21f..3643f27ad6e1b 100644 --- a/src/backend/storage/aio/aio.c +++ b/src/backend/storage/aio/aio.c @@ -556,6 +556,13 @@ bool pgaio_io_was_recycled(PgAioHandle *ioh, uint64 ref_generation, PgAioHandleState *state) { *state = ioh->state; + + /* + * Ensure that we don't see an earlier state of the handle than ioh->state + * due to compiler or CPU reordering. This protects both ->generation as + * directly used here, and other fields in the handle accessed in the + * caller if the handle was not reused. + */ pg_read_barrier(); return ioh->generation != ref_generation; @@ -773,7 +780,12 @@ pgaio_io_wait_for_free(void) * Note that no interrupts are processed between the state check * and the call to reclaim - that's important as otherwise an * interrupt could have already reclaimed the handle. + * + * Need to ensure that there's no reordering, in the more common + * paths, where we wait for IO, that's done by + * pgaio_io_was_recycled(). */ + pg_read_barrier(); pgaio_io_reclaim(ioh); reclaimed++; } @@ -852,7 +864,12 @@ pgaio_io_wait_for_free(void) * check and the call to reclaim - that's important as * otherwise an interrupt could have already reclaimed the * handle. + * + * Need to ensure that there's no reordering, in the more + * common paths, where we wait for IO, that's done by + * pgaio_io_was_recycled(). */ + pg_read_barrier(); pgaio_io_reclaim(ioh); break; } diff --git a/src/backend/storage/aio/aio_callback.c b/src/backend/storage/aio/aio_callback.c index 0ad9795bb7e0c..03c9bba080267 100644 --- a/src/backend/storage/aio/aio_callback.c +++ b/src/backend/storage/aio/aio_callback.c @@ -256,6 +256,9 @@ pgaio_io_call_complete_shared(PgAioHandle *ioh) pgaio_result_status_string(result.status), result.id, result.error_data, result.result); result = ce->cb->complete_shared(ioh, result, cb_data); + + /* the callback should never transition to unknown */ + Assert(result.status != PGAIO_RS_UNKNOWN); } ioh->distilled_result = result; @@ -290,6 +293,7 @@ pgaio_io_call_complete_local(PgAioHandle *ioh) /* start with distilled result from shared callback */ result = ioh->distilled_result; + Assert(result.status != PGAIO_RS_UNKNOWN); for (int i = ioh->num_callbacks; i > 0; i--) { @@ -306,6 +310,9 @@ pgaio_io_call_complete_local(PgAioHandle *ioh) pgaio_result_status_string(result.status), result.id, result.error_data, result.result); result = ce->cb->complete_local(ioh, result, cb_data); + + /* the callback should never transition to unknown */ + Assert(result.status != PGAIO_RS_UNKNOWN); } /* diff --git a/src/backend/storage/aio/method_worker.c b/src/backend/storage/aio/method_worker.c index 743cccc2acd18..36be179678d7a 100644 --- a/src/backend/storage/aio/method_worker.c +++ b/src/backend/storage/aio/method_worker.c @@ -461,7 +461,12 @@ IoWorkerMain(const void *startup_data, size_t startup_data_len) int nwakeups = 0; int worker; - /* Try to get a job to do. */ + /* + * Try to get a job to do. 
+ * + * The lwlock acquisition also provides the necessary memory barrier + * to ensure that we don't see an outdated data in the handle. + */ LWLockAcquire(AioWorkerSubmissionQueueLock, LW_EXCLUSIVE); if ((io_index = pgaio_worker_submission_queue_consume()) == UINT32_MAX) { From 33b06a20016d8dd8dbdc1f6d6a9d79477c1104c5 Mon Sep 17 00:00:00 2001 From: David Rowley Date: Tue, 17 Jun 2025 10:49:36 +1200 Subject: [PATCH 016/181] Fix possible Assert failure in verify_compact_attribute() Sometimes the TupleDesc used in verify_compact_attribute() is shared among backends, and since CompactAttribute.attcacheoff gets updated during tuple deformation, it was possible that another backend would set attcacheoff on a given CompactAttribute in the small window of time from when the attcacheoff from the live CompactAttribute was being set in the 'tmp' CompactAttribute and before the Assert verifying that the live and tmp CompactAttributes matched. Here we adjust the code to make a copy of the live CompactAttribute so that we're not trying to Assert against a shared copy of it. Author: David Rowley Reported-by: Alexander Lakhin Discussion: https://postgr.es/m/7195e408-758c-4031-8e61-4f842c716ac0@gmail.com --- src/backend/access/common/tupdesc.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/src/backend/access/common/tupdesc.c b/src/backend/access/common/tupdesc.c index ffd0c78f905a5..020d00cd01ce7 100644 --- a/src/backend/access/common/tupdesc.c +++ b/src/backend/access/common/tupdesc.c @@ -142,10 +142,17 @@ void verify_compact_attribute(TupleDesc tupdesc, int attnum) { #ifdef USE_ASSERT_CHECKING - CompactAttribute *cattr = &tupdesc->compact_attrs[attnum]; + CompactAttribute cattr; Form_pg_attribute attr = TupleDescAttr(tupdesc, attnum); CompactAttribute tmp; + /* + * Make a temp copy of the TupleDesc's CompactAttribute. This may be a + * shared TupleDesc and the attcacheoff might get changed by another + * backend. + */ + memcpy(&cattr, &tupdesc->compact_attrs[attnum], sizeof(CompactAttribute)); + /* * Populate the temporary CompactAttribute from the corresponding * Form_pg_attribute @@ -156,11 +163,11 @@ verify_compact_attribute(TupleDesc tupdesc, int attnum) * Make the attcacheoff match since it's been reset to -1 by * populate_compact_attribute_internal. Same with attnullability. */ - tmp.attcacheoff = cattr->attcacheoff; - tmp.attnullability = cattr->attnullability; + tmp.attcacheoff = cattr.attcacheoff; + tmp.attnullability = cattr.attnullability; /* Check the freshly populated CompactAttribute matches the TupleDesc's */ - Assert(memcmp(&tmp, cattr, sizeof(CompactAttribute)) == 0); + Assert(memcmp(&tmp, &cattr, sizeof(CompactAttribute)) == 0); #endif } From d87d07b7ad3b782cb74566cd771ecdb2823adf6a Mon Sep 17 00:00:00 2001 From: Masahiko Sawada Date: Mon, 16 Jun 2025 17:36:01 -0700 Subject: [PATCH 017/181] Fix re-distributing previously distributed invalidation messages during logical decoding. Commit 4909b38af0 introduced logic to distribute invalidation messages from catalog-modifying transactions to all concurrent in-progress transactions. However, since each transaction distributes not only its original invalidation messages but also previously distributed messages to other transactions, this leads to an exponential increase in allocation request size for invalidation messages, ultimately causing memory allocation failure. 
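To see how this blows up, consider an invented example: n
catalog-modifying transactions overlap, each generating m invalidation
messages of its own. The first transaction to commit distributes m
messages to every in-progress transaction. The second commit then
distributes its own m plus the m it just received, i.e. 2m; the third
distributes 4m, and the k-th on the order of 2^(k-1) * m. The
allocation request size therefore grows exponentially in the number of
overlapping catalog-modifying commits, which is what ultimately
exhausts memory.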
This commit fixes this issue by tracking distributed invalidation messages separately per decoded transaction and not redistributing these messages to other in-progress transactions. The maximum size of distributed invalidation messages that one transaction can store is limited to MAX_DISTR_INVAL_MSG_PER_TXN (8MB). Once the size of the distributed invalidation messages exceeds this threshold, we invalidate all caches in locations where distributed invalidation messages need to be executed. Back-patch to all supported versions where we introduced the fix by commit 4909b38af0. Note that this commit adds two new fields to ReorderBufferTXN to store the distributed transactions. This change breaks ABI compatibility in back branches, affecting third-party extensions that depend on the size of the ReorderBufferTXN struct, though this scenario seems unlikely. Additionally, it adds a new flag to the txn_flags field of ReorderBufferTXN to indicate distributed invalidation message overflow. This should not affect existing implementations, as it is unlikely that third-party extensions use unused bits in the txn_flags field. Bug: #18938 #18942 Author: vignesh C Reported-by: Duncan Sands Reported-by: John Hutchins Reported-by: Laurence Parry Reported-by: Max Madden Reported-by: Braulio Fdo Gonzalez Reviewed-by: Masahiko Sawada Reviewed-by: Amit Kapila Reviewed-by: Hayato Kuroda Discussion: https://postgr.es/m/680bdaf6-f7d1-4536-b580-05c2760c67c6@deepbluecap.com Discussion: https://postgr.es/m/18942-0ab1e5ae156613ad@postgresql.org Discussion: https://postgr.es/m/18938-57c9a1c463b68ce0@postgresql.org Discussion: https://postgr.es/m/CAD1FGCT2sYrP_70RTuo56QTizyc+J3wJdtn2gtO3VttQFpdMZg@mail.gmail.com Discussion: https://postgr.es/m/CANO2=B=2BT1hSYCE=nuuTnVTnjidMg0+-FfnRnqM6kd23qoygg@mail.gmail.com Backpatch-through: 13 --- .../expected/invalidation_distribution.out | 23 +- .../specs/invalidation_distribution.spec | 11 + .../replication/logical/reorderbuffer.c | 196 +++++++++++++++--- src/backend/replication/logical/snapbuild.c | 12 +- src/include/replication/reorderbuffer.h | 16 ++ 5 files changed, 222 insertions(+), 36 deletions(-) diff --git a/contrib/test_decoding/expected/invalidation_distribution.out b/contrib/test_decoding/expected/invalidation_distribution.out index ad0a944cbf303..ae53b1e61de3e 100644 --- a/contrib/test_decoding/expected/invalidation_distribution.out +++ b/contrib/test_decoding/expected/invalidation_distribution.out @@ -1,4 +1,4 @@ -Parsed test spec with 2 sessions +Parsed test spec with 3 sessions starting permutation: s1_insert_tbl1 s1_begin s1_insert_tbl1 s2_alter_pub_add_tbl s1_commit s1_insert_tbl1 s2_get_binary_changes step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1); @@ -18,3 +18,24 @@ count stop (1 row) + +starting permutation: s1_begin s1_insert_tbl1 s3_begin s3_insert_tbl1 s2_alter_pub_add_tbl s1_insert_tbl1 s1_commit s3_commit s2_get_binary_changes +step s1_begin: BEGIN; +step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1); +step s3_begin: BEGIN; +step s3_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (2, 2); +step s2_alter_pub_add_tbl: ALTER PUBLICATION pub ADD TABLE tbl1; +step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1); +step s1_commit: COMMIT; +step s3_commit: COMMIT; +step s2_get_binary_changes: SELECT count(data) FROM pg_logical_slot_get_binary_changes('isolation_slot', NULL, NULL, 'proto_version', '4', 'publication_names', 'pub') WHERE get_byte(data, 0) = 73; +count +----- + 1 +(1 row) + +?column? 
+-------- +stop +(1 row) + diff --git a/contrib/test_decoding/specs/invalidation_distribution.spec b/contrib/test_decoding/specs/invalidation_distribution.spec index decbed627e327..67d41969ac1d6 100644 --- a/contrib/test_decoding/specs/invalidation_distribution.spec +++ b/contrib/test_decoding/specs/invalidation_distribution.spec @@ -28,5 +28,16 @@ setup { SET synchronous_commit=on; } step "s2_alter_pub_add_tbl" { ALTER PUBLICATION pub ADD TABLE tbl1; } step "s2_get_binary_changes" { SELECT count(data) FROM pg_logical_slot_get_binary_changes('isolation_slot', NULL, NULL, 'proto_version', '4', 'publication_names', 'pub') WHERE get_byte(data, 0) = 73; } +session "s3" +setup { SET synchronous_commit=on; } +step "s3_begin" { BEGIN; } +step "s3_insert_tbl1" { INSERT INTO tbl1 (val1, val2) VALUES (2, 2); } +step "s3_commit" { COMMIT; } + # Expect to get one insert change. LOGICAL_REP_MSG_INSERT = 'I' permutation "s1_insert_tbl1" "s1_begin" "s1_insert_tbl1" "s2_alter_pub_add_tbl" "s1_commit" "s1_insert_tbl1" "s2_get_binary_changes" + +# Expect to get one insert change with LOGICAL_REP_MSG_INSERT = 'I' from +# the second "s1_insert_tbl1" executed after adding the table tbl1 to the +# publication in "s2_alter_pub_add_tbl". +permutation "s1_begin" "s1_insert_tbl1" "s3_begin" "s3_insert_tbl1" "s2_alter_pub_add_tbl" "s1_insert_tbl1" "s1_commit" "s3_commit" "s2_get_binary_changes" diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c index 676551118753d..c4299c76fb16b 100644 --- a/src/backend/replication/logical/reorderbuffer.c +++ b/src/backend/replication/logical/reorderbuffer.c @@ -109,10 +109,22 @@ #include "storage/procarray.h" #include "storage/sinval.h" #include "utils/builtins.h" +#include "utils/inval.h" #include "utils/memutils.h" #include "utils/rel.h" #include "utils/relfilenumbermap.h" +/* + * Each transaction has an 8MB limit for invalidation messages distributed from + * other transactions. This limit is set considering scenarios with many + * concurrent logical decoding operations. When the distributed invalidation + * messages reach this threshold, the transaction is marked as + * RBTXN_DISTR_INVAL_OVERFLOWED to invalidate the complete cache as we have lost + * some inval messages and hence don't know what needs to be invalidated. 
+ */ +#define MAX_DISTR_INVAL_MSG_PER_TXN \ + ((8 * 1024 * 1024) / sizeof(SharedInvalidationMessage)) + /* entry for a hash table we use to map from xid to our transaction state */ typedef struct ReorderBufferTXNByIdEnt { @@ -472,6 +484,12 @@ ReorderBufferFreeTXN(ReorderBuffer *rb, ReorderBufferTXN *txn) txn->invalidations = NULL; } + if (txn->invalidations_distributed) + { + pfree(txn->invalidations_distributed); + txn->invalidations_distributed = NULL; + } + /* Reset the toast hash */ ReorderBufferToastReset(rb, txn); @@ -2661,7 +2679,17 @@ ReorderBufferProcessTXN(ReorderBuffer *rb, ReorderBufferTXN *txn, AbortCurrentTransaction(); /* make sure there's no cache pollution */ - ReorderBufferExecuteInvalidations(txn->ninvalidations, txn->invalidations); + if (rbtxn_distr_inval_overflowed(txn)) + { + Assert(txn->ninvalidations_distributed == 0); + InvalidateSystemCaches(); + } + else + { + ReorderBufferExecuteInvalidations(txn->ninvalidations, txn->invalidations); + ReorderBufferExecuteInvalidations(txn->ninvalidations_distributed, + txn->invalidations_distributed); + } if (using_subtxn) RollbackAndReleaseCurrentSubTransaction(); @@ -2710,8 +2738,17 @@ ReorderBufferProcessTXN(ReorderBuffer *rb, ReorderBufferTXN *txn, AbortCurrentTransaction(); /* make sure there's no cache pollution */ - ReorderBufferExecuteInvalidations(txn->ninvalidations, - txn->invalidations); + if (rbtxn_distr_inval_overflowed(txn)) + { + Assert(txn->ninvalidations_distributed == 0); + InvalidateSystemCaches(); + } + else + { + ReorderBufferExecuteInvalidations(txn->ninvalidations, txn->invalidations); + ReorderBufferExecuteInvalidations(txn->ninvalidations_distributed, + txn->invalidations_distributed); + } if (using_subtxn) RollbackAndReleaseCurrentSubTransaction(); @@ -3060,7 +3097,8 @@ ReorderBufferAbort(ReorderBuffer *rb, TransactionId xid, XLogRecPtr lsn, * We might have decoded changes for this transaction that could load * the cache as per the current transaction's view (consider DDL's * happened in this transaction). We don't want the decoding of future - * transactions to use those cache entries so execute invalidations. + * transactions to use those cache entries so execute only the inval + * messages in this transaction. */ if (txn->ninvalidations > 0) ReorderBufferImmediateInvalidation(rb, txn->ninvalidations, @@ -3147,9 +3185,10 @@ ReorderBufferForget(ReorderBuffer *rb, TransactionId xid, XLogRecPtr lsn) txn->final_lsn = lsn; /* - * Process cache invalidation messages if there are any. Even if we're not - * interested in the transaction's contents, it could have manipulated the - * catalog and we need to update the caches according to that. + * Process only cache invalidation messages in this transaction if there + * are any. Even if we're not interested in the transaction's contents, it + * could have manipulated the catalog and we need to update the caches + * according to that. */ if (txn->base_snapshot != NULL && txn->ninvalidations > 0) ReorderBufferImmediateInvalidation(rb, txn->ninvalidations, @@ -3421,6 +3460,57 @@ ReorderBufferAddNewTupleCids(ReorderBuffer *rb, TransactionId xid, txn->ntuplecids++; } +/* + * Add new invalidation messages to the reorder buffer queue. 
+ */ +static void +ReorderBufferQueueInvalidations(ReorderBuffer *rb, TransactionId xid, + XLogRecPtr lsn, Size nmsgs, + SharedInvalidationMessage *msgs) +{ + ReorderBufferChange *change; + + change = ReorderBufferAllocChange(rb); + change->action = REORDER_BUFFER_CHANGE_INVALIDATION; + change->data.inval.ninvalidations = nmsgs; + change->data.inval.invalidations = (SharedInvalidationMessage *) + palloc(sizeof(SharedInvalidationMessage) * nmsgs); + memcpy(change->data.inval.invalidations, msgs, + sizeof(SharedInvalidationMessage) * nmsgs); + + ReorderBufferQueueChange(rb, xid, lsn, change, false); +} + +/* + * A helper function for ReorderBufferAddInvalidations() and + * ReorderBufferAddDistributedInvalidations() to accumulate the invalidation + * messages to the **invals_out. + */ +static void +ReorderBufferAccumulateInvalidations(SharedInvalidationMessage **invals_out, + uint32 *ninvals_out, + SharedInvalidationMessage *msgs_new, + Size nmsgs_new) +{ + if (*ninvals_out == 0) + { + *ninvals_out = nmsgs_new; + *invals_out = (SharedInvalidationMessage *) + palloc(sizeof(SharedInvalidationMessage) * nmsgs_new); + memcpy(*invals_out, msgs_new, sizeof(SharedInvalidationMessage) * nmsgs_new); + } + else + { + /* Enlarge the array of inval messages */ + *invals_out = (SharedInvalidationMessage *) + repalloc(*invals_out, sizeof(SharedInvalidationMessage) * + (*ninvals_out + nmsgs_new)); + memcpy(*invals_out + *ninvals_out, msgs_new, + nmsgs_new * sizeof(SharedInvalidationMessage)); + *ninvals_out += nmsgs_new; + } +} + /* * Accumulate the invalidations for executing them later. * @@ -3441,7 +3531,6 @@ ReorderBufferAddInvalidations(ReorderBuffer *rb, TransactionId xid, { ReorderBufferTXN *txn; MemoryContext oldcontext; - ReorderBufferChange *change; txn = ReorderBufferTXNByXid(rb, xid, true, NULL, lsn, true); @@ -3456,35 +3545,76 @@ ReorderBufferAddInvalidations(ReorderBuffer *rb, TransactionId xid, Assert(nmsgs > 0); - /* Accumulate invalidations. */ - if (txn->ninvalidations == 0) - { - txn->ninvalidations = nmsgs; - txn->invalidations = (SharedInvalidationMessage *) - palloc(sizeof(SharedInvalidationMessage) * nmsgs); - memcpy(txn->invalidations, msgs, - sizeof(SharedInvalidationMessage) * nmsgs); - } - else + ReorderBufferAccumulateInvalidations(&txn->invalidations, + &txn->ninvalidations, + msgs, nmsgs); + + ReorderBufferQueueInvalidations(rb, xid, lsn, nmsgs, msgs); + + MemoryContextSwitchTo(oldcontext); +} + +/* + * Accumulate the invalidations distributed by other committed transactions + * for executing them later. + * + * This function is similar to ReorderBufferAddInvalidations() but stores + * the given inval messages to the txn->invalidations_distributed with the + * overflow check. + * + * This needs to be called by committed transactions to distribute their + * inval messages to in-progress transactions. + */ +void +ReorderBufferAddDistributedInvalidations(ReorderBuffer *rb, TransactionId xid, + XLogRecPtr lsn, Size nmsgs, + SharedInvalidationMessage *msgs) +{ + ReorderBufferTXN *txn; + MemoryContext oldcontext; + + txn = ReorderBufferTXNByXid(rb, xid, true, NULL, lsn, true); + + oldcontext = MemoryContextSwitchTo(rb->context); + + /* + * Collect all the invalidations under the top transaction, if available, + * so that we can execute them all together. See comments + * ReorderBufferAddInvalidations. 
+ */ + txn = rbtxn_get_toptxn(txn); + + Assert(nmsgs > 0); + + if (!rbtxn_distr_inval_overflowed(txn)) { - txn->invalidations = (SharedInvalidationMessage *) - repalloc(txn->invalidations, sizeof(SharedInvalidationMessage) * - (txn->ninvalidations + nmsgs)); + /* + * Check the transaction has enough space for storing distributed + * invalidation messages. + */ + if (txn->ninvalidations_distributed + nmsgs >= MAX_DISTR_INVAL_MSG_PER_TXN) + { + /* + * Mark the invalidation message as overflowed and free up the + * messages accumulated so far. + */ + txn->txn_flags |= RBTXN_DISTR_INVAL_OVERFLOWED; - memcpy(txn->invalidations + txn->ninvalidations, msgs, - nmsgs * sizeof(SharedInvalidationMessage)); - txn->ninvalidations += nmsgs; + if (txn->invalidations_distributed) + { + pfree(txn->invalidations_distributed); + txn->invalidations_distributed = NULL; + txn->ninvalidations_distributed = 0; + } + } + else + ReorderBufferAccumulateInvalidations(&txn->invalidations_distributed, + &txn->ninvalidations_distributed, + msgs, nmsgs); } - change = ReorderBufferAllocChange(rb); - change->action = REORDER_BUFFER_CHANGE_INVALIDATION; - change->data.inval.ninvalidations = nmsgs; - change->data.inval.invalidations = (SharedInvalidationMessage *) - palloc(sizeof(SharedInvalidationMessage) * nmsgs); - memcpy(change->data.inval.invalidations, msgs, - sizeof(SharedInvalidationMessage) * nmsgs); - - ReorderBufferQueueChange(rb, xid, lsn, change, false); + /* Queue the invalidation messages into the transaction */ + ReorderBufferQueueInvalidations(rb, xid, lsn, nmsgs, msgs); MemoryContextSwitchTo(oldcontext); } diff --git a/src/backend/replication/logical/snapbuild.c b/src/backend/replication/logical/snapbuild.c index 0d7bddbe4ed4e..adf18c397db43 100644 --- a/src/backend/replication/logical/snapbuild.c +++ b/src/backend/replication/logical/snapbuild.c @@ -794,6 +794,13 @@ SnapBuildDistributeSnapshotAndInval(SnapBuild *builder, XLogRecPtr lsn, Transact * contents built by the current transaction even after its decoding, * which should have been invalidated due to concurrent catalog * changing transaction. + * + * Distribute only the invalidation messages generated by the current + * committed transaction. Invalidation messages received from other + * transactions would have already been propagated to the relevant + * in-progress transactions. This transaction would have processed + * those invalidations, ensuring that subsequent transactions observe + * a consistent cache state. */ if (txn->xid != xid) { @@ -807,8 +814,9 @@ SnapBuildDistributeSnapshotAndInval(SnapBuild *builder, XLogRecPtr lsn, Transact { Assert(msgs != NULL); - ReorderBufferAddInvalidations(builder->reorder, txn->xid, lsn, - ninvalidations, msgs); + ReorderBufferAddDistributedInvalidations(builder->reorder, + txn->xid, lsn, + ninvalidations, msgs); } } } diff --git a/src/include/replication/reorderbuffer.h b/src/include/replication/reorderbuffer.h index 24e88c409ba7e..fa0745552f86c 100644 --- a/src/include/replication/reorderbuffer.h +++ b/src/include/replication/reorderbuffer.h @@ -176,6 +176,7 @@ typedef struct ReorderBufferChange #define RBTXN_SENT_PREPARE 0x0200 #define RBTXN_IS_COMMITTED 0x0400 #define RBTXN_IS_ABORTED 0x0800 +#define RBTXN_DISTR_INVAL_OVERFLOWED 0x1000 #define RBTXN_PREPARE_STATUS_MASK (RBTXN_IS_PREPARED | RBTXN_SKIPPED_PREPARE | RBTXN_SENT_PREPARE) @@ -265,6 +266,12 @@ typedef struct ReorderBufferChange ((txn)->txn_flags & RBTXN_SKIPPED_PREPARE) != 0 \ ) +/* Is the array of distributed inval messages overflowed? 
*/ +#define rbtxn_distr_inval_overflowed(txn) \ +( \ + ((txn)->txn_flags & RBTXN_DISTR_INVAL_OVERFLOWED) != 0 \ +) + /* Is this a top-level transaction? */ #define rbtxn_is_toptxn(txn) \ ( \ @@ -422,6 +429,12 @@ typedef struct ReorderBufferTXN uint32 ninvalidations; SharedInvalidationMessage *invalidations; + /* + * Stores cache invalidation messages distributed by other transactions. + */ + uint32 ninvalidations_distributed; + SharedInvalidationMessage *invalidations_distributed; + /* --- * Position in one of two lists: * * list of subtransactions if we are *known* to be subxact @@ -738,6 +751,9 @@ extern void ReorderBufferAddNewTupleCids(ReorderBuffer *rb, TransactionId xid, CommandId cmin, CommandId cmax, CommandId combocid); extern void ReorderBufferAddInvalidations(ReorderBuffer *rb, TransactionId xid, XLogRecPtr lsn, Size nmsgs, SharedInvalidationMessage *msgs); +extern void ReorderBufferAddDistributedInvalidations(ReorderBuffer *rb, TransactionId xid, + XLogRecPtr lsn, Size nmsgs, + SharedInvalidationMessage *msgs); extern void ReorderBufferImmediateInvalidation(ReorderBuffer *rb, uint32 ninvalidations, SharedInvalidationMessage *invalidations); extern void ReorderBufferProcessXid(ReorderBuffer *rb, TransactionId xid, XLogRecPtr lsn); From 23c67e8a839441c9d6a307b531a3369b677c91eb Mon Sep 17 00:00:00 2001 From: Bruce Momjian Date: Mon, 16 Jun 2025 21:04:14 -0400 Subject: [PATCH 018/181] doc PG 18 relnotes: add author for initdb commit 04bec894a04 Needed to run src/tools//add_commit_links.pl. --- doc/src/sgml/release-18.sgml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/src/sgml/release-18.sgml b/doc/src/sgml/release-18.sgml index c5e60f88fdf15..0b49e23721612 100644 --- a/doc/src/sgml/release-18.sgml +++ b/doc/src/sgml/release-18.sgml @@ -60,7 +60,7 @@ Author: Peter Eisentraut -Change initdb default to enable data checksums +Change initdb default to enable data checksums (Greg Sabino Mullane) § @@ -165,6 +165,7 @@ Author: Tom Lane Execute AFTER triggers as the role that was active when trigger events were queued (Laurenz Albe) +§ From 6f55fb741147c49dc20dd2e4597363b2cc04acb4 Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Tue, 17 Jun 2025 07:39:43 +0200 Subject: [PATCH 019/181] doc: Mention the default io_method It was previously not documented. Author: Daniel Westermann (DWE) Reviewed-by: Pavel Stehule Discussion: https://www.postgresql.org/message-id/flat/ZR0P278MB04279CB0C1D8F49DE68F168ED2AF2%40ZR0P278MB0427.CHEP278.PROD.OUTLOOK.COM --- doc/src/sgml/config.sgml | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml index 021153b2a5f27..7e0e5400ee128 100644 --- a/doc/src/sgml/config.sgml +++ b/doc/src/sgml/config.sgml @@ -2779,6 +2779,7 @@ include_dir 'conf.d' + The default is worker. This parameter can only be set at server start. From c89d6b889ce958dcab27b0342eca36634c0fefaf Mon Sep 17 00:00:00 2001 From: Tomas Vondra Date: Tue, 17 Jun 2025 14:14:36 +0200 Subject: [PATCH 020/181] amcheck: Test gin_index_check on a multicolumn index Adds a regression test with gin_index_check() on a multicolumn index, to verify it's handled correctly and improve test coverage for code introduced by 14ffaece0fb5. Author: Arseniy Mukhin Reviewed-by: Andrey M. 
Borodin Discussion: https://postgr.es/m/CAE7r3MJ611B9TE=YqBBncewp7-k64VWs+sjk7XF6fJUX77uFBA@mail.gmail.com --- contrib/amcheck/expected/check_gin.out | 12 ++++++++++++ contrib/amcheck/sql/check_gin.sql | 10 ++++++++++ 2 files changed, 22 insertions(+) diff --git a/contrib/amcheck/expected/check_gin.out b/contrib/amcheck/expected/check_gin.out index b4f0b110747c3..8dd01ced8d15f 100644 --- a/contrib/amcheck/expected/check_gin.out +++ b/contrib/amcheck/expected/check_gin.out @@ -76,3 +76,15 @@ SELECT gin_index_check('gin_check_jsonb_idx'); -- cleanup DROP TABLE gin_check_jsonb; +-- Test GIN multicolumn index +CREATE TABLE "gin_check_multicolumn"(a text[], b text[]); +INSERT INTO gin_check_multicolumn (a,b) values ('{a,c,e}','{b,d,f}'); +CREATE INDEX "gin_check_multicolumn_idx" on gin_check_multicolumn USING GIN(a,b); +SELECT gin_index_check('gin_check_multicolumn_idx'); + gin_index_check +----------------- + +(1 row) + +-- cleanup +DROP TABLE gin_check_multicolumn; diff --git a/contrib/amcheck/sql/check_gin.sql b/contrib/amcheck/sql/check_gin.sql index 66f42c34311db..11caed3d6a81b 100644 --- a/contrib/amcheck/sql/check_gin.sql +++ b/contrib/amcheck/sql/check_gin.sql @@ -50,3 +50,13 @@ SELECT gin_index_check('gin_check_jsonb_idx'); -- cleanup DROP TABLE gin_check_jsonb; + +-- Test GIN multicolumn index +CREATE TABLE "gin_check_multicolumn"(a text[], b text[]); +INSERT INTO gin_check_multicolumn (a,b) values ('{a,c,e}','{b,d,f}'); +CREATE INDEX "gin_check_multicolumn_idx" on gin_check_multicolumn USING GIN(a,b); + +SELECT gin_index_check('gin_check_multicolumn_idx'); + +-- cleanup +DROP TABLE gin_check_multicolumn; From 8dd41c0bff5b6944fb536cf8f2627d5f542581cd Mon Sep 17 00:00:00 2001 From: Tomas Vondra Date: Tue, 17 Jun 2025 14:16:35 +0200 Subject: [PATCH 021/181] amcheck: Remove unused GinScanItem->parentlsn field The field was introduced by commit 14ffaece0fb5, but is unused and unnecessary. So remove it. Issues reported by Arseniy Mukhin, along with a proposed patch. Review by Andrey M. Borodin, cleanup and minor improvements by me. Author: Arseniy Mukhin Reviewed-by: Andrey M. 
Borodin Discussion: https://postgr.es/m/CAE7r3MJ611B9TE=YqBBncewp7-k64VWs+sjk7XF6fJUX77uFBA@mail.gmail.com --- contrib/amcheck/verify_gin.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/contrib/amcheck/verify_gin.c b/contrib/amcheck/verify_gin.c index b5f363562e32a..3f81a8a81d291 100644 --- a/contrib/amcheck/verify_gin.c +++ b/contrib/amcheck/verify_gin.c @@ -38,7 +38,6 @@ typedef struct GinScanItem int depth; IndexTuple parenttup; BlockNumber parentblk; - XLogRecPtr parentlsn; BlockNumber blkno; struct GinScanItem *next; } GinScanItem; @@ -421,7 +420,6 @@ gin_check_parent_keys_consistency(Relation rel, stack->depth = 0; stack->parenttup = NULL; stack->parentblk = InvalidBlockNumber; - stack->parentlsn = InvalidXLogRecPtr; stack->blkno = GIN_ROOT_BLKNO; while (stack) @@ -432,7 +430,6 @@ gin_check_parent_keys_consistency(Relation rel, OffsetNumber i, maxoff, prev_attnum; - XLogRecPtr lsn; IndexTuple prev_tuple; BlockNumber rightlink; @@ -442,7 +439,6 @@ gin_check_parent_keys_consistency(Relation rel, RBM_NORMAL, strategy); LockBuffer(buffer, GIN_SHARE); page = (Page) BufferGetPage(buffer); - lsn = BufferGetLSNAtomic(buffer); maxoff = PageGetMaxOffsetNumber(page); rightlink = GinPageGetOpaque(page)->rightlink; @@ -484,7 +480,6 @@ gin_check_parent_keys_consistency(Relation rel, ptr->depth = stack->depth; ptr->parenttup = CopyIndexTuple(stack->parenttup); ptr->parentblk = stack->parentblk; - ptr->parentlsn = stack->parentlsn; ptr->blkno = rightlink; ptr->next = stack->next; stack->next = ptr; @@ -614,7 +609,6 @@ gin_check_parent_keys_consistency(Relation rel, ptr->parenttup = NULL; ptr->parentblk = stack->blkno; ptr->blkno = GinGetDownlink(idxtuple); - ptr->parentlsn = lsn; ptr->next = stack->next; stack->next = ptr; } From 0b54b392334b255abeac7c2718de7071600444ad Mon Sep 17 00:00:00 2001 From: Tomas Vondra Date: Tue, 17 Jun 2025 14:55:27 +0200 Subject: [PATCH 022/181] amcheck: Fix checks of entry order for GIN indexes This tightens a couple checks in checking GIN indexes, which might have resulted in incorrect results (false positives/negatives). * The code skipped ordering checks if the entries were for different attributes (for multi-column GIN indexes), possibly missing some cases of data corruption. But the attribute number is part of the ordering, so we can check that. * The root page was skipped when checking entry order, but that is unnecessary. The root page is subject to the same ordering rules, we can process it just like any other page. * The high key on the right-most page was not checked, but that is needed only for inner pages (we don't store the high key for those). For leaf pages we can check the high key just fine. * Correct the detection of split pages. If the page gets split, the cached parent key is greater than the current child key (not less, as the code incorrectly expected). Issues reported by Arseniy Mukhin, along with a proposed patch. Review by Andrey M. Borodin, cleanup and improvements by me. Author: Arseniy Mukhin Reviewed-by: Andrey M. 
Borodin Discussion: https://postgr.es/m/CAE7r3MJ611B9TE=YqBBncewp7-k64VWs+sjk7XF6fJUX77uFBA@mail.gmail.com --- contrib/amcheck/meson.build | 1 + contrib/amcheck/t/006_verify_gin.pl | 199 ++++++++++++++++++++++++++++ contrib/amcheck/verify_gin.c | 53 ++++---- 3 files changed, 229 insertions(+), 24 deletions(-) create mode 100644 contrib/amcheck/t/006_verify_gin.pl diff --git a/contrib/amcheck/meson.build b/contrib/amcheck/meson.build index b33e8c9b062fe..1f0c347ed5413 100644 --- a/contrib/amcheck/meson.build +++ b/contrib/amcheck/meson.build @@ -49,6 +49,7 @@ tests += { 't/003_cic_2pc.pl', 't/004_verify_nbtree_unique.pl', 't/005_pitr.pl', + 't/006_verify_gin.pl', ], }, } diff --git a/contrib/amcheck/t/006_verify_gin.pl b/contrib/amcheck/t/006_verify_gin.pl new file mode 100644 index 0000000000000..7fdde170e06fb --- /dev/null +++ b/contrib/amcheck/t/006_verify_gin.pl @@ -0,0 +1,199 @@ + +# Copyright (c) 2021-2025, PostgreSQL Global Development Group + +use strict; +use warnings FATAL => 'all'; + +use PostgreSQL::Test::Cluster; +use PostgreSQL::Test::Utils; + +use Test::More; + +my $node; +my $blksize; + +# to get the split fast, we want tuples to be as large as possible, but at the same time we don't want them to be toasted. +my $filler_size = 1900; + +# +# Test set-up +# +$node = PostgreSQL::Test::Cluster->new('test'); +$node->init(no_data_checksums => 1); +$node->append_conf('postgresql.conf', 'autovacuum=off'); +$node->start; +$blksize = int($node->safe_psql('postgres', 'SHOW block_size;')); +$node->safe_psql('postgres', q(CREATE EXTENSION amcheck)); +$node->safe_psql( + 'postgres', q( + CREATE OR REPLACE FUNCTION random_string( INT ) RETURNS text AS $$ + SELECT string_agg(substring('0123456789abcdefghijklmnopqrstuvwxyz', ceil(random() * 36)::integer, 1), '') from generate_series(1, $1); + $$ LANGUAGE SQL;)); + +# Tests +invalid_entry_order_leaf_page_test(); +invalid_entry_order_inner_page_test(); +invalid_entry_columns_order_test(); + +sub invalid_entry_order_leaf_page_test +{ + my $relname = "test"; + my $indexname = "test_gin_idx"; + + $node->safe_psql( + 'postgres', qq( + DROP TABLE IF EXISTS $relname; + CREATE TABLE $relname (a text[]); + INSERT INTO $relname (a) VALUES ('{aaaaa,bbbbb}'); + CREATE INDEX $indexname ON $relname USING gin (a); + )); + my $relpath = relation_filepath($indexname); + + $node->stop; + + my $blkno = 1; # root + + # produce wrong order by replacing aaaaa with ccccc + string_replace_block( + $relpath, + 'aaaaa', + 'ccccc', + $blkno + ); + + $node->start; + + my ($result, $stdout, $stderr) = $node->psql('postgres', qq(SELECT gin_index_check('$indexname'))); + my $expected = "index \"$indexname\" has wrong tuple order on entry tree page, block 1, offset 2, rightlink 4294967295"; + like($stderr, qr/$expected/); +} + +sub invalid_entry_order_inner_page_test +{ + my $relname = "test"; + my $indexname = "test_gin_idx"; + + # to break the order in the inner page we need at least 3 items (the rightmost key at the inner level is not checked for order) + # so fill the table until we have 2 splits + $node->safe_psql( + 'postgres', qq( + DROP TABLE IF EXISTS $relname; + CREATE TABLE $relname (a text[]); + INSERT INTO $relname (a) VALUES (('{' || 'pppppppppp' || random_string($filler_size) ||'}')::text[]); + INSERT INTO $relname (a) VALUES (('{' || 'qqqqqqqqqq' || random_string($filler_size) ||'}')::text[]); + INSERT INTO $relname (a) VALUES (('{' || 'rrrrrrrrrr' || random_string($filler_size) ||'}')::text[]); + INSERT INTO $relname (a) VALUES (('{' || 'ssssssssss' ||
random_string($filler_size) ||'}')::text[]); + INSERT INTO $relname (a) VALUES (('{' || 'tttttttttt' || random_string($filler_size) ||'}')::text[]); + INSERT INTO $relname (a) VALUES (('{' || 'uuuuuuuuuu' || random_string($filler_size) ||'}')::text[]); + INSERT INTO $relname (a) VALUES (('{' || 'vvvvvvvvvv' || random_string($filler_size) ||'}')::text[]); + INSERT INTO $relname (a) VALUES (('{' || 'wwwwwwwwww' || random_string($filler_size) ||'}')::text[]); + CREATE INDEX $indexname ON $relname USING gin (a); + )); + my $relpath = relation_filepath($indexname); + + $node->stop; + + my $blkno = 1; # root + + # we have rrrrrrrrr... and tttttttttt... as keys in the root, so produce wrong order by replacing rrrrrrrrrr.... + string_replace_block( + $relpath, + 'rrrrrrrrrr', + 'zzzzzzzzzz', + $blkno + ); + + $node->start; + + my ($result, $stdout, $stderr) = $node->psql('postgres', qq(SELECT gin_index_check('$indexname'))); + my $expected = "index \"$indexname\" has wrong tuple order on entry tree page, block 1, offset 2, rightlink 4294967295"; + like($stderr, qr/$expected/); +} + +sub invalid_entry_columns_order_test +{ + my $relname = "test"; + my $indexname = "test_gin_idx"; + + $node->safe_psql( + 'postgres', qq( + DROP TABLE IF EXISTS $relname; + CREATE TABLE $relname (a text[],b text[]); + INSERT INTO $relname (a,b) VALUES ('{aaa}','{bbb}'); + CREATE INDEX $indexname ON $relname USING gin (a,b); + )); + my $relpath = relation_filepath($indexname); + + $node->stop; + + my $blkno = 1; # root + + # mess column numbers + # root items order before: (1,aaa), (2,bbb) + # root items order after: (2,aaa), (1,bbb) + my $attrno_1 = pack('s', 1); + my $attrno_2 = pack('s', 2); + + my $find = qr/($attrno_1)(.)(aaa)/s; + my $replace = $attrno_2 . '$2$3'; + string_replace_block( + $relpath, + $find, + $replace, + $blkno + ); + + $find = qr/($attrno_2)(.)(bbb)/s; + $replace = $attrno_1 . '$2$3'; + string_replace_block( + $relpath, + $find, + $replace, + $blkno + ); + + $node->start; + + my ($result, $stdout, $stderr) = $node->psql('postgres', qq(SELECT gin_index_check('$indexname'))); + my $expected = "index \"$indexname\" has wrong tuple order on entry tree page, block 1, offset 2, rightlink 4294967295"; + like($stderr, qr/$expected/); +} + +# Returns the filesystem path for the named relation. +sub relation_filepath +{ + my ($relname) = @_; + + my $pgdata = $node->data_dir; + my $rel = $node->safe_psql('postgres', + qq(SELECT pg_relation_filepath('$relname'))); + die "path not found for relation $relname" unless defined $rel; + return "$pgdata/$rel"; +} + +# substitute pattern 'find' with 'replace' within the block with number 'blkno' in the file 'filename' +sub string_replace_block +{ + my ($filename, $find, $replace, $blkno) = @_; + + my $fh; + open($fh, '+<', $filename) or BAIL_OUT("open failed: $!"); + binmode $fh; + + my $offset = $blkno * $blksize; + my $buffer; + + sysseek($fh, $offset, 0) or BAIL_OUT("seek failed: $!"); + sysread($fh, $buffer, $blksize) or BAIL_OUT("read failed: $!"); + + $buffer =~ s/$find/'"' . $replace . 
'"'/gee; + + sysseek($fh, $offset, 0) or BAIL_OUT("seek failed: $!"); + syswrite($fh, $buffer) or BAIL_OUT("write failed: $!"); + + close($fh) or BAIL_OUT("close failed: $!"); + + return; +} + +done_testing(); diff --git a/contrib/amcheck/verify_gin.c b/contrib/amcheck/verify_gin.c index 3f81a8a81d291..3f7994a6bc698 100644 --- a/contrib/amcheck/verify_gin.c +++ b/contrib/amcheck/verify_gin.c @@ -459,17 +459,18 @@ gin_check_parent_keys_consistency(Relation rel, Datum parent_key = gintuple_get_key(&state, stack->parenttup, &parent_key_category); + OffsetNumber parent_key_attnum = gintuple_get_attrnum(&state, stack->parenttup); ItemId iid = PageGetItemIdCareful(rel, stack->blkno, page, maxoff); IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid); - OffsetNumber attnum = gintuple_get_attrnum(&state, idxtuple); + OffsetNumber page_max_key_attnum = gintuple_get_attrnum(&state, idxtuple); GinNullCategory page_max_key_category; Datum page_max_key = gintuple_get_key(&state, idxtuple, &page_max_key_category); if (rightlink != InvalidBlockNumber && - ginCompareEntries(&state, attnum, page_max_key, - page_max_key_category, parent_key, - parent_key_category) > 0) + ginCompareAttEntries(&state, page_max_key_attnum, page_max_key, + page_max_key_category, parent_key_attnum, + parent_key, parent_key_category) < 0) { /* split page detected, install right link to the stack */ GinScanItem *ptr; @@ -508,9 +509,7 @@ gin_check_parent_keys_consistency(Relation rel, { ItemId iid = PageGetItemIdCareful(rel, stack->blkno, page, i); IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid); - OffsetNumber attnum = gintuple_get_attrnum(&state, idxtuple); - GinNullCategory prev_key_category; - Datum prev_key; + OffsetNumber current_attnum = gintuple_get_attrnum(&state, idxtuple); GinNullCategory current_key_category; Datum current_key; @@ -523,20 +522,24 @@ gin_check_parent_keys_consistency(Relation rel, current_key = gintuple_get_key(&state, idxtuple, ¤t_key_category); /* - * First block is metadata, skip order check. Also, never check - * for high key on rightmost page, as this key is not really - * stored explicitly. + * Compare the entry to the preceding one. * - * Also make sure to not compare entries for different attnums, - * which may be stored on the same page. + * Don't check for high key on the rightmost inner page, as this + * key is not really stored explicitly. + * + * The entries may be for different attributes, so make sure to + * use ginCompareAttEntries for comparison. 
*/ - if (i != FirstOffsetNumber && attnum == prev_attnum && stack->blkno != GIN_ROOT_BLKNO && - !(i == maxoff && rightlink == InvalidBlockNumber)) + if ((i != FirstOffsetNumber) && + !(i == maxoff && rightlink == InvalidBlockNumber && !GinPageIsLeaf(page))) { + Datum prev_key; + GinNullCategory prev_key_category; + prev_key = gintuple_get_key(&state, prev_tuple, &prev_key_category); - if (ginCompareEntries(&state, attnum, prev_key, - prev_key_category, current_key, - current_key_category) >= 0) + if (ginCompareAttEntries(&state, prev_attnum, prev_key, + prev_key_category, current_attnum, + current_key, current_key_category) >= 0) ereport(ERROR, (errcode(ERRCODE_INDEX_CORRUPTED), errmsg("index \"%s\" has wrong tuple order on entry tree page, block %u, offset %u, rightlink %u", @@ -551,13 +554,14 @@ gin_check_parent_keys_consistency(Relation rel, i == maxoff) { GinNullCategory parent_key_category; + OffsetNumber parent_key_attnum = gintuple_get_attrnum(&state, stack->parenttup); Datum parent_key = gintuple_get_key(&state, stack->parenttup, &parent_key_category); - if (ginCompareEntries(&state, attnum, current_key, - current_key_category, parent_key, - parent_key_category) > 0) + if (ginCompareAttEntries(&state, current_attnum, current_key, + current_key_category, parent_key_attnum, + parent_key, parent_key_category) > 0) { /* * There was a discrepancy between parent and child @@ -576,6 +580,7 @@ gin_check_parent_keys_consistency(Relation rel, stack->blkno, stack->parentblk); else { + parent_key_attnum = gintuple_get_attrnum(&state, stack->parenttup); parent_key = gintuple_get_key(&state, stack->parenttup, &parent_key_category); @@ -584,9 +589,9 @@ gin_check_parent_keys_consistency(Relation rel, * Check if it is properly adjusted. If succeed, * proceed to the next key. */ - if (ginCompareEntries(&state, attnum, current_key, - current_key_category, parent_key, - parent_key_category) > 0) + if (ginCompareAttEntries(&state, current_attnum, current_key, + current_key_category, parent_key_attnum, + parent_key, parent_key_category) > 0) ereport(ERROR, (errcode(ERRCODE_INDEX_CORRUPTED), errmsg("index \"%s\" has inconsistent records on page %u offset %u", @@ -638,7 +643,7 @@ gin_check_parent_keys_consistency(Relation rel, } prev_tuple = CopyIndexTuple(idxtuple); - prev_attnum = attnum; + prev_attnum = current_attnum; } LockBuffer(buffer, GIN_UNLOCK); From cdd1a431f21dbd2b7b675a9db1c24b97d713f38b Mon Sep 17 00:00:00 2001 From: Tomas Vondra Date: Tue, 17 Jun 2025 15:46:26 +0200 Subject: [PATCH 023/181] amcheck: Fix parent key check in gin_index_check() The checks introduced by commit 14ffaece0fb5 did not get the parent key checks quite right, missing some data corruption cases. In particular: * The "rightlink" check was not working as intended, because rightlink is a BlockNumber, and InvalidBlockNumber is 0xFFFFFFFF, so !GinPageGetOpaque(page)->rightlink almost always evaluates to false (except for rightlink=0). So in most cases parenttup was left NULL, preventing any checks against parent. * Use GinGetDownlink() to retrieve child blkno to avoid triggering Assert, same as the core GIN code. Issues reported by Arseniy Mukhin, along with a proposed patch. Review by Andrey M. Borodin, cleanup and improvements by me. Author: Arseniy Mukhin Reviewed-by: Andrey M. 
Borodin Discussion: https://postgr.es/m/CAE7r3MJ611B9TE=YqBBncewp7-k64VWs+sjk7XF6fJUX77uFBA@mail.gmail.com --- contrib/amcheck/t/006_verify_gin.pl | 78 +++++++++++++++++++++++++++++ contrib/amcheck/verify_gin.c | 8 +-- 2 files changed, 82 insertions(+), 4 deletions(-) diff --git a/contrib/amcheck/t/006_verify_gin.pl b/contrib/amcheck/t/006_verify_gin.pl index 7fdde170e06fb..308e53b2f75d0 100644 --- a/contrib/amcheck/t/006_verify_gin.pl +++ b/contrib/amcheck/t/006_verify_gin.pl @@ -34,6 +34,8 @@ invalid_entry_order_leaf_page_test(); invalid_entry_order_inner_page_test(); invalid_entry_columns_order_test(); +inconsistent_with_parent_key__parent_key_corrupted_test(); +inconsistent_with_parent_key__child_key_corrupted_test(); sub invalid_entry_order_leaf_page_test { @@ -159,6 +161,82 @@ sub invalid_entry_columns_order_test like($stderr, qr/$expected/); } +sub inconsistent_with_parent_key__parent_key_corrupted_test +{ + my $relname = "test"; + my $indexname = "test_gin_idx"; + + # fill the table until we have a split + $node->safe_psql( + 'postgres', qq( + DROP TABLE IF EXISTS $relname; + CREATE TABLE $relname (a text[]); + INSERT INTO $relname (a) VALUES (('{' || 'llllllllll' || random_string($filler_size) ||'}')::text[]); + INSERT INTO $relname (a) VALUES (('{' || 'mmmmmmmmmm' || random_string($filler_size) ||'}')::text[]); + INSERT INTO $relname (a) VALUES (('{' || 'nnnnnnnnnn' || random_string($filler_size) ||'}')::text[]); + INSERT INTO $relname (a) VALUES (('{' || 'xxxxxxxxxx' || random_string($filler_size) ||'}')::text[]); + INSERT INTO $relname (a) VALUES (('{' || 'yyyyyyyyyy' || random_string($filler_size) ||'}')::text[]); + CREATE INDEX $indexname ON $relname USING gin (a); + )); + my $relpath = relation_filepath($indexname); + + $node->stop; + + my $blkno = 1; # root + + # we have nnnnnnnnnn... as the parent key in the root, so replace it with something smaller than the child's keys + string_replace_block( + $relpath, + 'nnnnnnnnnn', + 'aaaaaaaaaa', + $blkno + ); + + $node->start; + + my ($result, $stdout, $stderr) = $node->psql('postgres', qq(SELECT gin_index_check('$indexname'))); + my $expected = "index \"$indexname\" has inconsistent records on page 3 offset 3"; + like($stderr, qr/$expected/); +} + +sub inconsistent_with_parent_key__child_key_corrupted_test +{ + my $relname = "test"; + my $indexname = "test_gin_idx"; + + # fill the table until we have a split + $node->safe_psql( + 'postgres', qq( + DROP TABLE IF EXISTS $relname; + CREATE TABLE $relname (a text[]); + INSERT INTO $relname (a) VALUES (('{' || 'llllllllll' || random_string($filler_size) ||'}')::text[]); + INSERT INTO $relname (a) VALUES (('{' || 'mmmmmmmmmm' || random_string($filler_size) ||'}')::text[]); + INSERT INTO $relname (a) VALUES (('{' || 'nnnnnnnnnn' || random_string($filler_size) ||'}')::text[]); + INSERT INTO $relname (a) VALUES (('{' || 'xxxxxxxxxx' || random_string($filler_size) ||'}')::text[]); + INSERT INTO $relname (a) VALUES (('{' || 'yyyyyyyyyy' || random_string($filler_size) ||'}')::text[]); + CREATE INDEX $indexname ON $relname USING gin (a); + )); + my $relpath = relation_filepath($indexname); + + $node->stop; + + my $blkno = 3; # leaf + + # we have nnnnnnnnnn...
as the parent key in the root, so replace the child key with something bigger + string_replace_block( + $relpath, + 'nnnnnnnnnn', + 'pppppppppp', + $blkno + ); + + $node->start; + + my ($result, $stdout, $stderr) = $node->psql('postgres', qq(SELECT gin_index_check('$indexname'))); + my $expected = "index \"$indexname\" has inconsistent records on page 3 offset 3"; + like($stderr, qr/$expected/); +} + # Returns the filesystem path for the named relation. sub relation_filepath { diff --git a/contrib/amcheck/verify_gin.c b/contrib/amcheck/verify_gin.c index 3f7994a6bc698..fb17e4613c6eb 100644 --- a/contrib/amcheck/verify_gin.c +++ b/contrib/amcheck/verify_gin.c @@ -608,10 +608,10 @@ gin_check_parent_keys_consistency(Relation rel, ptr = (GinScanItem *) palloc(sizeof(GinScanItem)); ptr->depth = stack->depth + 1; /* last tuple in layer has no high key */ - if (i != maxoff && !GinPageGetOpaque(page)->rightlink) - ptr->parenttup = CopyIndexTuple(idxtuple); - else + if (i == maxoff && rightlink == InvalidBlockNumber) + ptr->parenttup = NULL; + else + ptr->parenttup = CopyIndexTuple(idxtuple); ptr->parentblk = stack->blkno; ptr->blkno = GinGetDownlink(idxtuple); ptr->next = stack->next; @@ -748,7 +748,7 @@ gin_refind_parent(Relation rel, BlockNumber parentblkno, ItemId p_iid = PageGetItemIdCareful(rel, parentblkno, parentpage, o); IndexTuple itup = (IndexTuple) PageGetItem(parentpage, p_iid); - if (ItemPointerGetBlockNumber(&(itup->t_tid)) == childblkno) + if (GinGetDownlink(itup) == childblkno) { /* Found it! Make copy and return it */ result = CopyIndexTuple(itup); From 0cf205e122ae0fe9333ccf843c2269f13ddc32fc Mon Sep 17 00:00:00 2001 From: Tomas Vondra Date: Tue, 17 Jun 2025 16:48:09 +0200 Subject: [PATCH 024/181] amcheck: Fix posting tree checks in gin_index_check() Fix two issues in parent_key validation in posting trees: * It's not enough to check that stack->parentblk is valid to determine if the parentkey is valid. It's possible parentblk is set to a valid block number, but parentkey is invalid. So check parentkey directly. * We don't need to invalidate parentkey for all child pages of the rightmost page. It's enough to invalidate it for the rightmost child only, which means we can check more cases (fewer false negatives). Issues reported by Arseniy Mukhin, along with a proposed patch. Review by Andrey M. Borodin, cleanup and improvements by me. Author: Arseniy Mukhin Reviewed-by: Andrey M.
Borodin Discussion: https://postgr.es/m/CAE7r3MJ611B9TE=YqBBncewp7-k64VWs+sjk7XF6fJUX77uFBA@mail.gmail.com --- contrib/amcheck/t/006_verify_gin.pl | 39 +++++++++++++++++++++++++++ contrib/amcheck/verify_gin.c | 12 +++------ 2 files changed, 43 insertions(+), 8 deletions(-) diff --git a/contrib/amcheck/t/006_verify_gin.pl b/contrib/amcheck/t/006_verify_gin.pl index 308e53b2f75d0..e540cd6606adf 100644 --- a/contrib/amcheck/t/006_verify_gin.pl +++ b/contrib/amcheck/t/006_verify_gin.pl @@ -36,6 +36,7 @@ invalid_entry_columns_order_test(); inconsistent_with_parent_key__parent_key_corrupted_test(); inconsistent_with_parent_key__child_key_corrupted_test(); +inconsistent_with_parent_key__parent_key_corrupted_posting_tree_test(); sub invalid_entry_order_leaf_page_test { @@ -237,6 +238,44 @@ sub inconsistent_with_parent_key__child_key_corrupted_test like($stderr, qr/$expected/); } +sub inconsistent_with_parent_key__parent_key_corrupted_posting_tree_test +{ + my $relname = "test"; + my $indexname = "test_gin_idx"; + + $node->safe_psql( + 'postgres', qq( + DROP TABLE IF EXISTS $relname; + CREATE TABLE $relname (a text[]); + INSERT INTO $relname (a) select ('{aaaaa}') from generate_series(1,10000); + CREATE INDEX $indexname ON $relname USING gin (a); + )); + my $relpath = relation_filepath($indexname); + + $node->stop; + + my $blkno = 2; # posting tree root + + # we have a posting tree for the 'aaaaa' key with the root at the 2nd block + # and two leaf pages 3 and 4. Replace the 4th page's high key with (1,1) + # so that there are TIDs in the leaf page that are larger than the new high key. + my $find = pack('S*', 0, 4, 0) . '....'; + my $replace = pack('S*', 0, 4, 0, 1, 1); + string_replace_block( + $relpath, + $find, + $replace, + $blkno + ); + + $node->start; + + my ($result, $stdout, $stderr) = $node->psql('postgres', qq(SELECT gin_index_check('$indexname'))); + my $expected = "index \"$indexname\": tid exceeds parent's high key in postingTree leaf on block 4"; + like($stderr, qr/$expected/); +} + + # Returns the filesystem path for the named relation. sub relation_filepath { diff --git a/contrib/amcheck/verify_gin.c b/contrib/amcheck/verify_gin.c index fb17e4613c6eb..c615d950736f6 100644 --- a/contrib/amcheck/verify_gin.c +++ b/contrib/amcheck/verify_gin.c @@ -345,7 +345,7 @@ gin_check_posting_tree_parent_keys_consistency(Relation rel, BlockNumber posting * Check if this tuple is consistent with the downlink in the * parent. */ - if (stack->parentblk != InvalidBlockNumber && i == maxoff && + if (i == maxoff && ItemPointerIsValid(&stack->parentkey) && ItemPointerCompare(&stack->parentkey, &posting_item->key) < 0) ereport(ERROR, (errcode(ERRCODE_INDEX_CORRUPTED), @@ -358,14 +358,10 @@ gin_check_posting_tree_parent_keys_consistency(Relation rel, BlockNumber posting ptr->depth = stack->depth + 1; /* - * Set rightmost parent key to invalid item pointer. Its value - * is 'Infinity' and not explicitly stored. + * The rightmost parent key is always an invalid item pointer. + * Its value is 'Infinity' and not explicitly stored.
*/ - if (rightlink == InvalidBlockNumber) - ItemPointerSetInvalid(&ptr->parentkey); - else - ptr->parentkey = posting_item->key; - + ptr->parentkey = posting_item->key; ptr->parentblk = stack->blkno; ptr->blkno = BlockIdGetBlockNumber(&posting_item->child_blkno); ptr->next = stack->next; From 917c00d761fa9ba3b2bc365804ef38fc60ec023e Mon Sep 17 00:00:00 2001 From: Daniel Gustafsson Date: Tue, 17 Jun 2025 22:42:38 +0200 Subject: [PATCH 025/181] Fix allocation check to test the right variable The memory allocation for cancelConn->be_cancel_key was accidentally checking the be_cancel_key member in the conn object instead of the one in cancelConn. Author: Ranier Vilela Reviewed-by: Daniel Gustafsson Discussion: https://postgr.es/m/CAEudQAq4ySDR6dsg9xwurBXwud02hX7XCOZZAcZx-JMn6A06nA@mail.gmail.com --- src/interfaces/libpq/fe-cancel.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/interfaces/libpq/fe-cancel.c b/src/interfaces/libpq/fe-cancel.c index 8c7c198a53071..cd3102346bfa7 100644 --- a/src/interfaces/libpq/fe-cancel.c +++ b/src/interfaces/libpq/fe-cancel.c @@ -114,7 +114,7 @@ PQcancelCreate(PGconn *conn) if (conn->be_cancel_key != NULL) { cancelConn->be_cancel_key = malloc(conn->be_cancel_key_len); - if (!conn->be_cancel_key) + if (cancelConn->be_cancel_key == NULL) goto oom_error; memcpy(cancelConn->be_cancel_key, conn->be_cancel_key, conn->be_cancel_key_len); } From bb43c97babdf09efbd97bb535d2acb522678f977 Mon Sep 17 00:00:00 2001 From: Bruce Momjian Date: Tue, 17 Jun 2025 20:00:38 -0400 Subject: [PATCH 026/181] doc PG 18 relnotes: add markup, still need to add links --- doc/src/sgml/release-18.sgml | 545 ++++++++++++++++++----------------- 1 file changed, 274 insertions(+), 271 deletions(-) diff --git a/doc/src/sgml/release-18.sgml b/doc/src/sgml/release-18.sgml index 0b49e23721612..ab83b1554001e 100644 --- a/doc/src/sgml/release-18.sgml +++ b/doc/src/sgml/release-18.sgml @@ -60,13 +60,13 @@ Author: Peter Eisentraut -Change initdb default to enable data checksums (Greg Sabino Mullane) +Change initdb default to enable data checksums (Greg Sabino Mullane) § -Checksums can be disabled with the new initdb option --no-data-checksums. -pg_upgrade requires matching cluster checksum settings, so this new +Checksums can be disabled with the new initdb option --no-data-checksums. +pg_upgrade requires matching cluster checksum settings, so this new option can be useful to upgrade non-checksum old clusters. @@ -83,7 +83,7 @@ Change time zone abbreviation handling (Tom Lane) -The system will now favor the current session's time zone abbreviations before checking the server variable timezone_abbreviations. Previously timezone_abbreviations was +The system will now favor the current session's time zone abbreviations before checking the server variable timezone_abbreviations. Previously timezone_abbreviations was checked first. @@ -100,8 +100,8 @@ Deprecate MD5 password authentication (Nathan Bossart) -Support for MD5 passwords will be removed in a future major version release. CREATE ROLE and ALTER ROLE now emit deprecation warnings when setting MD5 passwords. -These warnings can be disabled by setting the md5_password_warnings parameter to "off". +Support for MD5 passwords will be removed in a future major version release. CREATE ROLE and ALTER ROLE now emit deprecation warnings when setting MD5 passwords. +These warnings can be disabled by setting the md5_password_warnings parameter to off.
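A minimal sketch of the new warning control (the role name and MD5 hash below are hypothetical):

    -- suppress the deprecation warning for this session
    SET md5_password_warnings = off;
    CREATE ROLE legacy_app LOGIN PASSWORD 'md5abcdef0123456789abcdef0123456789';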
@@ -112,12 +112,12 @@ Author: David Rowley -Change VACUUM and ANALYZE to process the inheritance children of a parent (Michael Harris) +Change VACUUM and ANALYZE to process the inheritance children of a parent (Michael Harris) § -The previous behavior can be performed by using the new ONLY option. +The previous behavior can be performed by using the new ONLY option. @@ -130,14 +130,15 @@ Author: Tom Lane -Prevent COPY FROM from treating \. as an end-of-file marker when reading CSV files (Daniel Vérité, Tom Lane) +Prevent COPY FROM from treating \. as an end-of-file marker when reading CSV files (Daniel Vérité, Tom Lane) § § -psql will still treat \. as an end-of-file marker when reading CSV files from STDIN. Older psql clients connecting to Postgres 18 servers might experience \copy problems. This -release also enforces that \. must appear alone on a line. +psql will still treat \. as an end-of-file marker when reading CSV files from STDIN. Older psql clients connecting to PostgreSQL 18 servers might +experience \copy problems. This +release also enforces that \. must appear alone on a line. @@ -153,7 +154,7 @@ Disallow unlogged partitioned tables (Michael Paquier) -Previously ALTER TABLE SET [UN]LOGGED did nothing, and the creation of an unlogged partitioned table did not cause its children to be unlogged. +Previously ALTER TABLE SET [UN]LOGGED did nothing, and the creation of an unlogged partitioned table did not cause its children to be unlogged. @@ -164,12 +165,12 @@ Author: Tom Lane -Execute AFTER triggers as the role that was active when trigger events were queued (Laurenz Albe) +Execute AFTER triggers as the role that was active when trigger events were queued (Laurenz Albe) § -Previously such triggers were run as the role that was active at trigger execution time (e.g., at COMMIT). This is significant for cases where the role is changed between queue time and +Previously such triggers were run as the role that was active at trigger execution time (e.g., at COMMIT). This is significant for cases where the role is changed between queue time and transaction commit. @@ -181,12 +182,12 @@ Author: Fujii Masao -Remove non-functional support for RULE privileges in GRANT/REVOKE (Fujii Masao) +Remove non-functional support for rule privileges in GRANT/REVOKE (Fujii Masao) § -These have been non-functional since Postgres 8.2. +These have been non-functional since PostgreSQL 8.2. @@ -197,12 +198,12 @@ Author: David Rowley -Remove column pg_backend_memory_contexts.parent (Melih Mutlu) +Remove column pg_backend_memory_contexts.parent (Melih Mutlu) § -This is now longer needed since pg_backend_memory_contexts.path was added. +This is no longer needed since pg_backend_memory_contexts.path was added. @@ -218,7 +219,7 @@ Author: Fujii Masao -Change pg_backend_memory_contexts.level and pg_log_backend_memory_contexts() to be one-based (Melih Mutlu, Atsushi Torikoshi, David Rowley, Fujii Masao) +Change pg_backend_memory_contexts.level and pg_log_backend_memory_contexts() to be one-based (Melih Mutlu, Atsushi Torikoshi, David Rowley, Fujii Masao) § § § @@ -257,12 +258,12 @@ Author: Alexander Korotkov -Remove some unnecessary table self-joins (Andrey Lepikhov, Alexander Kuzmenkov, Alexander Korotkov, Alena Rybakina) +Automatically remove some unnecessary table self-joins (Andrey Lepikhov, Alexander Kuzmenkov, Alexander Korotkov, Alena Rybakina) § -This optimization can be disabled using server variable enable_self_join_elimination. 
+This optimization can be disabled using server variable enable_self_join_elimination. @@ -273,7 +274,7 @@ Author: Alexander Korotkov -Convert some 'IN (VALUES ...)' to 'x = ANY ...' for better optimizer statistics (Alena Rybakina, Andrei Lepikhov) +Convert some IN (VALUES ...) to x = ANY ... for better optimizer statistics (Alena Rybakina, Andrei Lepikhov) § @@ -285,7 +286,7 @@ Author: Alexander Korotkov -Allow transforming OR-clauses to arrays for faster index processing (Alexander Korotkov, Andrey Lepikhov) +Allow transforming OR-clauses to arrays for faster index processing (Alexander Korotkov, Andrey Lepikhov) § @@ -303,7 +304,7 @@ Author: David Rowley -Speed up the processing of INTERSECT, EXCEPT, window aggregates, and view column aliases (Tom Lane, David Rowley) +Speed up the processing of INTERSECT, EXCEPT, window aggregates, and view column aliases (Tom Lane, David Rowley) § § § @@ -318,12 +319,12 @@ Author: Richard Guo -Allow the keys of SELECT DISTINCT to be internally reordered to avoid sorting (Richard Guo) +Allow the keys of SELECT DISTINCT to be internally reordered to avoid sorting (Richard Guo) § -This optimization can be disabled using enable_distinct_reordering. +This optimization can be disabled using enable_distinct_reordering. @@ -334,12 +335,12 @@ Author: David Rowley -Ignore GROUP BY columns that are functionally dependent on other columns (Zhang Mingli, Jian He, David Rowley) +Ignore GROUP BY columns that are functionally dependent on other columns (Zhang Mingli, Jian He, David Rowley) § -If a GROUP BY clause includes all columns of a unique index, as well as other columns of the same table, those other columns are redundant and can be dropped +If a GROUP BY clause includes all columns of a unique index, as well as other columns of the same table, those other columns are redundant and can be dropped from the grouping. This was already true for non-deferred primary keys. @@ -357,7 +358,7 @@ Author: Richard Guo -Allow some HAVING clauses on GROUPING SETS to be pushed to WHERE clauses (Richard Guo) +Allow some HAVING clauses on GROUPING SETS to be pushed to WHERE clauses (Richard Guo) § § § @@ -365,7 +366,7 @@ Allow some HAVING clauses on GROUPING SETS to be pushed to WHERE clauses (Richar -This allows earlier row filtering. This release also fixes some GROUPING SETS queries that used to return incorrect results. +This allows earlier row filtering. This release also fixes some GROUPING SETS queries that used to return incorrect results. 
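A sketch of the HAVING push-down (table and column names are hypothetical): a HAVING clause that references only grouping columns can be evaluated before grouping, as if it were written in WHERE:

    -- region = 'EU' can be filtered at WHERE level because region
    -- appears in every grouping set
    SELECT region, product, sum(amount)
    FROM sales
    GROUP BY GROUPING SETS ((region, product), (region))
    HAVING region = 'EU';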
@@ -378,7 +379,7 @@ Author: Dean Rasheed -Improve row estimates for generate_series() using numeric and timestamp values (David Rowley, Song Jinzhou) +Improve row estimates for generate_series() using numeric and timestamp values (David Rowley, Song Jinzhou) § § @@ -391,7 +392,7 @@ Author: Richard Guo -Allow the optimizer to use "Right Semi Join" plans (Richard Guo) +Allow the optimizer to use Right Semi Join plans (Richard Guo) § @@ -463,7 +464,7 @@ Author: Tom Lane -Improve SQL-language function plan caching (Alexander Pyhalov, Tom Lane) +Improve SQL-language function plan caching (Alexander Pyhalov, Tom Lane) § § @@ -536,7 +537,7 @@ Author: Tomas Vondra -Allow GIN indexes to be created in parallel (Tomas Vondra, Matthias van de Meent) +Allow GIN indexes to be created in parallel (Tomas Vondra, Matthias van de Meent) § @@ -548,7 +549,7 @@ Author: Heikki Linnakangas -Allow values to be sorted to speed rangetype GiST and btree index builds (Bernd Helmle) +Allow values to be sorted to speed range-type GiST and btree index builds (Bernd Helmle) § @@ -605,8 +606,8 @@ Add an asynchronous I/O subsystem (Andres Freund, Thomas Munro, Nazir Bilal Yavu This feature allows backends to queue multiple read requests, which allows for more efficient sequential scans, bitmap heap scans, vacuums, etc. -This is enabled by server variable io_method, with server variables io_combine_limit and io_max_combine_limit added to control it. This also enables -effective_io_concurrency and maintenance_io_concurrency values greater than zero for systems without fadvise() support. The new system view pg_aios shows the file handles being used +This is enabled by server variable io_method, with server variables io_combine_limit and io_max_combine_limit added to control it. This also enables +effective_io_concurrency and maintenance_io_concurrency values greater than zero for systems without fadvise() support. The new system view pg_aios shows the file handles being used for asynchronous I/O. @@ -638,7 +639,7 @@ Author: Jeff Davis -Improve the performance and reduce memory usage of hash joins and GROUP BY (David Rowley, Jeff Davis) +Improve the performance and reduce memory usage of hash joins and GROUP BY (David Rowley, Jeff Davis) § § § @@ -647,7 +648,7 @@ Improve the performance and reduce memory usage of hash joins and GROUP BY (Davi -This also improves hash set operations used by EXCEPT, and hash lookups of subplan values. +This also improves hash set operations used by EXCEPT, and hash lookups of subplan values. @@ -666,7 +667,7 @@ Allow normal vacuums to freeze some pages, even though they are all-visible (Mel -This reduces the overhead of later full-relation freezing. The aggressiveness of this can be controlled by server variable and per-table setting vacuum_max_eager_freeze_failure_rate. +This reduces the overhead of later full-relation freezing. The aggressiveness of this can be controlled by server variable and per-table setting vacuum_max_eager_freeze_failure_rate. Previously vacuum never processed all-visible pages until freezing was required. 
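A sketch of tuning the new eager freezing (table name hypothetical; the value is, roughly, the fraction of pages on which eager freezing may fail before VACUUM stops trying):

    ALTER TABLE measurements SET (vacuum_max_eager_freeze_failure_rate = 0.05);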
@@ -678,7 +679,7 @@ Author: Nathan Bossart -Add server variable vacuum_truncate to control file truncation during VACUUM (Nathan Bossart, Gurjeet Singh) +Add server variable vacuum_truncate to control file truncation during VACUUM (Nathan Bossart, Gurjeet Singh) § @@ -696,7 +697,7 @@ Author: Melanie Plageman -Increase server variables effective_io_concurrency's and maintenance_io_concurrency's default values to 16 (Melanie Plageman) +Increase server variables effective_io_concurrency's and maintenance_io_concurrency's default values to 16 (Melanie Plageman) § § @@ -722,7 +723,7 @@ Author: Melanie Plageman -Increase the logging granularity of server variable log_connections (Melanie Plageman) +Increase the logging granularity of server variable log_connections (Melanie Plageman) § @@ -738,7 +739,7 @@ Author: Melanie Plageman -Add log_connections option to report the duration of connection stages (Melanie Plageman) +Add log_connections option to report the duration of connection stages (Melanie Plageman) § @@ -750,7 +751,7 @@ Author: Tom Lane -Add log_line_prefix escape "%L" to output the client IP address (Greg Sabino Mullane) +Add log_line_prefix escape %L to output the client IP address (Greg Sabino Mullane) § @@ -762,12 +763,12 @@ Author: Fujii Masao -Add server variable log_lock_failures to log lock acquisition failures (Yuki Seino) +Add server variable log_lock_failures to log lock acquisition failures (Yuki Seino) § -Specifically it reports SELECT ... NOWAIT lock failures. +Specifically it reports SELECT ... NOWAIT lock failures. @@ -778,12 +779,12 @@ Author: Michael Paquier -Modify pg_stat_all_tables and its variants to report the time spent in vacuum, analyze, and their automatic variants (Sami Imseih) +Modify pg_stat_all_tables and its variants to report the time spent in VACUUM, ANALYZE, and their automatic variants (Sami Imseih) § -The new columns are total_vacuum_time, total_autovacuum_time, total_analyze_time, and total_autoanalyze_time. +The new columns are total_vacuum_time, total_autovacuum_time, total_analyze_time, and total_autoanalyze_time. @@ -796,14 +797,14 @@ Author: Nathan Bossart -Add delay time reporting to VACUUM and ANALYZE (Bertrand Drouvot, Nathan Bossart) +Add delay time reporting to VACUUM and ANALYZE (Bertrand Drouvot, Nathan Bossart) § § -This information appears in the autovacuum logs, the system views pg_stat_progress_vacuum and pg_stat_progress_analyze, and the output of VACUUM and ANALYZE when in VERBOSE -mode; tracking must be enabled with the server variable track_cost_delay_timing. +This information appears in the autovacuum logs, the system views pg_stat_progress_vacuum and pg_stat_progress_analyze, and the output of VACUUM and ANALYZE when in VERBOSE +mode; tracking must be enabled with the server variable track_cost_delay_timing. @@ -816,7 +817,7 @@ Author: Masahiko Sawada -Add WAL, CPU, and average read statistics output to ANALYZE VERBOSE (Anthonin Bonnefoy) +Add WAL, CPU, and average read statistics output to ANALYZE VERBOSE (Anthonin Bonnefoy) § § @@ -829,7 +830,7 @@ Author: Michael Paquier -Add full WAL buffer count to VACUUM/ANALYZE (VERBOSE) and autovacuum log output (Bertrand Drouvot) +Add full WAL buffer count to VACUUM/ANALYZE (VERBOSE) and autovacuum log output (Bertrand Drouvot) § @@ -849,7 +850,7 @@ Add per-backend I/O statistics reporting (Bertrand Drouvot) -The statistics are accessed via pg_stat_get_backend_io(). Per-backend I/O statistics can be cleared via pg_stat_reset_backend_stats(). 
+The statistics are accessed via pg_stat_get_backend_io(). Per-backend I/O statistics can be cleared via pg_stat_reset_backend_stats(). @@ -860,12 +861,12 @@ Author: Michael Paquier -Add pg_stat_io columns to report I/O activity in bytes (Nazir Bilal Yavuz) +Add pg_stat_io columns to report I/O activity in bytes (Nazir Bilal Yavuz) § -The new columns are read_bytes, write_bytes, and extend_bytes. The op_bytes column, which always equaled BLCKSZ, has been removed. +The new columns are read_bytes, write_bytes, and extend_bytes. The op_bytes column, which always equaled BLCKSZ, has been removed. @@ -880,14 +881,14 @@ Author: Michael Paquier -Add WAL I/O activity rows to pg_stat_io (Nazir Bilal Yavuz, Bertrand Drouvot, Michael Paquier) +Add WAL I/O activity rows to pg_stat_io (Nazir Bilal Yavuz, Bertrand Drouvot, Michael Paquier) § § § -This includes WAL receiver activity and a wait event for such writes. +This includes WAL receiver activity and a wait event for such writes. @@ -899,7 +900,7 @@ Author: Michael Paquier -Change server variable track_wal_io_timing to control tracking WAL timing in pg_stat_io instead of pg_stat_wal (Bertrand Drouvot) +Change server variable track_wal_io_timing to control tracking WAL timing in pg_stat_io instead of pg_stat_wal (Bertrand Drouvot) § @@ -913,13 +914,13 @@ Author: Michael Paquier -Remove read/sync columns from pg_stat_wal (Bertrand Drouvot) +Remove read/sync columns from pg_stat_wal (Bertrand Drouvot) § § -This removes columns wal_write, wal_sync, wal_write_time, and wal_sync_time. +This removes columns wal_write, wal_sync, wal_write_time, and wal_sync_time. @@ -930,12 +931,12 @@ Author: Michael Paquier -Add function pg_stat_get_backend_wal() to return per-backend WAL statistics (Bertrand Drouvot) +Add function pg_stat_get_backend_wal() to return per-backend WAL statistics (Bertrand Drouvot) § -Per-backend WAL statistics can be cleared via pg_stat_reset_backend_stats(). +Per-backend WAL statistics can be cleared via pg_stat_reset_backend_stats(). @@ -946,7 +947,7 @@ Author: Nathan Bossart -Add function pg_ls_summariesdir() to specifically list the contents of PGDATA/pg_wal/summaries (Yushi Ogiwara) +Add function pg_ls_summariesdir() to specifically list the contents of PGDATA/pg_wal/summaries (Yushi Ogiwara) § @@ -958,12 +959,12 @@ Author: Fujii Masao -Add column pg_stat_checkpointer.num_done to report the number of completed checkpoints (Anton A. Melnikov) +Add column pg_stat_checkpointer.num_done to report the number of completed checkpoints (Anton A. Melnikov) § -Columns num_timed and num_requested count both completed and skipped checkpoints. +Columns num_timed and num_requested count both completed and skipped checkpoints. @@ -974,12 +975,12 @@ Author: Fujii Masao -Add column pg_stat_checkpointer.slru_written to report SLRU buffers written (Nitin Jadhav) +Add column pg_stat_checkpointer.slru_written to report SLRU buffers written (Nitin Jadhav) § -Also, modify the checkpoint server log message to report separate shared buffer and SLRU buffer values. +Also, modify the checkpoint server log message to report separate shared buffer and SLRU buffer values. @@ -990,12 +991,12 @@ Author: Michael Paquier -Add columns to pg_stat_database to report parallel workers activity (Benoit Lobréau) +Add columns to pg_stat_database to report parallel workers activity (Benoit Lobréau) § -The new columns are parallel_workers_to_launch and parallel_workers_launched. +The new columns are parallel_workers_to_launch and parallel_workers_launched. 
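For example:

    SELECT datname, parallel_workers_to_launch, parallel_workers_launched
    FROM pg_stat_database
    WHERE datname = current_database();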
@@ -1014,7 +1015,7 @@ Have query jumbling of arrays consider only the first and last array elements (D -Jumbling is used by pg_stat_statements. +Jumbling is used by pg_stat_statements. @@ -1041,7 +1042,7 @@ Author: David Rowley -Add column pg_backend_memory_contexts.type to report the type of memory context (David Rowley) +Add column pg_backend_memory_contexts.type to report the type of memory context (David Rowley) § @@ -1053,7 +1054,7 @@ Author: David Rowley -Add column pg_backend_memory_contexts.path to show memory context parents (Melih Mutlu) +Add column pg_backend_memory_contexts.path to show memory context parents (Melih Mutlu) § @@ -1076,7 +1077,7 @@ Author: Michael Paquier -Add function pg_get_acl() to retrieve database access control details (Joel Jacobson) +Add function pg_get_acl() to retrieve database access control details (Joel Jacobson) § § @@ -1089,7 +1090,7 @@ Author: Fujii Masao -Add function has_largeobject_privilege() to check large object privileges (Yugo Nagata) +Add function has_largeobject_privilege() to check large object privileges (Yugo Nagata) § @@ -1101,7 +1102,7 @@ Author: Fujii Masao -Allow ALTER DEFAULT PRIVILEGES to define large object default privileges (Takatsuka Haruka, Yugo Nagata, Laurenz Albe) +Allow ALTER DEFAULT PRIVILEGES to define large object default privileges (Takatsuka Haruka, Yugo Nagata, Laurenz Albe) § @@ -1113,7 +1114,7 @@ Author: Nathan Bossart -Add predefined role pg_signal_autovacuum_worker (Kirill Reshke) +Add predefined role pg_signal_autovacuum_worker (Kirill Reshke) § @@ -1143,8 +1144,8 @@ Add support for the OAuth authentication method (Jacob Champion, Daniel Gustafss -This adds an "oauth" authentication method to pg_hba.conf, libpq OAuth options, a server variable oauth_validator_libraries to load token validation libraries, and -a configure flag --with-libcurl to add the required compile-time libraries. +This adds an oauth authentication method to pg_hba.conf, libpq OAuth options, a server variable oauth_validator_libraries to load token validation libraries, and +a configure flag --with-libcurl to add the required compile-time libraries.
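A minimal sketch of such a pg_hba.conf entry (the issuer and scope values are placeholders):

    host all all 0.0.0.0/0 oauth issuer="https://auth.example.com" scope="openid"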
@@ -1155,7 +1156,7 @@ Author: Daniel Gustafsson -Add server variable ssl_tls13_ciphers to allow specification of multiple colon-separated TLSv1.3 cipher suites (Erica Zhang, Daniel Gustafsson) +Add server variable ssl_tls13_ciphers to allow specification of multiple colon-separated TLSv1.3 cipher suites (Erica Zhang, Daniel Gustafsson) § @@ -1167,7 +1168,7 @@ Author: Daniel Gustafsson -Change server variable ssl_groups's default to include elliptic curve X25519 (Daniel Gustafsson, Jacob Champion) +Change server variable ssl_groups's default to include elliptic curve X25519 (Daniel Gustafsson, Jacob Champion) § @@ -1179,7 +1180,7 @@ Author: Daniel Gustafsson -Rename server variable ssl_ecdh_curve to ssl_groups and allow multiple colon-separated ECDH curves to be specified (Erica Zhang, Daniel Gustafsson) +Rename server variable ssl_ecdh_curve to ssl_groups and allow multiple colon-separated ECDH curves to be specified (Erica Zhang, Daniel Gustafsson) § @@ -1194,7 +1195,7 @@ Author: Daniel Gustafsson -Add function pg_check_fipsmode() to report the server's FIPS mode (Daniel Gustafsson) +Add function pg_check_fipsmode() to report the server's FIPS mode (Daniel Gustafsson) § @@ -1225,12 +1226,12 @@ Author: Nathan Bossart -Add server variable autovacuum_worker_slots to specify the maximum number of background workers (Nathan Bossart) +Add server variable autovacuum_worker_slots to specify the maximum number of background workers (Nathan Bossart) § -With this variable set, autovacuum_max_workers can be adjusted at runtime up to this maximum without a server restart. +With this variable set, autovacuum_max_workers can be adjusted at runtime up to this maximum without a server restart. @@ -1246,7 +1247,7 @@ Allow specification of the fixed number of dead tuples that will trigger an auto -The server variable is autovacuum_vacuum_max_threshold. Percentages are still used for triggering. +The server variable is autovacuum_vacuum_max_threshold. Percentages are still used for triggering. 
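For example, in postgresql.conf (the value is arbitrary; the setting is also available as a per-table storage parameter):

    autovacuum_vacuum_max_threshold = 1000000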
@@ -1257,7 +1258,7 @@ Author: Andres Freund -Change server variable max_files_per_process to limit only files opened by a backend (Andres Freund) +Change server variable max_files_per_process to limit only files opened by a backend (Andres Freund) § @@ -1273,7 +1274,7 @@ Author: Nathan Bossart -Add server variable num_os_semaphores to report the required number of semaphores (Nathan Bossart) +Add server variable num_os_semaphores to report the required number of semaphores (Nathan Bossart) § @@ -1291,7 +1292,7 @@ Author: Peter Eisentraut -Add server variable extension_control_path to specify the location of extension control files (Peter Eisentraut, Matheus Alcantara) +Add server variable extension_control_path to specify the location of extension control files (Peter Eisentraut, Matheus Alcantara) § § @@ -1313,7 +1314,7 @@ Author: Amit Kapila -Allow inactive replication slots to be automatically invalided using server variable idle_replication_slot_timeout (Nisha Moond, Bharath Rupireddy) +Allow inactive replication slots to be automatically invalidated using server variable idle_replication_slot_timeout (Nisha Moond, Bharath Rupireddy) § @@ -1325,12 +1326,12 @@ Author: Masahiko Sawada -Add server variable max_active_replication_origins to control the maximum active replication origins (Euler Taveira) +Add server variable max_active_replication_origins to control the maximum active replication origins (Euler Taveira) § -This was previously controlled by max_replication_slots, but this new setting allows a higher origin count in cases where fewer slots are required. +This was previously controlled by max_replication_slots, but this new setting allows a higher origin count in cases where fewer slots are required. @@ -1364,9 +1365,9 @@ Allow the values of generated columns to be logically replicated (Shubham Khanna -If the publication specifies a column list, all specified columns, generated and non-generated, are published. Without a specified column list, publication option publish_generated_columns +If the publication specifies a column list, all specified columns, generated and non-generated, are published. Without a specified column list, publication option publish_generated_columns controls whether generated columns are published. Previously generated columns were not replicated and the subscriber had to compute the values if possible; this is particularly -useful for non-Postgres subscribers which lack such a capability. +useful for non-PostgreSQL subscribers which lack such a capability. @@ -1377,7 +1378,7 @@ Author: Amit Kapila -Change the default CREATE SUBSCRIPTION streaming option from "off" to "parallel" (Vignesh C) +Change the default CREATE SUBSCRIPTION streaming option from off to parallel (Vignesh C) § @@ -1391,7 +1392,7 @@ Author: Amit Kapila -Allow ALTER SUBSCRIPTION to change the replication slot's two-phase commit behavior (Hayato Kuroda, Ajin Cherian, Amit Kapila, Zhijie Hou) +Allow ALTER SUBSCRIPTION to change the replication slot's two-phase commit behavior (Hayato Kuroda, Ajin Cherian, Amit Kapila, Zhijie Hou) § § @@ -1421,7 +1422,7 @@ Log conflicts while applying logical replication changes (Zhijie Hou, Nisha Moon -Also report in new columns of pg_stat_subscription_stats. +Also report in new columns of pg_stat_subscription_stats. @@ -1454,7 +1455,7 @@ Allow generated columns to be virtual, and make them the default (Peter Eisentra -Virtual generated columns generate their values when the columns are read, not written.
The write behavior can still be specified via the STORED option. +Virtual generated columns generate their values when the columns are read, not written. The write behavior can still be specified via the STORED option. @@ -1465,13 +1466,13 @@ Author: Dean Rasheed -Add OLD/NEW support to RETURNING in DML queries (Dean Rasheed) +Add OLD/NEW support to RETURNING in DML queries (Dean Rasheed) § -Previously RETURNING only returned new values for INSERT and UPDATE, and old values for DELETE; MERGE would return the appropriate value for the internal query executed. This new syntax -allows the RETURNING list of INSERT/UPDATE/DELETE/MERGE to explicitly return old and new values by using the special aliases "old" and "new". These aliases can be renamed to +Previously RETURNING only returned new values for INSERT and UPDATE, and old values for DELETE; MERGE would return the appropriate value for the internal query executed. This new syntax +allows the RETURNING list of INSERT/UPDATE/DELETE/MERGE to explicitly return old and new values by using the special aliases old and new. These aliases can be renamed to avoid identifier conflicts. @@ -1488,7 +1489,7 @@ Allow foreign tables to be created like existing local tables (Zhang Mingli) -The syntax is CREATE FOREIGN TABLE ... LIKE. +The syntax is CREATE FOREIGN TABLE ... LIKE. @@ -1499,7 +1500,7 @@ Author: Peter Eisentraut -Allow LIKE with nondeterministic collations (Peter Eisentraut) +Allow LIKE with nondeterministic collations (Peter Eisentraut) § @@ -1527,7 +1528,7 @@ Author: Jeff Davis -Add builtin collation provider PG_UNICODE_FAST (Jeff Davis) +Add builtin collation provider PG_UNICODE_FAST (Jeff Davis) § @@ -1543,12 +1544,12 @@ Author: David Rowley -Allow VACUUM and ANALYZE to process partitioned tables without processing their children (Michael Harris) +Allow VACUUM and ANALYZE to process partitioned tables without processing their children (Michael Harris) § -This is enabled with the new ONLY option. This is useful since autovacuum does not process partitioned tables, just its children. +This is enabled with the new ONLY option. This is useful since autovacuum does not process partitioned tables, just their children. @@ -1570,7 +1571,7 @@ Add functions to modify per-relation and per-column optimizer statistics (Corey -The functions are pg_restore_relation_stats(), pg_restore_attribute_stats(), pg_clear_relation_stats(), and pg_clear_attribute_stats. +The functions are pg_restore_relation_stats(), pg_restore_attribute_stats(), pg_clear_relation_stats(), and pg_clear_attribute_stats(). @@ -1582,12 +1583,12 @@ Author: Thomas Munro -Add server variable file_copy_method to control the file copying method (Nazir Bilal Yavuz) +Add server variable file_copy_method to control the file copying method (Nazir Bilal Yavuz) § -This controls whether CREATE DATABASE ... STRATEGY=FILE_COPY and ALTER DATABASE ... SET TABLESPACE uses file copy or clone. +This controls whether CREATE DATABASE ... STRATEGY=FILE_COPY and ALTER DATABASE ... SET TABLESPACE use file copy or clone. @@ -1605,12 +1606,12 @@ Author: Peter Eisentraut -Allow the specification of non-overlapping PRIMARY KEY and UNIQUE constraints (Paul A. Jungwirth) +Allow the specification of non-overlapping PRIMARY KEY and UNIQUE constraints (Paul A. Jungwirth) § -This is specified by WITHOUT OVERLAPS on the last column. +This is specified by WITHOUT OVERLAPS on the last column.
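A sketch of the new syntax (hypothetical table; the btree_gist extension provides the GiST support for the scalar key part):

    CREATE EXTENSION btree_gist;
    CREATE TABLE room_booking (
        room int,
        during tsrange,
        PRIMARY KEY (room, during WITHOUT OVERLAPS)
    );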
@@ -1623,14 +1624,13 @@ Author: Peter Eisentraut -Allow CHECK and foreign key constraints to be specified as NOT ENFORCED (Amul Sul) +Allow CHECK and foreign key constraints to be specified as NOT ENFORCED (Amul Sul) § § -This also adds column pg_constraint.conenforced. - +This also adds column pg_constraint.conenforced. @@ -1980,7 +1981,7 @@ Author: Tom Lane -Add function array_sort() which sorts an array's first dimension (Junwang Zhao, Jian He) +Add function array_sort() which sorts an array's first dimension (Junwang Zhao, Jian He) § @@ -1992,7 +1993,7 @@ Author: Michael Paquier -Add function array_reverse() which reverses an array's first dimension (Aleksander Alekseev) +Add function array_reverse() which reverses an array's first dimension (Aleksander Alekseev) § @@ -2004,7 +2005,7 @@ Author: Nathan Bossart -Add function reverse() to reverse bytea bytes (Aleksander Alekseev) +Add function reverse() to reverse bytea bytes (Aleksander Alekseev) § @@ -2016,12 +2017,12 @@ Author: Dean Rasheed -Allow casting between integer types and bytea (Aleksander Alekseev) +Allow casting between integer types and bytea (Aleksander Alekseev) § -The integer values are stored as bytea two's complement values. +The integer values are stored as bytea two's complement values. @@ -2056,12 +2057,12 @@ Author: Tom Lane -Improve the XML error codes to more closely match the SQL standard (Tom Lane) +Improve the XML error codes to more closely match the SQL standard (Tom Lane) § -These errors are reported via SQLSTATE. +These errors are reported via SQLSTATE. @@ -2081,12 +2082,12 @@ Author: Jeff Davis -Add function CASEFOLD() to allow for more sophisticated case-insensitive matching (Jeff Davis) +Add function CASEFOLD() to allow for more sophisticated case-insensitive matching (Jeff Davis) § -Allows more accurate comparison, i.e., a character can have multiple upper or lower case equivalents, or upper or lower case conversion changes the number of characters. +This allows more accurate comparisons, i.e., a character can have multiple upper or lower case equivalents, or upper or lower case conversion changes the number of characters. @@ -2099,7 +2100,7 @@ Author: Tom Lane -Allow MIN()/MAX() aggregates on arrays and composite types (Aleksander Alekseev, Marat Buharov) +Allow MIN()/MAX() aggregates on arrays and composite types (Aleksander Alekseev, Marat Buharov) § § @@ -2112,7 +2113,7 @@ Author: Tom Lane -Add a WEEK option to EXTRACT() (Tom Lane) +Add a WEEK option to EXTRACT() (Tom Lane) § @@ -2124,7 +2125,7 @@ Author: Tom Lane -Improve the output EXTRACT(QUARTER ...) for negative values (Tom Lane) +Improve the output EXTRACT(QUARTER ...) for negative values (Tom Lane) § @@ -2136,12 +2137,12 @@ Author: Tom Lane -Add roman numeral support to to_number() (Hunaid Sohail) +Add roman numeral support to to_number() (Hunaid Sohail) § -This is accessed via the "RN" pattern. +This is accessed via the RN pattern. @@ -2152,12 +2153,12 @@ Author: Masahiko Sawada -Add UUID version 7 generation function uuidv7() (Andrey Borodin) +Add UUID version 7 generation function uuidv7() (Andrey Borodin) § -This UUID value is temporally sortable. Function alias uuidv4() has been added to explicitly generate version 4 UUIDs. +This UUID value is temporally sortable. Function alias uuidv4() has been added to explicitly generate version 4 UUIDs. 
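For example:

    SELECT uuidv7();  -- time-ordered, version 7
    SELECT uuidv4();  -- equivalent to gen_random_uuid()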
@@ -2168,7 +2169,7 @@ Author: Nathan Bossart -Add functions crc32() and crc32c() to compute CRC values (Aleksander Alekseev) +Add functions crc32() and crc32c() to compute CRC values (Aleksander Alekseev) § @@ -2180,7 +2181,7 @@ Author: Dean Rasheed -Add math functions gamma() and lgamma() (Dean Rasheed) +Add math functions gamma() and lgamma() (Dean Rasheed) § @@ -2192,12 +2193,12 @@ Author: Tom Lane -Allow "=>" syntax for named cursor arguments in plpgsql (Pavel Stehule) +Allow => syntax for named cursor arguments in plpgsql (Pavel Stehule) § -We previously only accepted ":=". +We previously only accepted :=. @@ -2208,7 +2209,7 @@ Author: Tom Lane -Allow regexp_match[es]/regexp_like/regexp_replace/regexp_count/regexp_instr/regexp_substr/regexp_split_to_table/regexp_split_to_array() to use named arguments (Jian He) +Allow regexp_match[es]()/regexp_like()/regexp_replace()/regexp_count()/regexp_instr()/regexp_substr()/regexp_split_to_table()/regexp_split_to_array() to use named arguments (Jian He) § @@ -2229,7 +2230,7 @@ Author: Robert Haas -Add function PQfullProtocolVersion() to report the full, including minor, protocol version number (Jacob Champion, Jelte Fennema-Nio) +Add function PQfullProtocolVersion() to report the full, including minor, protocol version number (Jacob Champion, Jelte Fennema-Nio) § @@ -2256,7 +2257,7 @@ Author: Michael Paquier -Add libpq function PQservice() to return the connection service name (Michael Banck) +Add libpq function PQservice() to return the connection service name (Michael Banck) § @@ -2270,7 +2271,7 @@ Author: Tomas Vondra -Report search_path changes to the client (Alexander Kukushkin, Jelte Fennema-Nio, Tomas Vondra) +Report search_path changes to the client (Alexander Kukushkin, Jelte Fennema-Nio, Tomas Vondra) § § @@ -2291,7 +2292,7 @@ Author: Álvaro Herrera -Add PQtrace() output for all message types, including authentication (Jelte Fennema-Nio) +Add PQtrace() output for all message types, including authentication (Jelte Fennema-Nio) § § § @@ -2307,7 +2308,7 @@ Author: Daniel Gustafsson -Add libpq connection parameter sslkeylogfile which dumps out SSL key material (Abhishek Chanda, Daniel Gustafsson) +Add libpq connection parameter sslkeylogfile which dumps out SSL key material (Abhishek Chanda, Daniel Gustafsson) § @@ -2323,12 +2324,12 @@ Author: Thomas Munro -Modify some libpq function signatures to use int64_t (Thomas Munro) +Modify some libpq function signatures to use int64_t (Thomas Munro) § -These previously used pg_int64, which is now deprecated. +These previously used pg_int64, which is now deprecated. @@ -2348,12 +2349,12 @@ Author: Michael Paquier -Allow psql to parse, bind, and close named prepared statements (Anthonin Bonnefoy, Michael Paquier) +Allow psql to parse, bind, and close named prepared statements (Anthonin Bonnefoy, Michael Paquier) § -This is accomplished with new commands \parse, \bind_named, and \close. +This is accomplished with new commands \parse, \bind_named, and \close. @@ -2368,14 +2369,14 @@ Author: Michael Paquier -Add psql backslash commands to allowing issuance of pipeline queries (Anthonin Bonnefoy) +Add psql backslash commands to allow issuance of pipeline queries (Anthonin Bonnefoy) § § § -The new commands are \startpipeline, \syncpipeline, \sendpipeline, \endpipeline, \flushrequest, \flush, and \getresults. +The new commands are \startpipeline, \syncpipeline, \sendpipeline, \endpipeline, \flushrequest, \flush, and \getresults.
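A sketch of a psql pipeline session (each query is sent via the extended protocol, hence \bind):

    \startpipeline
    SELECT 1 \bind \sendpipeline
    SELECT $1::int \bind 42 \sendpipeline
    \endpipeline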
@@ -2386,12 +2387,12 @@ Author: Michael Paquier

-Allow adding pipeline status to the psql prompt and add related state variables (Anthonin Bonnefoy)
+Allow adding pipeline status to the psql prompt and add related state variables (Anthonin Bonnefoy)
 §

-The new prompt character is "%P" and the new psql variables are PIPELINE_SYNC_COUNT, PIPELINE_COMMAND_COUNT, and PIPELINE_RESULT_COUNT.
+The new prompt character is %P and the new psql variables are PIPELINE_SYNC_COUNT, PIPELINE_COMMAND_COUNT, and PIPELINE_RESULT_COUNT.

@@ -2402,7 +2403,7 @@ Author: Michael Paquier

-Allow adding the connection service name to the psql prompt or access it via psql variable (Michael Banck)
+Allow adding the connection service name to the psql prompt or access it via psql variable (Michael Banck)
 §

@@ -2414,12 +2415,12 @@ Author: Dean Rasheed

-Add psql option to use expanded mode on all list commands (Dean Rasheed)
+Add psql option to use expanded mode on all list commands (Dean Rasheed)
 §

-Adding 'x' enables this.
+Adding backslash suffix x enables this.

@@ -2430,7 +2431,7 @@ Author: Álvaro Herrera

-Change psql's \conninfo to use tabular format and include more information (Álvaro Herrera, Maiquel Grassi, Hunaid Sohail)
+Change psql's \conninfo to use tabular format and include more information (Álvaro Herrera, Maiquel Grassi, Hunaid Sohail)
 §

@@ -2442,7 +2443,7 @@ Author: Dean Rasheed

-Add function's leakproof indicator to psql's \df+, \do+, \dAo+, and \dC+ outputs (Yugo Nagata)
+Add function's leakproof indicator to psql's \df+, \do+, \dAo+, and \dC+ outputs (Yugo Nagata)
 §

@@ -2454,7 +2455,7 @@ Author: Michael Paquier

-Add access method details for partitioned relations in \dP+ (Justin Pryzby)
+Add access method details for partitioned relations in \dP+ (Justin Pryzby)
 §

@@ -2466,7 +2467,7 @@ Author: Magnus Hagander

-Add "default_version" to the psql \dx extension output (Magnus Hagander)
+Add default_version to the psql \dx extension output (Magnus Hagander)
 §

@@ -2478,7 +2479,7 @@ Author: Daniel Gustafsson

-Add psql variable WATCH_INTERVAL to set the default \watch wait time (Daniel Gustafsson)
+Add psql variable WATCH_INTERVAL to set the default \watch wait time (Daniel Gustafsson)
 §

@@ -2501,13 +2502,13 @@ Author: Peter Eisentraut

-Change initdb to default to enabling checksums (Greg Sabino Mullane)
+Change initdb to default to enabling checksums (Greg Sabino Mullane)
 §
 §

-The new initdb option --no-data-checksums disables checksums.
+The new initdb option --no-data-checksums disables checksums.

@@ -2518,12 +2519,12 @@ Author: Nathan Bossart

-Add initdb option --no-sync-data-files to avoid syncing heap/index files (Nathan Bossart)
+Add initdb option --no-sync-data-files to avoid syncing heap/index files (Nathan Bossart)
 §

-initdb --no-sync is still available to avoid syncing any files.
+initdb option --no-sync is still available to avoid syncing any files.

@@ -2536,13 +2537,13 @@ Author: Nathan Bossart

-Add vacuumdb option --missing-stats-only to compute only missing optimizer statistics (Corey Huinker, Nathan Bossart)
+Add vacuumdb option --missing-stats-only to compute only missing optimizer statistics (Corey Huinker, Nathan Bossart)
 §
 §

-This option can only be used by --analyze-only and --analyze-in-stages.
+This option can only be used by --analyze-only and --analyze-in-stages.
@@ -2553,7 +2554,7 @@ Author: Robert Haas

-Add pg_combinebackup option -k/--link to enable hard linking (Israel Barth Rubio, Robert Haas)
+Add pg_combinebackup option -k/--link to enable hard linking (Israel Barth Rubio, Robert Haas)
 §

@@ -2569,7 +2570,7 @@ Author: Robert Haas

-Allow pg_verifybackup to verify tar-format backups (Amul Sul)
+Allow pg_verifybackup to verify tar-format backups (Amul Sul)
 §

@@ -2581,7 +2582,7 @@ Author: Masahiko Sawada

-If pg_rewind's --source-server specifies a database name, use it in --write-recovery-conf output (Masahiko Sawada)
+If pg_rewind's --source-server specifies a database name, use it in --write-recovery-conf output (Masahiko Sawada)
 §

@@ -2593,7 +2594,7 @@ Author: Masahiko Sawada

-Add pg_resetwal option --char-signedness to change the default char signedness (Masahiko Sawada)
+Add pg_resetwal option --char-signedness to change the default char signedness (Masahiko Sawada)
 §

@@ -2616,12 +2617,12 @@ Author: Andrew Dunstan

-Allow pg_dumpall to dump in the same output formats as pg_dump supports (Mahendra Singh Thalor, Andrew Dunstan)
+Allow pg_dumpall to dump in the same output formats as pg_dump supports (Mahendra Singh Thalor, Andrew Dunstan)
 §

-Also modify pg_restore to handle such dumps.  Previously pg_dumpall only supported text format.
+Also modify pg_restore to handle such dumps.  Previously pg_dumpall only supported text format.

@@ -2632,7 +2633,7 @@ Author: Jeff Davis

-Add pg_dump options --with-schema, --with-data, and --with-statistics (Jeff Davis)
+Add pg_dump options --with-schema, --with-data, and --with-statistics (Jeff Davis)
 §

@@ -2646,7 +2647,7 @@ Author: Nathan Bossart

-Add pg_dump and pg_dumpall option --sequence-data to dump sequence data that would normally be excluded (Nathan Bossart)
+Add pg_dump and pg_dumpall option --sequence-data to dump sequence data that would normally be excluded (Nathan Bossart)
 §
 §

@@ -2659,7 +2660,7 @@ Author: Jeff Davis

-Add pg_dump, pg_dumpall, and pg_restore options --statistics-only, --no-statistics, --no-data, and --no-schema (Corey Huinker, Jeff Davis)
+Add pg_dump, pg_dumpall, and pg_restore options --statistics-only, --no-statistics, --no-data, and --no-schema (Corey Huinker, Jeff Davis)
 §

@@ -2671,7 +2672,7 @@ Author: Tom Lane

-Add option --no-policies to disable row level security policy processing in pg_dump, pg_dumpall, pg_restore (Nikolay Samokhvalov)
+Add option --no-policies to disable row level security policy processing in pg_dump, pg_dumpall, pg_restore (Nikolay Samokhvalov)
 §

@@ -2702,7 +2703,7 @@ Author: Jeff Davis

-Allow pg_upgrade to preserve optimizer statistics (Corey Huinker, Jeff Davis, Nathan Bossart)
+Allow pg_upgrade to preserve optimizer statistics (Corey Huinker, Jeff Davis, Nathan Bossart)
 §
 §
 §

@@ -2710,7 +2711,7 @@ Allow pg_upgrade to preserve optimizer statistics (Corey Huinker, Jeff Davis, Na

-Extended statistics are not preserved.  Also add pg_upgrade option --no-statistics to disable statistics preservation.
+Extended statistics are not preserved.  Also add pg_upgrade option --no-statistics to disable statistics preservation.

@@ -2741,7 +2742,7 @@ Author: Nathan Bossart

-Allow pg_upgrade to process database checks in parallel (Nathan Bossart)
+Allow pg_upgrade to process database checks in parallel (Nathan Bossart)
 §
 §
 §

@@ -2756,7 +2757,7 @@ Allow pg_upgrade to process database checks in parallel (Nathan Bossart)

-This is controlled by the existing --jobs option.
+This is controlled by the existing --jobs option.
@@ -2767,7 +2768,7 @@ Author: Nathan Bossart

-Add pg_upgrade option --swap to swap directories rather than copy, clone, or link files (Nathan Bossart)
+Add pg_upgrade option --swap to swap directories rather than copy, clone, or link files (Nathan Bossart)
 §

@@ -2785,13 +2786,13 @@ Author: Masahiko Sawada

-Add pg_upgrade option --set-char-signedness to set the default char signedness of new cluster (Masahiko Sawada)
+Add pg_upgrade option --set-char-signedness to set the default char signedness of new cluster (Masahiko Sawada)
 §
 §

-This is to handle cases where a pre-Postgres 18 cluster's default CPU signedness does not match the new cluster.
+This is to handle cases where a pre-PostgreSQL 18 cluster's default CPU signedness does not match the new cluster.

@@ -2811,7 +2812,7 @@ Author: Amit Kapila

-Add pg_createsubscriber option --all to create logical replicas for all databases (Shubham Khanna)
+Add pg_createsubscriber option --all to create logical replicas for all databases (Shubham Khanna)
 §

@@ -2823,7 +2824,7 @@ Author: Amit Kapila

-Add pg_createsubscriber option --remove to remove publications (Shubham Khanna)
+Add pg_createsubscriber option --remove to remove publications (Shubham Khanna)
 §

@@ -2835,7 +2836,7 @@ Author: Amit Kapila

-Add pg_createsubscriber option --enable-two-phase to enable prepared transactions (Shubham Khanna)
+Add pg_createsubscriber option --enable-two-phase to enable prepared transactions (Shubham Khanna)
 §

@@ -2847,7 +2848,7 @@ Author: Masahiko Sawada

-Add pg_recvlogical option --failover to specify failover slots (Hayato Kuroda)
+Add pg_recvlogical option --failover to specify failover slots (Hayato Kuroda)
 §

@@ -2859,7 +2860,7 @@ Author: Fujii Masao

-Allow pg_recvlogical --drop-slot to work without --dbname (Hayato Kuroda)
+Allow pg_recvlogical --drop-slot to work without --dbname (Hayato Kuroda)
 §

@@ -2890,7 +2891,7 @@ Separate the loading and running of injection points (Michael Paquier, Heikki Li

-Injection points can now be created, but not run, via INJECTION_POINT_LOAD(), and such injection points can be run via INJECTION_POINT_CACHED().
+Injection points can now be created, but not run, via INJECTION_POINT_LOAD(), and such injection points can be run via INJECTION_POINT_CACHED().
@@ -2913,7 +2914,7 @@ Author: Heikki Linnakangas

-Allow inline injection point test code with IS_INJECTION_POINT_ATTACHED() (Heikki Linnakangas)
+Allow inline injection point test code with IS_INJECTION_POINT_ATTACHED() (Heikki Linnakangas)
 §

@@ -2925,7 +2926,7 @@ Author: David Rowley

-Improve the performance of processing long JSON strings using SIMD instructions (David Rowley)
+Improve the performance of processing long JSON strings using SIMD instructions (David Rowley)
 §

@@ -2937,7 +2938,7 @@ Author: John Naylor

-Speed up CRC32C calculations using x86 AVX-512 instructions (Raghuveer Devulapalli, Paul Amonson)
+Speed up CRC32C calculations using x86 AVX-512 instructions (Raghuveer Devulapalli, Paul Amonson)
 §

@@ -2951,7 +2952,7 @@ Author: Nathan Bossart

-Add ARM Neon and SVE CPU intrinsics for popcount (integer bit counting) (Chiranmoy Bhattacharya, Devanga Susmitha, Rama Malladi)
+Add ARM Neon and SVE CPU intrinsics for popcount (integer bit counting) (Chiranmoy Bhattacharya, Devanga Susmitha, Rama Malladi)
 §
 §

@@ -2989,15 +2990,15 @@ Author: Tomas Vondra

-Add configure option --with-libnuma to enable NUMA awareness (Jakub Wartak, Bertrand Drouvot)
+Add configure option --with-libnuma to enable NUMA awareness (Jakub Wartak, Bertrand Drouvot)
 §
 §
 §

-The function pg_numa_available() reports on NUMA awareness, and system views pg_shmem_allocations_numa and pg_buffercache_numa which report on shared memory distribution across
-NUMA nodes.
+The function pg_numa_available() reports on NUMA awareness, and system views pg_shmem_allocations_numa and pg_buffercache_numa report on shared memory distribution across
+NUMA nodes.

@@ -3008,7 +3009,7 @@ Author: Nathan Bossart

-Add TOAST table to pg_index to allow for very large index expression indexes (Nathan Bossart)
+Add TOAST table to pg_index to allow for very large expression indexes (Nathan Bossart)
 §

@@ -3020,7 +3021,8 @@ Author: David Rowley

-Remove column pg_attribute.attcacheoff (David Rowley)
+Remove column
+pg_attribute.attcacheoff (David Rowley)
 §

@@ -3032,7 +3034,7 @@ Author: Melanie Plageman

-Add column pg_class.relallfrozen (Melanie Plageman)
+Add column pg_class.relallfrozen (Melanie Plageman)
 §

@@ -3046,7 +3048,7 @@ Author: Peter Eisentraut

-Add amgettreeheight, amconsistentequality, and amconsistentordering to the index access method API (Mark Dilger)
+Add amgettreeheight, amconsistentequality, and amconsistentordering to the index access method API (Mark Dilger)
 §
 §

@@ -3059,7 +3061,7 @@ Author: Peter Eisentraut

-Add GiST support function stratnum (Paul A. Jungwirth)
+Add GiST support function stratnum() (Paul A. Jungwirth)
 §

@@ -3071,7 +3073,7 @@ Author: Masahiko Sawada

-Record the default CPU signedness of "char" in pg_controldata (Masahiko Sawada)
+Record the default CPU signedness of char in pg_controldata (Masahiko Sawada)
 §

@@ -3085,13 +3087,13 @@ Author: Peter Eisentraut

-Add support for Python "Limited API" in PL/Python (Peter Eisentraut)
+Add support for Python "Limited API" in PL/Python (Peter Eisentraut)
 §
 §

-This helps prevent problems caused by Python 3.x version mismatches.
+This helps prevent problems caused by Python 3.x version mismatches.
@@ -3102,7 +3104,7 @@ Author: Jacob Champion

-Change the minimum supported Python version to 3.6.8 (Jacob Champion)
+Change the minimum supported Python version to 3.6.8 (Jacob Champion)
 §

@@ -3116,7 +3118,7 @@ Author: Daniel Gustafsson

-Remove support for OpenSSL versions older than 1.1.1 (Daniel Gustafsson)
+Remove support for OpenSSL versions older than 1.1.1 (Daniel Gustafsson)
 §
 §

@@ -3129,7 +3131,7 @@ Author: Peter Eisentraut

-If LLVM is enabled, require version 14 or later (Thomas Munro)
+If LLVM is enabled, require version 14 or later (Thomas Munro)
 §

@@ -3141,12 +3143,12 @@ Author: Tom Lane

-Add macro PG_MODULE_MAGIC_EXT to allow extensions to report their name and version (Andrei Lepikhov)
+Add macro PG_MODULE_MAGIC_EXT to allow extensions to report their name and version (Andrei Lepikhov)
 §

-This information can be access via the new function pg_get_loaded_modules().
+This information can be accessed via the new function pg_get_loaded_modules().

@@ -3157,12 +3159,12 @@ Author: Tom Lane

-Document that SPI_connect/SPI_connect_ext() always returns success (SPI_OK_CONNECT) (Stepan Neretin)
+Document that SPI_connect()/SPI_connect_ext() always returns success (SPI_OK_CONNECT) (Stepan Neretin)
 §

-Errors are always reported via ereport().
+Errors are always reported via ereport().

@@ -3173,7 +3175,7 @@ Author: Peter Eisentraut

-Remove the experimental designation of Meson builds on Windows (Aleksander Alekseev)
+Remove the experimental designation of Meson builds on Windows (Aleksander Alekseev)
 §

@@ -3185,7 +3187,7 @@ Author: Peter Eisentraut

-Add documentation section about API and ABI compatibility (David Wheeler, Peter Eisentraut)
+Add documentation section about API and ABI compatibility (David Wheeler, Peter Eisentraut)
 §

@@ -3199,13 +3201,13 @@ Author: Thomas Munro

-Remove configure options --disable-spinlocks and --disable-atomics (Thomas Munro)
+Remove configure options --disable-spinlocks and --disable-atomics (Thomas Munro)
 §
 §

-Thirty-two bit atomic operations are now required.
+Thirty-two-bit atomic operations are now required.

@@ -3216,7 +3218,7 @@ Author: Tom Lane

-Remove support for the HPPA/PA-RISC architecture (Tom Lane)
+Remove support for the HPPA/PA-RISC architecture (Tom Lane)
 §

@@ -3237,7 +3239,7 @@ Author: Masahiko Sawada

-Add extension pg_logicalinspect to inspect logical snapshots (Bertrand Drouvot)
+Add extension pg_logicalinspect to inspect logical snapshots (Bertrand Drouvot)
 §

@@ -3249,7 +3251,7 @@ Author: Robert Haas

-Add extension pg_overexplain which adds debug details to EXPLAIN output (Robert Haas)
+Add extension pg_overexplain which adds debug details to EXPLAIN output (Robert Haas)
 §

@@ -3267,7 +3269,7 @@ Author: Fujii Masao

-Add output columns to postgres_fdw_get_connections() (Hayato Kuroda, Sagar Dilip Shedge)
+Add output columns to postgres_fdw_get_connections() (Hayato Kuroda, Sagar Dilip Shedge)
 §
 §
 §

@@ -3275,8 +3277,8 @@ Add output columns to postgres_fdw_get_connections() (Hayato Kuroda, Sagar Dilip

-New output column "used_in_xact" indicates if the foreign data wrapper is being used by a current transaction, "closed" indicates if it is closed, "user_name" indicates the
-user name, and "remote_backend_pid" indicates the remote backend process identifier.
+New output column used_in_xact indicates if the foreign data wrapper is being used by a current transaction, closed indicates if it is closed, user_name indicates the
+user name, and remote_backend_pid indicates the remote backend process identifier.
@@ -3287,13 +3289,14 @@ Author: Peter Eisentraut -Allow SCRAM authentication from the client to be passed to postgres_fdw servers (Matheus Alcantara, Peter Eisentraut) +Allow SCRAM authentication from the client to be passed to postgres_fdw servers (Matheus Alcantara, Peter Eisentraut) § -This avoids storing postgres_fdw authentication information in the database, and is enabled with the postgres_fdw "use_scram_passthrough" connection option. libpq uses new connection -parameters scram_client_key and scram_server_key. +This avoids storing postgres_fdw authentication information in the database, and is enabled with the +postgres_fdw use_scram_passthrough connection option. libpq uses new connection +parameters scram_client_key and scram_server_key. @@ -3304,7 +3307,7 @@ Author: Peter Eisentraut -Allow SCRAM authentication from the client to be passed to dblink servers (Matheus Alcantara) +Allow SCRAM authentication from the client to be passed to dblink servers (Matheus Alcantara) § @@ -3316,12 +3319,12 @@ Author: Fujii Masao -Add on_error and log_verbosity options to file_fdw (Atsushi Torikoshi) +Add on_error and log_verbosity options to file_fdw (Atsushi Torikoshi) § -These control how file_fdw handles and reports invalid file rows. +These control how file_fdw handles and reports invalid file rows. @@ -3332,12 +3335,12 @@ Author: Fujii Masao -Add "reject_limit" to control the number of invalid rows file_fdw can ignore (Atsushi Torikoshi) +Add reject_limit to control the number of invalid rows file_fdw can ignore (Atsushi Torikoshi) § -This is active when ON_ERROR = 'ignore'. +This is active when ON_ERROR = 'ignore'. @@ -3348,7 +3351,7 @@ Author: Nathan Bossart -Add configurable variable min_password_length to passwordcheck (Emanuele Musella, Maurizio Boriani) +Add configurable variable min_password_length to passwordcheck (Emanuele Musella, Maurizio Boriani) § @@ -3364,7 +3367,7 @@ Author: Tatsuo Ishii -Have pgbench report the number of failed, retried, or skipped transactions in per-script reports (Yugo Nagata) +Have pgbench report the number of failed, retried, or skipped transactions in per-script reports (Yugo Nagata) § @@ -3376,12 +3379,12 @@ Author: Tom Lane -Add isn server variable "weak" to control invalid check digit acceptance (Viktor Holmberg) +Add isn server variable weak to control invalid check digit acceptance (Viktor Holmberg) § -This was previously only controlled by function isn_weak(). +This was previously only controlled by function isn_weak(). @@ -3392,7 +3395,7 @@ Author: Heikki Linnakangas -Allow values to be sorted to speed btree_gist index builds (Bernd Helmle, Andrey Borodin) +Allow values to be sorted to speed btree_gist index builds (Bernd Helmle, Andrey Borodin) § @@ -3404,7 +3407,7 @@ Author: Tomas Vondra -Add amcheck function gin_index_check() to verify GIN indexes (Grigory Kryachko, Heikki Linnakangas, Andrey Borodin) +Add amcheck function gin_index_check() to verify GIN indexes (Grigory Kryachko, Heikki Linnakangas, Andrey Borodin) § @@ -3416,12 +3419,12 @@ Author: Andres Freund -Add functions pg_buffercache_evict_relation() and pg_buffercache_evict_all() to evict unpinned shared buffers (Nazir Bilal Yavuz) +Add functions pg_buffercache_evict_relation() and pg_buffercache_evict_all() to evict unpinned shared buffers (Nazir Bilal Yavuz) § -The existing function pg_buffercache_evict() now returns the buffer flush status. +The existing function pg_buffercache_evict() now returns the buffer flush status. 
@@ -3436,7 +3439,7 @@ Author: Robert Haas -Allow extensions to install custom EXPLAIN options (Robert Haas, Sami Imseih) +Allow extensions to install custom EXPLAIN options (Robert Haas, Sami Imseih) § § § @@ -3452,7 +3455,7 @@ Author: Michael Paquier -Allow extensions to use the server's cumulative statistics API (Michael Paquier) +Allow extensions to use the server's cumulative statistics API (Michael Paquier) § § @@ -3472,7 +3475,7 @@ Author: Michael Paquier -Allow the queries of CREATE TABLE AS and DECLARE to be tracked by pg_stat_statements (Anthonin Bonnefoy) +Allow the queries of CREATE TABLE AS and DECLARE to be tracked by pg_stat_statements (Anthonin Bonnefoy) § @@ -3488,12 +3491,12 @@ Author: Michael Paquier -Allow the parameterization of SET values in pg_stat_statements (Greg Sabino Mullane, Michael Paquier) +Allow the parameterization of SET values in pg_stat_statements (Greg Sabino Mullane, Michael Paquier) § -This reduces the bloat caused by SET statements with differing constants. +This reduces the bloat caused by SET statements with differing constants. @@ -3504,12 +3507,12 @@ Author: Michael Paquier -Add pg_stat_statements columns to report parallel activity (Guillaume Lelarge) +Add pg_stat_statements columns to report parallel activity (Guillaume Lelarge) § -The new columns are parallel_workers_to_launch and parallel_workers_launched. +The new columns are parallel_workers_to_launch and parallel_workers_launched. @@ -3520,7 +3523,7 @@ Author: Michael Paquier -Add pg_stat_statements.wal_buffers_full to report full WAL buffers (Bertrand Drouvot) +Add pg_stat_statements.wal_buffers_full to report full WAL buffers (Bertrand Drouvot) § @@ -3541,7 +3544,7 @@ Author: Álvaro Herrera -Add pgcrypto functions sha256crypt() and sha512crypt() (Bernd Helmle) +Add pgcrypto functions sha256crypt() and sha512crypt() (Bernd Helmle) § @@ -3553,7 +3556,7 @@ Author: Daniel Gustafsson -Add CFB mode to pgcrypto encryption and decryption (Umar Hayat) +Add CFB mode to pgcrypto encryption and decryption (Umar Hayat) § @@ -3565,12 +3568,12 @@ Author: Daniel Gustafsson -Add pgcrypto server variable builtin_crypto_enabled to allow disabling builtin non-FIPS mode cryptographic functions (Daniel Gustafsson, Joe Conway) +Add pgcrypto server variable builtin_crypto_enabled to allow disabling builtin non-FIPS mode cryptographic functions (Daniel Gustafsson, Joe Conway) § -This is useful for guaranteeing FIPS mode behavior. +This is useful for guaranteeing FIPS mode behavior. From 428a87607b58949cfc35eeab94825e2de0d541a5 Mon Sep 17 00:00:00 2001 From: Fujii Masao Date: Wed, 18 Jun 2025 09:18:40 +0900 Subject: [PATCH 027/181] doc: Reorder protocol version option descriptions in libpq docs. Commit 285613c60a7 introduced the min_protocol_version and max_protocol_version connection options for libpq, but their descriptions were placed in the middle of the unrelated ssl_min_protocol_version and ssl_max_protocol_version entries. This commit moves the min_protocol_version and max_protocol_version descriptions to appear after the SSL-related options. This improves the logical order and makes it easier for users to locate the relevant settings in the libpq documentation. 
Author: Fujii Masao Reviewed-by: Jelte Fennema-Nio Discussion: https://postgr.es/m/a3391f36-30f5-4d4a-825b-232476819de8@oss.nttdata.com --- doc/src/sgml/libpq.sgml | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/doc/src/sgml/libpq.sgml b/doc/src/sgml/libpq.sgml index 695fe958c3ed3..08bd51219262d 100644 --- a/doc/src/sgml/libpq.sgml +++ b/doc/src/sgml/libpq.sgml @@ -2168,6 +2168,24 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname + + ssl_max_protocol_version + + + This parameter specifies the maximum SSL/TLS protocol version to allow + for the connection. Valid values are TLSv1, + TLSv1.1, TLSv1.2 and + TLSv1.3. The supported protocols depend on the + version of OpenSSL used, older versions + not supporting the most modern protocol versions. If not set, this + parameter is ignored and the connection will use the maximum bound + defined by the backend, if set. Setting the maximum protocol version + is mainly useful for testing or if some component has issues working + with a newer protocol. + + + + min_protocol_version @@ -2216,24 +2234,6 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname - - ssl_max_protocol_version - - - This parameter specifies the maximum SSL/TLS protocol version to allow - for the connection. Valid values are TLSv1, - TLSv1.1, TLSv1.2 and - TLSv1.3. The supported protocols depend on the - version of OpenSSL used, older versions - not supporting the most modern protocol versions. If not set, this - parameter is ignored and the connection will use the maximum bound - defined by the backend, if set. Setting the maximum protocol version - is mainly useful for testing or if some component has issues working - with a newer protocol. - - - - krbsrvname From 9e1183953f0aee6b8040cd782a8af9996f5ca942 Mon Sep 17 00:00:00 2001 From: Michael Paquier Date: Wed, 18 Jun 2025 11:03:21 +0900 Subject: [PATCH 028/181] Document "relrewrite" at the top of heap_create_with_catalog() This parameter has been introduced in 325f2ec5557f, and it was not documented contrary to all the other arguments of heap_create_with_catalog(). Reviewed-by: Yugo Nagata Reviewed-by: Steven Niu Discussion: https://postgr.es/m/aE--bmEv-gJUTH5v@paquier.xyz --- src/backend/catalog/heap.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c index fbaed5359ad7c..10f43c51c5af0 100644 --- a/src/backend/catalog/heap.c +++ b/src/backend/catalog/heap.c @@ -1100,6 +1100,7 @@ AddNewRelationType(const char *typeName, * if false, relacl is always set NULL * allow_system_table_mods: true to allow creation in system namespaces * is_internal: is this a system-generated catalog? + * relrewrite: link to original relation during a table rewrite * * Output parameters: * typaddress: if not null, gets the object address of the new pg_type entry From c2e2589ab969eb802493191c79de844bf7dc3a6e Mon Sep 17 00:00:00 2001 From: Fujii Masao Date: Wed, 18 Jun 2025 14:53:55 +0900 Subject: [PATCH 029/181] pg_dump: Allow pg_dump to dump the statistics for foreign tables. Commit 1fd1bd87101 introduced support for dumping statistics with pg_dump and pg_dumpall, covering tables, materialized views, and indexes. However, it overlooked foreign tables, even though functions like pg_restore_relation_stats() support them. This commit fixes that oversight by allowing pg_dump and pg_dumpall to include statistics for foreign tables. 
Author: Fujii Masao
Reviewed-by: Corey Huinker
Reviewed-by: Nathan Bossart
Discussion: https://postgr.es/m/3772e4e4-ef39-4deb-bb76-aa8165f33fb6@oss.nttdata.com
---
 doc/src/sgml/ref/pg_dump.sgml    | 7 ++++---
 doc/src/sgml/ref/pg_dumpall.sgml | 3 ++-
 src/bin/pg_dump/pg_dump.c        | 4 +++-
 3 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/doc/src/sgml/ref/pg_dump.sgml b/doc/src/sgml/ref/pg_dump.sgml
index d7595a7e5468d..1e06bd33bdcd1 100644
--- a/doc/src/sgml/ref/pg_dump.sgml
+++ b/doc/src/sgml/ref/pg_dump.sgml
@@ -1277,8 +1277,8 @@ PostgreSQL documentation
        The data section contains actual table data, large-object
-       contents, statistics for tables and materialized views and
-       sequence values.
+       contents, sequence values, and statistics for tables,
+       materialized views, and foreign tables.
        Post-data items include definitions of indexes, triggers, rules,
        statistics for indexes, and constraints other than validated check
        constraints.
@@ -1359,7 +1359,8 @@ PostgreSQL documentation
        Dump only the statistics, not the schema (data definitions) or data.
-        Statistics for tables, materialized views, and indexes are dumped.
+        Statistics for tables, materialized views, foreign tables,
+        and indexes are dumped.

diff --git a/doc/src/sgml/ref/pg_dumpall.sgml b/doc/src/sgml/ref/pg_dumpall.sgml
index 723a466cfaad6..43f384ed16a9c 100644
--- a/doc/src/sgml/ref/pg_dumpall.sgml
+++ b/doc/src/sgml/ref/pg_dumpall.sgml
@@ -690,7 +690,8 @@ exclude database PATTERN
        Dump only the statistics, not the schema (data definitions) or data.
-        Statistics for tables, materialized views, and indexes are dumped.
+        Statistics for tables, materialized views, foreign tables,
+        and indexes are dumped.

diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index 7bc0724cd301f..a8f0309e8fc1e 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -6890,7 +6890,8 @@ getRelationStatistics(Archive *fout, DumpableObject *rel, int32 relpages,
 		(relkind == RELKIND_PARTITIONED_TABLE) ||
 		(relkind == RELKIND_INDEX) ||
 		(relkind == RELKIND_PARTITIONED_INDEX) ||
-		(relkind == RELKIND_MATVIEW))
+		(relkind == RELKIND_MATVIEW ||
+		 relkind == RELKIND_FOREIGN_TABLE))
 	{
 		RelStatsInfo *info = pg_malloc0(sizeof(RelStatsInfo));
 		DumpableObject *dobj = &info->dobj;
@@ -6929,6 +6930,7 @@ getRelationStatistics(Archive *fout, DumpableObject *rel, int32 relpages,
 			case RELKIND_RELATION:
 			case RELKIND_PARTITIONED_TABLE:
 			case RELKIND_MATVIEW:
+			case RELKIND_FOREIGN_TABLE:
 				info->section = SECTION_DATA;
 				break;
 			case RELKIND_INDEX:

From 09f7d36ba16e9665bb25d2c097e71c7439485fd7 Mon Sep 17 00:00:00 2001
From: Bruce Momjian
Date: Wed, 18 Jun 2025 16:43:27 -0400
Subject: [PATCH 030/181] doc config.sgml: use "-" and not "_" for
 varlistentry "id"s

Change "id"s of file_copy_method and enable_self_join_elimination for
consistency with the rest of the guc "id"s.  These are new entries for
PG 18.
--- doc/src/sgml/config.sgml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml index 7e0e5400ee128..5ea554ad3c36d 100644 --- a/doc/src/sgml/config.sgml +++ b/doc/src/sgml/config.sgml @@ -2363,7 +2363,7 @@ include_dir 'conf.d' - + file_copy_method (enum) file_copy_method configuration parameter @@ -5765,7 +5765,7 @@ ANY num_sync ( + enable_self_join_elimination (boolean) enable_self_join_elimination configuration parameter From d0d1bcb1e8b2e324bc243d69ccfce55b25a79f8c Mon Sep 17 00:00:00 2001 From: Bruce Momjian Date: Wed, 18 Jun 2025 16:48:26 -0400 Subject: [PATCH 031/181] doc: fix for commit 09f7d36ba16 in changing "_" to "-". I thought underscores wouldn't even work in "id"s, so I never checked to see if anything referenced it, but it seems it does work, so adjust the calling site for the dash syntax. --- doc/src/sgml/ref/alter_database.sgml | 2 +- doc/src/sgml/ref/create_database.sgml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/src/sgml/ref/alter_database.sgml b/doc/src/sgml/ref/alter_database.sgml index 9da8920e12eff..1fc051e11a311 100644 --- a/doc/src/sgml/ref/alter_database.sgml +++ b/doc/src/sgml/ref/alter_database.sgml @@ -83,7 +83,7 @@ ALTER DATABASE name RESET ALL must be empty for this database, and no one can be connected to the database. Tables and indexes in non-default tablespaces are unaffected. The method used to copy files to the new tablespace - is affected by the setting. + is affected by the setting. diff --git a/doc/src/sgml/ref/create_database.sgml b/doc/src/sgml/ref/create_database.sgml index 640c0425faec5..4da8aeebb50a2 100644 --- a/doc/src/sgml/ref/create_database.sgml +++ b/doc/src/sgml/ref/create_database.sgml @@ -140,7 +140,7 @@ CREATE DATABASE name after the creation of the new database. In some situations, this may have a noticeable negative impact on overall system performance. The FILE_COPY strategy is affected by the setting. + linkend="guc-file-copy-method"/> setting. From b57d707708181f988fd1fa697976059510fc4f76 Mon Sep 17 00:00:00 2001 From: Fujii Masao Date: Thu, 19 Jun 2025 09:07:19 +0900 Subject: [PATCH 032/181] doc: Fix incorrect description of INCLUDING COMMENTS in CREATE FOREIGN TABLE. Commit 302cf157592 added support for LIKE in CREATE FOREIGN TABLE. In this feature, since indexes are not created for foreign tables, comments on indexes are not copied either. However, the documentation incorrectly stated that index comments would be copied when using INCLUDING COMMENTS. This commit corrects that by removing the mention of index comments. Author: Fujii Masao Reviewed-by: Michael Paquier Discussion: https://postgr.es/m/f86cd84f-a6a3-4451-bae7-5cca9e63b06d@oss.nttdata.com --- doc/src/sgml/ref/create_foreign_table.sgml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/src/sgml/ref/create_foreign_table.sgml b/doc/src/sgml/ref/create_foreign_table.sgml index d08834ac9d291..009fa46532bbe 100644 --- a/doc/src/sgml/ref/create_foreign_table.sgml +++ b/doc/src/sgml/ref/create_foreign_table.sgml @@ -232,7 +232,7 @@ WITH ( MODULUS numeric_literal, REM INCLUDING COMMENTS - Comments for the copied columns, constraints, and indexes will be + Comments for the copied columns and constraints will be copied. The default behavior is to exclude comments, resulting in the copied columns and constraints in the new table having no comments. 
From db0c93f172a41515734a774f0412ff9557eca8ed Mon Sep 17 00:00:00 2001 From: Fujii Masao Date: Thu, 19 Jun 2025 09:12:34 +0900 Subject: [PATCH 033/181] doc: Mention GIN indexes support parallel builds. Commit 8492feb98f6 added support for parallel CREATE INDEX on GIN indexes. However, previously two places in the documentation and two in the source code comments still stated that only B-tree and BRIN indexes support parallel builds. This commit updates those references to correctly include GIN indexes. Author: Fujii Masao Reviewed-by: Robert Treat Discussion: https://postgr.es/m/7d27d068-90e2-4022-9bd7-09b0fd3d4f47@oss.nttdata.com --- doc/src/sgml/config.sgml | 3 ++- doc/src/sgml/ref/create_index.sgml | 2 +- src/backend/catalog/index.c | 2 +- src/backend/optimizer/plan/planner.c | 2 +- 4 files changed, 5 insertions(+), 4 deletions(-) diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml index 5ea554ad3c36d..b265cc89c9d46 100644 --- a/doc/src/sgml/config.sgml +++ b/doc/src/sgml/config.sgml @@ -2894,7 +2894,8 @@ include_dir 'conf.d' Sets the maximum number of parallel workers that can be started by a single utility command. Currently, the parallel utility commands that support the use of parallel workers are - CREATE INDEX when building a B-tree or BRIN index, + CREATE INDEX when building a B-tree, + GIN, or BRIN index, and VACUUM without FULL option. Parallel workers are taken from the pool of processes established by , limited diff --git a/doc/src/sgml/ref/create_index.sgml b/doc/src/sgml/ref/create_index.sgml index 147a8f7587c71..b9c679c41e8db 100644 --- a/doc/src/sgml/ref/create_index.sgml +++ b/doc/src/sgml/ref/create_index.sgml @@ -814,7 +814,7 @@ Indexes: leveraging multiple CPUs in order to process the table rows faster. This feature is known as parallel index build. For index methods that support building indexes - in parallel (currently, B-tree and BRIN), + in parallel (currently, B-tree, GIN, and BRIN), maintenance_work_mem specifies the maximum amount of memory that can be used by each index build operation as a whole, regardless of how many worker processes were started. diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c index 739a92bdcc1ca..aa216683b74fe 100644 --- a/src/backend/catalog/index.c +++ b/src/backend/catalog/index.c @@ -3020,7 +3020,7 @@ index_build(Relation heapRelation, /* * Determine worker process details for parallel CREATE INDEX. Currently, - * only btree and BRIN have support for parallel builds. + * only btree, GIN, and BRIN have support for parallel builds. * * Note that planner considers parallel safety for us. */ diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c index ff65867eebee7..549aedcfa991a 100644 --- a/src/backend/optimizer/plan/planner.c +++ b/src/backend/optimizer/plan/planner.c @@ -6879,7 +6879,7 @@ plan_cluster_use_sort(Oid tableOid, Oid indexOid) * * tableOid is the table on which the index is to be built. indexOid is the * OID of an index to be created or reindexed (which must be an index with - * support for parallel builds - currently btree or BRIN). + * support for parallel builds - currently btree, GIN, or BRIN). * * Return value is the number of parallel worker processes to request. It * may be unsafe to proceed if this is 0. 
Note that this does not include the

From a03805920b36b79b7ddf97c6804117f3296b2900 Mon Sep 17 00:00:00 2001
From: Bruce Momjian
Date: Wed, 18 Jun 2025 21:19:42 -0400
Subject: [PATCH 034/181] doc PG 18 relnotes: add links for server variables

---
 doc/src/sgml/release-18.sgml | 58 ++++++++++++++++++------------------
 1 file changed, 29 insertions(+), 29 deletions(-)

diff --git a/doc/src/sgml/release-18.sgml b/doc/src/sgml/release-18.sgml
index ab83b1554001e..e89a86b1aa813 100644
--- a/doc/src/sgml/release-18.sgml
+++ b/doc/src/sgml/release-18.sgml
@@ -83,7 +83,7 @@ Change time zone abbreviation handling (Tom Lane)

-The system will now favor the current session's time zone abbreviations before checking the server variable timezone_abbreviations.  Previously timezone_abbreviations was
+The system will now favor the current session's time zone abbreviations before checking the server variable timezone_abbreviations.  Previously timezone_abbreviations was
 checked first.

@@ -101,7 +101,7 @@ Deprecate MD5 password authentication (Nathan Bossart)

 Support for MD5 passwords will be removed in a future major version release.  CREATE ROLE and ALTER ROLE now emit deprecation warnings when setting MD5 passwords.
-These warnings can be disabled by setting the md5_password_warnings parameter to off.
+These warnings can be disabled by setting the md5_password_warnings parameter to off.

@@ -136,7 +136,7 @@ Prevent COPY FROM from treating \. as an e

-psql will still treat \. as an end-of-file marker when reading CSV files from STDIN.  Older psql clients connecting to PostgreSQL 18 servers might
+psql will still treat \. as an end-of-file marker when reading CSV files from STDIN.  Older psql clients connecting to PostgreSQL 18 servers might
 experience \copy problems.  This release also enforces that \. must appear alone on a line.

@@ -263,7 +263,7 @@ Automatically remove some unnecessary table self-joins (Andrey Lepikhov, Alexand

-This optimization can be disabled using server variable enable_self_join_elimination.
+This optimization can be disabled using server variable enable_self_join_elimination.

@@ -324,7 +324,7 @@ Allow the keys of SELECT DISTINCT to be internally reordered

-This optimization can be disabled using enable_distinct_reordering.
+This optimization can be disabled using enable_distinct_reordering.

@@ -606,8 +606,8 @@ Add an asynchronous I/O subsystem (Andres Freund, Thomas Munro, Nazir Bilal Yavu

 This feature allows backends to queue multiple read requests, which allows for more efficient sequential scans, bitmap heap scans, vacuums, etc.
-This is enabled by server variable io_method, with server variables io_combine_limit and io_max_combine_limit added to control it.  This also enables
-effective_io_concurrency and maintenance_io_concurrency values greater than zero for systems without fadvise() support.  The new system view pg_aios shows the file handles being used
+This is enabled by server variable io_method, with server variables io_combine_limit and io_max_combine_limit added to control it.  This also enables
+effective_io_concurrency and maintenance_io_concurrency values greater than zero for systems without fadvise() support.  The new system view pg_aios shows the file handles being used
 for asynchronous I/O.

@@ -667,7 +667,7 @@ Allow normal vacuums to freeze some pages, even though they are all-visible (Mel

-This reduces the overhead of later full-relation freezing.  The aggressiveness of this can be controlled by server variable and per-table setting vacuum_max_eager_freeze_failure_rate.
+This reduces the overhead of later full-relation freezing.  The aggressiveness of this can be controlled by server variable and per-table setting vacuum_max_eager_freeze_failure_rate.
 Previously vacuum never processed all-visible pages until freezing was required.
@@ -679,7 +679,7 @@ Author: Nathan Bossart

-Add server variable vacuum_truncate to control file truncation during VACUUM (Nathan Bossart, Gurjeet Singh)
+Add server variable vacuum_truncate to control file truncation during VACUUM (Nathan Bossart, Gurjeet Singh)
 §

@@ -697,7 +697,7 @@ Author: Melanie Plageman

-Increase server variables effective_io_concurrency's and maintenance_io_concurrency's default values to 16 (Melanie Plageman)
+Increase server variables effective_io_concurrency's and maintenance_io_concurrency's default values to 16 (Melanie Plageman)
 §
 §

@@ -723,7 +723,7 @@ Author: Melanie Plageman

-Increase the logging granularity of server variable log_connections (Melanie Plageman)
+Increase the logging granularity of server variable log_connections (Melanie Plageman)
 §

@@ -751,7 +751,7 @@ Author: Tom Lane

-Add log_line_prefix escape %L to output the client IP address (Greg Sabino Mullane)
+Add log_line_prefix escape %L to output the client IP address (Greg Sabino Mullane)
 §

@@ -763,7 +763,7 @@ Author: Fujii Masao

-Add server variable log_lock_failures to log lock acquisition failures (Yuki Seino)
+Add server variable log_lock_failures to log lock acquisition failures (Yuki Seino)
 §

@@ -804,7 +804,7 @@ Add delay time reporting to VACUUM and ANALYZE

 This information appears in the autovacuum logs, the system views pg_stat_progress_vacuum and pg_stat_progress_analyze, and the output of VACUUM and ANALYZE when in VERBOSE
-mode; tracking must be enabled with the server variable track_cost_delay_timing.
+mode; tracking must be enabled with the server variable track_cost_delay_timing.

@@ -900,7 +900,7 @@ Author: Michael Paquier

-Change server variable track_wal_io_timing to control tracking WAL timing in pg_stat_io instead of pg_stat_wal (Bertrand Drouvot)
+Change server variable track_wal_io_timing to control tracking WAL timing in pg_stat_io instead of pg_stat_wal (Bertrand Drouvot)
 §

@@ -1015,7 +1015,7 @@ Have query jumbling of arrays consider only the first and last array elements (D

-Jumbling is used by pg_stat_statements.
+Jumbling is used by pg_stat_statements.

@@ -1144,7 +1144,7 @@ Add support for the OAuth authentication method (Jacob Champion, Daniel Gustafss

-This adds an oauth authentication method to pg_hba.conf, libpq OAuth options, a server variable oauth_validator_libraries to load token validation libraries, and
+This adds an oauth authentication method to pg_hba.conf, libpq OAuth options, a server variable oauth_validator_libraries to load token validation libraries, and
 a configure flag to add the required compile-time libraries.

@@ -1156,7 +1156,7 @@ Author: Daniel Gustafsson

-Add server variable ssl_tls13_ciphers to allow specification of multiple colon-separated TLSv1.3 cipher suites (Erica Zhang, Daniel Gustafsson)
+Add server variable ssl_tls13_ciphers to allow specification of multiple colon-separated TLSv1.3 cipher suites (Erica Zhang, Daniel Gustafsson)
 §

@@ -1168,7 +1168,7 @@ Author: Daniel Gustafsson

-Change server variable ssl_groups's default to include elliptic curve X25519 (Daniel Gustafsson, Jacob Champion)
+Change server variable ssl_groups's default to include elliptic curve X25519 (Daniel Gustafsson, Jacob Champion)
 §

@@ -1180,7 +1180,7 @@ Author: Daniel Gustafsson

-Rename server variable ssl_ecdh_curve to ssl_groups and allow multiple colon-separated ECDH curves to be specified (Erica Zhang, Daniel Gustafsson)
+Rename server variable ssl_ecdh_curve to ssl_groups and allow multiple colon-separated ECDH curves to be specified (Erica Zhang, Daniel Gustafsson)
 §

@@ -1226,12 +1226,12 @@ Author: Nathan Bossart

-Add server variable autovacuum_worker_slots to specify the maximum number of background workers (Nathan Bossart)
+Add server variable autovacuum_worker_slots to specify the maximum number of background workers (Nathan Bossart)
 §

-With this variable set, autovacuum_max_workers can be adjusted at runtime up to this maximum without a server restart.
+With this variable set, autovacuum_max_workers can be adjusted at runtime up to this maximum without a server restart.

@@ -1247,7 +1247,7 @@ Allow specification of the fixed number of dead tuples that will trigger an auto

-The server variable is autovacuum_vacuum_max_threshold.  Percentages are still used for triggering.
+The server variable is autovacuum_vacuum_max_threshold.  Percentages are still used for triggering.

@@ -1258,7 +1258,7 @@ Author: Andres Freund

-Change server variable max_files_per_process to limit only files opened by a backend (Andres Freund)
+Change server variable max_files_per_process to limit only files opened by a backend (Andres Freund)
 §

@@ -1274,7 +1274,7 @@ Author: Nathan Bossart

-Add server variable num_os_semaphores to report the required number of semaphores (Nathan Bossart)
+Add server variable num_os_semaphores to report the required number of semaphores (Nathan Bossart)
 §

@@ -1292,7 +1292,7 @@ Author: Peter Eisentraut

-Add server variable extension_control_path to specify the location of extension control files (Peter Eisentraut, Matheus Alcantara)
+Add server variable extension_control_path to specify the location of extension control files (Peter Eisentraut, Matheus Alcantara)
 §
 §

@@ -1314,7 +1314,7 @@ Author: Amit Kapila

-Allow inactive replication slots to be automatically invalided using server variable idle_replication_slot_timeout (Nisha Moond, Bharath Rupireddy)
+Allow inactive replication slots to be automatically invalidated using server variable idle_replication_slot_timeout (Nisha Moond, Bharath Rupireddy)
 §

@@ -1326,12 +1326,12 @@ Author: Masahiko Sawada

-Add server variable max_active_replication_origins to control the maximum active replication origins (Euler Taveira)
+Add server variable max_active_replication_origins to control the maximum active replication origins (Euler Taveira)
 §

-This was previously controlled by max_replication_slots, but this new setting allows a higher origin count in cases where fewer slots are required.
+This was previously controlled by max_replication_slots, but this new setting allows a higher origin count in cases where fewer slots are required.

@@ -1583,7 +1583,7 @@ Author: Thomas Munro

-Add server variable file_copy_method to control the file copying method (Nazir Bilal Yavuz)
+Add server variable file_copy_method to control the file copying method (Nazir Bilal Yavuz)
 §

@@ -2271,7 +2271,7 @@ Author: Tomas Vondra

-Report search_path changes to the client (Alexander Kukushkin, Jelte Fennema-Nio, Tomas Vondra)
+Report search_path changes to the client (Alexander Kukushkin, Jelte Fennema-Nio, Tomas Vondra)
 §
 §

From 1546e17f9d067e714e066fcdd57d5f56c14f4174 Mon Sep 17 00:00:00 2001
From: Amit Kapila
Date: Thu, 19 Jun 2025 09:48:08 +0530
Subject: [PATCH 035/181] Improve log messages and docs for slot
 synchronization.

Improve the clarity of LOG messages when a failover logical slot
synchronization fails, making the reasons more explicit for easier
debugging.

Update the documentation to outline scenarios where slot synchronization
can fail, especially during the initial sync, and emphasize that
pg_sync_replication_slots() is primarily intended for testing and
debugging purposes.

We also discussed improving the functionality of
pg_sync_replication_slots() so that it can be used reliably, but we
would take up that work for the next version after some more discussion
and review.
Reported-by: Suraj Kharage
Author: shveta malik
Reviewed-by: Zhijie Hou
Reviewed-by: Peter Smith
Reviewed-by: Amit Kapila
Backpatch-through: 17, where it was introduced
Discussion: https://postgr.es/m/CAF1DzPWTcg+m+x+oVVB=y4q9=PYYsL_mujVp7uJr-_oUtWNGbA@mail.gmail.com
---
 doc/src/sgml/func.sgml                     |  6 ++-
 doc/src/sgml/logicaldecoding.sgml          | 54 ++++++++++++++++++++--
 src/backend/replication/logical/slotsync.c |  6 +--
 3 files changed, 57 insertions(+), 9 deletions(-)

diff --git a/doc/src/sgml/func.sgml b/doc/src/sgml/func.sgml
index c67688cbf5f98..8d7d9a2f3e8e8 100644
--- a/doc/src/sgml/func.sgml
+++ b/doc/src/sgml/func.sgml
@@ -29698,7 +29698,7 @@ postgres=# SELECT '0/0'::pg_lsn + pd.segment_number * ps.setting::int + :offset
-
+
        pg_logical_slot_get_binary_changes

@@ -29970,7 +29970,9 @@ postgres=# SELECT '0/0'::pg_lsn + pd.segment_number * ps.setting::int + :offset
        standby server. Temporary synced slots, if any, cannot be used for
        logical decoding and must be dropped after promotion. See
         for details.
-        Note that this function cannot be executed if
+        Note that this function is primarily intended for testing and
+        debugging purposes and should be used with caution. Additionally,
+        this function cannot be executed if
        sync_replication_slots is enabled and the slotsync
        worker is already running to perform the synchronization of slots.

diff --git a/doc/src/sgml/logicaldecoding.sgml b/doc/src/sgml/logicaldecoding.sgml
index dd9e83b08eaf1..5c5957e0d37a1 100644
--- a/doc/src/sgml/logicaldecoding.sgml
+++ b/doc/src/sgml/logicaldecoding.sgml
@@ -370,10 +370,10 @@ postgres=# select * from pg_logical_slot_get_changes('regression_slot', NULL, NU
     pg_create_logical_replication_slot, or by
     using the failover option of
-    CREATE SUBSCRIPTION during slot creation, and then calling
-    
-    pg_sync_replication_slots
-    on the standby. By setting
+    CREATE SUBSCRIPTION during slot creation.
+    Additionally, enabling
+    sync_replication_slots on the standby
+    is required. By enabling
     sync_replication_slots on the standby, the failover
     slots can be synchronized periodically in
     the slotsync worker. For the synchronization to work, it is mandatory to
@@ -398,6 +398,52 @@ postgres=# select * from pg_logical_slot_get_changes('regression_slot', NULL, NU
     receiving the WAL up to the latest flushed position on the primary server.

+
+    While enabling
+    sync_replication_slots allows for automatic
+    periodic synchronization of failover slots, they can also be manually
+    synchronized using the
+    pg_sync_replication_slots function on the standby.
+    However, this function is primarily intended for testing and debugging and
+    should be used with caution. Unlike automatic synchronization, it does not
+    include cyclic retries, making it more prone to synchronization failures,
+    particularly during initial sync scenarios where the required WAL files
+    or catalog rows for the slot may have already been removed or are at risk
+    of being removed on the standby. In contrast, automatic synchronization
+    via sync_replication_slots provides continuous slot
+    updates, enabling seamless failover and supporting high availability.
+    Therefore, it is the recommended method for synchronizing slots.
+
+
+
+    When slot synchronization is configured as recommended,
+    and the initial synchronization is performed either automatically or
+    manually via pg_sync_replication_slots, the standby can persist the
+    synchronized slot only if the following condition is met: The logical
+    replication slot on the primary must retain WALs and system catalog
+    rows that are still available on the standby. This ensures data
+    integrity and allows logical replication to continue smoothly after
+    promotion.
+    If the required WALs or catalog rows have already been purged from the
+    standby, the slot will not be persisted to avoid data loss. In such
+    cases, the following log message may appear:
+
+      LOG: could not synchronize replication slot "failover_slot"
+      DETAIL: Synchronization could lead to data loss as the remote slot needs WAL at LSN 0/3003F28 and catalog xmin 754, but the standby has LSN 0/3003F28 and catalog xmin 756
+
+    If the logical replication slot is actively used by a consumer, no
+    manual intervention is needed; the slot will advance automatically,
+    and synchronization will resume in the next cycle. However, if no
+    consumer is configured, it is advisable to manually advance the slot
+    on the primary using
+    pg_logical_slot_get_changes or
+
+    pg_logical_slot_get_binary_changes,
+    allowing synchronization to proceed.
+

     The ability to resume logical replication after failover depends upon the
     pg_replication_slots.synced

diff --git a/src/backend/replication/logical/slotsync.c b/src/backend/replication/logical/slotsync.c
index 656e66e0ae0a1..f1dcbebfa1ae7 100644
--- a/src/backend/replication/logical/slotsync.c
+++ b/src/backend/replication/logical/slotsync.c
@@ -211,9 +211,9 @@ update_local_synced_slot(RemoteSlot *remote_slot, Oid remote_dbid,
 	 * impact the users, so we used DEBUG1 level to log the message.
 	 */
 	ereport(slot->data.persistency == RS_TEMPORARY ? LOG : DEBUG1,
-			errmsg("could not synchronize replication slot \"%s\" because remote slot precedes local slot",
+			errmsg("could not synchronize replication slot \"%s\"",
 				   remote_slot->name),
-			errdetail("The remote slot has LSN %X/%X and catalog xmin %u, but the local slot has LSN %X/%X and catalog xmin %u.",
+			errdetail("Synchronization could lead to data loss as the remote slot needs WAL at LSN %X/%X and catalog xmin %u, but the standby has LSN %X/%X and catalog xmin %u.",
 					   LSN_FORMAT_ARGS(remote_slot->restart_lsn),
 					   remote_slot->catalog_xmin,
 					   LSN_FORMAT_ARGS(slot->data.restart_lsn),
@@ -593,7 +593,7 @@ update_and_persist_local_synced_slot(RemoteSlot *remote_slot, Oid remote_dbid)
 	{
 		ereport(LOG,
 				errmsg("could not synchronize replication slot \"%s\"",
 					   remote_slot->name),
-				errdetail("Logical decoding could not find consistent point from local slot's LSN %X/%X.",
+				errdetail("Synchronization could lead to data loss as standby could not build a consistent snapshot to decode WALs at LSN %X/%X.",
 						   LSN_FORMAT_ARGS(slot->data.restart_lsn)));

 		return false;

From dec6643487bbed8f5d771e9b9aff772e5c711d4d Mon Sep 17 00:00:00 2001
From: Peter Eisentraut
Date: Thu, 19 Jun 2025 13:53:12 +0200
Subject: [PATCH 036/181] Improve pg_dump/pg_dumpall help synopses and
 terminology

Increase consistency of --help and man page synopses between pg_dump
and pg_dumpall.  These should now be very similar, as pg_dumpall can
now also produce non-text dump output.  But actually, they had drifted
further apart.

- Use verb "export" consistently, instead of "dump" or "extract".

- Use "SQL script" instead of just "script" or "text file".
- Maintain consistent distinction between SQL script and other formats/archives (which is relevant for pg_restore). Reviewed-by: Robert Treat Discussion: https://www.postgresql.org/message-id/flat/3f71d8a7-095b-4829-9b0b-fce09e9866b3%40eisentraut.org --- doc/src/sgml/ref/pg_dump.sgml | 2 +- doc/src/sgml/ref/pg_dumpall.sgml | 7 +++++-- doc/src/sgml/ref/pg_restore.sgml | 4 ++-- src/bin/pg_dump/pg_dump.c | 2 +- src/bin/pg_dump/pg_dumpall.c | 2 +- 5 files changed, 10 insertions(+), 7 deletions(-) diff --git a/doc/src/sgml/ref/pg_dump.sgml b/doc/src/sgml/ref/pg_dump.sgml index 1e06bd33bdcd1..0d9270116549a 100644 --- a/doc/src/sgml/ref/pg_dump.sgml +++ b/doc/src/sgml/ref/pg_dump.sgml @@ -18,7 +18,7 @@ PostgreSQL documentation pg_dump - extract a PostgreSQL database into a script file or other archive file + export a PostgreSQL database as an SQL script or to other formats diff --git a/doc/src/sgml/ref/pg_dumpall.sgml b/doc/src/sgml/ref/pg_dumpall.sgml index 43f384ed16a9c..8ca68da5a5560 100644 --- a/doc/src/sgml/ref/pg_dumpall.sgml +++ b/doc/src/sgml/ref/pg_dumpall.sgml @@ -16,7 +16,10 @@ PostgreSQL documentation pg_dumpall - extract a PostgreSQL database cluster using a specified dump format + + + export a PostgreSQL database cluster as an SQL script or to other formats + @@ -33,7 +36,7 @@ PostgreSQL documentation pg_dumpall is a utility for writing out (dumping) all PostgreSQL databases - of a cluster into an archive. The archive contains + of a cluster into an SQL script file or an archive. The output contains SQL commands that can be used as input to to restore the databases. It does this by calling for each database in the cluster. diff --git a/doc/src/sgml/ref/pg_restore.sgml b/doc/src/sgml/ref/pg_restore.sgml index 8c88b07dcc865..b649bd3a5ae0f 100644 --- a/doc/src/sgml/ref/pg_restore.sgml +++ b/doc/src/sgml/ref/pg_restore.sgml @@ -18,8 +18,8 @@ PostgreSQL documentation pg_restore - restore a PostgreSQL database or cluster - from an archive created by pg_dump or + restore PostgreSQL databases from archives + created by pg_dump or pg_dumpall diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c index a8f0309e8fc1e..db944ec223071 100644 --- a/src/bin/pg_dump/pg_dump.c +++ b/src/bin/pg_dump/pg_dump.c @@ -1235,7 +1235,7 @@ main(int argc, char **argv) static void help(const char *progname) { - printf(_("%s dumps a database as a text file or to other formats.\n\n"), progname); + printf(_("%s exports a PostgreSQL database as an SQL script or to other formats.\n\n"), progname); printf(_("Usage:\n")); printf(_(" %s [OPTION]... 
[DBNAME]\n"), progname);

diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c
index b1f388cb39160..3cbcad65c5fb5 100644
--- a/src/bin/pg_dump/pg_dumpall.c
+++ b/src/bin/pg_dump/pg_dumpall.c
@@ -699,7 +699,7 @@ main(int argc, char *argv[])
 static void
 help(void)
 {
-	printf(_("%s extracts a PostgreSQL database cluster based on specified dump format.\n\n"), progname);
+	printf(_("%s exports a PostgreSQL database cluster as an SQL script or to other formats.\n\n"), progname);
 	printf(_("Usage:\n"));
 	printf(_("  %s [OPTION]...\n"), progname);

From d8aa21b74ff4e3d767c3344484c3cb22b9f0ec0d Mon Sep 17 00:00:00 2001
From: Bruce Momjian
Date: Thu, 19 Jun 2025 11:50:50 -0400
Subject: [PATCH 037/181] doc: add xreflabel text for libpq and PL/Python to
 be used for PG 18 release notes

---
 doc/src/sgml/libpq.sgml    | 2 +-
 doc/src/sgml/plpython.sgml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/doc/src/sgml/libpq.sgml b/doc/src/sgml/libpq.sgml
index 08bd51219262d..298c4b38ef90a 100644
--- a/doc/src/sgml/libpq.sgml
+++ b/doc/src/sgml/libpq.sgml
@@ -1,6 +1,6 @@
-
+
 <application>libpq</application> — C Library

diff --git a/doc/src/sgml/plpython.sgml b/doc/src/sgml/plpython.sgml
index bee817ea822a2..cb065bf5f88db 100644
--- a/doc/src/sgml/plpython.sgml
+++ b/doc/src/sgml/plpython.sgml
@@ -1,6 +1,6 @@
-
+
 PL/Python — Python Procedural Language

 PL/Python

From ed117c4c6c4feb1362abbb417ac6e6525dd8789b Mon Sep 17 00:00:00 2001
From: Bruce Momjian
Date: Thu, 19 Jun 2025 11:59:00 -0400
Subject: [PATCH 038/181] doc PG 18 relnotes: add links for applications

---
 doc/src/sgml/release-18.sgml | 64 ++++++++++++++++++----------------
 1 file changed, 35 insertions(+), 29 deletions(-)

diff --git a/doc/src/sgml/release-18.sgml b/doc/src/sgml/release-18.sgml
index e89a86b1aa813..75e17f1a0c6de 100644
--- a/doc/src/sgml/release-18.sgml
+++ b/doc/src/sgml/release-18.sgml
@@ -60,13 +60,13 @@ Author: Peter Eisentraut

-Change initdb default to enable data checksums (Greg Sabino Mullane)
+Change initdb default to enable data checksums (Greg Sabino Mullane)
 §

 Checksums can be disabled with the new initdb option --no-data-checksums.
-pg_upgrade requires matching cluster checksum settings, so this new
+pg_upgrade requires matching cluster checksum settings, so this new
 option can be useful to upgrade non-checksum old clusters.

@@ -136,7 +136,7 @@ Prevent COPY FROM from treating \. as an e

-psql will still treat \. as an end-of-file marker when reading CSV files from STDIN.  Older psql clients connecting to PostgreSQL 18 servers might
+psql will still treat \. as an end-of-file marker when reading CSV files from STDIN.  Older psql clients connecting to PostgreSQL 18 servers might
 experience \copy problems.  This release also enforces that \. must appear alone on a line.

@@ -1015,7 +1015,7 @@ Have query jumbling of arrays consider only the first and last array elements (D

-Jumbling is used by pg_stat_statements.
+Jumbling is used by pg_stat_statements.

@@ -1645,7 +1645,7 @@ Require primary/foreign key relationships to use either deterministic collations

-The restore of a pg_dump, also used by pg_upgrade, will fail if these requirements are not met; schema changes must be made for these upgrade methods to succeed.
+The restore of a pg_dump, also used by pg_upgrade, will fail if these requirements are not met; schema changes must be made for these upgrade methods to succeed.

@@ -1728,7 +1728,7 @@ This was previously erroneously prohibited.
- <link linkend="sql-copy"><command>COPY</command></link> + <xref linkend="sql-copy"/> @@ -1796,7 +1796,7 @@ Previously, the COPY worked but the FREEZE - <link linkend="sql-explain"><command>EXPLAIN</command></link> + <xref linkend="sql-explain"/> @@ -2219,7 +2219,7 @@ Allow regexp_match[es]()/regexp_like() - <link linkend="libpq">libpq</link> + <xref linkend="libpq"/> @@ -2502,7 +2502,7 @@ Author: Peter Eisentraut -Change initdb to default to enabling checksums (Greg Sabino Mullane) +Change to default to enabling checksums (Greg Sabino Mullane) § § @@ -2554,7 +2554,7 @@ Author: Robert Haas -Add pg_combinebackup option / to enable hard linking (Israel Barth Rubio, Robert Haas) +Add option / to enable hard linking (Israel Barth Rubio, Robert Haas) § @@ -2582,7 +2582,7 @@ Author: Masahiko Sawada -If pg_rewind's specifies a database name, use it in output (Masahiko Sawada) +If 's specifies a database name, use it in output (Masahiko Sawada) § @@ -2617,12 +2617,12 @@ Author: Andrew Dunstan -Allow pg_dumpall to dump in the same output formats as pg_dump supports (Mahendra Singh Thalor, Andrew Dunstan) +Allow to dump in the same output formats as pg_dump supports (Mahendra Singh Thalor, Andrew Dunstan) § -Also modify pg_restore to handle such dumps. Previously pg_dumpall only supported text format. +Also modify to handle such dumps. Previously pg_dumpall only supported text format. @@ -2633,7 +2633,7 @@ Author: Jeff Davis -Add pg_dump options , , and (Jeff Davis) +Add options , , and (Jeff Davis) § @@ -2647,7 +2647,7 @@ Author: Nathan Bossart -Add pg_dump and pg_dumpall option to dump sequence data that would normally be excluded (Nathan Bossart) +Add pg_dump and option to dump sequence data that would normally be excluded (Nathan Bossart) § § @@ -2660,7 +2660,8 @@ Author: Jeff Davis -Add pg_dump, pg_dumpall, and pg_restore options , , , and (Corey Huinker, Jeff Davis) +Add , , and + options , , , and (Corey Huinker, Jeff Davis) § @@ -2672,7 +2673,9 @@ Author: Tom Lane -Add option to disable row level security policy processing in pg_dump, pg_dumpall, pg_restore (Nikolay Samokhvalov) +Add option to disable row level security policy processing in +, , + (Nikolay Samokhvalov) § @@ -2686,7 +2689,7 @@ This is useful for migrating to systems with different policies. 
- <link linkend="pgupgrade"><application>pg_upgrade</application></link> + <xref linkend="pgupgrade"/> @@ -2812,7 +2815,7 @@ Author: Amit Kapila -Add pg_createsubscriber option to create logical replicas for all databases (Shubham Khanna) +Add option to create logical replicas for all databases (Shubham Khanna) § @@ -2848,7 +2851,7 @@ Author: Masahiko Sawada -Add pg_recvlogical option to specify failover slots (Hayato Kuroda) +Add option to specify failover slots (Hayato Kuroda) § @@ -3087,7 +3090,7 @@ Author: Peter Eisentraut -Add support for Python "Limited API" in PL/Python (Peter Eisentraut) +Add support for Python "Limited API" in (Peter Eisentraut) § § @@ -3239,7 +3242,7 @@ Author: Masahiko Sawada -Add extension pg_logicalinspect to inspect logical snapshots (Bertrand Drouvot) +Add extension to inspect logical snapshots (Bertrand Drouvot) § @@ -3289,7 +3292,8 @@ Author: Peter Eisentraut -Allow SCRAM authentication from the client to be passed to postgres_fdw servers (Matheus Alcantara, Peter Eisentraut) +Allow SCRAM authentication from the client to be passed to + servers (Matheus Alcantara, Peter Eisentraut) § @@ -3307,7 +3311,8 @@ Author: Peter Eisentraut -Allow SCRAM authentication from the client to be passed to dblink servers (Matheus Alcantara) +Allow SCRAM authentication from the client to be passed to + servers (Matheus Alcantara) § @@ -3319,7 +3324,7 @@ Author: Fujii Masao -Add on_error and log_verbosity options to file_fdw (Atsushi Torikoshi) +Add on_error and log_verbosity options to (Atsushi Torikoshi) § @@ -3351,7 +3356,8 @@ Author: Nathan Bossart -Add configurable variable min_password_length to passwordcheck (Emanuele Musella, Maurizio Boriani) +Add configurable variable min_password_length to + (Emanuele Musella, Maurizio Boriani) § @@ -3379,7 +3385,7 @@ Author: Tom Lane -Add isn server variable weak to control invalid check digit acceptance (Viktor Holmberg) +Add server variable weak to control invalid check digit acceptance (Viktor Holmberg) § @@ -3464,7 +3470,7 @@ Allow extensions to use the server's cumulative statistics API - <link linkend="pgstatstatements"><application>pg_stat_statements</application></link> + <xref linkend="pgstatstatements"/> @@ -3533,7 +3539,7 @@ Add pg_stat_statements.wal_buffers_full - <link linkend="pgcrypto"><application>pgcrypto</application></link> + <xref linkend="pgcrypto"/> From 6c29088fc6e269b7d64797bb62533b82afe03d93 Mon Sep 17 00:00:00 2001 From: Jeff Davis Date: Thu, 19 Jun 2025 12:43:27 -0700 Subject: [PATCH 039/181] Correct docs about partitions and EXCLUDE constraints. In version 17 we added support for cross-partition EXCLUDE constraints, as long as they included all partition key columns and compared them with equality (see 8c852ba9a4). I updated the docs for exclusion constraints, but I missed that the docs for CREATE TABLE still said that they were not supported. This commit fixes that. Author: Paul A. Jungwirth Co-authored-by: Jeff Davis Discussion: https://postgr.es/m/c955d292-b92d-42d1-a2a0-1ec6715a2546@illuminatedcomputing.com Backpatch-through: 17 --- doc/src/sgml/ref/create_table.sgml | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/doc/src/sgml/ref/create_table.sgml b/doc/src/sgml/ref/create_table.sgml index 4a41b2f553007..a581691818278 100644 --- a/doc/src/sgml/ref/create_table.sgml +++ b/doc/src/sgml/ref/create_table.sgml @@ -447,11 +447,6 @@ WITH ( MODULUS numeric_literal, REM the values in the new row, an error will be reported. 
- - Partitioned tables do not support EXCLUDE constraints; - however, you can define these constraints on individual partitions. - - See for more discussion on table partitioning. @@ -1162,6 +1157,18 @@ WITH ( MODULUS numeric_literal, REM exclusion constraint on a subset of the table; internally this creates a partial index. Note that parentheses are required around the predicate. + + + When establishing an exclusion constraint for a multi-level partition + hierarchy, all the columns in the partition key of the target + partitioned table, as well as those of all its descendant partitioned + tables, must be included in the constraint definition. Additionally, + those columns must be compared using the equality operator. These + restrictions ensure that potentially-conflicting rows will exist in the + same partition. The constraint may also refer to other columns which + are not a part of any partition key, which can be compared using any + appropriate operator. + From a8360f074cc03a7cb73a4aaa6d8caab0e0bf0a0f Mon Sep 17 00:00:00 2001 From: Bruce Momjian Date: Thu, 19 Jun 2025 17:13:58 -0400 Subject: [PATCH 040/181] doc PG 18 relnotes: add links to command and struct tags --- doc/src/sgml/release-18.sgml | 78 ++++++++++++++++++------------------ 1 file changed, 39 insertions(+), 39 deletions(-) diff --git a/doc/src/sgml/release-18.sgml b/doc/src/sgml/release-18.sgml index 75e17f1a0c6de..11a4f99a27236 100644 --- a/doc/src/sgml/release-18.sgml +++ b/doc/src/sgml/release-18.sgml @@ -100,7 +100,7 @@ Deprecate MD5 password authentication (Nathan Bossart) -Support for MD5 passwords will be removed in a future major version release. CREATE ROLE and ALTER ROLE now emit deprecation warnings when setting MD5 passwords. +Support for MD5 passwords will be removed in a future major version release. and now emit deprecation warnings when setting MD5 passwords. These warnings can be disabled by setting the parameter to off. @@ -112,7 +112,7 @@ Author: David Rowley -Change VACUUM and ANALYZE to process the inheritance children of a parent (Michael Harris) +Change and to process the inheritance children of a parent (Michael Harris) § @@ -130,7 +130,7 @@ Author: Tom Lane -Prevent COPY FROM from treating \. as an end-of-file marker when reading CSV files (Daniel Vérité, Tom Lane) +Prevent COPY FROM from treating \. as an end-of-file marker when reading CSV files (Daniel Vérité, Tom Lane) § § @@ -154,7 +154,7 @@ Disallow unlogged partitioned tables (Michael Paquier) -Previously ALTER TABLE SET [UN]LOGGED did nothing, and the creation of an unlogged partitioned table did not cause its children to be unlogged. +Previously ALTER TABLE SET [UN]LOGGED did nothing, and the creation of an unlogged partitioned table did not cause its children to be unlogged. @@ -170,7 +170,7 @@ Execute AFTER triggers as the role that was active when trigg -Previously such triggers were run as the role that was active at trigger execution time (e.g., at COMMIT). This is significant for cases where the role is changed between queue time and +Previously such triggers were run as the role that was active at trigger execution time (e.g., at ). This is significant for cases where the role is changed between queue time and transaction commit. 
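A concrete sketch of the revised create_table.sgml rule above (names are illustrative; btree_gist is assumed to supply the integer equality operator class for GiST): the partition key column appears in the exclusion constraint and is compared with equality, while the non-key column may use any appropriate operator.

    CREATE EXTENSION IF NOT EXISTS btree_gist;

    -- room is the partition key: it must appear in the constraint, WITH =
    CREATE TABLE reservation (
        room   int,
        during tsrange,
        EXCLUDE USING gist (room WITH =, during WITH &&)
    ) PARTITION BY LIST (room);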
@@ -182,7 +182,7 @@ Author: Fujii Masao -Remove non-functional support for rule privileges in GRANT/REVOKE (Fujii Masao) +Remove non-functional support for rule privileges in / (Fujii Masao) § @@ -198,7 +198,7 @@ Author: David Rowley -Remove column pg_backend_memory_contexts.parent (Melih Mutlu) +Remove column pg_backend_memory_contexts.parent (Melih Mutlu) § @@ -319,7 +319,7 @@ Author: Richard Guo -Allow the keys of SELECT DISTINCT to be internally reordered to avoid sorting (Richard Guo) +Allow the keys of SELECT DISTINCT to be internally reordered to avoid sorting (Richard Guo) § @@ -607,7 +607,7 @@ Add an asynchronous I/O subsystem (Andres Freund, Thomas Munro, Nazir Bilal Yavu This feature allows backends to queue multiple read requests, which allows for more efficient sequential scans, bitmap heap scans, vacuums, etc. This is enabled by server variable , with server variables and added to control it. This also enables - and values greater than zero for systems without fadvise() support. The new system view pg_aios shows the file handles being used + and values greater than zero for systems without fadvise() support. The new system view pg_aios shows the file handles being used for asynchronous I/O. @@ -679,7 +679,7 @@ Author: Nathan Bossart -Add server variable to control file truncation during VACUUM (Nathan Bossart, Gurjeet Singh) +Add server variable to control file truncation during (Nathan Bossart, Gurjeet Singh) § @@ -768,7 +768,7 @@ Add server variable to log lock acquisit -Specifically it reports SELECT ... NOWAIT lock failures. +Specifically it reports SELECT ... NOWAIT lock failures. @@ -779,7 +779,7 @@ Author: Michael Paquier -Modify pg_stat_all_tables and its variants to report the time spent in VACUUM, ANALYZE, and their automatic variants (Sami Imseih) +Modify pg_stat_all_tables and its variants to report the time spent in VACUUM, ANALYZE, and their automatic variants (Sami Imseih) § @@ -797,13 +797,13 @@ Author: Nathan Bossart -Add delay time reporting to VACUUM and ANALYZE (Bertrand Drouvot, Nathan Bossart) +Add delay time reporting to and (Bertrand Drouvot, Nathan Bossart) § § -This information appears in the autovacuum logs, the system views pg_stat_progress_vacuum and pg_stat_progress_analyze, and the output of VACUUM and ANALYZE when in VERBOSE +This information appears in the autovacuum logs, the system views pg_stat_progress_vacuum and pg_stat_progress_analyze, and the output of VACUUM and ANALYZE when in VERBOSE mode; tracking must be enabled with the server variable . @@ -861,7 +861,7 @@ Author: Michael Paquier -Add pg_stat_io columns to report I/O activity in bytes (Nazir Bilal Yavuz) +Add pg_stat_io columns to report I/O activity in bytes (Nazir Bilal Yavuz) § @@ -900,7 +900,7 @@ Author: Michael Paquier -Change server variable to control tracking WAL timing in pg_stat_io instead of pg_stat_wal (Bertrand Drouvot) +Change server variable to control tracking WAL timing in pg_stat_io instead of pg_stat_wal (Bertrand Drouvot) § @@ -959,7 +959,7 @@ Author: Fujii Masao -Add column pg_stat_checkpointer.num_done to report the number of completed checkpoints (Anton A. Melnikov) +Add column pg_stat_checkpointer.num_done to report the number of completed checkpoints (Anton A. 
Melnikov) § @@ -991,7 +991,7 @@ Author: Michael Paquier -Add columns to pg_stat_database to report parallel workers activity (Benoit Lobréau) +Add columns to pg_stat_database to report parallel workers activity (Benoit Lobréau) § @@ -1042,7 +1042,7 @@ Author: David Rowley -Add column pg_backend_memory_contexts.type to report the type of memory context (David Rowley) +Add column pg_backend_memory_contexts.type to report the type of memory context (David Rowley) § @@ -1102,7 +1102,7 @@ Author: Fujii Masao -Allow ALTER DEFAULT PRIVILEGES to define large object default privileges (Takatsuka Haruka, Yugo Nagata, Laurenz Albe) +Allow to define large object default privileges (Takatsuka Haruka, Yugo Nagata, Laurenz Albe) § @@ -1378,7 +1378,7 @@ Author: Amit Kapila -Change the default CREATE SUBSCRIPTION streaming option from off to parallel (Vignesh C) +Change the default streaming option from off to parallel (Vignesh C) § @@ -1392,7 +1392,7 @@ Author: Amit Kapila -Allow ALTER SUBSCRIPTION to change the replication slot's two-phase commit behavior (Hayato Kuroda, Ajin Cherian, Amit Kapila, Zhijie Hou) +Allow ALTER SUBSCRIPTION to change the replication slot's two-phase commit behavior (Hayato Kuroda, Ajin Cherian, Amit Kapila, Zhijie Hou) § § @@ -1422,7 +1422,7 @@ Log conflicts while applying logical replication changes (Zhijie Hou, Nisha Moon -Also report in new columns of pg_stat_subscription_stats. +Also report in new columns of pg_stat_subscription_stats. @@ -1471,7 +1471,7 @@ Add OLD/NEW support to RETURNING< -Previously RETURNING only returned new values for INSERT and UPDATE, and old values for DELETE; MERGE would return the appropriate value for the internal query executed. This new syntax +Previously RETURNING only returned new values for and , and old values for ; would return the appropriate value for the internal query executed. This new syntax allows the RETURNING list of INSERT/UPDATE/DELETE/MERGE to explicitly return old and new values by using the special aliases old and new. These aliases can be renamed to avoid identifier conflicts. @@ -1489,7 +1489,7 @@ Allow foreign tables to be created like existing local tables (Zhang Mingli) -The syntax is CREATE FOREIGN TABLE ... LIKE. +The syntax is CREATE FOREIGN TABLE ... LIKE. @@ -1544,7 +1544,7 @@ Author: David Rowley -Allow VACUUM and ANALYZE to process partitioned tables without processing their children (Michael Harris) +Allow and to process partitioned tables without processing their children (Michael Harris) § @@ -1588,7 +1588,7 @@ Add server variable to control the file c -This controls whether CREATE DATABASE ... STRATEGY=FILE_COPY and ALTER DATABASE ... SET TABLESPACE uses file copy or clone. +This controls whether CREATE DATABASE ... STRATEGY=FILE_COPY and ALTER DATABASE ... SET TABLESPACE uses file copy or clone. @@ -1630,7 +1630,7 @@ Allow CHECK and foreign key constraints to be specified as -This also adds column pg_constraint.conenforced. +This also adds column pg_constraint.conenforced. 
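A minimal sketch of the NOT ENFORCED syntax this item describes, with illustrative names; the new pg_constraint.conenforced column then reads false for the constraint:

    CREATE TABLE orders (
        qty int,
        CONSTRAINT qty_positive CHECK (qty > 0) NOT ENFORCED
    );

    SELECT conname, conenforced
    FROM pg_constraint
    WHERE conname = 'qty_positive';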
- - - -Add function pg_check_fipsmode() to report the server's FIPS mode (Daniel Gustafsson) -§ - - - -Add optional parameter to json{b}_strip_nulls to allow removal of null array elements (Florents Tselai) +Add optional parameter to json{b}_strip_nulls to allow removal of null array elements (Florents Tselai) § @@ -1981,7 +1969,7 @@ Author: Tom Lane -Add function array_sort() which sorts an array's first dimension (Junwang Zhao, Jian He) +Add function array_sort() which sorts an array's first dimension (Junwang Zhao, Jian He) § @@ -1993,7 +1981,7 @@ Author: Michael Paquier -Add function array_reverse() which reverses an array's first dimension (Aleksander Alekseev) +Add function array_reverse() which reverses an array's first dimension (Aleksander Alekseev) § @@ -2005,7 +1993,7 @@ Author: Nathan Bossart -Add function reverse() to reverse bytea bytes (Aleksander Alekseev) +Add function reverse() to reverse bytea bytes (Aleksander Alekseev) § @@ -2017,7 +2005,7 @@ Author: Dean Rasheed -Allow casting between integer types and bytea (Aleksander Alekseev) +Allow casting between integer types and bytea (Aleksander Alekseev) § @@ -2033,7 +2021,7 @@ Author: Peter Eisentraut -Update Unicode data to Unicode 16.0.0 (Peter Eisentraut) +Update Unicode data to Unicode 16.0.0 (Peter Eisentraut) § @@ -2045,7 +2033,7 @@ Author: Tom Lane -Add full text search stemming for Estonian (Tom Lane) +Add full text search stemming for Estonian (Tom Lane) § @@ -2057,12 +2045,12 @@ Author: Tom Lane -Improve the XML error codes to more closely match the SQL standard (Tom Lane) +Improve the XML error codes to more closely match the SQL standard (Tom Lane) § -These errors are reported via SQLSTATE. +These errors are reported via SQLSTATE. @@ -2082,7 +2070,7 @@ Author: Jeff Davis -Add function CASEFOLD() to allow for more sophisticated case-insensitive matching (Jeff Davis) +Add function casefold() to allow for more sophisticated case-insensitive matching (Jeff Davis) § @@ -2100,7 +2088,7 @@ Author: Tom Lane -Allow MIN()/MAX() aggregates on arrays and composite types (Aleksander Alekseev, Marat Buharov) +Allow MIN()/MAX() aggregates on arrays and composite types (Aleksander Alekseev, Marat Buharov) § § @@ -2113,7 +2101,7 @@ Author: Tom Lane -Add a WEEK option to EXTRACT() (Tom Lane) +Add a WEEK option to EXTRACT() (Tom Lane) § @@ -2137,7 +2125,7 @@ Author: Tom Lane -Add roman numeral support to to_number() (Hunaid Sohail) +Add roman numeral support to to_number() (Hunaid Sohail) § @@ -2153,12 +2141,12 @@ Author: Masahiko Sawada -Add UUID version 7 generation function uuidv7() (Andrey Borodin) +Add UUID version 7 generation function uuidv7() (Andrey Borodin) § -This UUID value is temporally sortable. Function alias uuidv4() has been added to explicitly generate version 4 UUIDs. +This UUID value is temporally sortable. Function alias uuidv4() has been added to explicitly generate version 4 UUIDs. 
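For example (table name illustrative), a uuidv7() default yields keys that sort roughly by generation time, which tends to help btree index locality, while uuidv4() keeps the fully random behavior:

    CREATE TABLE events (
        id      uuid DEFAULT uuidv7() PRIMARY KEY,
        payload text
    );

    SELECT uuidv4();  -- explicitly random, version-4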
@@ -2169,7 +2157,7 @@ Author: Nathan Bossart -Add functions crc32() and crc32c() to compute CRC values (Aleksander Alekseev) +Add functions crc32() and crc32c() to compute CRC values (Aleksander Alekseev) § @@ -2181,7 +2169,7 @@ Author: Dean Rasheed -Add math functions gamma() and lgamma() (Dean Rasheed) +Add math functions gamma() and lgamma() (Dean Rasheed) § @@ -2193,7 +2181,7 @@ Author: Tom Lane -Allow => syntax for named cursor arguments in plpgsql (Pavel Stehule) +Allow => syntax for named cursor arguments in PL/pgSQL (Pavel Stehule) § @@ -2209,7 +2197,7 @@ Author: Tom Lane -Allow regexp_match[es]()/regexp_like()/regexp_replace()/regexp_count()/regexp_instr()/regexp_substr()/regexp_split_to_table()/regexp_split_to_array() to use named arguments (Jian He) +Allow regexp_match[es]()/regexp_like()/regexp_replace()/regexp_count()/regexp_instr()/regexp_substr()/regexp_split_to_table()/regexp_split_to_array() to use named arguments (Jian He) § @@ -2230,7 +2218,7 @@ Author: Robert Haas -Add function PQfullProtocolVersion() to report the full, including minor, protocol version number (Jacob Champion, Jelte Fennema-Nio) +Add function PQfullProtocolVersion() to report the full, including minor, protocol version number (Jacob Champion, Jelte Fennema-Nio) § @@ -2244,7 +2232,7 @@ Author: Heikki Linnakangas -Add libpq connection parameters and environment variables to specify the minimum and maximum acceptable protocol version for connections (Jelte Fennema-Nio) +Add libpq connection parameters and environment variables to specify the minimum and maximum acceptable protocol version for connections (Jelte Fennema-Nio) § § @@ -2257,7 +2245,7 @@ Author: Michael Paquier -Add libpq function PQservice() to return the connection service name (Michael Banck) +Add libpq function PQservice() to return the connection service name (Michael Banck) § @@ -2292,7 +2280,7 @@ Author: Álvaro Herrera -Add PQtrace() output for all message types, including authentication (Jelte Fennema-Nio) +Add PQtrace() output for all message types, including authentication (Jelte Fennema-Nio) § § § @@ -2308,7 +2296,7 @@ Author: Daniel Gustafsson -Add libpq connection parameter sslkeylogfile which dumps out SSL key material (Abhishek Chanda, Daniel Gustafsson) +Add libpq connection parameter sslkeylogfile which dumps out SSL key material (Abhishek Chanda, Daniel Gustafsson) § @@ -2354,7 +2342,7 @@ Allow psql to parse, bind, and close named prepared s -This is accomplished with new commands \parse, \bind_named, and \close. +This is accomplished with new commands \parse, \bind_named, and \close. @@ -2376,7 +2364,7 @@ Add psql backslash commands to allowing issuance of p -The new commands are \startpipeline, \syncpipeline, \sendpipeline, \endpipeline, \flushrequest, \flush, and \getresults. +The new commands are \startpipeline, \syncpipeline, \sendpipeline, \endpipeline, \flushrequest, \flush, and \getresults. @@ -2392,7 +2380,7 @@ Allow adding pipeline status to the psql prompt and a -The new prompt character is %P and the new psql variables are PIPELINE_SYNC_COUNT, PIPELINE_COMMAND_COUNT, and PIPELINE_RESULT_COUNT. +The new prompt character is %P and the new psql variables are PIPELINE_SYNC_COUNT, PIPELINE_COMMAND_COUNT, and PIPELINE_RESULT_COUNT. 
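A hedged sketch of how the new pipeline meta-commands combine, following the pattern in the psql documentation (queries in a pipeline are dispatched with \bind and \sendpipeline rather than \g; exact flow may vary by use case):

    \startpipeline
    SELECT $1 \bind 42 \sendpipeline
    SELECT now() \bind \sendpipeline
    \flushrequest
    \getresults
    \endpipeline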
@@ -2431,7 +2419,7 @@ Author: Álvaro Herrera -Change psql's \conninfo to use tabular format and include more information (Álvaro Herrera, Maiquel Grassi, Hunaid Sohail) +Change psql's to use tabular format and include more information (Álvaro Herrera, Maiquel Grassi, Hunaid Sohail) § @@ -2443,7 +2431,7 @@ Author: Dean Rasheed -Add function's leakproof indicator to psql's \df+, \do+, \dAo+, and \dC+ outputs (Yugo Nagata) +Add function's leakproof indicator to psql's \df+, \do+, \dAo+, and \dC+ outputs (Yugo Nagata) § @@ -2455,7 +2443,7 @@ Author: Michael Paquier -Add access method details for partitioned relations in \dP+ (Justin Pryzby) +Add access method details for partitioned relations in \dP+ (Justin Pryzby) § @@ -2467,7 +2455,7 @@ Author: Magnus Hagander -Add default_version to the psql \dx extension output (Magnus Hagander) +Add default_version to the psql \dx extension output (Magnus Hagander) § @@ -2479,7 +2467,7 @@ Author: Daniel Gustafsson -Add psql variable WATCH_INTERVAL to set the default \watch wait time (Daniel Gustafsson) +Add psql variable to set the default \watch wait time (Daniel Gustafsson) § @@ -2537,7 +2525,7 @@ Author: Nathan Bossart -Add vacuumdb option to compute only missing optimizer statistics (Corey Huinker, Nathan Bossart) +Add option to compute only missing optimizer statistics (Corey Huinker, Nathan Bossart) § § @@ -2570,7 +2558,7 @@ Author: Robert Haas -Allow pg_verifybackup to verify tar-format backups (Amul Sul) +Allow to verify tar-format backups (Amul Sul) § @@ -2594,7 +2582,7 @@ Author: Masahiko Sawada -Add pg_resetwal option to change the default char signedness (Masahiko Sawada) +Add option to change the default char signedness (Masahiko Sawada) § @@ -2888,13 +2876,13 @@ Author: Michael Paquier -Separate the loading and running of injection points (Michael Paquier, Heikki Linnakangas) +Separate the loading and running of injection points (Michael Paquier, Heikki Linnakangas) § § -Injection points can now be created, but not run, via INJECTION_POINT_LOAD(), and such injection points can be run via INJECTION_POINT_CACHED(). +Injection points can now be created, but not run, via INJECTION_POINT_LOAD(), and such injection points can be run via INJECTION_POINT_CACHED(). @@ -2917,7 +2905,7 @@ Author: Heikki Linnakangas -Allow inline injection point test code with IS_INJECTION_POINT_ATTACHED() (Heikki Linnakangas) +Allow inline injection point test code with IS_INJECTION_POINT_ATTACHED() (Heikki Linnakangas) § @@ -2929,7 +2917,7 @@ Author: David Rowley -Improve the performance of processing long JSON strings using SIMD instructions (David Rowley) +Improve the performance of processing long JSON strings using SIMD (Single Instruction Multiple Data) (David Rowley) § @@ -2993,14 +2981,14 @@ Author: Tomas Vondra -Add configure option to enable NUMA awareness (Jakub Wartak, Bertrand Drouvot) +Add configure option to enable NUMA awareness (Jakub Wartak, Bertrand Drouvot) § § § -The function pg_numa_available() reports on NUMA awareness, and system views pg_shmem_allocations_numa and pg_buffercache_numa which report on shared memory distribution across +The function pg_numa_available() reports on NUMA awareness, and system views pg_shmem_allocations_numa and pg_buffercache_numa which report on shared memory distribution across NUMA nodes. 
@@ -3012,7 +3000,7 @@ Author: Nathan Bossart -Add TOAST table to pg_index to allow for very large expression indexes (Nathan Bossart) +Add TOAST table to pg_index to allow for very large expression indexes (Nathan Bossart) § @@ -3051,7 +3039,7 @@ Author: Peter Eisentraut -Add amgettreeheight, amconsistentequality, and amconsistentordering to the index access method API (Mark Dilger) +Add amgettreeheight, amconsistentequality, and amconsistentordering to the index access method API (Mark Dilger) § § @@ -3064,7 +3052,7 @@ Author: Peter Eisentraut -Add GiST support function stratnum() (Paul A. Jungwirth) +Add GiST support function stratnum() (Paul A. Jungwirth) § @@ -3076,7 +3064,7 @@ Author: Masahiko Sawada -Record the default CPU signedness of char in pg_controldata (Masahiko Sawada) +Record the default CPU signedness of char in (Masahiko Sawada) § @@ -3146,12 +3134,12 @@ Author: Tom Lane -Add macro PG_MODULE_MAGIC_EXT to allow extensions to report their name and version (Andrei Lepikhov) +Add macro PG_MODULE_MAGIC_EXT to allow extensions to report their name and version (Andrei Lepikhov) § -This information can be access via the new function pg_get_loaded_modules(). +This information can be access via the new function pg_get_loaded_modules(). @@ -3162,7 +3150,7 @@ Author: Tom Lane -Document that SPI_connect()/SPI_connect_ext() always returns success (SPI_OK_CONNECT) (Stepan Neretin) +Document that SPI_connect()/SPI_connect_ext() always returns success (SPI_OK_CONNECT) (Stepan Neretin) § @@ -3173,25 +3161,25 @@ Errors are always reported via ereport(). -Remove the experimental designation of Meson builds on Windows (Aleksander Alekseev) -§ +Add documentation section about API and ABI compatibility (David Wheeler, Peter Eisentraut) +§ -Add documentation section about API and ABI compatibility (David Wheeler, Peter Eisentraut) -§ +Remove the experimental designation of Meson builds on Windows (Aleksander Alekseev) +§ @@ -3254,7 +3242,7 @@ Author: Robert Haas -Add extension pg_overexplain which adds debug details to EXPLAIN output (Robert Haas) +Add extension which adds debug details to EXPLAIN output (Robert Haas) § @@ -3272,7 +3260,7 @@ Author: Fujii Masao -Add output columns to postgres_fdw_get_connections() (Hayato Kuroda, Sagar Dilip Shedge) +Add output columns to postgres_fdw_get_connections() (Hayato Kuroda, Sagar Dilip Shedge) § § § @@ -3292,15 +3280,15 @@ Author: Peter Eisentraut -Allow SCRAM authentication from the client to be passed to +Allow SCRAM authentication from the client to be passed to servers (Matheus Alcantara, Peter Eisentraut) § This avoids storing postgres_fdw authentication information in the database, and is enabled with the -postgres_fdw use_scram_passthrough connection option. libpq uses new connection -parameters scram_client_key and scram_server_key. +postgres_fdw use_scram_passthrough connection option. libpq uses new connection +parameters and . @@ -3373,7 +3361,7 @@ Author: Tatsuo Ishii -Have pgbench report the number of failed, retried, or skipped transactions in per-script reports (Yugo Nagata) +Have report the number of failed, retried, or skipped transactions in per-script reports (Yugo Nagata) § @@ -3390,7 +3378,7 @@ Add server variable weak to control inv -This was previously only controlled by function isn_weak(). +This was previously only controlled by function isn_weak(). 
@@ -3401,7 +3389,7 @@ Author: Heikki Linnakangas -Allow values to be sorted to speed btree_gist index builds (Bernd Helmle, Andrey Borodin) +Allow values to be sorted to speed index builds (Bernd Helmle, Andrey Borodin) § @@ -3413,7 +3401,7 @@ Author: Tomas Vondra -Add amcheck function gin_index_check() to verify GIN indexes (Grigory Kryachko, Heikki Linnakangas, Andrey Borodin) +Add check function gin_index_check() to verify GIN indexes (Grigory Kryachko, Heikki Linnakangas, Andrey Borodin) § @@ -3425,12 +3413,12 @@ Author: Andres Freund -Add functions pg_buffercache_evict_relation() and pg_buffercache_evict_all() to evict unpinned shared buffers (Nazir Bilal Yavuz) +Add functions pg_buffercache_evict_relation() and pg_buffercache_evict_all() to evict unpinned shared buffers (Nazir Bilal Yavuz) § -The existing function pg_buffercache_evict() now returns the buffer flush status. +The existing function pg_buffercache_evict() now returns the buffer flush status. @@ -3445,7 +3433,7 @@ Author: Robert Haas -Allow extensions to install custom EXPLAIN options (Robert Haas, Sami Imseih) +Allow extensions to install custom options (Robert Haas, Sami Imseih) § § § @@ -3550,7 +3538,7 @@ Author: Álvaro Herrera -Add pgcrypto functions sha256crypt() and sha512crypt() (Bernd Helmle) +Add pgcrypto algorithms sha256crypt and sha512crypt (Bernd Helmle) § @@ -3562,11 +3550,23 @@ Author: Daniel Gustafsson -Add CFB mode to pgcrypto encryption and decryption (Umar Hayat) +Add CFB mode to pgcrypto encryption and decryption (Umar Hayat) § + + + + +Add function fips_mode() to report the server's FIPS mode (Daniel Gustafsson) +§ + + + - - -Change default to enable data checksums (Greg Sabino Mullane) -§ - + + + Change default to enable data checksums + (Greg Sabino Mullane) + § + - -Checksums can be disabled with the new initdb option . - requires matching cluster checksum settings, so this new -option can be useful to upgrade non-checksum old clusters. - - + + Checksums can be disabled with the + new initdb option + . + requires matching cluster checksum settings, so this new option can + be useful to upgrade non-checksum old clusters. + + - - -Change time zone abbreviation handling (Tom Lane) -§ - + + + Change time zone abbreviation handling (Tom Lane) + § + - -The system will now favor the current session's time zone abbreviations before checking the server variable . Previously timezone_abbreviations was -checked first. - - + + The system will now favor the current session's time + zone abbreviations before checking the server variable + . Previously + timezone_abbreviations was checked first. + + - - -Deprecate MD5 password authentication (Nathan Bossart) -§ - + + + Deprecate MD5 password + authentication (Nathan Bossart) + § + - -Support for MD5 passwords will be removed in a future major version release. and now emit deprecation warnings when setting MD5 passwords. -These warnings can be disabled by setting the parameter to off. - - + + Support for MD5 passwords will be removed in a future major + version release. and now emit deprecation warnings when + setting MD5 passwords. These warnings can be disabled by setting + the parameter to + off. + + - - -Change and to process the inheritance children of a parent (Michael Harris) -§ - + + + Change and + to process the inheritance children of a parent (Michael Harris) + § + - -The previous behavior can be performed by using the new ONLY option. - - + + The previous behavior can be performed by using the new + ONLY option. 
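With illustrative names, the behavior change reads as:

    VACUUM parent_tab;        -- PostgreSQL 18: parent plus inheritance children
    VACUUM ONLY parent_tab;   -- previous behavior: the parent alone
    ANALYZE ONLY parent_tab;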
+ + - - -Prevent COPY FROM from treating \. as an end-of-file marker when reading CSV files (Daniel Vérité, Tom Lane) -§ -§ - + + + Prevent COPY FROM + from treating \. as an end-of-file marker when + reading CSV files (Daniel Vérité, Tom Lane) + § + § + - - will still treat \. as an end-of-file marker when reading CSV files from STDIN. Older psql clients connecting to PostgreSQL 18 servers might -experience \copy problems. This -release also enforces that \. must appear alone on a line. - - + + will still treat + \. as an end-of-file marker when reading + CSV files from STDIN. + Older psql clients connecting to + PostgreSQL 18 servers might experience \copy + problems. This release also enforces that \. + must appear alone on a line. + + - - -Disallow unlogged partitioned tables (Michael Paquier) -§ - + + + Disallow unlogged partitioned tables (Michael Paquier) + § + - -Previously ALTER TABLE SET [UN]LOGGED did nothing, and the creation of an unlogged partitioned table did not cause its children to be unlogged. - - + + Previously ALTER TABLE SET + [UN]LOGGED did nothing, and the creation of an + unlogged partitioned table did not cause its children to be unlogged. + + - - -Execute AFTER triggers as the role that was active when trigger events were queued (Laurenz Albe) -§ - + + + Execute AFTER triggers as the role that was active when + trigger events were queued (Laurenz Albe) + § + - -Previously such triggers were run as the role that was active at trigger execution time (e.g., at ). This is significant for cases where the role is changed between queue time and -transaction commit. - - + + Previously such triggers were run as the role that was active at + trigger execution time (e.g., at ). + This is significant for cases where the role is changed between queue + time and transaction commit. + + - - -Remove non-functional support for rule privileges in / (Fujii Masao) -§ - + + + Remove non-functional support for rule privileges in / (Fujii Masao) + § + - -These have been non-functional since PostgreSQL 8.2. - - + + These have been non-functional since + PostgreSQL 8.2. + + - - -Remove column pg_backend_memory_contexts.parent (Melih Mutlu) -§ - + + + Remove column pg_backend_memory_contexts.parent + (Melih Mutlu) + § + - -This is no longer needed since pg_backend_memory_contexts.path was added. - - + + This is no longer needed since + pg_backend_memory_contexts.path + was added. + + - - -Change pg_backend_memory_contexts.level and pg_log_backend_memory_contexts() to be one-based (Melih Mutlu, Atsushi Torikoshi, David Rowley, Fujii Masao) -§ -§ -§ - + + + Change + pg_backend_memory_contexts.level + and pg_log_backend_memory_contexts() + to be one-based (Melih Mutlu, Atsushi Torikoshi, David Rowley, + Fujii Masao) + § + § + § + - -These were previously zero-based. - - + + These were previously zero-based. + + @@ -256,40 +292,48 @@ Author: Alexander Korotkov 2025-02-17 [fc069a3a6] Implement Self-Join Elimination --> - - -Automatically remove some unnecessary table self-joins (Andrey Lepikhov, Alexander Kuzmenkov, Alexander Korotkov, Alena Rybakina) -§ - + + + Automatically remove some unnecessary table self-joins (Andrey + Lepikhov, Alexander Kuzmenkov, Alexander Korotkov, Alena Rybakina) + § + - -This optimization can be disabled using server variable . - - + + This optimization can be disabled using server variable . + + - - -Convert some IN (VALUES ...) to x = ANY ... for better optimizer statistics (Alena Rybakina, Andrei Lepikhov) -§ - - + + + Convert some IN (VALUES + ...) 
to x = ANY ... for better + optimizer statistics (Alena Rybakina, Andrei Lepikhov) + § + + - - -Allow transforming OR-clauses to arrays for faster index processing (Alexander Korotkov, Andrey Lepikhov) -§ - - + + + Allow transforming OR-clauses + to arrays for faster index processing (Alexander Korotkov, Andrey + Lepikhov) + § + + - - -Speed up the processing of INTERSECT, EXCEPT, window aggregates, and view column aliases (Tom Lane, David Rowley) -§ -§ -§ -§ - - + + + Speed up the processing of INTERSECT, + EXCEPT, window aggregates, and view column aliases (Tom Lane, + David Rowley) + § + § + § + § + + - - -Allow the keys of SELECT DISTINCT to be internally reordered to avoid sorting (Richard Guo) -§ - + + + Allow the keys of SELECT + DISTINCT to be internally reordered to avoid sorting + (Richard Guo) + § + - -This optimization can be disabled using . - - + + This optimization can be disabled using . + + - - -Ignore GROUP BY columns that are functionally dependent on other columns (Zhang Mingli, Jian He, David Rowley) -§ - + + + Ignore GROUP BY + columns that are functionally dependent on other columns (Zhang + Mingli, Jian He, David Rowley) + § + - -If a GROUP BY clause includes all columns of a unique index, as well as other columns of the same table, those other columns are redundant and can be dropped -from the grouping. This was already true for non-deferred primary keys. - - + + If a GROUP BY clause includes all columns of + a unique index, as well as other columns of the same table, those + other columns are redundant and can be dropped from the grouping. + This was already true for non-deferred primary keys. + + - - -Allow some HAVING clauses on GROUPING SETS to be pushed to WHERE clauses (Richard Guo) -§ -§ -§ -§ - - - -This allows earlier row filtering. This release also fixes some GROUPING SETS queries that used to return incorrect results. - - + + + Allow some HAVING clauses + on GROUPING + SETS to be pushed to WHERE clauses + (Richard Guo) + § + § + § + § + + + + This allows earlier row filtering. This release also fixes some + GROUPING SETS queries that used to return + incorrect results. + + - - -Improve row estimates for generate_series() using numeric and timestamp values (David Rowley, Song Jinzhou) -§ -§ - - + + + Improve row estimates for generate_series() + using numeric + and timestamp + values (David Rowley, Song Jinzhou) + § + § + + - - -Allow the optimizer to use Right Semi Join plans (Richard Guo) -§ - + + + Allow the optimizer to use Right Semi Join plans + (Richard Guo) + § + - -Semi-joins are used when needing to find if there is at least one match. - - + + Semi-joins are used when needing to find if there is at least + one match. 
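For instance, an EXISTS subquery is a semi-join: each outer row qualifies at most once no matter how many inner rows match (schema illustrative), and the planner may now evaluate it with the hash table built on either side:

    SELECT c.name
    FROM customers c
    WHERE EXISTS (
        SELECT 1 FROM orders o WHERE o.customer_id = c.id
    );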
+ + - - -Allow merge joins to use incremental sorts (Richard Guo) -§ - - + + + Allow merge joins to use incremental sorts + (Richard Guo) + § + + - - -Improve the efficiency of planning queries accessing many partitions (Ashutosh Bapat, Yuya Watari, David Rowley) -§ -§ - - + + + Improve the efficiency of planning queries accessing many partitions + (Ashutosh Bapat, Yuya Watari, David Rowley) + § + § + + - - -Allow partitionwise joins in more cases, and reduce its memory usage (Richard Guo, Tom Lane, Ashutosh Bapat) -§ -§ - - + + + Allow partitionwise + joins in more cases, and reduce its memory usage (Richard Guo, + Tom Lane, Ashutosh Bapat) + § + § + + - - -Improve cost estimates of partition queries (Nikita Malakhov, Andrei Lepikhov) -§ - - + + + Improve cost estimates of partition queries (Nikita Malakhov, + Andrei Lepikhov) + § + + - - -Improve SQL-language function plan caching (Alexander Pyhalov, Tom Lane) -§ -§ - - + + + Improve SQL-language + function plan caching (Alexander Pyhalov, Tom Lane) + § + § + + - - -Improve handling of disabled optimizer features (Robert Haas) -§ - - + + + Improve handling of disabled optimizer features (Robert Haas) + § + + @@ -498,18 +574,19 @@ Author: Peter Geoghegan 2025-04-04 [8a510275d] Further optimize nbtree search scan key comparisons. --> - - -Allow skip scans of btree indexes (Peter Geoghegan) -§ -§ - + + + Allow skip scans of btree indexes + (Peter Geoghegan) + § + § + - -This allows multi-column btree indexes to be used by queries that only -equality-reference the second or later indexed columns. - - + + This allows multi-column btree indexes to be used by queries that + only equality-reference the second or later indexed columns. + + - - -Allow non-btree unique indexes to be used as partition keys and in materialized views (Mark Dilger) -§ -§ - + + + Allow non-btree unique indexes to be used as partition keys and in + materialized views (Mark Dilger) + § + § + - -The index type must still support equality. - - + + The index type must still support equality. + + - - -Allow GIN indexes to be created in parallel (Tomas Vondra, Matthias van de Meent) -§ - - + + + Allow GIN indexes to + be created in parallel (Tomas Vondra, Matthias van de Meent) + § + + - - -Allow values to be sorted to speed range-type GiST and btree index builds (Bernd Helmle) -§ - - + + + Allow values to be sorted to speed range-type GiST and btree + index builds (Bernd Helmle) + § + + @@ -588,41 +669,51 @@ Author: Andres Freund 2025-03-30 [2a5e709e7] Enable IO concurrency on all systems --> - - -Add an asynchronous I/O subsystem (Andres Freund, Thomas Munro, Nazir Bilal Yavuz, Melanie Plageman) -§ -§ -§ -§ -§ -§ -§ -§ -§ -§ -§ - - - -This feature allows backends to queue multiple read requests, which allows for more efficient sequential scans, bitmap heap scans, vacuums, etc. -This is enabled by server variable , with server variables and added to control it. This also enables - and values greater than zero for systems without fadvise() support. The new system view pg_aios shows the file handles being used -for asynchronous I/O. - - + + + Add an asynchronous I/O subsystem (Andres Freund, Thomas Munro, + Nazir Bilal Yavuz, Melanie Plageman) + § + § + § + § + § + § + § + § + § + § + § + + + + This feature allows backends to queue multiple read requests, + which allows for more efficient sequential scans, bitmap + heap scans, vacuums, etc. This is enabled by server + variable , with server + variables and added to control it. 
+ This also enables + and + values greater than zero for systems without + fadvise() support. The new system view pg_aios + shows the file handles being used for asynchronous I/O. + + - - -Improve the locking performance of queries that access many relations (Tomas Vondra) -§ - - + + + Improve the locking performance of queries that access many relations + (Tomas Vondra) + § + + - - -Improve the performance and reduce memory usage of hash joins and GROUP BY (David Rowley, Jeff Davis) -§ -§ -§ -§ -§ - + + + Improve the performance and reduce memory usage of hash joins and + GROUP BY + (David Rowley, Jeff Davis) + § + § + § + § + § + - -This also improves hash set operations used by EXCEPT, and hash lookups of subplan values. - - + + This also improves hash set operations used by EXCEPT, and hash + lookups of subplan values. + + - - -Allow normal vacuums to freeze some pages, even though they are all-visible (Melanie Plageman) -§ -§ - + + + Allow normal vacuums to freeze some pages, even though they are + all-visible (Melanie Plageman) + § + § + - -This reduces the overhead of later full-relation freezing. The aggressiveness of this can be controlled by server variable and per-table setting . -Previously vacuum never processed all-visible pages until freezing was required. - - + + This reduces the overhead of later full-relation + freezing. The aggressiveness of this can be + controlled by server variable and per-table setting . Previously + vacuum never processed all-visible pages until freezing was required. + + - - -Add server variable to control file truncation during (Nathan Bossart, Gurjeet Singh) -§ - + + + Add server variable to control + file truncation during (Nathan Bossart, + Gurjeet Singh) + § + - -A storage-level parameter with the same name and behavior already existed. - - + + A storage-level parameter with the same name and behavior already + existed. + + - - -Increase server variables 's and 's default values to 16 (Melanie Plageman) -§ -§ - + + + Increase server variables 's and 's default values to 16 + (Melanie Plageman) + § + § + - -This more accurately reflects modern hardware. - - + + This more accurately reflects modern hardware. + + @@ -721,72 +826,87 @@ Author: Melanie Plageman 2025-03-12 [9219093ca] Modularize log_connections output --> - - -Increase the logging granularity of server variable (Melanie Plageman) -§ - + + + Increase the logging granularity of server variable (Melanie Plageman) + § + - -This server variable was previously only boolean, which is still supported. - - + + This server variable was previously only boolean, which is still + supported. + + - - -Add log_connections option to report the duration of connection stages (Melanie Plageman) -§ - - + + + Add log_connections option to report the duration + of connection stages (Melanie Plageman) + § + + - - -Add escape %L to output the client IP address (Greg Sabino Mullane) -§ - - + + + Add escape + %L to output the client IP + address (Greg Sabino Mullane) + § + + - - -Add server variable to log lock acquisition failures (Yuki Seino) -§ - + + + Add server variable to log + lock acquisition failures (Yuki Seino) + § + - -Specifically it reports SELECT ... NOWAIT lock failures. - - + + Specifically it reports SELECT + ... NOWAIT lock failures. 
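A sketch, assuming the variable is spelled log_lock_failures as the item suggests; when another session already holds the row lock, the failed acquisition is then reported in the server log:

    SET log_lock_failures = on;
    SELECT * FROM accounts WHERE id = 1 FOR UPDATE NOWAIT;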
+ + - - -Modify pg_stat_all_tables and its variants to report the time spent in , , and their automatic variants (Sami Imseih) -§ - + + + Modify pg_stat_all_tables + and its variants to report the time spent in , , and their + automatic variants (Sami Imseih) + § + - -The new columns are total_vacuum_time, total_autovacuum_time, total_analyze_time, and total_autoanalyze_time. - - + + The new columns are total_vacuum_time, + total_autovacuum_time, + total_analyze_time, and + total_autoanalyze_time. + + - - -Add delay time reporting to and (Bertrand Drouvot, Nathan Bossart) -§ -§ - + + + Add delay time reporting to and (Bertrand Drouvot, Nathan Bossart) + § + § + - -This information appears in the server log, the system views pg_stat_progress_vacuum and pg_stat_progress_analyze, and the output of and when in VERBOSE -mode; tracking must be enabled with the server variable . - - + + This information appears in the server log, the system views pg_stat_progress_vacuum + and pg_stat_progress_analyze, + and the output of and when in VERBOSE + mode; tracking must be enabled with the server variable . + + - - -Add WAL, CPU, and average read statistics output to ANALYZE VERBOSE (Anthonin Bonnefoy) -§ -§ - - + + + Add WAL, CPU, and average + read statistics output to ANALYZE VERBOSE + (Anthonin Bonnefoy) + § + § + + - - -Add full WAL buffer count to VACUUM/ANALYZE (VERBOSE) and autovacuum log output (Bertrand Drouvot) -§ - - + + + Add full WAL buffer count to + VACUUM/ANALYZE (VERBOSE) + and autovacuum log output (Bertrand Drouvot) + § + + - - -Add per-backend I/O statistics reporting (Bertrand Drouvot) -§ -§ - + + + Add per-backend I/O statistics reporting (Bertrand Drouvot) + § + § + - -The statistics are accessed via pg_stat_get_backend_io(). Per-backend I/O statistics can be cleared via pg_stat_reset_backend_stats(). - - + + The statistics are accessed via pg_stat_get_backend_io(). + Per-backend I/O statistics can be cleared via pg_stat_reset_backend_stats(). + + - - -Add pg_stat_io columns to report I/O activity in bytes (Nazir Bilal Yavuz) -§ - + + + Add pg_stat_io + columns to report I/O activity in bytes (Nazir Bilal Yavuz) + § + - -The new columns are read_bytes, write_bytes, and extend_bytes. The op_bytes column, which always equaled BLCKSZ, has been removed. - - + + The new columns are read_bytes, + write_bytes, and + extend_bytes. The + op_bytes column, which always equaled + BLCKSZ, + has been removed. + + - - -Add WAL I/O activity rows to pg_stat_io (Nazir Bilal Yavuz, Bertrand Drouvot, Michael Paquier) -§ -§ -§ - + + + Add WAL I/O activity rows to + pg_stat_io (Nazir Bilal Yavuz, Bertrand + Drouvot, Michael Paquier) + § + § + § + - -This includes WAL receiver activity and a wait event for such writes. - + + This includes WAL receiver activity and a wait + event for such writes. + - + - - -Change server variable to control tracking WAL timing in pg_stat_io instead of pg_stat_wal (Bertrand Drouvot) -§ - - + + + Change server variable + to control tracking WAL timing + in pg_stat_io instead of pg_stat_wal + (Bertrand Drouvot) + § + + - - -Remove read/sync columns from pg_stat_wal (Bertrand Drouvot) -§ -§ - + + + Remove read/sync columns from pg_stat_wal + (Bertrand Drouvot) + § + § + - -This removes columns wal_write, wal_sync, wal_write_time, and wal_sync_time. - - + + This removes columns wal_write, + wal_sync, + wal_write_time, and + wal_sync_time. 
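With WAL activity folded into pg_stat_io, the removed counters can be approximated with a query along these lines (assuming the object column carries a 'wal' value, per the items above):

    SELECT backend_type, writes, write_time, fsyncs, fsync_time
    FROM pg_stat_io
    WHERE object = 'wal';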
+ + - - -Add function pg_stat_get_backend_wal() to return per-backend WAL statistics (Bertrand Drouvot) -§ - + + + Add function pg_stat_get_backend_wal() + to return per-backend WAL statistics (Bertrand + Drouvot) + § + - -Per-backend WAL statistics can be cleared via pg_stat_reset_backend_stats(). - - + + Per-backend WAL + statistics can be cleared via pg_stat_reset_backend_stats(). + + - - -Add function pg_ls_summariesdir() to specifically list the contents of PGDATA/pg_wal/summaries (Yushi Ogiwara) -§ - - + + + Add function pg_ls_summariesdir() + to specifically list the contents of PGDATA/pg_wal/summaries + (Yushi Ogiwara) + § + + - - -Add column pg_stat_checkpointer.num_done to report the number of completed checkpoints (Anton A. Melnikov) -§ - + + + Add column pg_stat_checkpointer.num_done + to report the number of completed checkpoints (Anton A. Melnikov) + § + - -Columns num_timed and num_requested count both completed and skipped checkpoints. - - + + Columns num_timed and + num_requested count both completed and + skipped checkpoints. + + - - -Add column pg_stat_checkpointer.slru_written to report SLRU buffers written (Nitin Jadhav) -§ - + + + Add column + pg_stat_checkpointer.slru_written + to report SLRU buffers written (Nitin Jadhav) + § + - -Also, modify the checkpoint server log message to report separate shared buffer and SLRU buffer values. - - + + Also, modify the checkpoint server log message to report separate + shared buffer and SLRU buffer values. + + - - -Add columns to pg_stat_database to report parallel worker activity (Benoit Lobréau) -§ - + + + Add columns to pg_stat_database + to report parallel worker activity (Benoit Lobréau) + § + - -The new columns are parallel_workers_to_launch and parallel_workers_launched. - - + + The new columns are + parallel_workers_to_launch and + parallel_workers_launched. + + - - -Have query id computation of arrays consider only the first and last array elements (Dmitry Dolgov, Sami Imseih) -§ -§ - + + + Have query id computation + of arrays consider only the first and last array elements (Dmitry + Dolgov, Sami Imseih) + § + § + - -Jumbling is used by . - - + + Jumbling is used by . + + - - -Adjust query id computations to group together queries using the same relation name (Michael Paquier, Sami Imseih) -§ - + + + Adjust query id computations to group together queries using the + same relation name (Michael Paquier, Sami Imseih) + § + - -This is true even if the tables in different schemas have different column names. - - + + This is true even if the tables in different schemas have different + column names. 
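Under the array rule above, statements like the following can collapse into a single pg_stat_statements entry even though their IN lists differ in length, since only the first and last elements feed the query id (table name illustrative):

    SELECT * FROM t WHERE id IN (1, 2, 3);
    SELECT * FROM t WHERE id IN (1, 2, 3, 4, 5, 6, 7, 8);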
+ + - - -Add column pg_backend_memory_contexts.type to report the type of memory context (David Rowley) -§ - - + + + Add column pg_backend_memory_contexts.type + to report the type of memory context (David Rowley) + § + + - - -Add column pg_backend_memory_contexts.path to show memory context parents (Melih Mutlu) -§ - - + + + Add column + pg_backend_memory_contexts.path + to show memory context parents (Melih Mutlu) + § + + @@ -1075,53 +1256,61 @@ Author: Michael Paquier 2024-07-10 [d898665bf] Extend pg_get_acl() to handle sub-object IDs --> - - -Add function pg_get_acl() to retrieve database access control details (Joel Jacobson) -§ -§ - - + + + Add function pg_get_acl() + to retrieve database access control details (Joel Jacobson) + § + § + + - - -Add function has_largeobject_privilege() to check large object privileges (Yugo Nagata) -§ - - + + + Add function has_largeobject_privilege() + to check large object privileges (Yugo Nagata) + § + + - - -Allow to define large object default privileges (Takatsuka Haruka, Yugo Nagata, Laurenz Albe) -§ - - + + + Allow to define + large object default privileges (Takatsuka Haruka, Yugo Nagata, + Laurenz Albe) + § + + - - -Add predefined role pg_signal_autovacuum_worker (Kirill Reshke) -§ - + + + Add predefined role pg_signal_autovacuum_worker + (Kirill Reshke) + § + - -This allows sending signals to autovacuum workers. - - + + This allows sending signals to autovacuum workers. + + @@ -1137,56 +1326,69 @@ Author: Daniel Gustafsson 2025-02-20 [b3f0be788] Add support for OAUTHBEARER SASL mechanism --> - - -Add support for the OAuth authentication method (Jacob Champion, Daniel Gustafsson, Thomas Munro) -§ - + + + Add support for the OAuth authentication + method (Jacob Champion, Daniel Gustafsson, Thomas Munro) + § + - -This adds an oauth authentication method to pg_hba.conf, libpq OAuth options, a server variable to load token validation libraries, and -a configure flag to add the required compile-time libraries. - - + + This adds an oauth authentication method to pg_hba.conf, + libpq OAuth options, a server variable to load + token validation libraries, and a configure flag + to add the required compile-time libraries. + + - - -Add server variable to allow specification of multiple colon-separated TLSv1.3 cipher suites (Erica Zhang, Daniel Gustafsson) -§ - - + + + Add server variable to allow + specification of multiple colon-separated TLSv1.3 cipher suites + (Erica Zhang, Daniel Gustafsson) + § + + - - -Change server variable 's default to include elliptic curve X25519 (Daniel Gustafsson, Jacob Champion) -§ - - + + + Change server variable 's default + to include elliptic curve X25519 (Daniel Gustafsson, Jacob Champion) + § + + - - -Rename server variable ssl_ecdh_curve to and allow multiple colon-separated ECDH curves to be specified (Erica Zhang, Daniel Gustafsson) -§ - + + + Rename server variable ssl_ecdh_curve to and allow multiple colon-separated + ECDH curves to be specified (Erica Zhang, + Daniel Gustafsson) + § + -The previous name still works. - - + + The previous name still works. + + - - -Make cancel request keys 256 bits (Heikki Linnakangas, Jelte Fennema-Nio) -§ -§ - + + + Make cancel request + keys 256 bits (Heikki Linnakangas, Jelte Fennema-Nio) + § + § + - -This is only possible when the server and client support wire protocol version 3.2, introduced in this release. - - + + This is only possible when the server and client support wire + protocol version 3.2, introduced in this release. 
+ + - - -Add server variable to specify the maximum number of background workers (Nathan Bossart) -§ - + + + Add server variable + to specify the maximum number of background workers (Nathan Bossart) + § + - -With this variable set, can be adjusted at runtime up to this maximum without a server restart. - - + + With this variable set, + can be adjusted at runtime up to this maximum without a server + restart. + + - - -Allow specification of the fixed number of dead tuples that will trigger an autovacuum (Nathan Bossart, Frédéric Yhuel) -§ - + + + Allow specification of the fixed number of dead tuples that will + trigger an autovacuum (Nathan + Bossart, Frédéric Yhuel) + § + - -The server variable is . Percentages are still used for triggering. - - + + The server variable is . Percentages are + still used for triggering. + + - - -Change server variable to limit only files opened by a backend (Andres Freund) -§ - + + + Change server variable + to limit only files opened by a backend (Andres Freund) + § + - -Previously files opened by the postmaster were also counted toward this limit. - - + + Previously files opened by the postmaster were also counted toward + this limit. + + - - -Add server variable to report the required number of semaphores (Nathan Bossart) -§ - + + + Add server variable to + report the required number of semaphores (Nathan Bossart) + § + - -This is useful for operating system configuration. - - + + This is useful for operating system configuration. + + - - -Add server variable to specify the location of extension control files (Peter Eisentraut, Matheus Alcantara) -§ -§ - - + + + Add server variable to + specify the location of extension control files (Peter Eisentraut, + Matheus Alcantara) + § + § + + - + @@ -1300,28 +1516,34 @@ Author: Amit Kapila 2025-02-19 [ac0e33136] Invalidate inactive replication slots. --> - - -Allow inactive replication slots to be automatically invalided using server variable (Nisha Moond, Bharath Rupireddy) -§ - - + + + Allow inactive replication slots to be automatically invalidated using + server variable + (Nisha Moond, Bharath Rupireddy) + § + + - - -Add server variable to control the maximum active replication origins (Euler Taveira) -§ - + + + Add server variable to control the + maximum active replication origins (Euler Taveira) + § + - -This was previously controlled by , but this new setting allows a higher origin count in cases where fewer slots are required. - - + + This was previously controlled by , but this new setting allows + a higher origin count in cases where fewer slots are required. + + @@ -1343,33 +1565,44 @@ Author: Amit Kapila 2025-01-30 [6252b1eaf] Doc: Generated column replication. --> - - -Allow the values of generated columns to be logically replicated (Shubham Khanna, Vignesh C, Zhijie Hou, Shlok Kyal, Peter Smith) -§ -§ -§ -§ - - - -If the publication specifies a column list, all specified columns, generated and non-generated, are published. Without a specified column list, publication option publish_generated_columns -controls whether generated columns are published. Previously generated columns were not replicated and the subscriber had to compute the values if possible; this is particularly -useful for non-PostgreSQL subscribers which lack such a capability.
- - + + + Allow the values of generated + columns to be logically replicated (Shubham Khanna, Vignesh C, + Zhijie Hou, Shlok Kyal, Peter Smith) + § + § + § + § + + + + If the publication specifies a column list, all specified + columns, generated and non-generated, are published. + Without a specified column list, publication option + publish_generated_columns controls whether + generated columns are published. Previously generated columns + were not replicated and the subscriber had to compute + the values if possible; this is particularly useful for + non-PostgreSQL subscribers which lack + such a capability. + + - - -Change the default streaming option from off to parallel (Vignesh C) -§ - - + + + Change the default streaming + option from off to parallel + (Vignesh C) + § + + - - -Allow to change the replication slot's two-phase commit behavior (Hayato Kuroda, Ajin Cherian, Amit Kapila, Zhijie Hou) -§ -§ - - + + + Allow to change the + replication slot's two-phase commit behavior (Hayato Kuroda, Ajin + Cherian, Amit Kapila, Zhijie Hou) + § + § + + - - -Log conflicts while applying logical replication changes (Zhijie Hou, Nisha Moond) -§ -§ -§ -§ -§ - + + + Log conflicts while + applying logical replication changes (Zhijie Hou, Nisha Moond) + § + § + § + § + § + - -Also report in new columns of pg_stat_subscription_stats. - - + + Also report in new columns of pg_stat_subscription_stats. + + @@ -1434,112 +1671,136 @@ Author: Richard Guo 2025-02-25 [1e4351af3] Expand virtual generated columns in the planner --> - - -Allow generated columns to be virtual, and make them the default (Peter Eisentraut, Jian He, Richard Guo, Dean Rasheed) -§ -§ -§ - + + + Allow generated + columns to be virtual, and make them the default (Peter + Eisentraut, Jian He, Richard Guo, Dean Rasheed) + § + § + § + - -Virtual generated columns generate their values when the columns are read, not written. The write behavior can still be specified via the STORED option. - - + + Virtual generated columns generate their values when the columns + are read, not written. The write behavior can still be specified + via the STORED option. + + - - -Add OLD/NEW support to RETURNING in DML queries (Dean Rasheed) -§ - + + + Add OLD/NEW support to RETURNING in + DML queries (Dean Rasheed) + § + - -Previously RETURNING only returned new values for and , and old values for ; would return the appropriate value for the internal query executed. This new syntax -allows the RETURNING list of INSERT/UPDATE/DELETE/MERGE to explicitly return old and new values by using the special aliases old and new. These aliases can be renamed to -avoid identifier conflicts. - - + + Previously RETURNING only returned new values for + and , and old + values for ; + would return the appropriate value for the internal query executed. + This new syntax allows the RETURNING list of + INSERT/UPDATE/DELETE/MERGE + to explicitly return old and new values by using the special aliases + old and new. These aliases + can be renamed to avoid identifier conflicts. + + - - -Allow foreign tables to be created like existing local tables (Zhang Mingli) -§ - + + + Allow foreign tables to be created like existing local tables + (Zhang Mingli) + § + - -The syntax is CREATE FOREIGN TABLE ... LIKE. - - + + The syntax is CREATE + FOREIGN TABLE ... LIKE. 
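For illustration, a minimal sketch of the new syntax; the server name remote_srv, the table names, and the options shown are hypothetical and depend on the foreign data wrapper in use:

    CREATE FOREIGN TABLE measurements_remote (LIKE measurements)
        SERVER remote_srv
        OPTIONS (schema_name 'public', table_name 'measurements');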
+ + - - -Allow LIKE with nondeterministic collations (Peter Eisentraut) -§ - - + + + Allow LIKE + with nondeterministic + collations (Peter Eisentraut) + § + + - - -Allow text position search functions with nondeterministic collations (Peter Eisentraut) -§ - + + + Allow text position search functions with nondeterministic collations + (Peter Eisentraut) + § + - -These used to generate an error. - - + + These used to generate an error. + + - - -Add builtin collation provider PG_UNICODE_FAST (Jeff Davis) -§ - + + + Add builtin collation provider PG_UNICODE_FAST + (Jeff Davis) + § + - -This locale supports case mapping, but sorts in code point order, not natural language order. - - + + This locale supports case mapping, but sorts in code point order, + not natural language order. + + - - -Allow and to process partitioned tables without processing their children (Michael Harris) -§ - + + + Allow and + to process partitioned tables without processing their children + (Michael Harris) + § + - -This is enabled with the new ONLY option. This is useful since autovacuum does not process partitioned tables, just its children. - - + + This is enabled with the new ONLY option. This is + useful since autovacuum does not process partitioned tables, just + its children. + + - - -Add functions to modify per-relation and per-column optimizer statistics (Corey Huinker) -§ -§ -§ - + + + Add functions to modify per-relation and per-column optimizer + statistics (Corey Huinker) + § + § + § + - -The functions are pg_restore_relation_stats(), pg_restore_attribute_stats(), pg_clear_relation_stats(), and pg_clear_attribute_stats(). - - + + The functions are pg_restore_relation_stats(), + pg_restore_attribute_stats(), + pg_clear_relation_stats(), and + pg_clear_attribute_stats(). + + - - -Add server variable to control the file copying method (Nazir Bilal Yavuz) -§ - + + + Add server variable to control + the file copying method (Nazir Bilal Yavuz) + § + - -This controls whether CREATE DATABASE ... STRATEGY=FILE_COPY and ALTER DATABASE ... SET TABLESPACE uses file copy or clone. - - + + This controls whether CREATE DATABASE + ... STRATEGY=FILE_COPY and ALTER DATABASE ... SET + TABLESPACE use file copy or clone. + + <link linkend="ddl-constraints">Constraints</link> - + - - -Allow the specification of non-overlapping PRIMARY KEY and UNIQUE constraints (Paul A. Jungwirth) -§ - + + + Allow the specification of non-overlapping PRIMARY + KEY and UNIQUE + constraints (Paul A. Jungwirth) + § + - -This is specified by WITHOUT OVERLAPS on the last specified column. - - + + This is specified by WITHOUT OVERLAPS on the + last specified column. + + - - -Allow CHECK and foreign key constraints to be specified as NOT ENFORCED (Amul Sul) -§ -§ - + + + Allow CHECK + and foreign + key constraints to be specified as NOT + ENFORCED (Amul Sul) + § + § + - -This also adds column pg_constraint.conenforced. - + + This also adds column pg_constraint.conenforced. + + - - -Require primary/foreign key relationships to use either deterministic collations or the the same nondeterministic collations (Peter Eisentraut) -§ - + + + Require primary/foreign key + relationships to use either deterministic collations or the + same nondeterministic collations (Peter Eisentraut) + § + - -The restore of a , also used by , will fail if these requirements are not met; schema changes must be made for these upgrade methods to succeed.
- - + + The restore of a , also used by , will fail if these requirements are not met; + schema changes must be made for these upgrade methods to succeed. + + - - -Store column NOT NULL specifications in pg_constraint (Álvaro Herrera, Bernd Helmle) -§ - + + + Store column NOT + NULL specifications in pg_constraint + (Álvaro Herrera, Bernd Helmle) + § + - -This allows names to be specified for NOT NULL constraint. This also adds NOT NULL constraints to foreign tables and NOT NULL inheritance control to local tables. - - + + This allows names to be specified for NOT NULL + constraints. This also adds NOT NULL constraints + to foreign tables and NOT NULL inheritance + control to local tables. + + - - -Allow to set the NOT VALID attribute of NOT NULL constraints (Rushabh Lathia, Jian He) -§ - - + + + Allow to set the NOT + VALID attribute of NOT NULL constraints + (Rushabh Lathia, Jian He) + § + + - - -Allow modification of the inheritability of NOT NULL constraints (Suraj Kharage, Álvaro Herrera) -§ -§ - + + + Allow modification of the inheritability of NOT + NULL constraints (Suraj Kharage, Álvaro Herrera) + § + § + - -The syntax is ALTER TABLE ... ALTER CONSTRAINT ... [NO] INHERIT. - - + + The syntax is ALTER TABLE + ... ALTER CONSTRAINT ... [NO] INHERIT. + + - - -Allow NOT VALID foreign key constraints on partitioned tables (Amul Sul) -§ - - + + + Allow NOT VALID foreign key constraints on + partitioned tables (Amul Sul) + § + + - - -Allow dropping of constraints ONLY on partitioned tables (Álvaro Herrera) -§ - + + + Allow dropping + of constraints ONLY on partitioned tables + (Álvaro Herrera) + § + - -This was previously erroneously prohibited. - - + + This was previously erroneously prohibited. + + - + <xref linkend="sql-copy"/> - + - - -Add REJECT_LIMIT to control the number of invalid rows COPY FROM can ignore (Atsushi Torikoshi) -§ - + + + Add REJECT_LIMIT to control the number of invalid + rows COPY FROM can ignore (Atsushi Torikoshi) + § + - -This is available when ON_ERROR = 'ignore'. - - + + This is available when ON_ERROR = 'ignore'. + + - - -Allow COPY TO to copy rows from populated materialized views (Jian He) -§ - - + + + Allow COPY TO to copy rows from populated + materialized views (Jian He) + § + + - - -Add COPY LOG_VERBOSITY level silent to suppress log output of ignored rows (Atsushi Torikoshi) -§ - + + + Add COPY LOG_VERBOSITY level + silent to suppress log output of ignored rows + (Atsushi Torikoshi) + § + - -This new level suppresses output for discarded input rows when on_error = 'ignore'. - - + + This new level suppresses output for discarded input rows when + on_error = 'ignore'. + + - - -Disallow COPY FREEZE on foreign tables (Nathan Bossart) -§ - + + + Disallow COPY FREEZE on foreign tables (Nathan + Bossart) + § + - -Previously, the COPY worked but the FREEZE was ignored, so disallow this command. - - + + Previously, the COPY worked but the + FREEZE was ignored, so disallow this command.
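The new COPY options described above combine naturally; a sketch, with a hypothetical table and file path:

    COPY measurements FROM '/tmp/input.csv'
        WITH (FORMAT csv, ON_ERROR 'ignore', REJECT_LIMIT 10, LOG_VERBOSITY silent);

If more than ten erroneous input rows are found, the command fails; otherwise the bad rows are skipped without the usual notice output.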
+ + @@ -1793,36 +2102,39 @@ Author: David Rowley 2024-12-11 [c2a4078eb] Enable BUFFERS with EXPLAIN ANALYZE by default --> - - -Automatically include BUFFERS output in EXPLAIN ANALYZE (Guillaume Lelarge, David Rowley) -§ - - + + + Automatically include BUFFERS output in + EXPLAIN ANALYZE (Guillaume Lelarge, David Rowley) + § + + - - -Add full WAL buffer count to EXPLAIN (WAL) output (Bertrand Drouvot) -§ - - + + + Add full WAL buffer count to EXPLAIN + (WAL) output (Bertrand Drouvot) + § + + - - -In EXPLAIN ANALYZE, report the number of index lookups used per index scan node (Peter Geoghegan) -§ - - + + + In EXPLAIN ANALYZE, report the number of index + lookups used per index scan node (Peter Geoghegan) + § + + - - -Modify EXPLAIN to output fractional row counts (Ibrar Ahmed, Ilia Evdokimov, Robert Haas) -§ -§ - - + + + Modify EXPLAIN to output fractional row counts + (Ibrar Ahmed, Ilia Evdokimov, Robert Haas) + § + § + + - - -Add memory and disk usage details to Material, Window Aggregate, and common table expression nodes to EXPLAIN -output (David Rowley, Tatsuo Ishii) -§ -§ -§ -§ - - + + + Add memory and disk usage details to Material, + Window Aggregate, and common table expression + nodes to EXPLAIN output (David Rowley, Tatsuo + Ishii) + § + § + § + § + + - - -Add details about window function arguments to EXPLAIN output (Tom Lane) -§ - - + + + Add details about window function arguments to + EXPLAIN output (Tom Lane) + § + + - - -Add Parallel Bitmap Heap Scan worker cache statistics to EXPLAIN ANALYZE (David Geier, Heikki Linnakangas, Donghang Lin, Alena Rybakina, David Rowley) -§ - - + + + Add Parallel Bitmap Heap Scan worker cache + statistics to EXPLAIN ANALYZE (David Geier, + Heikki Linnakangas, Donghang Lin, Alena Rybakina, David Rowley) + § + + - - -Indicate disabled nodes in EXPLAIN ANALYZE output (Robert Haas, David Rowley, Laurenz Albe) -§ -§ -§ - - + + + Indicate disabled nodes in EXPLAIN ANALYZE output + (Robert Haas, David Rowley, Laurenz Albe) + § + § + § + + @@ -1922,137 +2241,159 @@ Author: Jeff Davis 2025-01-17 [286a365b9] Support Unicode full case mapping and conversion. --> - - -Improve Unicode full case mapping and conversion (Jeff Davis) -§ -§ - + + + Improve Unicode + full case mapping and conversion (Jeff Davis) + § + § + - -This adds the ability to do conditional and title case mapping, and case map single characters to multiple characters. - - + + This adds the ability to do conditional and title case mapping, + and case map single characters to multiple characters. + + - - -Allow jsonb null values to be cast to scalar types as NULL (Tom Lane) -§ - + + + Allow jsonb + null values to be cast to scalar types as + NULL (Tom Lane) + § + - -Previously such casts generated an error. - - + + Previously such casts generated an error. 
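A quick sketch of the relaxed cast behavior:

    SELECT ('null'::jsonb)::int;    -- previously an error, now returns NULL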
+ + - - -Add optional parameter to json{b}_strip_nulls to allow removal of null array elements (Florents Tselai) -§ - - + + + Add optional parameter to json{b}_strip_nulls + to allow removal of null array elements (Florents Tselai) + § + + - - -Add function array_sort() which sorts an array's first dimension (Junwang Zhao, Jian He) -§ - - + + + Add function array_sort() + which sorts an array's first dimension (Junwang Zhao, Jian He) + § + + - - -Add function array_reverse() which reverses an array's first dimension (Aleksander Alekseev) -§ - - + + + Add function array_reverse() + which reverses an array's first dimension (Aleksander Alekseev) + § + + - - -Add function reverse() to reverse bytea bytes (Aleksander Alekseev) -§ - - + + + Add function reverse() + to reverse bytea bytes (Aleksander Alekseev) + § + + - - -Allow casting between integer types and bytea (Aleksander Alekseev) -§ - + + + Allow casting between integer types and bytea (Aleksander + Alekseev) + § + - -The integer values are stored as bytea two's complement values. - - + + The integer values are stored as bytea two's complement + values. + + - - -Update Unicode data to Unicode 16.0.0 (Peter Eisentraut) -§ - - + + + Update Unicode data to Unicode 16.0.0 (Peter + Eisentraut) + § + + - - -Add full text search stemming for Estonian (Tom Lane) -§ - - + + + Add full text search stemming for Estonian + (Tom Lane) + § + + - - -Improve the XML error codes to more closely match the SQL standard (Tom Lane) -§ - + + + Improve the XML + error codes to more closely match the SQL standard + (Tom Lane) + § + - -These errors are reported via SQLSTATE. - - + + These errors are reported via SQLSTATE. + + @@ -2068,16 +2409,20 @@ Author: Jeff Davis 2025-01-24 [bfc599206] Add SQL function CASEFOLD(). --> - - -Add function casefold() to allow for more sophisticated case-insensitive matching (Jeff Davis) -§ - + + + Add function casefold() + to allow for more sophisticated case-insensitive matching (Jeff Davis) + § + - -This allows more accurate comparisons, i.e., a character can have multiple upper or lower case equivalents, or upper or lower case conversion changes the number of characters. - - + + This allows more accurate comparisons, i.e., a character can have + multiple upper or lower case equivalents, or upper or lower case + conversion changes the number of characters. + + - - -Allow MIN()/MAX() aggregates on arrays and composite types (Aleksander Alekseev, Marat Buharov) -§ -§ - - + + + Allow MIN()/MAX() + aggregates on arrays and composite types (Aleksander Alekseev, + Marat Buharov) + § + § + + - - -Add a WEEK option to EXTRACT() (Tom Lane) -§ - - + + + Add a WEEK option to EXTRACT() + (Tom Lane) + § + + - - -Improve the output EXTRACT(QUARTER ...) for negative values (Tom Lane) -§ - - + + + Improve the output EXTRACT(QUARTER ...) for + negative values (Tom Lane) + § + + - - -Add roman numeral support to to_number() (Hunaid Sohail) -§ - + + + Add roman numeral support to to_number() + (Hunaid Sohail) + § + - -This is accessed via the RN pattern. - - + + This is accessed via the RN pattern. + + - - -Add UUID version 7 generation function uuidv7() (Andrey Borodin) -§ - + + + Add UUID + version 7 generation function uuidv7() + (Andrey Borodin) + § + - -This UUID value is temporally sortable. Function alias uuidv4() has been added to explicitly generate version 4 UUIDs. - - + + This UUID value is + temporally sortable. Function alias uuidv4() + has been added to explicitly generate version 4 UUIDs. 
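For example:

    SELECT uuidv7();    -- temporally sortable UUID
    SELECT uuidv4();    -- explicitly generates a version 4 UUID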
+ + - - -Add functions crc32() and crc32c() to compute CRC values (Aleksander Alekseev) -§ - - + + + Add functions crc32() + and crc32c() + to compute CRC values (Aleksander Alekseev) + § + + - - -Add math functions gamma() and lgamma() (Dean Rasheed) -§ - - + + + Add math functions gamma() + and lgamma() + (Dean Rasheed) + § + + - - -Allow => syntax for named cursor arguments in PL/pgSQL (Pavel Stehule) -§ - + + + Allow => syntax for named cursor arguments in + PL/pgSQL (Pavel Stehule) + § + - -We previously only accepted :=. - - + + We previously only accepted :=. + + - - -Allow regexp_match[es]()/regexp_like()/regexp_replace()/regexp_count()/regexp_instr()/regexp_substr()/regexp_split_to_table()/regexp_split_to_array() to use named arguments (Jian He) -§ - - + + + Allow regexp_match[es]()/regexp_like()/regexp_replace()/regexp_count()/regexp_instr()/regexp_substr()/regexp_split_to_table()/regexp_split_to_array() + to use named arguments (Jian He) + § + + @@ -2216,12 +2594,15 @@ Author: Robert Haas 2024-09-09 [cdb6b0fdb] Add PQfullProtocolVersion() to surface the precise proto --> - - -Add function PQfullProtocolVersion() to report the full, including minor, protocol version number (Jacob Champion, Jelte Fennema-Nio) -§ - - + + + Add function PQfullProtocolVersion() + to report the full, including minor, protocol version number (Jacob + Champion, Jelte Fennema-Nio) + § + + - - -Add libpq connection parameters and environment variables to specify the minimum and maximum acceptable protocol version for connections (Jelte Fennema-Nio) -§ -§ - - + + + Add libpq connection parameters + and environment variables to + specify the minimum and maximum acceptable protocol version for + connections (Jelte Fennema-Nio) + § + § + + - - -Add libpq function PQservice() to return the connection service name (Michael Banck) -§ - - + + + Add libpq function PQservice() + to return the connection service name (Michael Banck) + § + + - - -Report changes to the client (Alexander Kukushkin, Jelte Fennema-Nio, Tomas Vondra) -§ -§ - - + + + Report changes to the client + (Alexander Kukushkin, Jelte Fennema-Nio, Tomas Vondra) + § + § + + - - -Add PQtrace() output for all message types, including authentication (Jelte Fennema-Nio) -§ -§ -§ -§ -§ - - + + + Add PQtrace() output + for all message types, including authentication (Jelte Fennema-Nio) + § + § + § + § + § + + - - -Add libpq connection parameter sslkeylogfile which dumps out SSL key material (Abhishek Chanda, Daniel Gustafsson) -§ - + + + Add libpq connection parameter sslkeylogfile + which dumps out SSL key material (Abhishek Chanda, + Daniel Gustafsson) + § + - -This is useful for debugging. - - + + This is useful for debugging. + + - - -Modify some libpq function signatures to use int64_t (Thomas Munro) -§ - + + + Modify some libpq function signatures to use + int64_t (Thomas Munro) + § + - -These previously used pg_int64, which is now deprecated. - - + + These previously used pg_int64, which is now + deprecated. + + - <xref linkend="app-psql"/> + <xref linkend="app-psql"/> - + - - -Allow psql to parse, bind, and close named prepared statements (Anthonin Bonnefoy, Michael Paquier) -§ - + + + Allow psql to parse, bind, and close + named prepared statements (Anthonin Bonnefoy, Michael Paquier) + § + - -This is accomplished with new commands \parse, \bind_named, and \close. - - + + This is accomplished with new commands \parse, + \bind_named, + and \close. 
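A sketch of these meta-commands in a psql session (the statement name is arbitrary); note that a later patch in this series renames \close to \close_prepared:

    SELECT $1 AS val \parse stmt1
    \bind_named stmt1 'hello' \g
    \close stmt1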
+ + - - -Add psql backslash commands to allowing issuance of pipeline queries (Anthonin Bonnefoy) -§ -§ -§ - + + + Add psql backslash commands to allow + issuance of pipeline queries (Anthonin Bonnefoy) + § + § + § + - -The new commands are \startpipeline, \syncpipeline, \sendpipeline, \endpipeline, \flushrequest, \flush, and \getresults. - - + + The new commands are \startpipeline, + \syncpipeline, \sendpipeline, + \endpipeline, \flushrequest, + \flush, and \getresults. + + - - -Allow adding pipeline status to the psql prompt and add related state variables (Anthonin Bonnefoy) -§ - + + + Allow adding pipeline status to the psql + prompt and add related state variables (Anthonin Bonnefoy) + § + - -The new prompt character is %P and the new psql variables are PIPELINE_SYNC_COUNT, PIPELINE_COMMAND_COUNT, and PIPELINE_RESULT_COUNT. - - + + The new prompt character is %P and + the new psql variables are PIPELINE_SYNC_COUNT, + PIPELINE_COMMAND_COUNT, + and PIPELINE_RESULT_COUNT. + + - - -Allow adding the connection service name to the psql prompt or access it via psql variable (Michael Banck) -§ - - + + + Allow adding the connection service name to the + psql prompt or access it via + psql variable (Michael Banck) + § + + - - -Add psql option to use expanded mode on all list commands (Dean Rasheed) -§ - + + + Add psql option to use expanded mode on + all list commands (Dean Rasheed) + § + - -Adding backslash suffix x enables this. - - + + Adding backslash suffix x enables this. + + - - -Change psql's to use tabular format and include more information (Álvaro Herrera, Maiquel Grassi, Hunaid Sohail) -§ - - + + + Change psql's to use tabular format + and include more information (Álvaro Herrera, Maiquel Grassi, + Hunaid Sohail) + § + + - - -Add function's leakproof indicator to psql's \df+, \do+, \dAo+, and \dC+ outputs (Yugo Nagata) -§ - - + + + Add function's leakproof indicator + to psql's \df+, + \do+, \dAo+, and + \dC+ outputs (Yugo Nagata) + § + + - - -Add access method details for partitioned relations in \dP+ (Justin Pryzby) -§ - - + + + Add access method details for partitioned relations in \dP+ + (Justin Pryzby) + § + + - - -Add default_version to the psql \dx extension output (Magnus Hagander) -§ - - + + + Add default_version + to the psql \dx + extension output (Magnus Hagander) + § + + - - -Add psql variable to set the default \watch wait time (Daniel Gustafsson) -§ - - + + + Add psql variable to set the default \watch + wait time (Daniel Gustafsson) + § + + - + - + - - Server Applications + + Server Applications - + - - -Change to default to enabling checksums (Greg Sabino Mullane) -§ -§ - + + + Change to default to enabling checksums + (Greg Sabino Mullane) + § + § + - -The new initdb option disables checksums. - - + + The new initdb option + disables checksums. + + - - -Add initdb option to avoid syncing heap/index files (Nathan Bossart) -§ - + + + Add initdb option + to avoid syncing heap/index + files (Nathan Bossart) + § + - -initdb option is still available to avoid syncing any files. - - + + initdb option + is still available to avoid syncing any files. + + - - -Add option to compute only missing optimizer statistics (Corey Huinker, Nathan Bossart) -§ -§ - + + + Add option + to compute only missing + optimizer statistics (Corey Huinker, Nathan Bossart) + § + § + - -This option can only be used by and . - - + + This option can only be used by + and .
+ + - - -Add option / to enable hard linking (Israel Barth Rubio, Robert Haas) -§ - + + + Add option + / to enable hard linking + (Israel Barth Rubio, Robert Haas) + § + - -Only some files can be hard linked. This should not be used if the backups will be used independently. - - + + Only some files can be hard linked. This should not be used if the + backups will be used independently. + + - - -Allow to verify tar-format backups (Amul Sul) -§ - - + + + Allow to verify tar-format + backups (Amul Sul) + § + + - - -If 's specifies a database name, use it in output (Masahiko Sawada) -§ - - + + + If 's + specifies a database name, use it in + output (Masahiko Sawada) + § + + - - -Add option to change the default char signedness (Masahiko Sawada) -§ - - + + + Add option + to change the default + char signedness (Masahiko Sawada) + § + + - + @@ -2603,28 +3050,34 @@ Author: Andrew Dunstan 2025-04-04 [1495eff7b] Non text modes for pg_dumpall, correspondingly change pg --> - - -Allow to dump in the same output formats as pg_dump supports (Mahendra Singh Thalor, Andrew Dunstan) -§ - + + + Allow to dump in the same output + formats as pg_dump supports (Mahendra + Singh Thalor, Andrew Dunstan) + § + - -Also modify to handle such dumps. Previously pg_dumpall only supported text format. - - + + Also modify to handle such dumps. + Previously pg_dumpall only supported + text format. + + - - -Add options , , and (Jeff Davis) -§ - - + + + Add options + , , + and (Jeff Davis) + § + + - - -Add pg_dump and option to dump sequence data that would normally be excluded (Nathan Bossart) -§ -§ - - + + + Add pg_dump and option to + dump sequence data that would normally be excluded (Nathan Bossart) + § + § + + - - -Add , , and - options , , , and (Corey Huinker, Jeff Davis) -§ - - + + + Add , , + and options + , , + , and + (Corey Huinker, Jeff Davis) + § + + - - -Add option to disable row level security policy processing in -, , - (Nikolay Samokhvalov) -§ - + + + Add option to disable row level + security policy processing in , + , + (Nikolay Samokhvalov) + § + - -This is useful for migrating to systems with different policies. - - + + This is useful for migrating to systems with different policies. + + @@ -2692,19 +3151,22 @@ Author: Jeff Davis 2025-02-20 [1fd1bd871] Transfer statistics during pg_upgrade. --> - - -Allow pg_upgrade to preserve optimizer statistics (Corey Huinker, Jeff Davis, Nathan Bossart) -§ -§ -§ -§ - + + + Allow pg_upgrade to preserve optimizer + statistics (Corey Huinker, Jeff Davis, Nathan Bossart) + § + § + § + § + - -Extended statistics are not preserved. Also add pg_upgrade option to disable statistics preservation. - - + + Extended statistics are not preserved. Also add + pg_upgrade option + to disable statistics preservation. + + - - -Allow pg_upgrade to process database checks in parallel (Nathan Bossart) -§ -§ -§ -§ -§ -§ -§ -§ -§ -§ -§ - - - -This is controlled by the existing option. - - + + + Allow pg_upgrade to process database + checks in parallel (Nathan Bossart) + § + § + § + § + § + § + § + § + § + § + § + + + + This is controlled by the existing option. + + - - -Add pg_upgrade option to swap directories rather than copy, clone, or link files (Nathan Bossart) -§ - + + + Add pg_upgrade option + to swap directories rather than copy, clone, + or link files (Nathan Bossart) + § + - -This mode is potentially the fastest. - - + + This mode is potentially the fastest. 
+ + - - -Add pg_upgrade option to set the default char signedness of new cluster (Masahiko Sawada) -§ -§ - + + + Add pg_upgrade option + to set the default + char signedness of new cluster (Masahiko Sawada) + § + § + - -This is to handle cases where a pre-PostgreSQL 18 cluster's default CPU signedness does not match the new cluster. - - + + This is to handle cases where a + pre-PostgreSQL 18 cluster's default + CPU signedness does not match the new cluster. + + @@ -2801,60 +3270,68 @@ Author: Amit Kapila 2025-03-28 [fb2ea12f4] pg_createsubscriber: Add '- -all' option. --> - - -Add option to create logical replicas for all databases (Shubham Khanna) -§ - - + + + Add option + to create logical replicas for all databases + (Shubham Khanna) + § + + - - -Add pg_createsubscriber option to remove publications (Shubham Khanna) -§ - - + + + Add pg_createsubscriber option + to remove publications (Shubham Khanna) + § + + - - -Add pg_createsubscriber option to enable prepared transactions (Shubham Khanna) -§ - - + + + Add pg_createsubscriber option + to enable prepared transactions + (Shubham Khanna) + § + + - - -Add option to specify failover slots (Hayato Kuroda) -§ - - + + + Add option + to specify failover slots (Hayato Kuroda) + § + + - - -Allow pg_recvlogical to work without (Hayato Kuroda) -§ - - + + + Allow pg_recvlogical + to work without + (Hayato Kuroda) + § + + @@ -2874,65 +3351,76 @@ Author: Michael Paquier 2024-07-18 [a0a5869a8] Add INJECTION_POINT_CACHED() to run injection points dir --> - - -Separate the loading and running of injection points (Michael Paquier, Heikki Linnakangas) -§ -§ - + + + Separate the loading and running of injection points + (Michael Paquier, Heikki Linnakangas) + § + § + - -Injection points can now be created, but not run, via INJECTION_POINT_LOAD(), and such injection points can be run via INJECTION_POINT_CACHED(). - - + + Injection points can now be created, but not run, via INJECTION_POINT_LOAD(), + and such injection points can be run via INJECTION_POINT_CACHED(). 
+ + - - -Support runtime arguments in injection points (Michael Paquier) -§ - - + + + Support runtime arguments in injection points (Michael Paquier) + § + + - - -Allow inline injection point test code with IS_INJECTION_POINT_ATTACHED() (Heikki Linnakangas) -§ - - + + + Allow inline injection point test code with IS_INJECTION_POINT_ATTACHED() + (Heikki Linnakangas) + § + + - - -Improve the performance of processing long JSON strings using SIMD (Single Instruction Multiple Data) (David Rowley) -§ - - + + + Improve the performance of processing long JSON strings using + SIMD (Single Instruction Multiple Data) (David + Rowley) + § + + - - -Speed up CRC32C calculations using x86 AVX-512 instructions (Raghuveer Devulapalli, Paul Amonson) -§ - - + + + Speed up CRC32C calculations using x86 AVX-512 + instructions (Raghuveer Devulapalli, Paul Amonson) + § + + - - -Add ARM Neon and SVE CPU intrinsics for popcount (integer bit counting) (Chiranmoy Bhattacharya, Devanga Susmitha, Rama Malladi) -§ -§ - - + + + Add ARM Neon and SVE CPU + intrinsics for popcount (integer bit counting) (Chiranmoy + Bhattacharya, Devanga Susmitha, Rama Malladi) + § + § + + - - -Improve the speed of numeric multiplication and division (Joel Jacobson, Dean Rasheed) -§ -§ -§ -§ - - + + + Improve the speed of numeric multiplication and division (Joel + Jacobson, Dean Rasheed) + § + § + § + § + + - - -Add configure option to enable NUMA awareness (Jakub Wartak, Bertrand Drouvot) -§ -§ -§ - + + + Add configure option + to enable NUMA awareness (Jakub Wartak, Bertrand + Drouvot) + § + § + § + - -The function pg_numa_available() reports on NUMA awareness, and system views pg_shmem_allocations_numa and pg_buffercache_numa which report on shared memory distribution across -NUMA nodes. - - + + The function pg_numa_available() + reports on NUMA awareness, and system views pg_shmem_allocations_numa + and pg_buffercache_numa + which report on shared memory distribution across + NUMA nodes. + + - - -Add TOAST table to pg_index to allow for very large expression indexes (Nathan Bossart) -§ - - + + + Add TOAST table to pg_index + to allow for very large expression indexes (Nathan Bossart) + § + + - - -Remove column -pg_attribute.attcacheoff (David Rowley) -§ - - + + + Remove column pg_attribute.attcacheoff + (David Rowley) + § + + - - -Add column pg_class.relallfrozen (Melanie Plageman) -§ - - + + + Add column pg_class.relallfrozen + (Melanie Plageman) + § + + - - -Add amgettreeheight, amconsistentequality, and amconsistentordering to the index access method API (Mark Dilger) -§ -§ - - + + + Add amgettreeheight, + amconsistentequality, and + amconsistentordering to the index access method + API (Mark Dilger) + § + § + + - - -Add GiST support function stratnum() (Paul A. Jungwirth) -§ - - + + + Add GiST support function stratnum() + (Paul A. Jungwirth) + § + + - - -Record the default CPU signedness of char in (Masahiko Sawada) -§ - - + + + Record the default CPU signedness of + char in + (Masahiko Sawada) + § + + - - -Add support for Python "Limited API" in (Peter Eisentraut) -§ -§ - + + + Add support for Python "Limited API" in (Peter Eisentraut) + § + § + - -This helps prevent problems caused by Python 3.x version mismatches. - - + + This helps prevent problems caused by + Python 3.x version mismatches. 
+ + - - -Change the minimum supported Python version to 3.6.8 (Jacob Champion) -§ - - + + + Change the minimum supported Python + version to 3.6.8 (Jacob Champion) + § + + - - -Remove support for OpenSSL versions older than 1.1.1 (Daniel Gustafsson) -§ -§ - - + + + Remove support for OpenSSL versions older + than 1.1.1 (Daniel Gustafsson) + § + § + + - - -If LLVM is enabled, require version 14 or later (Thomas Munro) -§ - - + + + If LLVM is enabled, require version 14 + or later (Thomas Munro) + § + + - - -Add macro PG_MODULE_MAGIC_EXT to allow extensions to report their name and version (Andrei Lepikhov) -§ - + + + Add macro PG_MODULE_MAGIC_EXT + to allow extensions to report their name and version (Andrei Lepikhov) + § + - -This information can be access via the new function pg_get_loaded_modules(). - - + + This information can be accessed via the new function pg_get_loaded_modules(). + + - - -Document that SPI_connect()/SPI_connect_ext() always returns success (SPI_OK_CONNECT) (Stepan Neretin) -§ - + + + Document that SPI_connect()/SPI_connect_ext() + always returns success (SPI_OK_CONNECT) (Stepan + Neretin) + § + - -Errors are always reported via ereport(). - - + + Errors are always reported via ereport(). + + - - -Add documentation section about API and ABI compatibility (David Wheeler, Peter Eisentraut) -§ - - + + + Add documentation + section about API and ABI + compatibility (David Wheeler, Peter Eisentraut) + § + + - - -Remove the experimental designation of Meson builds on Windows (Aleksander Alekseev) -§ - - + + + Remove the experimental designation of + Meson builds on Windows (Aleksander Alekseev) + § + + - - -Remove configure options and (Thomas Munro) -§ -§ - + + + Remove configure options and + (Thomas Munro) + § + § + - -Thirty-two-bit atomic operations are now required. - - + + Thirty-two-bit atomic operations are now required. + + - - -Remove support for the HPPA/PA-RISC architecture (Tom Lane) -§ - - + + + Remove support for the + HPPA/PA-RISC architecture + (Tom Lane) + § + + @@ -3228,24 +3761,27 @@ Author: Masahiko Sawada 2024-10-14 [7cdfeee32] Add contrib/pg_logicalinspect. --> - - -Add extension to inspect logical snapshots (Bertrand Drouvot) -§ - - + + + Add extension to inspect logical + snapshots (Bertrand Drouvot) + § + + - - -Add extension which adds debug details to EXPLAIN output (Robert Haas) -§ - - + + + Add extension which adds debug details + to EXPLAIN + output (Robert Haas) + § + + - - -Add output columns to postgres_fdw_get_connections() (Hayato Kuroda, Sagar Dilip Shedge) -§ -§ -§ -§ - + + + Add output columns to postgres_fdw_get_connections() + (Hayato Kuroda, Sagar Dilip Shedge) + § + § + § + § + - -New output column used_in_xact indicates if the foreign data wrapper is being used by a current transaction, closed indicates if it is closed, user_name indicates the -user name, and remote_backend_pid indicates the remote backend process identifier. - - + + New output column used_in_xact indicates + if the foreign data wrapper is being used by a current transaction, + closed indicates if it is closed, + user_name indicates the user name, and + remote_backend_pid indicates the remote + backend process identifier.
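A sketch of reading the new columns; the output depends on which foreign-server connections the session has opened:

    SELECT server_name, used_in_xact, closed, user_name, remote_backend_pid
    FROM postgres_fdw_get_connections();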
+ + - - -Allow SCRAM authentication from the client to be passed to - servers (Matheus Alcantara, Peter Eisentraut) -§ - + + + Allow SCRAM + authentication from the client to be passed to servers (Matheus Alcantara, Peter Eisentraut) + § + - -This avoids storing postgres_fdw authentication information in the database, and is enabled with the -postgres_fdw use_scram_passthrough connection option. libpq uses new connection -parameters and . - - + + This avoids storing postgres_fdw + authentication information in the database, and is + enabled with the postgres_fdw use_scram_passthrough + connection option. libpq uses new connection parameters + and . + + - - -Allow SCRAM authentication from the client to be passed to - servers (Matheus Alcantara) -§ - - + + + Allow SCRAM authentication from the client to be + passed to servers (Matheus Alcantara) + § + + - - -Add on_error and log_verbosity options to (Atsushi Torikoshi) -§ - + + + Add on_error and log_verbosity + options to (Atsushi Torikoshi) + § + - -These control how file_fdw handles and reports invalid file rows. - - + + These control how file_fdw handles and + reports invalid file rows. + + - - -Add reject_limit to control the number of invalid rows file_fdw can ignore (Atsushi Torikoshi) -§ - + + + Add reject_limit to control the number of + invalid rows file_fdw can ignore (Atsushi + Torikoshi) + § + - -This is active when ON_ERROR = 'ignore'. - - + + This is active when ON_ERROR = 'ignore'. + + - - -Add configurable variable min_password_length to - (Emanuele Musella, Maurizio Boriani) -§ - + + + Add configurable variable min_password_length to + (Emanuele Musella, Maurizio Boriani) + § + - -This controls the minimum password length. - - + + This controls the minimum password length. + + - - -Have report the number of failed, retried, or skipped transactions in per-script reports (Yugo Nagata) -§ - - + + + Have report the number of failed, retried, + or skipped transactions in per-script reports (Yugo Nagata) + § + + - - -Add server variable weak to control invalid check digit acceptance (Viktor Holmberg) -§ - + + + Add server variable weak + to control invalid check digit acceptance (Viktor Holmberg) + § + - -This was previously only controlled by function isn_weak(). - - + + This was previously only controlled by function isn_weak(). + + - - -Allow values to be sorted to speed index builds (Bernd Helmle, Andrey Borodin) -§ - - + + + Allow values to be sorted to speed + index builds (Bernd Helmle, Andrey Borodin) + § + + - - -Add check function gin_index_check() to verify GIN indexes (Grigory Kryachko, Heikki Linnakangas, Andrey Borodin) -§ - - + + + Add check function gin_index_check() + to verify GIN indexes (Grigory Kryachko, Heikki + Linnakangas, Andrey Borodin) + § + + - - -Add functions pg_buffercache_evict_relation() and pg_buffercache_evict_all() to evict unpinned shared buffers (Nazir Bilal Yavuz) -§ - + + + Add functions pg_buffercache_evict_relation() + and pg_buffercache_evict_all() + to evict unpinned shared buffers (Nazir Bilal Yavuz) + § + - -The existing function pg_buffercache_evict() now returns the buffer flush status. - - + + The existing function pg_buffercache_evict() + now returns the buffer flush status. 
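A sketch of the eviction functions (superuser-only; the relation name is hypothetical and the returned counts vary from run to run):

    SELECT * FROM pg_buffercache_evict_relation('measurements');
    SELECT * FROM pg_buffercache_evict_all();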
+ + - - -Allow extensions to install custom options (Robert Haas, Sami Imseih) -§ -§ -§ - - + + + Allow extensions to install custom + options (Robert Haas, Sami Imseih) + § + § + § + + - - -Allow extensions to use the server's cumulative statistics API (Michael Paquier) -§ -§ - - + + + Allow extensions to use the server's cumulative statistics + API (Michael Paquier) + § + § + + @@ -3467,60 +4033,71 @@ Author: Michael Paquier 2024-10-28 [6b652e6ce] Set query ID for inner queries of CREATE TABLE AS and DE --> - - -Allow the queries of and to be tracked by pg_stat_statements (Anthonin Bonnefoy) -§ - + + + Allow the queries of + and to be tracked by + pg_stat_statements (Anthonin Bonnefoy) + § + - -They are also now assigned query ids. - - + + They are also now assigned query ids. + + - - -Allow the parameterization of values in pg_stat_statements (Greg Sabino Mullane, Michael Paquier) -§ - + + + Allow the parameterization of values in + pg_stat_statements (Greg Sabino Mullane, + Michael Paquier) + § + - -This reduces the bloat caused by SET statements with differing constants. - - + + This reduces the bloat caused by SET statements + with differing constants. + + - - -Add pg_stat_statements columns to report parallel activity (Guillaume Lelarge) -§ - + + + Add pg_stat_statements + columns to report parallel activity (Guillaume Lelarge) + § + - -The new columns are parallel_workers_to_launch and parallel_workers_launched. - - + + The new columns are + parallel_workers_to_launch and + parallel_workers_launched. + + - - -Add pg_stat_statements.wal_buffers_full to report full WAL buffers (Bertrand Drouvot) -§ - - + + + Add + pg_stat_statements.wal_buffers_full + to report full WAL buffers (Bertrand Drouvot) + § + + @@ -3536,52 +4113,65 @@ Author: Álvaro Herrera 2025-04-05 [749a9e20c] Add modern SHA-2 based password hashes to pgcrypto. --> - - -Add pgcrypto algorithms sha256crypt and sha512crypt (Bernd Helmle) -§ - - + + + Add pgcrypto algorithms sha256crypt + and sha512crypt + (Bernd Helmle) + § + + - - -Add CFB mode to pgcrypto encryption and decryption (Umar Hayat) -§ - - + + + Add CFB mode + to pgcrypto encryption and decryption + (Umar Hayat) + § + + - - -Add function fips_mode() to report the server's FIPS mode (Daniel Gustafsson) -§ - - + + + Add function fips_mode() + to report the server's FIPS mode (Daniel + Gustafsson) + § + + - - -Add pgcrypto server variable builtin_crypto_enabled to allow disabling builtin non-FIPS mode cryptographic functions (Daniel Gustafsson, Joe Conway) -§ - + + + Add pgcrypto server variable builtin_crypto_enabled + to allow disabling builtin non-FIPS mode + cryptographic functions (Daniel Gustafsson, Joe Conway) + § + - -This is useful for guaranteeing FIPS mode behavior. - - + + This is useful for guaranteeing FIPS mode behavior. 
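A sketch of the new pieces, assuming the pgcrypto extension is installed and that the gen_salt() type names match the algorithm names above:

    SELECT fips_mode();
    SELECT crypt('secret', gen_salt('sha256crypt'));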
+ + From fa638edc74ee4be90e94a45f8489f3be9a926d7e Mon Sep 17 00:00:00 2001 From: Bruce Momjian Date: Fri, 20 Jun 2025 23:53:15 -0400 Subject: [PATCH 047/181] doc PG 18 relnotes: update to current, add one commit --- doc/src/sgml/release-18.sgml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/doc/src/sgml/release-18.sgml b/doc/src/sgml/release-18.sgml index faf9156e18519..662c7d8890f9b 100644 --- a/doc/src/sgml/release-18.sgml +++ b/doc/src/sgml/release-18.sgml @@ -6,7 +6,7 @@ Release date: - 2025-??-??, CURRENT AS OF 2025-05-23 + 2025-??-??, CURRENT AS OF 2025-06-20 @@ -869,13 +869,16 @@ Author: Tom Lane Add server variable to log - lock acquisition failures (Yuki Seino) + lock acquisition failures (Yuki Seino, Fujii Masao) § + § From ea06263c4aa5abadc97a6928c6b2aff0e29698ae Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Sat, 21 Jun 2025 12:52:37 -0400 Subject: [PATCH 048/181] Doc: improve documentation about width_bucket(). Specify whether the bucket bounds are inclusive or exclusive, and improve some other vague language. Explain the behavior that occurs when the "low" bound is greater than the "high" bound. Make width_bucket_numeric's comment more like that for width_bucket_float8, in particular noting that infinite bounds are rejected (since they became possible in v14). Reported-by: Ben Peachey Higdon Author: Robert Treat Co-authored-by: Tom Lane Reviewed-by: Dean Rasheed Discussion: https://postgr.es/m/2BD74F86-5B89-4AC1-8F13-23CED3546AC1@gmail.com Backpatch-through: 13 --- doc/src/sgml/func.sgml | 18 ++++++++++++++---- src/backend/utils/adt/float.c | 4 ++-- src/backend/utils/adt/numeric.c | 7 ++++--- 3 files changed, 20 insertions(+), 9 deletions(-) diff --git a/doc/src/sgml/func.sgml b/doc/src/sgml/func.sgml index 8d7d9a2f3e8e8..a6d79765c1a73 100644 --- a/doc/src/sgml/func.sgml +++ b/doc/src/sgml/func.sgml @@ -1824,13 +1824,23 @@ SELECT NOT(ROW(table.*) IS NOT NULL) FROM TABLE; -- detect at least one null in which operand falls in a histogram having count equal-width buckets spanning the range low to high. - Returns 0 + The buckets have inclusive lower bounds and exclusive upper bounds. + Returns 0 for an input less + than low, or count+1 for an input - outside that range. + greater than or equal to high. + If low > high, + the behavior is mirror-reversed, with bucket 1 + now being the one just below low, and the + inclusive bounds now being on the upper side. width_bucket(5.35, 0.024, 10.06, 5) 3 + + + width_bucket(9, 10, 0, 10) + 2 @@ -1842,8 +1852,8 @@ SELECT NOT(ROW(table.*) IS NOT NULL) FROM TABLE; -- detect at least one null in Returns the number of the bucket in which operand falls given an array listing the - lower bounds of the buckets. Returns 0 for an - input less than the first lower + inclusive lower bounds of the buckets. + Returns 0 for an input less than the first lower bound. operand and the array elements can be of any type having standard comparison operators. The thresholds array must be diff --git a/src/backend/utils/adt/float.c b/src/backend/utils/adt/float.c index 6d20ae07ae7b0..ba66a9c4ce63a 100644 --- a/src/backend/utils/adt/float.c +++ b/src/backend/utils/adt/float.c @@ -4065,8 +4065,8 @@ float84ge(PG_FUNCTION_ARGS) * in the histogram. width_bucket() returns an integer indicating the * bucket number that 'operand' belongs to in an equiwidth histogram * with the specified characteristics. An operand smaller than the - * lower bound is assigned to bucket 0. 
An operand greater than the - * upper bound is assigned to an additional bucket (with number + * lower bound is assigned to bucket 0. An operand greater than or equal + * to the upper bound is assigned to an additional bucket (with number * count+1). We don't allow "NaN" for any of the float8 inputs, and we * don't allow either of the histogram bounds to be +/- infinity. */ diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c index 40dcbc7b6710b..58ad1a65ef7b1 100644 --- a/src/backend/utils/adt/numeric.c +++ b/src/backend/utils/adt/numeric.c @@ -1958,9 +1958,10 @@ generate_series_numeric_support(PG_FUNCTION_ARGS) * in the histogram. width_bucket() returns an integer indicating the * bucket number that 'operand' belongs to in an equiwidth histogram * with the specified characteristics. An operand smaller than the - * lower bound is assigned to bucket 0. An operand greater than the - * upper bound is assigned to an additional bucket (with number - * count+1). We don't allow "NaN" for any of the numeric arguments. + * lower bound is assigned to bucket 0. An operand greater than or equal + * to the upper bound is assigned to an additional bucket (with number + * count+1). We don't allow "NaN" for any of the numeric inputs, and we + * don't allow either of the histogram bounds to be +/- infinity. */ Datum width_bucket_numeric(PG_FUNCTION_ARGS) From 2c0d8b95080e1d51c60d9c6f6a2e4460d6dfaf77 Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Sun, 22 Jun 2025 14:13:46 +0200 Subject: [PATCH 049/181] meson: Fix meson warning WARNING: You should add the boolean check kwarg to the run_command call. It currently defaults to false, but it will default to true in meson 2.0. Introduced by commit bc46104fc9a. (This only happens in the msvc branch. All the other run_command calls are ok.) Reviewed-by: Andres Freund Discussion: https://www.postgresql.org/message-id/flat/42e13eb0-862a-441e-8d84-4f0fd5f6def0%40eisentraut.org --- meson.build | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meson.build b/meson.build index d142e3e408b38..474763ad19f0c 100644 --- a/meson.build +++ b/meson.build @@ -1205,7 +1205,7 @@ if not perlopt.disabled() if cc.get_id() == 'msvc' # prevent binary mismatch between MSVC built plperl and Strawberry or # msys ucrt perl libraries - perl_v = run_command(perl, '-V').stdout() + perl_v = run_command(perl, '-V', check: false).stdout() if not perl_v.contains('USE_THREAD_SAFE_LOCALE') perl_ccflags += ['-DNO_THREAD_SAFE_LOCALE'] endif From 43da394304fba820830da2cef2c0214fe292c037 Mon Sep 17 00:00:00 2001 From: John Naylor Date: Mon, 23 Jun 2025 18:03:56 +0700 Subject: [PATCH 050/181] Properly fix AVX-512 CRC calculation bug The problem that led to the workaround in f83f14881c7 was not in fact a compiler bug, but a failure to zero the upper bits of the vector register containing the initial scalar CRC value. Fix that and revert the workaround. 
Diagnosed-by: Nathan Bossart Diagnosed-by: Raghuveer Devulapalli Tested-by: Andy Fan Tested-by: Soumyadeep Chakraborty Reviewed-by: Nathan Bossart Reviewed-by: Raghuveer Devulapalli Discussion: https://postgr.es/m/PH8PR11MB82866B07AA6758D12F699C00FB70A@PH8PR11MB8286.namprd11.prod.outlook.com --- src/port/pg_crc32c_sse42.c | 2 +- src/port/pg_crc32c_sse42_choose.c | 4 +--- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/src/port/pg_crc32c_sse42.c b/src/port/pg_crc32c_sse42.c index 9af3474a6ca95..1a7172553551a 100644 --- a/src/port/pg_crc32c_sse42.c +++ b/src/port/pg_crc32c_sse42.c @@ -123,7 +123,7 @@ pg_comp_crc32c_avx512(pg_crc32c crc, const void *data, size_t len) __m512i k; k = _mm512_broadcast_i32x4(_mm_setr_epi32(0x740eef02, 0, 0x9e4addf8, 0)); - x0 = _mm512_xor_si512(_mm512_castsi128_si512(_mm_cvtsi32_si128(crc0)), x0); + x0 = _mm512_xor_si512(_mm512_zextsi128_si512(_mm_cvtsi32_si128(crc0)), x0); buf += 64; /* Main loop. */ diff --git a/src/port/pg_crc32c_sse42_choose.c b/src/port/pg_crc32c_sse42_choose.c index 802e47788c10c..74d2421ba2be9 100644 --- a/src/port/pg_crc32c_sse42_choose.c +++ b/src/port/pg_crc32c_sse42_choose.c @@ -95,9 +95,7 @@ pg_comp_crc32c_choose(pg_crc32c crc, const void *data, size_t len) __cpuidex(exx, 7, 0); #endif -#if defined(__clang__) && !defined(__OPTIMIZE__) - /* Some versions of clang are broken at -O0 */ -#elif defined(USE_AVX512_CRC32C_WITH_RUNTIME_CHECK) +#ifdef USE_AVX512_CRC32C_WITH_RUNTIME_CHECK if (exx[2] & (1 << 10) && /* VPCLMULQDQ */ exx[1] & (1 << 31)) /* AVX512-VL */ pg_comp_crc32c = pg_comp_crc32c_avx512; From ccd5bc93fdfeae22c935f405b0687be5cfa9caa4 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Mon, 23 Jun 2025 11:50:21 -0400 Subject: [PATCH 051/181] Include _mm512_zextsi128_si512() in AVX-512 configure probes. Commit 43da39430 added a dependency on this intrinsic to our AVX-512 CRC code. It turns out this intrinsic was added to gcc later than the other ones we were using, so that there are platforms where the new code fails to compile. Since only relatively old (pre-gcc-10) compilers are affected, it doesn't seem worth trying to make the AVX-512 CRC code actually work on these platforms. Just add the new intrinsic to the configure probe, so that we'll conclude the code can't be built. 
Author: Tom Lane Reviewed-by: Nathan Bossart Discussion: https://postgr.es/m/3350336.1750690281@sss.pgh.pa.us --- config/c-compiler.m4 | 1 + configure | 1 + meson.build | 1 + 3 files changed, 3 insertions(+) diff --git a/config/c-compiler.m4 b/config/c-compiler.m4 index 5f3e1d1faf930..da40bd6a64755 100644 --- a/config/c-compiler.m4 +++ b/config/c-compiler.m4 @@ -602,6 +602,7 @@ AC_CACHE_CHECK([for _mm512_clmulepi64_epi128], [Ac_cachevar], { __m128i z; + x = _mm512_xor_si512(_mm512_zextsi128_si512(_mm_cvtsi32_si128(0)), x); y = _mm512_clmulepi64_epi128(x, y, 0); z = _mm_ternarylogic_epi64( _mm512_castsi512_si128(y), diff --git a/configure b/configure index 4f15347cc9503..3d3d3db97a456 100755 --- a/configure +++ b/configure @@ -18227,6 +18227,7 @@ else { __m128i z; + x = _mm512_xor_si512(_mm512_zextsi128_si512(_mm_cvtsi32_si128(0)), x); y = _mm512_clmulepi64_epi128(x, y, 0); z = _mm_ternarylogic_epi64( _mm512_castsi512_si128(y), diff --git a/meson.build b/meson.build index 474763ad19f0c..6ffe7b4727556 100644 --- a/meson.build +++ b/meson.build @@ -2465,6 +2465,7 @@ int main(void) { __m128i z; + x = _mm512_xor_si512(_mm512_zextsi128_si512(_mm_cvtsi32_si128(0)), x); y = _mm512_clmulepi64_epi128(x, y, 0); z = _mm_ternarylogic_epi64( _mm512_castsi512_si128(y), From 70d8a91f82f19d270facd25bb6292a949773dfce Mon Sep 17 00:00:00 2001 From: Alexander Korotkov Date: Mon, 23 Jun 2025 21:27:42 +0300 Subject: [PATCH 052/181] Remove excess assert from InvalidatePossiblyObsoleteSlot() ca307d5cec90 introduced keeping WAL segments by slot's last saved restart LSN. It also added an assertion that the slot's restart LSN never goes backward. However, situations when the restart LSN goes backward have been spotted by buildfarm animals and investigated in the thread. When pg_receivewal starts the replication, it sets the last replayed LSN to the beginning of the segment, which is older than what ReplicationSlotReserveWal() set for the slot. A similar situation can happen to pg_basebackup. When standby reconnects to the primary, it sends the last replayed LSN, which might be older than the last confirmed flush LSN. In both these situations, a concurrent checkpoint may trigger an assert trap. Based on ideas from Vitaly Davydov , Hayato Kuroda (Fujitsu) , Vignesh C , Amit Kapila . Reported-by: Vignesh C Reported-by: Tom Lane Discussion: https://postgr.es/m/CALDaNm3s-jpQTe1MshsvQ8GO%3DTLj233JCdkQ7uZ6pwqRVpxAdw%40mail.gmail.com Reviewed-by: Vignesh C Reviewed-by: Amit Kapila --- src/backend/replication/slot.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/backend/replication/slot.c b/src/backend/replication/slot.c index c64f020742f8f..c11e588d63221 100644 --- a/src/backend/replication/slot.c +++ b/src/backend/replication/slot.c @@ -1810,8 +1810,6 @@ InvalidatePossiblyObsoleteSlot(uint32 possible_causes, */ SpinLockAcquire(&s->mutex); - Assert(s->data.restart_lsn >= s->last_saved_restart_lsn); - restart_lsn = s->data.restart_lsn; /* we do nothing if the slot is already invalid */ From f3ed72ca0765bdd726a31b7fa20219e96baf312c Mon Sep 17 00:00:00 2001 From: Alexander Korotkov Date: Mon, 23 Jun 2025 21:33:50 +0300 Subject: [PATCH 053/181] Temporarily remove 046_checkpoint_logical_slot.pl This new test was intended to check the handling of the replication slot's restart lsn fixed in ca307d5cec90. However, it also reveals another issue related to logical decoding. This commit temporarily removes this test to keep the buildfarm and CFbot green and avoid distorting others' work. 
This test will be restored once we investigate and fix the issue. Discussion: https://postgr.es/m/CAAKRu_ZCOzQpEumLFgG_%2Biw3FTa%2BhJ4SRpxzaQBYxxM_ZAzWcA%40mail.gmail.com --- src/test/recovery/meson.build | 1 - .../recovery/t/046_checkpoint_logical_slot.pl | 136 ------------------ 2 files changed, 137 deletions(-) delete mode 100644 src/test/recovery/t/046_checkpoint_logical_slot.pl diff --git a/src/test/recovery/meson.build b/src/test/recovery/meson.build index 92429d2840257..28fd9ae8dda0d 100644 --- a/src/test/recovery/meson.build +++ b/src/test/recovery/meson.build @@ -54,7 +54,6 @@ tests += { 't/043_no_contrecord_switch.pl', 't/044_invalidate_inactive_slots.pl', 't/045_archive_restartpoint.pl', - 't/046_checkpoint_logical_slot.pl', 't/047_checkpoint_physical_slot.pl' ], }, diff --git a/src/test/recovery/t/046_checkpoint_logical_slot.pl b/src/test/recovery/t/046_checkpoint_logical_slot.pl deleted file mode 100644 index d67c5108d7800..0000000000000 --- a/src/test/recovery/t/046_checkpoint_logical_slot.pl +++ /dev/null @@ -1,136 +0,0 @@ -# Copyright (c) 2025, PostgreSQL Global Development Group -# -# This test verifies the case when the logical slot is advanced during -# checkpoint. The test checks that the logical slot's restart_lsn still refers -# to an existed WAL segment after immediate restart. -# -use strict; -use warnings FATAL => 'all'; - -use PostgreSQL::Test::Cluster; -use PostgreSQL::Test::Utils; - -use Test::More; - -if ($ENV{enable_injection_points} ne 'yes') -{ - plan skip_all => 'Injection points not supported by this build'; -} - -my ($node, $result); - -$node = PostgreSQL::Test::Cluster->new('mike'); -$node->init; -$node->append_conf('postgresql.conf', "wal_level = 'logical'"); -$node->start; - -# Check if the extension injection_points is available, as it may be -# possible that this script is run with installcheck, where the module -# would not be installed by default. -if (!$node->check_extension('injection_points')) -{ - plan skip_all => 'Extension injection_points not installed'; -} - -$node->safe_psql('postgres', q(CREATE EXTENSION injection_points)); - -# Create the two slots we'll need. -$node->safe_psql('postgres', - q{select pg_create_logical_replication_slot('slot_logical', 'test_decoding')} -); -$node->safe_psql('postgres', - q{select pg_create_physical_replication_slot('slot_physical', true)}); - -# Advance both slots to the current position just to have everything "valid". -$node->safe_psql('postgres', - q{select count(*) from pg_logical_slot_get_changes('slot_logical', null, null)} -); -$node->safe_psql('postgres', - q{select pg_replication_slot_advance('slot_physical', pg_current_wal_lsn())} -); - -# Run checkpoint to flush current state to disk and set a baseline. -$node->safe_psql('postgres', q{checkpoint}); - -# Generate some transactions to get RUNNING_XACTS. -my $xacts = $node->background_psql('postgres'); -$xacts->query_until( - qr/run_xacts/, - q(\echo run_xacts -SELECT 1 \watch 0.1 -\q -)); - -$node->advance_wal(20); - -# Run another checkpoint to set a new restore LSN. -$node->safe_psql('postgres', q{checkpoint}); - -$node->advance_wal(20); - -# Run another checkpoint, this time in the background, and make it wait -# on the injection point) so that the checkpoint stops right before -# removing old WAL segments. 
-note('starting checkpoint'); - -my $checkpoint = $node->background_psql('postgres'); -$checkpoint->query_safe( - q(select injection_points_attach('checkpoint-before-old-wal-removal','wait')) -); -$checkpoint->query_until( - qr/starting_checkpoint/, - q(\echo starting_checkpoint -checkpoint; -\q -)); - -# Wait until the checkpoint stops right before removing WAL segments. -note('waiting for injection_point'); -$node->wait_for_event('checkpointer', 'checkpoint-before-old-wal-removal'); -note('injection_point is reached'); - -# Try to advance the logical slot, but make it stop when it moves to the next -# WAL segment (this has to happen in the background, too). -my $logical = $node->background_psql('postgres'); -$logical->query_safe( - q{select injection_points_attach('logical-replication-slot-advance-segment','wait');} -); -$logical->query_until( - qr/get_changes/, - q( -\echo get_changes -select count(*) from pg_logical_slot_get_changes('slot_logical', null, null) \watch 1 -\q -)); - -# Wait until the slot's restart_lsn points to the next WAL segment. -note('waiting for injection_point'); -$node->wait_for_event('client backend', - 'logical-replication-slot-advance-segment'); -note('injection_point is reached'); - -# OK, we're in the right situation: time to advance the physical slot, which -# recalculates the required LSN, and then unblock the checkpoint, which -# removes the WAL still needed by the logical slot. -$node->safe_psql('postgres', - q{select pg_replication_slot_advance('slot_physical', pg_current_wal_lsn())} -); - -# Continue the checkpoint. -$node->safe_psql('postgres', - q{select injection_points_wakeup('checkpoint-before-old-wal-removal')}); - -# Abruptly stop the server (1 second should be enough for the checkpoint -# to finish; it would be better). -$node->stop('immediate'); - -$node->start; - -eval { - $node->safe_psql('postgres', - q{select count(*) from pg_logical_slot_get_changes('slot_logical', null, null);} - ); -}; -is($@, '', "Logical slot still valid"); - -done_testing(); From fc39b286ad7262a4aac8ff9a34f244763bed7a53 Mon Sep 17 00:00:00 2001 From: Michael Paquier Date: Tue, 24 Jun 2025 13:12:46 +0900 Subject: [PATCH 054/181] psql: Rename meta-command \close to \close_prepared \close has been introduced in d55322b0da60 to be able to close a prepared statement using the extended protocol in psql. Per discussion, the name "close" is ambiguous. At the SQL level, CLOSE is used to close a cursor. At protocol level, the close message can be used to either close a statement or a portal. This patch renames \close to \close_prepared to avoid any ambiguity and make it clear that this is used to close a prepared statement. This new name has been chosen based on the feedback from the author and the reviewers. 
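The renamed command in use, mirroring the documentation example updated in the diff below:

    SELECT $1 \parse stmt1
    \close_prepared stmt1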
Author: Anthonin Bonnefoy Reviewed-by: Peter Eisentraut Reviewed-by: Jelte Fennema-Nio Discussion: https://postgr.es/m/3e694442-0df5-4f92-a08f-c5d4c4346b85@eisentraut.org --- doc/src/sgml/ref/psql-ref.sgml | 8 ++++---- doc/src/sgml/release-18.sgml | 2 +- src/bin/psql/command.c | 12 ++++++------ src/bin/psql/common.c | 2 +- src/bin/psql/help.c | 3 ++- src/bin/psql/tab-complete.in.c | 2 +- src/test/regress/expected/psql.out | 14 +++++++------- src/test/regress/expected/psql_pipeline.out | 6 +++--- src/test/regress/sql/psql.sql | 12 ++++++------ src/test/regress/sql/psql_pipeline.sql | 6 +++--- 10 files changed, 34 insertions(+), 33 deletions(-) diff --git a/doc/src/sgml/ref/psql-ref.sgml b/doc/src/sgml/ref/psql-ref.sgml index 570ef21d1fce3..95f4cac2467e3 100644 --- a/doc/src/sgml/ref/psql-ref.sgml +++ b/doc/src/sgml/ref/psql-ref.sgml @@ -1067,8 +1067,8 @@ INSERT INTO tbls1 VALUES ($1, $2) \parse stmt1 - - \close prepared_statement_name + + \close_prepared prepared_statement_name @@ -1081,7 +1081,7 @@ INSERT INTO tbls1 VALUES ($1, $2) \parse stmt1 Example: SELECT $1 \parse stmt1 -\close stmt1 +\close_prepared stmt1 @@ -3710,7 +3710,7 @@ testdb=> \setenv LESS -imx4F All queries executed while a pipeline is ongoing use the extended query protocol. Queries are appended to the pipeline when ending with a semicolon. The meta-commands \bind, - \bind_named, \close or + \bind_named, \close_prepared or \parse can be used in an ongoing pipeline. While a pipeline is ongoing, \sendpipeline will append the current query buffer to the pipeline. Other meta-commands like diff --git a/doc/src/sgml/release-18.sgml b/doc/src/sgml/release-18.sgml index 662c7d8890f9b..66a6817a2be0f 100644 --- a/doc/src/sgml/release-18.sgml +++ b/doc/src/sgml/release-18.sgml @@ -2746,7 +2746,7 @@ Author: Michael Paquier \bind_named, and \close. + linkend="app-psql-meta-command-close-prepared">\close_prepared. 
diff --git a/src/bin/psql/command.c b/src/bin/psql/command.c index 83e84a778411a..9fcd2db832656 100644 --- a/src/bin/psql/command.c +++ b/src/bin/psql/command.c @@ -67,8 +67,8 @@ static backslashResult exec_command_C(PsqlScanState scan_state, bool active_bran static backslashResult exec_command_connect(PsqlScanState scan_state, bool active_branch); static backslashResult exec_command_cd(PsqlScanState scan_state, bool active_branch, const char *cmd); -static backslashResult exec_command_close(PsqlScanState scan_state, bool active_branch, - const char *cmd); +static backslashResult exec_command_close_prepared(PsqlScanState scan_state, + bool active_branch, const char *cmd); static backslashResult exec_command_conninfo(PsqlScanState scan_state, bool active_branch); static backslashResult exec_command_copy(PsqlScanState scan_state, bool active_branch); static backslashResult exec_command_copyright(PsqlScanState scan_state, bool active_branch); @@ -330,8 +330,8 @@ exec_command(const char *cmd, status = exec_command_connect(scan_state, active_branch); else if (strcmp(cmd, "cd") == 0) status = exec_command_cd(scan_state, active_branch, cmd); - else if (strcmp(cmd, "close") == 0) - status = exec_command_close(scan_state, active_branch, cmd); + else if (strcmp(cmd, "close_prepared") == 0) + status = exec_command_close_prepared(scan_state, active_branch, cmd); else if (strcmp(cmd, "conninfo") == 0) status = exec_command_conninfo(scan_state, active_branch); else if (pg_strcasecmp(cmd, "copy") == 0) @@ -728,10 +728,10 @@ exec_command_cd(PsqlScanState scan_state, bool active_branch, const char *cmd) } /* - * \close -- close a previously prepared statement + * \close_prepared -- close a previously prepared statement */ static backslashResult -exec_command_close(PsqlScanState scan_state, bool active_branch, const char *cmd) +exec_command_close_prepared(PsqlScanState scan_state, bool active_branch, const char *cmd) { backslashResult status = PSQL_CMD_SKIP_LINE; diff --git a/src/bin/psql/common.c b/src/bin/psql/common.c index b53cd8ab69880..d2c0a49c46c04 100644 --- a/src/bin/psql/common.c +++ b/src/bin/psql/common.c @@ -2628,7 +2628,7 @@ clean_extended_state(void) switch (pset.send_mode) { - case PSQL_SEND_EXTENDED_CLOSE: /* \close */ + case PSQL_SEND_EXTENDED_CLOSE: /* \close_prepared */ free(pset.stmtName); break; case PSQL_SEND_EXTENDED_PARSE: /* \parse */ diff --git a/src/bin/psql/help.c b/src/bin/psql/help.c index db6adec8b692b..a2e009ab9bea7 100644 --- a/src/bin/psql/help.c +++ b/src/bin/psql/help.c @@ -331,7 +331,8 @@ slashUsage(unsigned short int pager) HELP0(" \\bind [PARAM]... 
set query parameters\n"); HELP0(" \\bind_named STMT_NAME [PARAM]...\n" " set query parameters for an existing prepared statement\n"); - HELP0(" \\close STMT_NAME close an existing prepared statement\n"); + HELP0(" \\close_prepared STMT_NAME\n" + " close an existing prepared statement\n"); HELP0(" \\endpipeline exit pipeline mode\n"); HELP0(" \\flush flush output data to the server\n"); HELP0(" \\flushrequest send request to the server to flush its output buffer\n"); diff --git a/src/bin/psql/tab-complete.in.c b/src/bin/psql/tab-complete.in.c index 2c0b4f28c14dd..908eef97c6e28 100644 --- a/src/bin/psql/tab-complete.in.c +++ b/src/bin/psql/tab-complete.in.c @@ -1875,7 +1875,7 @@ psql_completion(const char *text, int start, int end) static const char *const backslash_commands[] = { "\\a", "\\bind", "\\bind_named", - "\\connect", "\\conninfo", "\\C", "\\cd", "\\close", "\\copy", + "\\connect", "\\conninfo", "\\C", "\\cd", "\\close_prepared", "\\copy", "\\copyright", "\\crosstabview", "\\d", "\\da", "\\dA", "\\dAc", "\\dAf", "\\dAo", "\\dAp", "\\db", "\\dc", "\\dconfig", "\\dC", "\\dd", "\\ddp", "\\dD", diff --git a/src/test/regress/expected/psql.out b/src/test/regress/expected/psql.out index cf48ae6d0c2ee..236eba2540e9d 100644 --- a/src/test/regress/expected/psql.out +++ b/src/test/regress/expected/psql.out @@ -160,12 +160,12 @@ LINE 1: SELECT $1, $2 foo4 | bar4 (1 row) --- \close (extended query protocol) -\close -\close: missing required argument -\close '' -\close stmt2 -\close stmt2 +-- \close_prepared (extended query protocol) +\close_prepared +\close_prepared: missing required argument +\close_prepared '' +\close_prepared stmt2 +\close_prepared stmt2 SELECT name, statement FROM pg_prepared_statements ORDER BY name; name | statement -------+---------------- @@ -4666,7 +4666,7 @@ bar 'bar' "bar" \C arg1 \c arg1 arg2 arg3 arg4 \cd arg1 - \close stmt1 + \close_prepared stmt1 \conninfo \copy arg1 arg2 arg3 arg4 arg5 arg6 \copyright diff --git a/src/test/regress/expected/psql_pipeline.out b/src/test/regress/expected/psql_pipeline.out index e78e6bfa0ad35..a0816fb10b68e 100644 --- a/src/test/regress/expected/psql_pipeline.out +++ b/src/test/regress/expected/psql_pipeline.out @@ -564,7 +564,7 @@ SELECT $1 \bind \sendpipeline SELECT $1 \bind 1 \sendpipeline SELECT $1 \parse a \bind_named a 1 \sendpipeline -\close a +\close_prepared a \flushrequest \getresults ERROR: bind message supplies 0 parameters, but prepared statement "" requires 1 @@ -572,7 +572,7 @@ ERROR: bind message supplies 0 parameters, but prepared statement "" requires 1 SELECT $1 \bind 1 \sendpipeline SELECT $1 \parse a \bind_named a 1 \sendpipeline -\close a +\close_prepared a -- Sync allows pipeline to recover. \syncpipeline \getresults @@ -580,7 +580,7 @@ Pipeline aborted, command did not run SELECT $1 \bind 1 \sendpipeline SELECT $1 \parse a \bind_named a 1 \sendpipeline -\close a +\close_prepared a \flushrequest \getresults ?column? 
diff --git a/src/test/regress/sql/psql.sql b/src/test/regress/sql/psql.sql index 1a8a83462f022..e2e3124543978 100644 --- a/src/test/regress/sql/psql.sql +++ b/src/test/regress/sql/psql.sql @@ -68,11 +68,11 @@ SELECT $1, $2 \parse stmt3 -- Multiple \g calls mean multiple executions \bind_named stmt2 'foo3' \g \bind_named stmt3 'foo4' 'bar4' \g --- \close (extended query protocol) -\close -\close '' -\close stmt2 -\close stmt2 +-- \close_prepared (extended query protocol) +\close_prepared +\close_prepared '' +\close_prepared stmt2 +\close_prepared stmt2 SELECT name, statement FROM pg_prepared_statements ORDER BY name; -- \bind (extended query protocol) @@ -1035,7 +1035,7 @@ select \if false \\ (bogus \else \\ 42 \endif \\ forty_two; \C arg1 \c arg1 arg2 arg3 arg4 \cd arg1 - \close stmt1 + \close_prepared stmt1 \conninfo \copy arg1 arg2 arg3 arg4 arg5 arg6 \copyright diff --git a/src/test/regress/sql/psql_pipeline.sql b/src/test/regress/sql/psql_pipeline.sql index 5945eca1ef76c..6788dceee2e90 100644 --- a/src/test/regress/sql/psql_pipeline.sql +++ b/src/test/regress/sql/psql_pipeline.sql @@ -306,21 +306,21 @@ SELECT $1 \bind \sendpipeline SELECT $1 \bind 1 \sendpipeline SELECT $1 \parse a \bind_named a 1 \sendpipeline -\close a +\close_prepared a \flushrequest \getresults -- Pipeline is aborted. SELECT $1 \bind 1 \sendpipeline SELECT $1 \parse a \bind_named a 1 \sendpipeline -\close a +\close_prepared a -- Sync allows pipeline to recover. \syncpipeline \getresults SELECT $1 \bind 1 \sendpipeline SELECT $1 \parse a \bind_named a 1 \sendpipeline -\close a +\close_prepared a \flushrequest \getresults \endpipeline From 6531f36283f05219464dd2084a17aab5747f7de8 Mon Sep 17 00:00:00 2001 From: Amit Kapila Date: Tue, 24 Jun 2025 09:51:07 +0530 Subject: [PATCH 055/181] Fix missing comment update in 1462aad2e4. Remove the part of comment that says we don't allow toggling two_phase option as that is supported in commit 1462aad2e4. Author: Hayato Kuroda Author: Amit Kapila Discussion: https://postgr.es/m/OSCPR01MB1496656725F3951AEE8749EBDF579A@OSCPR01MB14966.jpnprd01.prod.outlook.com --- src/backend/replication/logical/worker.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c index a23262957acb5..fd11805a44cf9 100644 --- a/src/backend/replication/logical/worker.c +++ b/src/backend/replication/logical/worker.c @@ -109,13 +109,6 @@ * If ever a user needs to be aware of the tri-state value, they can fetch it * from the pg_subscription catalog (see column subtwophasestate). * - * We don't allow to toggle two_phase option of a subscription because it can - * lead to an inconsistent replica. Consider, initially, it was on and we have - * received some prepare then we turn it off, now at commit time the server - * will send the entire transaction data along with the commit. With some more - * analysis, we can allow changing this option from off to on but not sure if - * that alone would be useful. - * * Finally, to avoid problems mentioned in previous paragraphs from any * subsequent (not READY) tablesyncs (need to toggle two_phase option from 'on' * to 'off' and then again back to 'on') there is a restriction for From 0cb5145a32c1a867a157e18493e24930338f5d6f Mon Sep 17 00:00:00 2001 From: Fujii Masao Date: Tue, 24 Jun 2025 14:21:10 +0900 Subject: [PATCH 056/181] doc: Fix incorrect UUID index entry in function documentation. 
Previously, the UUID functions documentation defined the "UUID" index entry to link to the UUID data type page, even though that entry already exists there. Instead, the UUID functions page should define its own index entry linking to itself. This commit updates the UUID index entry in the UUID functions documentation to point to the correct section, improving navigation and avoiding duplication. Back-patch to all supported versions. Author: Fujii Masao Reviewed-by: Masahiko Sawada Reviewed-by: Daniel Gustafsson Discussion: https://postgr.es/m/f33e0493-5773-4296-87c5-7ce459054cfe@oss.nttdata.com Backpatch-through: 13 --- doc/src/sgml/func.sgml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/src/sgml/func.sgml b/doc/src/sgml/func.sgml index a6d79765c1a73..224d4fe5a9f95 100644 --- a/doc/src/sgml/func.sgml +++ b/doc/src/sgml/func.sgml @@ -14384,7 +14384,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple UUID Functions - + UUID generating From 49fe1c83ecf3474776ea9d0db47ae5644d29b67b Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Tue, 24 Jun 2025 11:30:49 +0200 Subject: [PATCH 057/181] Fix virtual generated column type checking for ALTER TABLE Virtual generated columns have some special checks in CheckAttributeType(), mainly to check that domains are not used. But this check was only applied during CREATE TABLE, not during ALTER TABLE. This fixes that. Reported-by: jian he Discussion: https://www.postgresql.org/message-id/CACJufxE0KHR__-h=zHXbhSNZXMMs4LYo4-dbj8H3YoStYBok1Q@mail.gmail.com --- src/backend/commands/tablecmds.c | 4 ++-- src/test/regress/expected/generated_virtual.out | 6 ++++++ src/test/regress/sql/generated_virtual.sql | 5 +++++ 3 files changed, 13 insertions(+), 2 deletions(-) diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c index ea96947d81305..074ddb6b9cd17 100644 --- a/src/backend/commands/tablecmds.c +++ b/src/backend/commands/tablecmds.c @@ -7374,7 +7374,7 @@ ATExecAddColumn(List **wqueue, AlteredTableInfo *tab, Relation rel, /* make sure datatype is legal for a column */ CheckAttributeType(NameStr(attribute->attname), attribute->atttypid, attribute->attcollation, list_make1_oid(rel->rd_rel->reltype), - 0); + (attribute->attgenerated == ATTRIBUTE_GENERATED_VIRTUAL ? CHKATYPE_IS_VIRTUAL : 0)); InsertPgAttributeTuples(attrdesc, tupdesc, myrelid, NULL, NULL); @@ -14426,7 +14426,7 @@ ATPrepAlterColumnType(List **wqueue, /* make sure datatype is legal for a column */ CheckAttributeType(colName, targettype, targetcollid, list_make1_oid(rel->rd_rel->reltype), - 0); + (attTup->attgenerated == ATTRIBUTE_GENERATED_VIRTUAL ? 
CHKATYPE_IS_VIRTUAL : 0));
 
 	if (attTup->attgenerated == ATTRIBUTE_GENERATED_VIRTUAL)
 	{
diff --git a/src/test/regress/expected/generated_virtual.out b/src/test/regress/expected/generated_virtual.out
index 6300e7c1d96e1..ab35a77477445 100644
--- a/src/test/regress/expected/generated_virtual.out
+++ b/src/test/regress/expected/generated_virtual.out
@@ -800,6 +800,12 @@ CREATE TABLE gtest24r (a int PRIMARY KEY, b gtestdomain1range GENERATED ALWAYS A
 ERROR: virtual generated column "b" cannot have a domain type
 --INSERT INTO gtest24r (a) VALUES (4); -- ok
 --INSERT INTO gtest24r (a) VALUES (6); -- error
+CREATE TABLE gtest24at (a int PRIMARY KEY);
+ALTER TABLE gtest24at ADD COLUMN b gtestdomain1 GENERATED ALWAYS AS (a * 2) VIRTUAL; -- error
+ERROR: virtual generated column "b" cannot have a domain type
+CREATE TABLE gtest24ata (a int PRIMARY KEY, b int GENERATED ALWAYS AS (a * 2) VIRTUAL);
+ALTER TABLE gtest24ata ALTER COLUMN b TYPE gtestdomain1; -- error
+ERROR: virtual generated column "b" cannot have a domain type
 CREATE DOMAIN gtestdomainnn AS int CHECK (VALUE IS NOT NULL);
 CREATE TABLE gtest24nn (a int, b gtestdomainnn GENERATED ALWAYS AS (a * 2) VIRTUAL);
 ERROR: virtual generated column "b" cannot have a domain type
diff --git a/src/test/regress/sql/generated_virtual.sql b/src/test/regress/sql/generated_virtual.sql
index b4eedeee2fb27..9011c9d26745f 100644
--- a/src/test/regress/sql/generated_virtual.sql
+++ b/src/test/regress/sql/generated_virtual.sql
@@ -453,6 +453,11 @@ CREATE TABLE gtest24r (a int PRIMARY KEY, b gtestdomain1range GENERATED ALWAYS A
 --INSERT INTO gtest24r (a) VALUES (4); -- ok
 --INSERT INTO gtest24r (a) VALUES (6); -- error
 
+CREATE TABLE gtest24at (a int PRIMARY KEY);
+ALTER TABLE gtest24at ADD COLUMN b gtestdomain1 GENERATED ALWAYS AS (a * 2) VIRTUAL; -- error
+CREATE TABLE gtest24ata (a int PRIMARY KEY, b int GENERATED ALWAYS AS (a * 2) VIRTUAL);
+ALTER TABLE gtest24ata ALTER COLUMN b TYPE gtestdomain1; -- error
+
 CREATE DOMAIN gtestdomainnn AS int CHECK (VALUE IS NOT NULL);
 CREATE TABLE gtest24nn (a int, b gtestdomainnn GENERATED ALWAYS AS (a * 2) VIRTUAL);
 --INSERT INTO gtest24nn (a) VALUES (4); -- ok

From 054beebb7c9fb94ed17bda9381dbd204f32adf42 Mon Sep 17 00:00:00 2001
From: Daniel Gustafsson
Date: Tue, 24 Jun 2025 11:49:37 +0200
Subject: [PATCH 058/181] doc: Remove dead link to NewbieDoc Docbook Guide

The link returns 404 and no replacement is available in the project on
Sourceforge where the content once was. Since we already link to
resources for both beginner and experienced docs hackers, remove the
dead link.

Backpatch to all supported versions as the link was added in 8.1.

Author: Daniel Gustafsson
Reviewed-by: Magnus Hagander
Reviewed-by: Michael Paquier
Reported-by: jian he
Discussion: https://postgr.es/m/CACJufxH=YzQPDOe+2WuYZ7seD-BOyjCBmP6JiErpoSiVZWDRnw@mail.gmail.com
Backpatch-through: 13
---
 doc/src/sgml/docguide.sgml | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/doc/src/sgml/docguide.sgml b/doc/src/sgml/docguide.sgml
index db4bcce56eac6..7b61b4841aa03 100644
--- a/doc/src/sgml/docguide.sgml
+++ b/doc/src/sgml/docguide.sgml
@@ -60,9 +60,7 @@
    maintained by the OASIS group.
    The official DocBook site has good
    introductory and reference documentation and
-   a complete O'Reilly book for your online reading pleasure. The
-
-   NewbieDoc Docbook Guide is very helpful for beginners.
+   a complete O'Reilly book for your online reading pleasure.
The FreeBSD Documentation Project also uses DocBook and has some good information, including a number of style guidelines that might be From 303ba0573ce656b98620133cd17418dcd217318f Mon Sep 17 00:00:00 2001 From: Melanie Plageman Date: Tue, 24 Jun 2025 09:20:16 -0400 Subject: [PATCH 059/181] Test that vacuum removes tuples older than OldestXmin If vacuum fails to prune a tuple killed before OldestXmin, it will decide to freeze its xmax and later error out in pre-freeze checks. Add a test reproducing this scenario to the recovery suite which creates a table on a primary, updates the table to generate dead tuples for vacuum, and then, during the vacuum, uses a replica to force GlobalVisState->maybe_needed on the primary to move backwards and precede the value of OldestXmin set at the beginning of vacuuming the table. This test is coverage for a case fixed in 83c39a1f7f3. The test was originally committed to master in aa607980aee but later reverted in efcbb76efe4 due to test instability. The test requires multiple index passes. In Postgres 17+, vacuum uses a TID store for the dead TIDs that is very space efficient. With the old minimum maintenance_work_mem of 1 MB, it required a large number of dead rows to generate enough dead TIDs to force multiple index vacuuming passes. Once the source code changes were made to allow a minimum maintenance_work_mem value of 64kB, the test could be made much faster and more stable. Author: Melanie Plageman Reviewed-by: John Naylor Reviewed-by: Peter Geoghegan Discussion: https://postgr.es/m/CAAKRu_ZJBkidusDut6i%3DbDCiXzJEp93GC1%2BNFaZt4eqanYF3Kw%40mail.gmail.com Backpatch-through: 17 --- src/test/recovery/meson.build | 3 +- .../recovery/t/048_vacuum_horizon_floor.pl | 278 ++++++++++++++++++ 2 files changed, 280 insertions(+), 1 deletion(-) create mode 100644 src/test/recovery/t/048_vacuum_horizon_floor.pl diff --git a/src/test/recovery/meson.build b/src/test/recovery/meson.build index 28fd9ae8dda0d..6e78ff1a030b3 100644 --- a/src/test/recovery/meson.build +++ b/src/test/recovery/meson.build @@ -54,7 +54,8 @@ tests += { 't/043_no_contrecord_switch.pl', 't/044_invalidate_inactive_slots.pl', 't/045_archive_restartpoint.pl', - 't/047_checkpoint_physical_slot.pl' + 't/047_checkpoint_physical_slot.pl', + 't/048_vacuum_horizon_floor.pl' ], }, } diff --git a/src/test/recovery/t/048_vacuum_horizon_floor.pl b/src/test/recovery/t/048_vacuum_horizon_floor.pl new file mode 100644 index 0000000000000..d48a6ef7e0f24 --- /dev/null +++ b/src/test/recovery/t/048_vacuum_horizon_floor.pl @@ -0,0 +1,278 @@ +use strict; +use warnings; +use PostgreSQL::Test::Cluster; +use Test::More; + +# Test that vacuum prunes away all dead tuples killed before OldestXmin +# +# This test creates a table on a primary, updates the table to generate dead +# tuples for vacuum, and then, during the vacuum, uses the replica to force +# GlobalVisState->maybe_needed on the primary to move backwards and precede +# the value of OldestXmin set at the beginning of vacuuming the table. + +# Set up nodes +my $node_primary = PostgreSQL::Test::Cluster->new('primary'); +$node_primary->init(allows_streaming => 'physical'); + +# io_combine_limit is set to 1 to avoid pinning more than one buffer at a time +# to ensure test determinism. 
+$node_primary->append_conf( + 'postgresql.conf', qq[ +hot_standby_feedback = on +autovacuum = off +log_min_messages = INFO +maintenance_work_mem = 64 +io_combine_limit = 1 +]); +$node_primary->start; + +my $node_replica = PostgreSQL::Test::Cluster->new('standby'); + +$node_primary->backup('my_backup'); +$node_replica->init_from_backup($node_primary, 'my_backup', + has_streaming => 1); + +$node_replica->start; + +my $test_db = "test_db"; +$node_primary->safe_psql('postgres', "CREATE DATABASE $test_db"); + +# Save the original connection info for later use +my $orig_conninfo = $node_primary->connstr(); + +my $table1 = "vac_horizon_floor_table"; + +# Long-running Primary Session A +my $psql_primaryA = + $node_primary->background_psql($test_db, on_error_stop => 1); + +# Long-running Primary Session B +my $psql_primaryB = + $node_primary->background_psql($test_db, on_error_stop => 1); + +# Our test relies on two rounds of index vacuuming for reasons elaborated +# later. To trigger two rounds of index vacuuming, we must fill up the +# TIDStore with dead items partway through a vacuum of the table. The number +# of rows is just enough to ensure we exceed maintenance_work_mem on all +# supported platforms, while keeping test runtime as short as we can. +my $nrows = 2000; + +# Because vacuum's first pass, pruning, is where we use the GlobalVisState to +# check tuple visibility, GlobalVisState->maybe_needed must move backwards +# during pruning before checking the visibility for a tuple which would have +# been considered HEAPTUPLE_DEAD prior to maybe_needed moving backwards but +# HEAPTUPLE_RECENTLY_DEAD compared to the new, older value of maybe_needed. +# +# We must not only force the horizon on the primary to move backwards but also +# force the vacuuming backend's GlobalVisState to be updated. GlobalVisState +# is forced to update during index vacuuming. +# +# _bt_pendingfsm_finalize() calls GetOldestNonRemovableTransactionId() at the +# end of a round of index vacuuming, updating the backend's GlobalVisState +# and, in our case, moving maybe_needed backwards. +# +# Then vacuum's first (pruning) pass will continue and pruning will find our +# later inserted and updated tuple HEAPTUPLE_RECENTLY_DEAD when compared to +# maybe_needed but HEAPTUPLE_DEAD when compared to OldestXmin. +# +# Thus, we must force at least two rounds of index vacuuming to ensure that +# some tuple visibility checks will happen after a round of index vacuuming. +# To accomplish this, we set maintenance_work_mem to its minimum value and +# insert and delete enough rows that we force at least one round of index +# vacuuming before getting to a dead tuple which was killed after the standby +# is disconnected. +$node_primary->safe_psql($test_db, qq[ + CREATE TABLE ${table1}(col1 int) + WITH (autovacuum_enabled=false, fillfactor=10); + INSERT INTO $table1 VALUES(7); + INSERT INTO $table1 SELECT generate_series(1, $nrows) % 3; + CREATE INDEX on ${table1}(col1); + DELETE FROM $table1 WHERE col1 = 0; + INSERT INTO $table1 VALUES(7); +]); + +# We will later move the primary forward while the standby is disconnected. +# For now, however, there is no reason not to wait for the standby to catch +# up. +my $primary_lsn = $node_primary->lsn('flush'); +$node_primary->wait_for_catchup($node_replica, 'replay', $primary_lsn); + +# Test that the WAL receiver is up and running. 
+$node_replica->poll_query_until($test_db, qq[
+	SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);] , 't');
+
+# Set primary_conninfo to something invalid on the replica and reload the
+# config. Once the config is reloaded, the startup process will force the WAL
+# receiver to restart and it will be unable to reconnect because of the
+# invalid connection information.
+$node_replica->safe_psql($test_db, qq[
+		ALTER SYSTEM SET primary_conninfo = '';
+		SELECT pg_reload_conf();
+	]);
+
+# Wait until the WAL receiver has shut down and been unable to start up again.
+$node_replica->poll_query_until($test_db, qq[
+	SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);] , 'f');
+
+# Now insert and update a tuple which will be visible to the vacuum on the
+# primary but which will have xmax newer than the oldest xmin on the standby
+# that was recently disconnected.
+my $res = $psql_primaryA->query_safe(
+	qq[
+		INSERT INTO $table1 VALUES (99);
+		UPDATE $table1 SET col1 = 100 WHERE col1 = 99;
+		SELECT 'after_update';
+	]
+);
+
+# Make sure the UPDATE finished
+like($res, qr/^after_update$/m, "UPDATE occurred on primary session A");
+
+# Open a cursor on the primary whose pin will keep VACUUM from getting a
+# cleanup lock on the first page of the relation. We want VACUUM to be able to
+# start, calculate initial values for OldestXmin and GlobalVisState and then
+# be unable to proceed with pruning our dead tuples. This will allow us to
+# reconnect the standby and push the horizon back before we start actual
+# pruning and vacuuming.
+my $primary_cursor1 = "vac_horizon_floor_cursor1";
+
+# The first value inserted into the table was a 7, so FETCH FORWARD should
+# return a 7. That's how we know the cursor has a pin.
+# Disable index scans so the cursor pins heap pages and not index pages.
+$res = $psql_primaryB->query_safe(
+	qq[
+		BEGIN;
+		SET enable_bitmapscan = off;
+		SET enable_indexscan = off;
+		SET enable_indexonlyscan = off;
+		DECLARE $primary_cursor1 CURSOR FOR SELECT * FROM $table1 WHERE col1 = 7;
+		FETCH $primary_cursor1;
+	]
+);
+
+is($res, 7, qq[Cursor query returned $res. Expected value 7.]);
+
+# Get the PID of the session which will run the VACUUM FREEZE so that we can
+# use it to filter pg_stat_activity later.
+my $vacuum_pid = $psql_primaryA->query_safe("SELECT pg_backend_pid();");
+
+# Now start a VACUUM FREEZE on the primary. It will call vacuum_get_cutoffs()
+# and establish values of OldestXmin and GlobalVisState which are newer than
+# all of our dead tuples. Then it will be unable to get a cleanup lock to
+# start pruning, so it will hang.
+#
+# We use VACUUM FREEZE because it will wait for a cleanup lock instead of
+# skipping the page pinned by the cursor. Note that this works because the
+# target tuple's xmax precedes OldestXmin, which ensures that
+# lazy_scan_noprune() will return false and we will wait for the cleanup lock.
+#
+# Disable any prefetching, parallelism, or other concurrent I/O by vacuum. The
+# pages of the heap must be processed in order by a single worker to ensure
+# test stability (PARALLEL 0 shouldn't be necessary but guards against the
+# possibility of parallel heap vacuuming).
+$psql_primaryA->{stdin} .= qq[
+		SET maintenance_io_concurrency = 0;
+		VACUUM (VERBOSE, FREEZE, PARALLEL 0) $table1;
+		\\echo VACUUM
+	];
+
+# Make sure the VACUUM command makes it to the server.
+$psql_primaryA->{run}->pump_nb();
+
+# Make sure that the VACUUM has already called vacuum_get_cutoffs() and is
+# just waiting on the lock to start vacuuming.
We don't want the standby to
+# re-establish a connection to the primary and push the horizon back until
+# we've saved initial values in GlobalVisState and calculated OldestXmin.
+$node_primary->poll_query_until($test_db,
+	qq[
+	SELECT count(*) >= 1 FROM pg_stat_activity
+		WHERE pid = $vacuum_pid
+		AND wait_event = 'BufferPin';
+	],
+	't');
+
+# Ensure the WAL receiver is still not active on the replica.
+$node_replica->poll_query_until($test_db, qq[
+	SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);] , 'f');
+
+# Allow the WAL receiver connection to re-establish.
+$node_replica->safe_psql(
+	$test_db, qq[
+		ALTER SYSTEM SET primary_conninfo = '$orig_conninfo';
+		SELECT pg_reload_conf();
+	]);
+
+# Ensure the new WAL receiver has connected.
+$node_replica->poll_query_until($test_db, qq[
+	SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);] , 't');
+
+# Once the WAL sender is shown on the primary, the replica should have
+# connected with the primary and pushed the horizon backward. Primary Session
+# A won't see that until the VACUUM FREEZE proceeds and does its first round
+# of index vacuuming.
+$node_primary->poll_query_until($test_db, qq[
+	SELECT EXISTS (SELECT * FROM pg_stat_replication);] , 't');
+
+# Move the cursor forward to the next 7. We inserted the 7 much later, so
+# advancing the cursor should allow vacuum to proceed vacuuming most pages of
+# the relation. Because we set maintenance_work_mem sufficiently low, we
+# expect that a round of index vacuuming has happened and that the vacuum is
+# now waiting for the cursor to release its pin on the last page of the
+# relation.
+$res = $psql_primaryB->query_safe("FETCH $primary_cursor1");
+is($res, 7,
+	qq[Cursor query returned $res from second fetch. Expected value 7.]);
+
+# Prevent the test from incorrectly passing by confirming that we did indeed
+# do a pass of index vacuuming.
+$node_primary->poll_query_until($test_db, qq[
+	SELECT index_vacuum_count > 0
+	FROM pg_stat_progress_vacuum
+	WHERE datname='$test_db' AND relid::regclass = '$table1'::regclass;
+	] , 't');
+
+# Commit the transaction with the open cursor so that the VACUUM can finish.
+$psql_primaryB->query_until(
+	qr/^commit$/m,
+	qq[
+		COMMIT;
+		\\echo commit
+	]
+);
+
+# VACUUM proceeds with pruning and does a visibility check on each tuple. In
+# older versions of Postgres, pruning found our final dead tuple
+# non-removable (HEAPTUPLE_RECENTLY_DEAD) since its xmax is after the new
+# value of maybe_needed. Then heap_prepare_freeze_tuple() would decide the
+# tuple xmax should be frozen because it precedes OldestXmin. Vacuum would
+# then error out in heap_pre_freeze_checks() with "cannot freeze committed
+# xmax". This was fixed by changing pruning to treat all
+# HEAPTUPLE_RECENTLY_DEAD tuples whose xmaxes precede OldestXmin as
+# HEAPTUPLE_DEAD and to remove them.
+
+# With the fix, VACUUM should finish successfully, incrementing the table
+# vacuum_count.
+$node_primary->poll_query_until($test_db,
+	qq[
+	SELECT vacuum_count > 0
+	FROM pg_stat_all_tables WHERE relname = '${table1}';
+	]
+	, 't');
+
+$primary_lsn = $node_primary->lsn('flush');
+
+# Make sure something causes us to flush
+$node_primary->safe_psql($test_db, "INSERT INTO $table1 VALUES (1);");
+
+# Nothing on the replica should cause a recovery conflict, so this should
+# finish successfully.
+$node_primary->wait_for_catchup($node_replica, 'replay', $primary_lsn); + +## Shut down psqls +$psql_primaryA->quit; +$psql_primaryB->quit; + +$node_replica->stop(); +$node_primary->stop(); + +done_testing(); From debad29d22152d7fe4c4e671090e20238647c460 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=81lvaro=20Herrera?= Date: Tue, 24 Jun 2025 19:36:12 +0200 Subject: [PATCH 060/181] Improve jumble squashing through CoerceViaIO and RelabelType There's no principled reason for query jumbling to only remove the first layer of RelabelType and CoerceViaIO. Change it to see through as many layers as there are. --- .../pg_stat_statements/expected/squashing.out | 40 +++++++++++-------- contrib/pg_stat_statements/sql/squashing.sql | 9 +++-- src/backend/nodes/queryjumblefuncs.c | 27 ++++++++----- 3 files changed, 45 insertions(+), 31 deletions(-) diff --git a/contrib/pg_stat_statements/expected/squashing.out b/contrib/pg_stat_statements/expected/squashing.out index 7b935d464ecff..e978564ad7299 100644 --- a/contrib/pg_stat_statements/expected/squashing.out +++ b/contrib/pg_stat_statements/expected/squashing.out @@ -645,7 +645,7 @@ SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C"; SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1 (2 rows) --- Multiple CoerceViaIO wrapping a constant. Will not squash +-- Multiple CoerceViaIO are squashed SELECT pg_stat_statements_reset() IS NOT NULL AS t; t --- @@ -661,10 +661,10 @@ SELECT WHERE 1 = ANY(ARRAY[1::text::int::text::int, 1::text::int::text::int]); (1 row) SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C"; - query | calls --------------------------------------------------------------------------+------- - SELECT WHERE $1 IN ($2::text::int::text::int, $3::text::int::text::int) | 2 - SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1 + query | calls +----------------------------------------------------+------- + SELECT WHERE $1 IN ($2 /*, ... */) | 2 + SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1 (2 rows) -- @@ -676,7 +676,7 @@ SELECT pg_stat_statements_reset() IS NOT NULL AS t; t (1 row) --- if there is only one level of RelabelType, the list will be squashable +-- However many layers of RelabelType there are, the list will be squashable. SELECT * FROM test_squash WHERE id IN (1::oid, 2::oid, 3::oid, 4::oid, 5::oid, 6::oid, 7::oid, 8::oid, 9::oid); id | data @@ -689,8 +689,6 @@ SELECT ARRAY[1::oid, 2::oid, 3::oid, 4::oid, 5::oid, 6::oid, 7::oid, 8::oid, 9:: {1,2,3,4,5,6,7,8,9} (1 row) --- if there is at least one element with multiple levels of RelabelType, --- the list will not be squashable SELECT * FROM test_squash WHERE id IN (1::oid, 2::oid::int::oid); id | data ----+------ @@ -701,15 +699,25 @@ SELECT * FROM test_squash WHERE id = ANY(ARRAY[1::oid, 2::oid::int::oid]); ----+------ (0 rows) +-- RelabelType together with CoerceViaIO is also squashable +SELECT * FROM test_squash WHERE id = ANY(ARRAY[1::oid::text::int::oid, 2::oid::int::oid]); + id | data +----+------ +(0 rows) + +SELECT * FROM test_squash WHERE id = ANY(ARRAY[1::text::int::oid, 2::oid::int::oid]); + id | data +----+------ +(0 rows) + SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C"; - query | calls ---------------------------------------------------------------------+------- - SELECT * FROM test_squash WHERE id IN +| 1 - ($1 /*, ... */) | - SELECT * FROM test_squash WHERE id IN ($1::oid, $2::oid::int::oid) | 2 - SELECT ARRAY[$1 /*, ... 
*/] | 1 - SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1 -(4 rows) + query | calls +----------------------------------------------------+------- + SELECT * FROM test_squash WHERE id IN +| 5 + ($1 /*, ... */) | + SELECT ARRAY[$1 /*, ... */] | 1 + SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1 +(3 rows) -- -- edge cases diff --git a/contrib/pg_stat_statements/sql/squashing.sql b/contrib/pg_stat_statements/sql/squashing.sql index bd3243ec9cd85..946e149831c94 100644 --- a/contrib/pg_stat_statements/sql/squashing.sql +++ b/contrib/pg_stat_statements/sql/squashing.sql @@ -234,7 +234,7 @@ SELECT * FROM test_squash_jsonb WHERE data = ANY(ARRAY (SELECT '"10"')::jsonb]); SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C"; --- Multiple CoerceViaIO wrapping a constant. Will not squash +-- Multiple CoerceViaIO are squashed SELECT pg_stat_statements_reset() IS NOT NULL AS t; SELECT WHERE 1 IN (1::text::int::text::int, 1::text::int::text::int); SELECT WHERE 1 = ANY(ARRAY[1::text::int::text::int, 1::text::int::text::int]); @@ -245,14 +245,15 @@ SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C"; -- SELECT pg_stat_statements_reset() IS NOT NULL AS t; --- if there is only one level of RelabelType, the list will be squashable +-- However many layers of RelabelType there are, the list will be squashable. SELECT * FROM test_squash WHERE id IN (1::oid, 2::oid, 3::oid, 4::oid, 5::oid, 6::oid, 7::oid, 8::oid, 9::oid); SELECT ARRAY[1::oid, 2::oid, 3::oid, 4::oid, 5::oid, 6::oid, 7::oid, 8::oid, 9::oid]; --- if there is at least one element with multiple levels of RelabelType, --- the list will not be squashable SELECT * FROM test_squash WHERE id IN (1::oid, 2::oid::int::oid); SELECT * FROM test_squash WHERE id = ANY(ARRAY[1::oid, 2::oid::int::oid]); +-- RelabelType together with CoerceViaIO is also squashable +SELECT * FROM test_squash WHERE id = ANY(ARRAY[1::oid::text::int::oid, 2::oid::int::oid]); +SELECT * FROM test_squash WHERE id = ANY(ARRAY[1::text::int::oid, 2::oid::int::oid]); SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C"; -- diff --git a/src/backend/nodes/queryjumblefuncs.c b/src/backend/nodes/queryjumblefuncs.c index fb33e6931ada3..62e3a677cd191 100644 --- a/src/backend/nodes/queryjumblefuncs.c +++ b/src/backend/nodes/queryjumblefuncs.c @@ -414,7 +414,7 @@ RecordConstLocation(JumbleState *jstate, int location, int len) * Subroutine for _jumbleElements: Verify a few simple cases where we can * deduce that the expression is a constant: * - * - Ignore a possible wrapping RelabelType and CoerceViaIO. + * - See through any wrapping RelabelType and CoerceViaIO layers. * - If it's a FuncExpr, check that the function is a builtin * cast and its arguments are Const. * - Otherwise test if the expression is a simple Const. 
@@ -422,14 +422,22 @@ RecordConstLocation(JumbleState *jstate, int location, int len) static bool IsSquashableConstant(Node *element) { - if (IsA(element, RelabelType)) - element = (Node *) ((RelabelType *) element)->arg; - - if (IsA(element, CoerceViaIO)) - element = (Node *) ((CoerceViaIO *) element)->arg; - +restart: switch (nodeTag(element)) { + case T_RelabelType: + /* Unwrap RelabelType */ + element = (Node *) ((RelabelType *) element)->arg; + goto restart; + + case T_CoerceViaIO: + /* Unwrap CoerceViaIO */ + element = (Node *) ((CoerceViaIO *) element)->arg; + goto restart; + + case T_Const: + return true; + case T_FuncExpr: { FuncExpr *func = (FuncExpr *) element; @@ -468,11 +476,8 @@ IsSquashableConstant(Node *element) } default: - if (!IsA(element, Const)) - return false; + return false; } - - return true; } /* From c2da1a5d6325a92d834c9cb036f65d362e4bfc3e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=81lvaro=20Herrera?= Date: Tue, 24 Jun 2025 19:36:32 +0200 Subject: [PATCH 061/181] Make query jumbling also squash PARAM_EXTERN params Commit 62d712ecfd94 made query jumbling squash lists of Consts as a single element, but there's no reason not to treat PARAM_EXTERN parameters the same. For these purposes, these values are indeed constants for any particular execution of a query. In particular, this should make list squashing more useful for applications using extended query protocol, which would use parameters extensively. A complication arises: if a query has both external parameters and squashable lists, then the parameter number used as placeholder for the squashed list might be inconsistent with regards to the parameter numbers used by the query literal. To reduce the surprise factor, all parameters are renumbered starting from 1 in that case. Author: Sami Imseih Author: Dmitry Dolgov <9erthalion6@gmail.com> Reviewed-by: Michael Paquier Discussion: https://postgr.es/m/CAA5RZ0tRXoPG2y6bMgBCWNDt0Tn=unRerbzYM=oW0syi1=C1OA@mail.gmail.com --- .../pg_stat_statements/expected/extended.out | 60 +++++-- .../pg_stat_statements/expected/squashing.out | 26 +-- .../pg_stat_statements/pg_stat_statements.c | 10 ++ contrib/pg_stat_statements/sql/extended.sql | 11 +- contrib/pg_stat_statements/sql/squashing.sql | 4 +- src/backend/nodes/queryjumblefuncs.c | 160 +++++++++++------- src/include/nodes/primnodes.h | 6 +- src/include/nodes/queryjumble.h | 19 ++- 8 files changed, 198 insertions(+), 98 deletions(-) diff --git a/contrib/pg_stat_statements/expected/extended.out b/contrib/pg_stat_statements/expected/extended.out index 7da308ba84f4f..1bfd0c1ca242f 100644 --- a/contrib/pg_stat_statements/expected/extended.out +++ b/contrib/pg_stat_statements/expected/extended.out @@ -69,13 +69,13 @@ SELECT calls, rows, query FROM pg_stat_statements ORDER BY query COLLATE "C"; (4 rows) -- Various parameter numbering patterns +-- Unique query IDs with parameter numbers switched. SELECT pg_stat_statements_reset() IS NOT NULL AS t; t --- t (1 row) --- Unique query IDs with parameter numbers switched. SELECT WHERE ($1::int, 7) IN ((8, $2::int), ($3::int, 9)) \bind '1' '2' '3' \g -- (0 rows) @@ -96,7 +96,24 @@ SELECT WHERE $3::int IN ($1::int, $2::int) \bind '1' '2' '3' \g -- (0 rows) +SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C"; + query | calls +--------------------------------------------------------------+------- + SELECT WHERE $1::int IN ($2 /*, ... */) | 1 + SELECT WHERE $1::int IN ($2 /*, ... */) | 1 + SELECT WHERE $1::int IN ($2 /*, ... 
*/) | 1
+ SELECT WHERE ($1::int, $4) IN (($5, $2::int), ($3::int, $6)) | 1
+ SELECT WHERE ($2::int, $4) IN (($5, $3::int), ($1::int, $6)) | 1
+ SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
+(6 rows)
+
 -- Two groups of two queries with the same query ID.
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+ t
+---
+ t
+(1 row)
+
 SELECT WHERE '1'::int IN ($1::int, '2'::int) \bind '1' \g
 --
 (1 row)
 
 SELECT WHERE '4'::int IN ($1::int, '5'::int) \bind '2' \g
 --
 (1 row)
 
@@ -114,15 +131,34 @@ SELECT WHERE $2::int IN ($1::int, '2'::int) \bind '3' '4' \g
 (0 rows)
 
 SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
- query | calls
--------------------------------------------------------------+-------
- SELECT WHERE $1::int IN ($2::int, $3::int) | 1
- SELECT WHERE $2::int IN ($1::int, $3::int) | 2
- SELECT WHERE $2::int IN ($1::int, $3::int) | 2
- SELECT WHERE $2::int IN ($3::int, $1::int) | 1
- SELECT WHERE $3::int IN ($1::int, $2::int) | 1
- SELECT WHERE ($1::int, $4) IN (($5, $2::int), ($3::int, $6)) | 1
- SELECT WHERE ($2::int, $4) IN (($5, $3::int), ($1::int, $6)) | 1
- SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
-(8 rows)
+ query | calls
+----------------------------------------------------+-------
+ SELECT WHERE $1::int IN ($2 /*, ... */) | 2
+ SELECT WHERE $1::int IN ($2 /*, ... */) | 2
+ SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
+(3 rows)
+
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+ t
+---
+ t
+(1 row)
+
+-- no squashable list, the parameter IDs are kept as-is
+SELECT WHERE $3 = $1 AND $2 = $4 \bind 1 2 1 2 \g
+--
+(1 row)
+
+-- squashable list, so the parameter IDs will be re-assigned
+SELECT WHERE 1 IN (1, 2, 3) AND $3 = $1 AND $2 = $4 \bind 1 2 1 2 \g
+--
+(1 row)
+
+SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
+ query | calls
+------------------------------------------------------------+-------
+ SELECT WHERE $1 IN ($2 /*, ... */) AND $3 = $4 AND $5 = $6 | 1
+ SELECT WHERE $3 = $1 AND $2 = $4 | 1
+ SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
+(3 rows)
diff --git a/contrib/pg_stat_statements/expected/squashing.out b/contrib/pg_stat_statements/expected/squashing.out
index e978564ad7299..f952f47ef7be1 100644
--- a/contrib/pg_stat_statements/expected/squashing.out
+++ b/contrib/pg_stat_statements/expected/squashing.out
@@ -103,7 +103,7 @@ SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
 SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
 (2 rows)
 
--- external parameters will not be squashed
+-- external parameters will be squashed
 SELECT pg_stat_statements_reset() IS NOT NULL AS t;
  t
 ---
  t
 (1 row)
 
@@ -123,14 +123,14 @@ SELECT * FROM test_squash WHERE id::text = ANY(ARRAY[$1, $2, $3, $4, $5]) \bind
 (0 rows)
 
 SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
- query | calls
----------------------------------------------------------------------------+-------
- SELECT * FROM test_squash WHERE id IN ($1, $2, $3, $4, $5) | 1
- SELECT * FROM test_squash WHERE id::text = ANY(ARRAY[$1, $2, $3, $4, $5]) | 1
- SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
+ query | calls
+----------------------------------------------------------------------+-------
+ SELECT * FROM test_squash WHERE id IN ($1 /*, ... */) | 1
+ SELECT * FROM test_squash WHERE id::text = ANY(ARRAY[$1 /*, ... 
*/]) | 1
+ SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
 (3 rows)
 
--- neither are prepared statements
+-- prepared statements will also be squashed
 -- the IN and ARRAY forms of this statement will have the same queryId
 SELECT pg_stat_statements_reset() IS NOT NULL AS t;
  t
@@ -155,12 +155,12 @@ EXECUTE p1(1, 2, 3, 4, 5);
 DEALLOCATE p1;
 SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
- query | calls
------------------------------------------------------------+-------
- DEALLOCATE $1 | 2
- PREPARE p1(int, int, int, int, int) AS +| 2
- SELECT * FROM test_squash WHERE id IN ($1, $2, $3, $4, $5) |
- SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
+ query | calls
+-------------------------------------------------------+-------
+ DEALLOCATE $1 | 2
+ PREPARE p1(int, int, int, int, int) AS +| 2
+ SELECT * FROM test_squash WHERE id IN ($1 /*, ... */) |
+ SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
 (3 rows)
 
 -- More conditions in the query
diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c
index ecc7f2fb2663f..5597fcaaa053d 100644
--- a/contrib/pg_stat_statements/pg_stat_statements.c
+++ b/contrib/pg_stat_statements/pg_stat_statements.c
@@ -2841,6 +2841,16 @@ generate_normalized_query(JumbleState *jstate, const char *query,
 			int			off,	/* Offset from start for cur tok */
 						tok_len;	/* Length (in bytes) of that tok */
 
+			/*
+			 * If we have an external param at this location, but no lists are
+			 * being squashed across the query, then we skip here; this will make
+			 * us print the characters found in the original query that
+			 * represent the parameter in the next iteration (or after the loop is
+			 * done), which is a bit odd but seems to work okay in most cases.
+			 */
+			if (jstate->clocations[i].extern_param && !jstate->has_squashed_lists)
+				continue;
+
 			off = jstate->clocations[i].location;
 
 			/* Adjust recorded location if we're dealing with partial string */
diff --git a/contrib/pg_stat_statements/sql/extended.sql b/contrib/pg_stat_statements/sql/extended.sql
index a366658a53a72..9a6518e2f0487 100644
--- a/contrib/pg_stat_statements/sql/extended.sql
+++ b/contrib/pg_stat_statements/sql/extended.sql
@@ -21,17 +21,26 @@ SELECT $1 \bind 'unnamed_val1' \g
 SELECT calls, rows, query FROM pg_stat_statements ORDER BY query COLLATE "C";
 
 -- Various parameter numbering patterns
-SELECT pg_stat_statements_reset() IS NOT NULL AS t;
 -- Unique query IDs with parameter numbers switched.
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
 SELECT WHERE ($1::int, 7) IN ((8, $2::int), ($3::int, 9)) \bind '1' '2' '3' \g
 SELECT WHERE ($2::int, 10) IN ((11, $3::int), ($1::int, 12)) \bind '1' '2' '3' \g
 SELECT WHERE $1::int IN ($2::int, $3::int) \bind '1' '2' '3' \g
 SELECT WHERE $2::int IN ($3::int, $1::int) \bind '1' '2' '3' \g
 SELECT WHERE $3::int IN ($1::int, $2::int) \bind '1' '2' '3' \g
+SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
 
 -- Two groups of two queries with the same query ID. 
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
 SELECT WHERE '1'::int IN ($1::int, '2'::int) \bind '1' \g
 SELECT WHERE '4'::int IN ($1::int, '5'::int) \bind '2' \g
 SELECT WHERE $2::int IN ($1::int, '1'::int) \bind '1' '2' \g
 SELECT WHERE $2::int IN ($1::int, '2'::int) \bind '3' '4' \g
+SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+
+-- no squashable list, the parameter IDs are kept as-is
+SELECT WHERE $3 = $1 AND $2 = $4 \bind 1 2 1 2 \g
+-- squashable list, so the parameter IDs will be re-assigned
+SELECT WHERE 1 IN (1, 2, 3) AND $3 = $1 AND $2 = $4 \bind 1 2 1 2 \g
 
 SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
diff --git a/contrib/pg_stat_statements/sql/squashing.sql b/contrib/pg_stat_statements/sql/squashing.sql
index 946e149831c94..53138d125a92c 100644
--- a/contrib/pg_stat_statements/sql/squashing.sql
+++ b/contrib/pg_stat_statements/sql/squashing.sql
@@ -32,7 +32,7 @@ SELECT WHERE 1 IN (1, int4(1), int4(2), 2);
 SELECT WHERE 1 = ANY (ARRAY[1, int4(1), int4(2), 2]);
 SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
 
--- external parameters will not be squashed
+-- external parameters will be squashed
 SELECT pg_stat_statements_reset() IS NOT NULL AS t;
 SELECT * FROM test_squash WHERE id IN ($1, $2, $3, $4, $5)
 \bind 1 2 3 4 5
 ;
@@ -40,7 +40,7 @@ SELECT * FROM test_squash WHERE id::text = ANY(ARRAY[$1, $2, $3, $4, $5]) \bind
 ;
 SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
 
--- neither are prepared statements
+-- prepared statements will also be squashed
 -- the IN and ARRAY forms of this statement will have the same queryId
 SELECT pg_stat_statements_reset() IS NOT NULL AS t;
 PREPARE p1(int, int, int, int, int) AS
diff --git a/src/backend/nodes/queryjumblefuncs.c b/src/backend/nodes/queryjumblefuncs.c
index 62e3a677cd191..31f971519772d 100644
--- a/src/backend/nodes/queryjumblefuncs.c
+++ b/src/backend/nodes/queryjumblefuncs.c
@@ -21,6 +21,11 @@
 * tree(s) generated from the query. The executor can then use this value
 * to blame query costs on the proper queryId.
 *
+ * Arrays of two or more constants and PARAM_EXTERN parameters are "squashed"
+ * and contribute only once to the jumble. This has the effect that queries
+ * that differ only on the length of such lists have the same queryId. 
+ * + * * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * @@ -61,11 +66,13 @@ static void AppendJumble(JumbleState *jstate, const unsigned char *value, Size size); static void FlushPendingNulls(JumbleState *jstate); static void RecordConstLocation(JumbleState *jstate, + bool extern_param, int location, int len); static void _jumbleNode(JumbleState *jstate, Node *node); +static void _jumbleList(JumbleState *jstate, Node *node); static void _jumbleElements(JumbleState *jstate, List *elements, Node *node); +static void _jumbleParam(JumbleState *jstate, Node *node); static void _jumbleA_Const(JumbleState *jstate, Node *node); -static void _jumbleList(JumbleState *jstate, Node *node); static void _jumbleVariableSetStmt(JumbleState *jstate, Node *node); static void _jumbleRangeTblEntry_eref(JumbleState *jstate, RangeTblEntry *rte, @@ -185,6 +192,7 @@ InitJumble(void) jstate->clocations_count = 0; jstate->highest_extern_param_id = 0; jstate->pending_nulls = 0; + jstate->has_squashed_lists = false; #ifdef USE_ASSERT_CHECKING jstate->total_jumble_len = 0; #endif @@ -207,6 +215,10 @@ DoJumble(JumbleState *jstate, Node *node) if (jstate->pending_nulls > 0) FlushPendingNulls(jstate); + /* Squashed list found, reset highest_extern_param_id */ + if (jstate->has_squashed_lists) + jstate->highest_extern_param_id = 0; + /* Process the jumble buffer and produce the hash value */ return DatumGetInt64(hash_any_extended(jstate->jumble, jstate->jumble_len, @@ -376,14 +388,14 @@ FlushPendingNulls(JumbleState *jstate) * Record the location of some kind of constant within a query string. * These are not only bare constants but also expressions that ultimately * constitute a constant, such as those inside casts and simple function - * calls. + * calls; if extern_param, then it corresponds to a PARAM_EXTERN Param. * * If length is -1, it indicates a single such constant element. If * it's a positive integer, it indicates the length of a squashable * list of them. */ static void -RecordConstLocation(JumbleState *jstate, int location, int len) +RecordConstLocation(JumbleState *jstate, bool extern_param, int location, int len) { /* -1 indicates unknown or undefined location */ if (location >= 0) @@ -406,6 +418,7 @@ RecordConstLocation(JumbleState *jstate, int location, int len) Assert(len > -1 || len == -1); jstate->clocations[jstate->clocations_count].length = len; jstate->clocations[jstate->clocations_count].squashed = (len > -1); + jstate->clocations[jstate->clocations_count].extern_param = extern_param; jstate->clocations_count++; } } @@ -417,7 +430,8 @@ RecordConstLocation(JumbleState *jstate, int location, int len) * - See through any wrapping RelabelType and CoerceViaIO layers. * - If it's a FuncExpr, check that the function is a builtin * cast and its arguments are Const. - * - Otherwise test if the expression is a simple Const. + * - Otherwise test if the expression is a simple Const or a + * PARAM_EXTERN param. */ static bool IsSquashableConstant(Node *element) @@ -438,6 +452,9 @@ IsSquashableConstant(Node *element) case T_Const: return true; + case T_Param: + return castNode(Param, element)->paramkind == PARAM_EXTERN; + case T_FuncExpr: { FuncExpr *func = (FuncExpr *) element; @@ -487,8 +504,8 @@ IsSquashableConstant(Node *element) * Return value indicates if squashing is possible. 
* * Note that this function searches only for explicit Const nodes with - * possibly very simple decorations on top, and does not try to simplify - * expressions. + * possibly very simple decorations on top and PARAM_EXTERN parameters, + * and does not try to simplify expressions. */ static bool IsSquashableConstantList(List *elements) @@ -513,7 +530,7 @@ IsSquashableConstantList(List *elements) #define JUMBLE_ELEMENTS(list, node) \ _jumbleElements(jstate, (List *) expr->list, node) #define JUMBLE_LOCATION(location) \ - RecordConstLocation(jstate, expr->location, -1) + RecordConstLocation(jstate, false, expr->location, -1) #define JUMBLE_FIELD(item) \ do { \ if (sizeof(expr->item) == 8) \ @@ -540,42 +557,6 @@ do { \ #include "queryjumblefuncs.funcs.c" -/* - * We try to jumble lists of expressions as one individual item regardless - * of how many elements are in the list. This is know as squashing, which - * results in different queries jumbling to the same query_id, if the only - * difference is the number of elements in the list. - * - * We allow constants to be squashed. To normalize such queries, we use - * the start and end locations of the list of elements in a list. - */ -static void -_jumbleElements(JumbleState *jstate, List *elements, Node *node) -{ - bool normalize_list = false; - - if (IsSquashableConstantList(elements)) - { - if (IsA(node, ArrayExpr)) - { - ArrayExpr *aexpr = (ArrayExpr *) node; - - if (aexpr->list_start > 0 && aexpr->list_end > 0) - { - RecordConstLocation(jstate, - aexpr->list_start + 1, - (aexpr->list_end - aexpr->list_start) - 1); - normalize_list = true; - } - } - } - - if (!normalize_list) - { - _jumbleNode(jstate, (Node *) elements); - } -} - static void _jumbleNode(JumbleState *jstate, Node *node) { @@ -617,26 +598,6 @@ _jumbleNode(JumbleState *jstate, Node *node) break; } - /* Special cases to handle outside the automated code */ - switch (nodeTag(expr)) - { - case T_Param: - { - Param *p = (Param *) node; - - /* - * Update the highest Param id seen, in order to start - * normalization correctly. - */ - if (p->paramkind == PARAM_EXTERN && - p->paramid > jstate->highest_extern_param_id) - jstate->highest_extern_param_id = p->paramid; - } - break; - default: - break; - } - /* Ensure we added something to the jumble buffer */ Assert(jstate->total_jumble_len > prev_jumble_len); } @@ -672,6 +633,79 @@ _jumbleList(JumbleState *jstate, Node *node) } } +/* + * We try to jumble lists of expressions as one individual item regardless + * of how many elements are in the list. This is know as squashing, which + * results in different queries jumbling to the same query_id, if the only + * difference is the number of elements in the list. + * + * We allow constants and PARAM_EXTERN parameters to be squashed. To normalize + * such queries, we use the start and end locations of the list of elements in + * a list. + */ +static void +_jumbleElements(JumbleState *jstate, List *elements, Node *node) +{ + bool normalize_list = false; + + if (IsSquashableConstantList(elements)) + { + if (IsA(node, ArrayExpr)) + { + ArrayExpr *aexpr = (ArrayExpr *) node; + + if (aexpr->list_start > 0 && aexpr->list_end > 0) + { + RecordConstLocation(jstate, + false, + aexpr->list_start + 1, + (aexpr->list_end - aexpr->list_start) - 1); + normalize_list = true; + jstate->has_squashed_lists = true; + } + } + } + + if (!normalize_list) + { + _jumbleNode(jstate, (Node *) elements); + } +} + +/* + * We store the highest param ID of extern params. 
This can later be used
+ * to start the numbering of the placeholder for squashed lists.
+ */
+static void
+_jumbleParam(JumbleState *jstate, Node *node)
+{
+	Param	   *expr = (Param *) node;
+
+	JUMBLE_FIELD(paramkind);
+	JUMBLE_FIELD(paramid);
+	JUMBLE_FIELD(paramtype);
+	/* paramtypmod and paramcollid are ignored */
+
+	if (expr->paramkind == PARAM_EXTERN)
+	{
+		/*
+		 * At this point, only external parameter locations outside of
+		 * squashable lists will be recorded.
+		 */
+		RecordConstLocation(jstate, true, expr->location, -1);
+
+		/*
+		 * Update the highest Param id seen, in order to start normalization
+		 * correctly.
+		 *
+		 * Note: This value is reset at the end of jumbling if there exists a
+		 * squashable list. See the comment in the definition of JumbleState.
+		 */
+		if (expr->paramid > jstate->highest_extern_param_id)
+			jstate->highest_extern_param_id = expr->paramid;
+	}
+}
+
 static void
 _jumbleA_Const(JumbleState *jstate, Node *node)
 {
diff --git a/src/include/nodes/primnodes.h b/src/include/nodes/primnodes.h
index 01510b01b649b..6dfca3cb35ba5 100644
--- a/src/include/nodes/primnodes.h
+++ b/src/include/nodes/primnodes.h
@@ -389,14 +389,16 @@ typedef enum ParamKind
 
 typedef struct Param
 {
+	pg_node_attr(custom_query_jumble)
+
 	Expr		xpr;
 	ParamKind	paramkind;		/* kind of parameter. See above */
 	int			paramid;		/* numeric ID for parameter */
 	Oid			paramtype;		/* pg_type OID of parameter's datatype */
 	/* typmod value, if known */
-	int32		paramtypmod pg_node_attr(query_jumble_ignore);
+	int32		paramtypmod;
 	/* OID of collation, or InvalidOid if none */
-	Oid			paramcollid pg_node_attr(query_jumble_ignore);
+	Oid			paramcollid;
 	/* token location, or -1 if unknown */
 	ParseLoc	location;
 } Param;
diff --git a/src/include/nodes/queryjumble.h b/src/include/nodes/queryjumble.h
index da7c7abed2e6a..dcb36dcb44f2f 100644
--- a/src/include/nodes/queryjumble.h
+++ b/src/include/nodes/queryjumble.h
@@ -24,11 +24,11 @@ typedef struct LocationLen
 	int			location;		/* start offset in query text */
 	int			length;			/* length in bytes, or -1 to ignore */
 
-	/*
-	 * Indicates that this location represents the beginning or end of a run
-	 * of squashed constants.
-	 */
+	/* Does this location represent a squashed list? */
 	bool		squashed;
+
+	/* Is this location a PARAM_EXTERN parameter? */
+	bool		extern_param;
 } LocationLen;
 
 /*
@@ -52,9 +52,18 @@ typedef struct JumbleState
 	/* Current number of valid entries in clocations array */
 	int			clocations_count;
 
-	/* highest Param id we've seen, in order to start normalization correctly */
+	/*
+	 * ID of the highest PARAM_EXTERN parameter we've seen in the query; used
+	 * to start normalization correctly. However, if there are any squashed
+	 * lists in the query, we disregard query-supplied parameter numbers and
+	 * renumber everything. This is to avoid possible gaps caused by
+	 * squashing in case any params are in squashed lists.
+	 */
 	int			highest_extern_param_id;
 
+	/* Whether squashable lists are present */
+	bool		has_squashed_lists;
+
 	/*
 	 * Count of the number of NULL nodes seen since last appending a value.
 	 * These are flushed out to the jumble buffer before subsequent appends

From fd519419c9484a47f068cc04e2db81a4ec661669 Mon Sep 17 00:00:00 2001
From: Tom Lane
Date: Tue, 24 Jun 2025 14:14:04 -0400
Subject: [PATCH 062/181] Prevent excessive delays before launching new logrep
 workers.

The logical replication launcher process would sometimes sleep
for as much as 3 minutes
This could happen if (1) WaitForReplicationWorkerAttach absorbed a process latch wakeup that was meant to cause ApplyLauncherMain to do work, or (2) logicalrep_worker_launch reported failure, either because of resource limits or because the new worker terminated immediately. In case (2), the expected behavior is that we retry the launch after wal_retrieve_retry_interval, but that didn't reliably happen. It's not clear how often such conditions would occur in the field, but in our subscription test suite they are somewhat common, especially in tests that exercise cases that cause quick worker failure. That causes the tests to take substantially longer than they ought to do on typical setups. To fix (1), make WaitForReplicationWorkerAttach re-set the latch before returning if it cleared it while looping. To fix (2), ensure that we reduce wait_time to no more than wal_retrieve_retry_interval when logicalrep_worker_launch reports failure. In passing, fix a couple of perhaps-hypothetical race conditions, e.g. examining worker->in_use without a lock. Backpatch to v16. Problem (2) didn't exist before commit 5a3a95385 because the previous code always set wait_time to wal_retrieve_retry_interval when launching a worker, regardless of success or failure of the launch. That behavior also greatly mitigated problem (1), so I'm not excited about adapting the remainder of the patch to the substantially-different code in older branches. Author: Tom Lane Reviewed-by: Amit Kapila Reviewed-by: Ashutosh Bapat Discussion: https://postgr.es/m/817604.1750723007@sss.pgh.pa.us Backpatch-through: 16 --- src/backend/replication/logical/launcher.c | 40 ++++++++++++++++----- src/backend/replication/logical/tablesync.c | 19 ++++++---- 2 files changed, 44 insertions(+), 15 deletions(-) diff --git a/src/backend/replication/logical/launcher.c b/src/backend/replication/logical/launcher.c index 1c3c051403dd6..14d8efbd25bf5 100644 --- a/src/backend/replication/logical/launcher.c +++ b/src/backend/replication/logical/launcher.c @@ -175,12 +175,14 @@ WaitForReplicationWorkerAttach(LogicalRepWorker *worker, uint16 generation, BackgroundWorkerHandle *handle) { - BgwHandleStatus status; - int rc; + bool result = false; + bool dropped_latch = false; for (;;) { + BgwHandleStatus status; pid_t pid; + int rc; CHECK_FOR_INTERRUPTS(); @@ -189,8 +191,9 @@ WaitForReplicationWorkerAttach(LogicalRepWorker *worker, /* Worker either died or has started. Return false if died. */ if (!worker->in_use || worker->proc) { + result = worker->in_use; LWLockRelease(LogicalRepWorkerLock); - return worker->in_use; + break; } LWLockRelease(LogicalRepWorkerLock); @@ -205,7 +208,7 @@ WaitForReplicationWorkerAttach(LogicalRepWorker *worker, if (generation == worker->generation) logicalrep_worker_cleanup(worker); LWLockRelease(LogicalRepWorkerLock); - return false; + break; /* result is already false */ } /* @@ -220,8 +223,18 @@ WaitForReplicationWorkerAttach(LogicalRepWorker *worker, { ResetLatch(MyLatch); CHECK_FOR_INTERRUPTS(); + dropped_latch = true; } } + + /* + * If we had to clear a latch event in order to wait, be sure to restore + * it before exiting. Otherwise caller may miss events. 
+ */ + if (dropped_latch) + SetLatch(MyLatch); + + return result; } /* @@ -1194,10 +1207,21 @@ ApplyLauncherMain(Datum main_arg) (elapsed = TimestampDifferenceMilliseconds(last_start, now)) >= wal_retrieve_retry_interval) { ApplyLauncherSetWorkerStartTime(sub->oid, now); - logicalrep_worker_launch(WORKERTYPE_APPLY, - sub->dbid, sub->oid, sub->name, - sub->owner, InvalidOid, - DSM_HANDLE_INVALID); + if (!logicalrep_worker_launch(WORKERTYPE_APPLY, + sub->dbid, sub->oid, sub->name, + sub->owner, InvalidOid, + DSM_HANDLE_INVALID)) + { + /* + * We get here either if we failed to launch a worker + * (perhaps for resource-exhaustion reasons) or if we + * launched one but it immediately quit. Either way, it + * seems appropriate to try again after + * wal_retrieve_retry_interval. + */ + wait_time = Min(wait_time, + wal_retrieve_retry_interval); + } } else { diff --git a/src/backend/replication/logical/tablesync.c b/src/backend/replication/logical/tablesync.c index 8e1e8762f6258..c90f23ee5b0b2 100644 --- a/src/backend/replication/logical/tablesync.c +++ b/src/backend/replication/logical/tablesync.c @@ -603,14 +603,19 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn) TimestampDifferenceExceeds(hentry->last_start_time, now, wal_retrieve_retry_interval)) { - logicalrep_worker_launch(WORKERTYPE_TABLESYNC, - MyLogicalRepWorker->dbid, - MySubscription->oid, - MySubscription->name, - MyLogicalRepWorker->userid, - rstate->relid, - DSM_HANDLE_INVALID); + /* + * Set the last_start_time even if we fail to start + * the worker, so that we won't retry until + * wal_retrieve_retry_interval has elapsed. + */ hentry->last_start_time = now; + (void) logicalrep_worker_launch(WORKERTYPE_TABLESYNC, + MyLogicalRepWorker->dbid, + MySubscription->oid, + MySubscription->name, + MyLogicalRepWorker->userid, + rstate->relid, + DSM_HANDLE_INVALID); } } } From 84c4e10e130f411a89a2f9fd9184075ef02b0eaf Mon Sep 17 00:00:00 2001 From: Fujii Masao Date: Wed, 25 Jun 2025 08:59:25 +0900 Subject: [PATCH 063/181] doc: Add secondary index entries for vacuum-related parameters. For parameters that exist as both configuration and storage options, the documentation typically includes secondary index entries to help users distinguish and locate the relevant references easily. However, such index entries were missing for vacuum_truncate and vacuum_max_eager_freeze_failure_rate, both introduced in v18. This commit adds appropriate secondary index terms for these parameters to ensure consistency with other parameters and improve usability of the documentation index. 
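Settings like these are reachable at both levels under the same name, which is
what the secondary index terms disambiguate.  A minimal illustration (the
table name here is made up):

    SET vacuum_truncate = off;          -- the configuration parameter
    CREATE TABLE t (a int)
        WITH (vacuum_truncate = off);   -- the storage parameter of the same name
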
Author: Fujii Masao Discussion: https://postgr.es/m/e95c899a-2aeb-45b7-8fd3-7a27dcdb475b@oss.nttdata.com --- doc/src/sgml/config.sgml | 6 ++++-- doc/src/sgml/ref/create_table.sgml | 3 ++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml index b265cc89c9d46..0ac519706a4d6 100644 --- a/doc/src/sgml/config.sgml +++ b/doc/src/sgml/config.sgml @@ -9340,7 +9340,8 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; vacuum_truncate (boolean) - vacuum_truncate configuration parameter + vacuum_truncate + configuration parameter @@ -9544,7 +9545,8 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; vacuum_max_eager_freeze_failure_rate (floating point) - vacuum_max_eager_freeze_failure_rate configuration parameter + vacuum_max_eager_freeze_failure_rate + configuration parameter diff --git a/doc/src/sgml/ref/create_table.sgml b/doc/src/sgml/ref/create_table.sgml index a581691818278..9d92e0a35516f 100644 --- a/doc/src/sgml/ref/create_table.sgml +++ b/doc/src/sgml/ref/create_table.sgml @@ -1694,7 +1694,8 @@ WITH ( MODULUS numeric_literal, REM vacuum_truncate, toast.vacuum_truncate (boolean) - vacuum_truncate storage parameter + vacuum_truncate + storage parameter From a9c2bde9295574fc79ce0dea1a2b481c8804c1a5 Mon Sep 17 00:00:00 2001 From: Fujii Masao Date: Wed, 25 Jun 2025 09:01:13 +0900 Subject: [PATCH 064/181] doc: Mention ANALYZE VERBOSE in track_cost_delay_timing description. The documentation for track_cost_delay_timing describes where cost-based vacuum delay timing information is displayed when the setting is enabled. While this information is also shown in the output of ANALYZE VERBOSE, that was previously omitted from the list. This commit updates the documentation to include ANALYZE VERBOSE in the list, clarifying that it also reports cost-based delay timing information. Author: Fujii Masao Discussion: https://postgr.es/m/e95c899a-2aeb-45b7-8fd3-7a27dcdb475b@oss.nttdata.com --- doc/src/sgml/config.sgml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml index 0ac519706a4d6..f4ba58bb5c3df 100644 --- a/doc/src/sgml/config.sgml +++ b/doc/src/sgml/config.sgml @@ -8602,7 +8602,8 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; timing information is displayed in pg_stat_progress_vacuum, pg_stat_progress_analyze, - in the output of when the + in the output of and + when the VERBOSE option is used, and by autovacuum for auto-vacuums and auto-analyzes when is set. From 82015fd9bdc0827c33245a0eef620f854d53b720 Mon Sep 17 00:00:00 2001 From: Fujii Masao Date: Wed, 25 Jun 2025 09:02:31 +0900 Subject: [PATCH 065/181] doc: Fix type description of io_workers GUC for consistency. The documentation previously described the type of the io_workers GUC parameter as "int". However, the documentation consistently uses "integer" for parameters of this type. This commit updates the type description of io_workers to "integer" for consistency with other GUC parameter descriptions. 
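A running server reports the canonical type name of every GUC in pg_settings,
which is a quick way to double-check such descriptions (a sketch; the output
assumes a server new enough to have this parameter, i.e. v18 or later):

    SELECT name, vartype FROM pg_settings WHERE name = 'io_workers';
    --     name    | vartype
    -- ------------+---------
    --  io_workers | integer
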
Author: Fujii Masao
Discussion: https://postgr.es/m/e95c899a-2aeb-45b7-8fd3-7a27dcdb475b@oss.nttdata.com
---
 doc/src/sgml/config.sgml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml
index f4ba58bb5c3df..59a0874528a3a 100644
--- a/doc/src/sgml/config.sgml
+++ b/doc/src/sgml/config.sgml
@@ -2788,7 +2788,7 @@ include_dir 'conf.d'
 
-      io_workers (int)
+      io_workers (integer)
       
        io_workers configuration parameter
       

From 661643dedad97f8b924991fdc739b1f47b0fb60b Mon Sep 17 00:00:00 2001
From: Michael Paquier
Date: Wed, 25 Jun 2025 10:03:46 +0900
Subject: [PATCH 066/181] Avoid scribbling of VACUUM options

This fixes two issues with the handling of VacuumParams in vacuum_rel().
This code path modifies the passed-in VacuumParams for the "truncate"
and "index_cleanup" options of the relation being worked on, so
incorrect options may be used in the two following scenarios, where a
VacuumParams pointer is shared across multiple relations:
- Multiple relations in a single VACUUM command.
- TOAST relations vacuumed with their main relation.

The problem is avoided by giving the two callers of vacuum_rel() their
own copies of VacuumParams, made before the "truncate" and
"index_cleanup" options are updated.

The refactoring of the VACUUM options and parameters done in
0d831389749a did not itself introduce an issue, but it paved the way for
the problem dealt with in this commit, via b84dbc8eb80b for "truncate"
and a96c41feec6b for "index_cleanup", which were added a couple of years
after the initial refactoring.

HEAD will be improved with a different patch that hardens the uses of
VacuumParams across the tree.  This cannot be backpatched as it
introduces an ABI breakage.

The backend portion of the patch has been authored by Nathan, while I
have implemented the tests.  The tests rely on injection points to check
the option values, making them faster and more reliable than the tests
originally proposed by Shihao, and they also provide more coverage.
This part can only be backpatched down to v17.

Reported-by: Shihao Zhong
Author: Nathan Bossart
Co-authored-by: Michael Paquier
Discussion: https://postgr.es/m/CAGRkXqTo+aK=GTy5pSc-9cy8H2F2TJvcrZ-zXEiNJj93np1UUw@mail.gmail.com
Backpatch-through: 13
---
 src/backend/commands/vacuum.c                 | 39 +++++-
 src/test/modules/injection_points/Makefile    |  2 +-
 .../injection_points/expected/vacuum.out      | 122 ++++++++++++++++++
 src/test/modules/injection_points/meson.build |  1 +
 .../modules/injection_points/sql/vacuum.sql   | 47 +++++++
 5 files changed, 206 insertions(+), 5 deletions(-)
 create mode 100644 src/test/modules/injection_points/expected/vacuum.out
 create mode 100644 src/test/modules/injection_points/sql/vacuum.sql

diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index 33a33bf6b1cfa..02993d320dafc 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -56,6 +56,7 @@
 #include "utils/fmgroids.h"
 #include "utils/guc.h"
 #include "utils/guc_hooks.h"
+#include "utils/injection_point.h"
 #include "utils/memutils.h"
 #include "utils/snapmgr.h"
 #include "utils/syscache.h"
@@ -634,7 +635,15 @@ vacuum(List *relations, VacuumParams *params, BufferAccessStrategy bstrategy,
 
 		if (params->options & VACOPT_VACUUM)
 		{
-			if (!vacuum_rel(vrel->oid, vrel->relation, params, bstrategy))
+			VacuumParams params_copy;
+
+			/*
+			 * vacuum_rel() scribbles on the parameters, so give it a copy
+			 * to avoid affecting other relations.
+ */ + memcpy(¶ms_copy, params, sizeof(VacuumParams)); + + if (!vacuum_rel(vrel->oid, vrel->relation, ¶ms_copy, bstrategy)) continue; } @@ -2008,9 +2017,16 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params, Oid save_userid; int save_sec_context; int save_nestlevel; + VacuumParams toast_vacuum_params; Assert(params != NULL); + /* + * This function scribbles on the parameters, so make a copy early to + * avoid affecting the TOAST table (if we do end up recursing to it). + */ + memcpy(&toast_vacuum_params, params, sizeof(VacuumParams)); + /* Begin a transaction for vacuuming this relation */ StartTransactionCommand(); @@ -2191,6 +2207,15 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params, } } +#ifdef USE_INJECTION_POINTS + if (params->index_cleanup == VACOPTVALUE_AUTO) + INJECTION_POINT("vacuum-index-cleanup-auto", NULL); + else if (params->index_cleanup == VACOPTVALUE_DISABLED) + INJECTION_POINT("vacuum-index-cleanup-disabled", NULL); + else if (params->index_cleanup == VACOPTVALUE_ENABLED) + INJECTION_POINT("vacuum-index-cleanup-enabled", NULL); +#endif + /* * Check if the vacuum_max_eager_freeze_failure_rate table storage * parameter was specified. This overrides the GUC value. @@ -2221,6 +2246,15 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params, params->truncate = VACOPTVALUE_DISABLED; } +#ifdef USE_INJECTION_POINTS + if (params->truncate == VACOPTVALUE_AUTO) + INJECTION_POINT("vacuum-truncate-auto", NULL); + else if (params->truncate == VACOPTVALUE_DISABLED) + INJECTION_POINT("vacuum-truncate-disabled", NULL); + else if (params->truncate == VACOPTVALUE_ENABLED) + INJECTION_POINT("vacuum-truncate-enabled", NULL); +#endif + /* * Remember the relation's TOAST relation for later, if the caller asked * us to process it. In VACUUM FULL, though, the toast table is @@ -2299,15 +2333,12 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params, */ if (toast_relid != InvalidOid) { - VacuumParams toast_vacuum_params; - /* * Force VACOPT_PROCESS_MAIN so vacuum_rel() processes it. Likewise, * set toast_parent so that the privilege checks are done on the main * relation. NB: This is only safe to do because we hold a session * lock on the main relation that prevents concurrent deletion. 
*/ - memcpy(&toast_vacuum_params, params, sizeof(VacuumParams)); toast_vacuum_params.options |= VACOPT_PROCESS_MAIN; toast_vacuum_params.toast_parent = relid; diff --git a/src/test/modules/injection_points/Makefile b/src/test/modules/injection_points/Makefile index e680991f8d4f0..fc82cd67f6cd6 100644 --- a/src/test/modules/injection_points/Makefile +++ b/src/test/modules/injection_points/Makefile @@ -11,7 +11,7 @@ EXTENSION = injection_points DATA = injection_points--1.0.sql PGFILEDESC = "injection_points - facility for injection points" -REGRESS = injection_points hashagg reindex_conc +REGRESS = injection_points hashagg reindex_conc vacuum REGRESS_OPTS = --dlpath=$(top_builddir)/src/test/regress ISOLATION = basic inplace syscache-update-pruned diff --git a/src/test/modules/injection_points/expected/vacuum.out b/src/test/modules/injection_points/expected/vacuum.out new file mode 100644 index 0000000000000..58df59fa927e3 --- /dev/null +++ b/src/test/modules/injection_points/expected/vacuum.out @@ -0,0 +1,122 @@ +-- Tests for VACUUM +CREATE EXTENSION injection_points; +SELECT injection_points_set_local(); + injection_points_set_local +---------------------------- + +(1 row) + +SELECT injection_points_attach('vacuum-index-cleanup-auto', 'notice'); + injection_points_attach +------------------------- + +(1 row) + +SELECT injection_points_attach('vacuum-index-cleanup-disabled', 'notice'); + injection_points_attach +------------------------- + +(1 row) + +SELECT injection_points_attach('vacuum-index-cleanup-enabled', 'notice'); + injection_points_attach +------------------------- + +(1 row) + +SELECT injection_points_attach('vacuum-truncate-auto', 'notice'); + injection_points_attach +------------------------- + +(1 row) + +SELECT injection_points_attach('vacuum-truncate-disabled', 'notice'); + injection_points_attach +------------------------- + +(1 row) + +SELECT injection_points_attach('vacuum-truncate-enabled', 'notice'); + injection_points_attach +------------------------- + +(1 row) + +-- Check state of index_cleanup and truncate in VACUUM. +CREATE TABLE vac_tab_on_toast_off(i int, j text) WITH + (autovacuum_enabled=false, + vacuum_index_cleanup=true, toast.vacuum_index_cleanup=false, + vacuum_truncate=true, toast.vacuum_truncate=false); +CREATE TABLE vac_tab_off_toast_on(i int, j text) WITH + (autovacuum_enabled=false, + vacuum_index_cleanup=false, toast.vacuum_index_cleanup=true, + vacuum_truncate=false, toast.vacuum_truncate=true); +-- Multiple relations should use their options in isolation. +VACUUM vac_tab_on_toast_off, vac_tab_off_toast_on; +NOTICE: notice triggered for injection point vacuum-index-cleanup-enabled +NOTICE: notice triggered for injection point vacuum-truncate-enabled +NOTICE: notice triggered for injection point vacuum-index-cleanup-disabled +NOTICE: notice triggered for injection point vacuum-truncate-disabled +NOTICE: notice triggered for injection point vacuum-index-cleanup-disabled +NOTICE: notice triggered for injection point vacuum-truncate-disabled +NOTICE: notice triggered for injection point vacuum-index-cleanup-enabled +NOTICE: notice triggered for injection point vacuum-truncate-enabled +-- Check "auto" case of index_cleanup and "truncate" controlled by +-- its GUC. 
+CREATE TABLE vac_tab_auto(i int, j text) WITH + (autovacuum_enabled=false, + vacuum_index_cleanup=auto, toast.vacuum_index_cleanup=auto); +SET vacuum_truncate = false; +VACUUM vac_tab_auto; +NOTICE: notice triggered for injection point vacuum-index-cleanup-auto +NOTICE: notice triggered for injection point vacuum-truncate-disabled +NOTICE: notice triggered for injection point vacuum-index-cleanup-auto +NOTICE: notice triggered for injection point vacuum-truncate-disabled +SET vacuum_truncate = true; +VACUUM vac_tab_auto; +NOTICE: notice triggered for injection point vacuum-index-cleanup-auto +NOTICE: notice triggered for injection point vacuum-truncate-enabled +NOTICE: notice triggered for injection point vacuum-index-cleanup-auto +NOTICE: notice triggered for injection point vacuum-truncate-enabled +RESET vacuum_truncate; +DROP TABLE vac_tab_auto; +DROP TABLE vac_tab_on_toast_off; +DROP TABLE vac_tab_off_toast_on; +-- Cleanup +SELECT injection_points_detach('vacuum-index-cleanup-auto'); + injection_points_detach +------------------------- + +(1 row) + +SELECT injection_points_detach('vacuum-index-cleanup-disabled'); + injection_points_detach +------------------------- + +(1 row) + +SELECT injection_points_detach('vacuum-index-cleanup-enabled'); + injection_points_detach +------------------------- + +(1 row) + +SELECT injection_points_detach('vacuum-truncate-auto'); + injection_points_detach +------------------------- + +(1 row) + +SELECT injection_points_detach('vacuum-truncate-disabled'); + injection_points_detach +------------------------- + +(1 row) + +SELECT injection_points_detach('vacuum-truncate-enabled'); + injection_points_detach +------------------------- + +(1 row) + +DROP EXTENSION injection_points; diff --git a/src/test/modules/injection_points/meson.build b/src/test/modules/injection_points/meson.build index d61149712fd7d..ce778ccf9ac45 100644 --- a/src/test/modules/injection_points/meson.build +++ b/src/test/modules/injection_points/meson.build @@ -37,6 +37,7 @@ tests += { 'injection_points', 'hashagg', 'reindex_conc', + 'vacuum', ], 'regress_args': ['--dlpath', meson.build_root() / 'src/test/regress'], # The injection points are cluster-wide, so disable installcheck diff --git a/src/test/modules/injection_points/sql/vacuum.sql b/src/test/modules/injection_points/sql/vacuum.sql new file mode 100644 index 0000000000000..23760dd0f380a --- /dev/null +++ b/src/test/modules/injection_points/sql/vacuum.sql @@ -0,0 +1,47 @@ +-- Tests for VACUUM + +CREATE EXTENSION injection_points; + +SELECT injection_points_set_local(); +SELECT injection_points_attach('vacuum-index-cleanup-auto', 'notice'); +SELECT injection_points_attach('vacuum-index-cleanup-disabled', 'notice'); +SELECT injection_points_attach('vacuum-index-cleanup-enabled', 'notice'); +SELECT injection_points_attach('vacuum-truncate-auto', 'notice'); +SELECT injection_points_attach('vacuum-truncate-disabled', 'notice'); +SELECT injection_points_attach('vacuum-truncate-enabled', 'notice'); + +-- Check state of index_cleanup and truncate in VACUUM. +CREATE TABLE vac_tab_on_toast_off(i int, j text) WITH + (autovacuum_enabled=false, + vacuum_index_cleanup=true, toast.vacuum_index_cleanup=false, + vacuum_truncate=true, toast.vacuum_truncate=false); +CREATE TABLE vac_tab_off_toast_on(i int, j text) WITH + (autovacuum_enabled=false, + vacuum_index_cleanup=false, toast.vacuum_index_cleanup=true, + vacuum_truncate=false, toast.vacuum_truncate=true); +-- Multiple relations should use their options in isolation. 
+VACUUM vac_tab_on_toast_off, vac_tab_off_toast_on; + +-- Check "auto" case of index_cleanup and "truncate" controlled by +-- its GUC. +CREATE TABLE vac_tab_auto(i int, j text) WITH + (autovacuum_enabled=false, + vacuum_index_cleanup=auto, toast.vacuum_index_cleanup=auto); +SET vacuum_truncate = false; +VACUUM vac_tab_auto; +SET vacuum_truncate = true; +VACUUM vac_tab_auto; +RESET vacuum_truncate; + +DROP TABLE vac_tab_auto; +DROP TABLE vac_tab_on_toast_off; +DROP TABLE vac_tab_off_toast_on; + +-- Cleanup +SELECT injection_points_detach('vacuum-index-cleanup-auto'); +SELECT injection_points_detach('vacuum-index-cleanup-disabled'); +SELECT injection_points_detach('vacuum-index-cleanup-enabled'); +SELECT injection_points_detach('vacuum-truncate-auto'); +SELECT injection_points_detach('vacuum-truncate-disabled'); +SELECT injection_points_detach('vacuum-truncate-enabled'); +DROP EXTENSION injection_points; From 69e5cdc47fa1fbd97c8b8c7abe6fb0b9a9822acb Mon Sep 17 00:00:00 2001 From: Amit Kapila Date: Wed, 25 Jun 2025 10:25:15 +0530 Subject: [PATCH 067/181] Doc: Improve documentation of stream abort. Protocol v4 introduces parallel streaming, which allows Stream Abort messages to include additional abort information such as LSN and timestamp. However, the current documentation only states, "This field is available since protocol version 4," which may misleadingly suggest that the fields are always present when using protocol v4. This patch clarifies that the abort LSN and timestamp are included only when parallel streaming is enabled, even under protocol v4. Author: Anthonin Bonnefoy Reviewed-by: Amit Kapila Backpatch-through: 16, where it was introduced Discussion: https://postgr.es/m/CAO6_XqoKteQR1AnaR8iPcegbBE+HkAc2-g12rxN04yOt4-2ORg@mail.gmail.com --- doc/src/sgml/protocol.sgml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/doc/src/sgml/protocol.sgml b/doc/src/sgml/protocol.sgml index 137ffc8d0b7eb..82fe3f93761dc 100644 --- a/doc/src/sgml/protocol.sgml +++ b/doc/src/sgml/protocol.sgml @@ -7292,8 +7292,8 @@ psql "dbname=postgres replication=database" -c "IDENTIFY_SYSTEM;" Int64 (XLogRecPtr) - The LSN of the abort. This field is available since protocol version - 4. + The LSN of the abort operation, present only when streaming is set to parallel. + This field is available since protocol version 4. @@ -7302,9 +7302,9 @@ psql "dbname=postgres replication=database" -c "IDENTIFY_SYSTEM;" Int64 (TimestampTz) - Abort timestamp of the transaction. The value is in number - of microseconds since PostgreSQL epoch (2000-01-01). This field is - available since protocol version 4. + Abort timestamp of the transaction, present only when streaming is set to + parallel. The value is in number of microseconds since PostgreSQL epoch (2000-01-01). + This field is available since protocol version 4. From 0cd69b3d7ef357f2b43258de5831c4de0bd51dec Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Wed, 25 Jun 2025 09:55:04 +0200 Subject: [PATCH 068/181] Restrict virtual columns to use built-in functions and types Just like selecting from a view is exploitable (CVE-2024-7348), selecting from a table with virtual generated columns is exploitable. Users who are concerned about this can avoid selecting from views, but telling them to avoid selecting from tables is less practical. To address this, this changes it so that generation expressions for virtual generated columns are restricted to using built-in functions and types, and the columns are restricted to having a built-in type. 
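Concretely, with this restriction in place, a definition like the following is
rejected (a minimal sketch based on the regression tests updated below; the
object names are arbitrary):

    CREATE FUNCTION gf1(a int) RETURNS int
        AS $$ SELECT a * 3 $$ IMMUTABLE LANGUAGE SQL;
    CREATE TABLE gtest12 (a int PRIMARY KEY, b int,
        c int GENERATED ALWAYS AS (gf1(b)) VIRTUAL);
    -- ERROR:  generation expression uses user-defined function
    -- DETAIL:  Virtual generated columns that make use of user-defined
    --          functions are not yet supported.

The equivalent STORED column remains allowed.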
We assume that built-in functions and types cannot be exploited for this purpose. In the future, this could be expanded by some new mechanism to declare other functions and types as safe or trusted for this purpose, but that is to be designed. (An alternative approach might have been to expand the restrict_nonsystem_relation_kind GUC to handle this, like the fix for CVE-2024-7348. But that is kind of an ugly approach. That fix had to fit in the constraints of fixing an ancient vulnerability in all branches. Since virtual generated columns are new, we're free from the constraints of the past, and we can and should use cleaner options.) Reported-by: Feike Steenbergen Reviewed-by: jian he Discussion: https://www.postgresql.org/message-id/flat/CAK_s-G2Q7de8Q0qOYUR%3D_CTB5FzzVBm5iZjOp%2BmeVWpMpmfO0w%40mail.gmail.com --- doc/src/sgml/ddl.sgml | 10 ++ doc/src/sgml/ref/create_table.sgml | 9 ++ src/backend/catalog/heap.c | 93 +++++++++++++++++++ src/include/catalog/catversion.h | 2 +- .../regress/expected/generated_virtual.out | 43 +++++---- src/test/regress/expected/publication.out | 12 ++- src/test/regress/sql/generated_virtual.sql | 24 +++-- src/test/regress/sql/publication.sql | 5 +- 8 files changed, 163 insertions(+), 35 deletions(-) diff --git a/doc/src/sgml/ddl.sgml b/doc/src/sgml/ddl.sgml index 96936bcd3ae3e..65bc070d2e5fa 100644 --- a/doc/src/sgml/ddl.sgml +++ b/doc/src/sgml/ddl.sgml @@ -419,6 +419,16 @@ CREATE TABLE people ( tableoid. + + + A virtual generated column cannot have a user-defined type, and the + generation expression of a virtual generated column must not reference + user-defined functions or types, that is, it can only use built-in + functions or types. This applies also indirectly, such as for functions + or types that underlie operators or casts. (This restriction does not + exist for stored generated columns.) + + A generated column cannot have a column default or an identity definition. diff --git a/doc/src/sgml/ref/create_table.sgml b/doc/src/sgml/ref/create_table.sgml index 9d92e0a35516f..dc000e913c143 100644 --- a/doc/src/sgml/ref/create_table.sgml +++ b/doc/src/sgml/ref/create_table.sgml @@ -924,6 +924,15 @@ WITH ( MODULUS numeric_literal, REM not other generated columns. Any functions and operators used must be immutable. References to other tables are not allowed. + + + A virtual generated column cannot have a user-defined type, and the + generation expression of a virtual generated column must not reference + user-defined functions or types, that is, it can only use built-in + functions or types. This applies also indirectly, such as for functions + or types that underlie operators or casts. (This restriction does not + exist for stored generated columns.) + diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c index 10f43c51c5af0..649d3966e8e21 100644 --- a/src/backend/catalog/heap.c +++ b/src/backend/catalog/heap.c @@ -664,6 +664,15 @@ CheckAttributeType(const char *attname, flags); } + /* + * For consistency with check_virtual_generated_security(). + */ + if ((flags & CHKATYPE_IS_VIRTUAL) && atttypid >= FirstUnpinnedObjectId) + ereport(ERROR, + errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("virtual generated column \"%s\" cannot have a user-defined type", attname), + errdetail("Virtual generated columns that make use of user-defined types are not yet supported.")); + /* * This might not be strictly invalid per SQL standard, but it is pretty * useless, and it cannot be dumped, so we must disallow it. 
@@ -3215,6 +3224,86 @@ check_nested_generated(ParseState *pstate, Node *node)
 	check_nested_generated_walker(node, pstate);
 }
 
+/*
+ * Check security of virtual generated column expression.
+ *
+ * Just like selecting from a view is exploitable (CVE-2024-7348), selecting
+ * from a table with virtual generated columns is exploitable.  Users who are
+ * concerned about this can avoid selecting from views, but telling them to
+ * avoid selecting from tables is less practical.
+ *
+ * To address this, generation expressions for virtual generated columns are
+ * restricted to using built-in functions and types.  We assume that built-in
+ * functions and types cannot be exploited for this purpose.  Note that
+ * overall security also requires that all functions in use are immutable.
+ * (For example, there are some built-in non-immutable functions that can run
+ * arbitrary SQL.)  The immutability is checked elsewhere, since that is a
+ * property that needs to hold independent of security considerations.
+ *
+ * In the future, this could be expanded by some new mechanism to declare
+ * other functions and types as safe or trusted for this purpose, but that is
+ * to be designed.
+ */
+
+/*
+ * Callback for check_functions_in_node() that determines whether a function
+ * is user-defined.
+ */
+static bool
+contains_user_functions_checker(Oid func_id, void *context)
+{
+	return (func_id >= FirstUnpinnedObjectId);
+}
+
+/*
+ * Checks for all the things we don't want in the generation expressions of
+ * virtual generated columns for security reasons.  Errors out if it finds
+ * one.
+ */
+static bool
+check_virtual_generated_security_walker(Node *node, void *context)
+{
+	ParseState *pstate = context;
+
+	if (node == NULL)
+		return false;
+
+	if (!IsA(node, List))
+	{
+		if (check_functions_in_node(node, contains_user_functions_checker, NULL))
+			ereport(ERROR,
+					errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+					errmsg("generation expression uses user-defined function"),
+					errdetail("Virtual generated columns that make use of user-defined functions are not yet supported."),
+					parser_errposition(pstate, exprLocation(node)));
+
+		/*
+		 * check_functions_in_node() doesn't check some node types (see
+		 * comment there).  We handle CoerceToDomain and MinMaxExpr by
+		 * checking for built-in types.  The other listed node types cannot
+		 * call user-definable SQL-visible functions.
+		 *
+		 * We furthermore need this type check to handle built-in, immutable
+		 * polymorphic functions such as array_eq().
+		 */
+		if (exprType(node) >= FirstUnpinnedObjectId)
+			ereport(ERROR,
+					errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+					errmsg("generation expression uses user-defined type"),
+					errdetail("Virtual generated columns that make use of user-defined types are not yet supported."),
+					parser_errposition(pstate, exprLocation(node)));
+	}
+
+	return expression_tree_walker(node, check_virtual_generated_security_walker, context);
+}
+
+static void
+check_virtual_generated_security(ParseState *pstate, Node *node)
+{
+	check_virtual_generated_security_walker(node, pstate);
+}
+
 /*
  * Take a raw default and convert it to a cooked format ready for
  * storage.
@@ -3254,6 +3343,10 @@ cookDefault(ParseState *pstate, ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), errmsg("generation expression is not immutable"))); + + /* Check security of expressions for virtual generated column */ + if (attgenerated == ATTRIBUTE_GENERATED_VIRTUAL) + check_virtual_generated_security(pstate, expr); } else { diff --git a/src/include/catalog/catversion.h b/src/include/catalog/catversion.h index 67fbe9c929276..d63db42ed7b37 100644 --- a/src/include/catalog/catversion.h +++ b/src/include/catalog/catversion.h @@ -57,6 +57,6 @@ */ /* yyyymmddN */ -#define CATALOG_VERSION_NO 202506121 +#define CATALOG_VERSION_NO 202506251 #endif diff --git a/src/test/regress/expected/generated_virtual.out b/src/test/regress/expected/generated_virtual.out index ab35a77477445..47cbd3a82fe2d 100644 --- a/src/test/regress/expected/generated_virtual.out +++ b/src/test/regress/expected/generated_virtual.out @@ -553,15 +553,11 @@ CREATE TABLE gtest4 ( a int, b double_int GENERATED ALWAYS AS ((a * 2, a * 3)) VIRTUAL ); -INSERT INTO gtest4 VALUES (1), (6); -SELECT * FROM gtest4; - a | b ----+--------- - 1 | (2,3) - 6 | (12,18) -(2 rows) - -DROP TABLE gtest4; +ERROR: virtual generated column "b" cannot have a user-defined type +DETAIL: Virtual generated columns that make use of user-defined types are not yet supported. +--INSERT INTO gtest4 VALUES (1), (6); +--SELECT * FROM gtest4; +--DROP TABLE gtest4; DROP TYPE double_int; -- using tableoid is allowed CREATE TABLE gtest_tableoid ( @@ -604,9 +600,13 @@ INSERT INTO gtest11 VALUES (1, 10), (2, 20); GRANT SELECT (a, c) ON gtest11 TO regress_user11; CREATE FUNCTION gf1(a int) RETURNS int AS $$ SELECT a * 3 $$ IMMUTABLE LANGUAGE SQL; REVOKE ALL ON FUNCTION gf1(int) FROM PUBLIC; -CREATE TABLE gtest12 (a int PRIMARY KEY, b int, c int GENERATED ALWAYS AS (gf1(b)) VIRTUAL); -INSERT INTO gtest12 VALUES (1, 10), (2, 20); -GRANT SELECT (a, c), INSERT ON gtest12 TO regress_user11; +CREATE TABLE gtest12 (a int PRIMARY KEY, b int, c int GENERATED ALWAYS AS (gf1(b)) VIRTUAL); -- fails, user-defined function +ERROR: generation expression uses user-defined function +LINE 1: ...nt PRIMARY KEY, b int, c int GENERATED ALWAYS AS (gf1(b)) VI... + ^ +DETAIL: Virtual generated columns that make use of user-defined functions are not yet supported. +--INSERT INTO gtest12 VALUES (1, 10), (2, 20); +--GRANT SELECT (a, c), INSERT ON gtest12 TO regress_user11; SET ROLE regress_user11; SELECT a, b FROM gtest11; -- not allowed ERROR: permission denied for table gtest11 @@ -619,15 +619,12 @@ SELECT a, c FROM gtest11; -- allowed SELECT gf1(10); -- not allowed ERROR: permission denied for function gf1 -INSERT INTO gtest12 VALUES (3, 30), (4, 40); -- allowed (does not actually invoke the function) -SELECT a, c FROM gtest12; -- currently not allowed because of function permissions, should arguably be allowed -ERROR: permission denied for function gf1 +--INSERT INTO gtest12 VALUES (3, 30), (4, 40); -- allowed (does not actually invoke the function) +--SELECT a, c FROM gtest12; -- currently not allowed because of function permissions, should arguably be allowed RESET ROLE; -DROP FUNCTION gf1(int); -- fail -ERROR: cannot drop function gf1(integer) because other objects depend on it -DETAIL: column c of table gtest12 depends on function gf1(integer) -HINT: Use DROP ... CASCADE to drop the dependent objects too. 
-DROP TABLE gtest11, gtest12; +--DROP FUNCTION gf1(int); -- fail +DROP TABLE gtest11; +--DROP TABLE gtest12; DROP FUNCTION gf1(int); DROP USER regress_user11; -- check constraints @@ -811,6 +808,12 @@ CREATE TABLE gtest24nn (a int, b gtestdomainnn GENERATED ALWAYS AS (a * 2) VIRTU ERROR: virtual generated column "b" cannot have a domain type --INSERT INTO gtest24nn (a) VALUES (4); -- ok --INSERT INTO gtest24nn (a) VALUES (NULL); -- error +-- using user-defined type not yet supported +CREATE TABLE gtest24xxx (a gtestdomain1, b gtestdomain1, c int GENERATED ALWAYS AS (greatest(a, b)) VIRTUAL); -- error +ERROR: generation expression uses user-defined type +LINE 1: ...main1, b gtestdomain1, c int GENERATED ALWAYS AS (greatest(a... + ^ +DETAIL: Virtual generated columns that make use of user-defined types are not yet supported. -- typed tables (currently not supported) CREATE TYPE gtest_type AS (f1 integer, f2 text, f3 bigint); CREATE TABLE gtest28 OF gtest_type (f1 WITH OPTIONS GENERATED ALWAYS AS (f2 *2) VIRTUAL); diff --git a/src/test/regress/expected/publication.out b/src/test/regress/expected/publication.out index 4de96c04f9de4..f1025fc0f198d 100644 --- a/src/test/regress/expected/publication.out +++ b/src/test/regress/expected/publication.out @@ -524,10 +524,16 @@ Tables from schemas: "testpub_rf_schema2" -- fail - virtual generated column uses user-defined function +-- (Actually, this already fails at CREATE TABLE rather than at CREATE +-- PUBLICATION, but let's keep the test in case the former gets +-- relaxed sometime.) CREATE TABLE testpub_rf_tbl6 (id int PRIMARY KEY, x int, y int GENERATED ALWAYS AS (x * testpub_rf_func2()) VIRTUAL); +ERROR: generation expression uses user-defined function +LINE 1: ...RIMARY KEY, x int, y int GENERATED ALWAYS AS (x * testpub_rf... + ^ +DETAIL: Virtual generated columns that make use of user-defined functions are not yet supported. CREATE PUBLICATION testpub7 FOR TABLE testpub_rf_tbl6 WHERE (y > 100); -ERROR: invalid publication WHERE expression -DETAIL: User-defined or built-in mutable functions are not allowed. 
+ERROR: relation "testpub_rf_tbl6" does not exist -- test that SET EXPRESSION is rejected, because it could affect a row filter SET client_min_messages = 'ERROR'; CREATE TABLE testpub_rf_tbl7 (id int PRIMARY KEY, x int, y int GENERATED ALWAYS AS (x * 111) VIRTUAL); @@ -541,7 +547,7 @@ DROP TABLE testpub_rf_tbl2; DROP TABLE testpub_rf_tbl3; DROP TABLE testpub_rf_tbl4; DROP TABLE testpub_rf_tbl5; -DROP TABLE testpub_rf_tbl6; +--DROP TABLE testpub_rf_tbl6; DROP TABLE testpub_rf_schema1.testpub_rf_tbl5; DROP TABLE testpub_rf_schema2.testpub_rf_tbl6; DROP SCHEMA testpub_rf_schema1; diff --git a/src/test/regress/sql/generated_virtual.sql b/src/test/regress/sql/generated_virtual.sql index 9011c9d26745f..c731d12376341 100644 --- a/src/test/regress/sql/generated_virtual.sql +++ b/src/test/regress/sql/generated_virtual.sql @@ -253,10 +253,10 @@ CREATE TABLE gtest4 ( a int, b double_int GENERATED ALWAYS AS ((a * 2, a * 3)) VIRTUAL ); -INSERT INTO gtest4 VALUES (1), (6); -SELECT * FROM gtest4; +--INSERT INTO gtest4 VALUES (1), (6); +--SELECT * FROM gtest4; -DROP TABLE gtest4; +--DROP TABLE gtest4; DROP TYPE double_int; -- using tableoid is allowed @@ -290,20 +290,21 @@ GRANT SELECT (a, c) ON gtest11 TO regress_user11; CREATE FUNCTION gf1(a int) RETURNS int AS $$ SELECT a * 3 $$ IMMUTABLE LANGUAGE SQL; REVOKE ALL ON FUNCTION gf1(int) FROM PUBLIC; -CREATE TABLE gtest12 (a int PRIMARY KEY, b int, c int GENERATED ALWAYS AS (gf1(b)) VIRTUAL); -INSERT INTO gtest12 VALUES (1, 10), (2, 20); -GRANT SELECT (a, c), INSERT ON gtest12 TO regress_user11; +CREATE TABLE gtest12 (a int PRIMARY KEY, b int, c int GENERATED ALWAYS AS (gf1(b)) VIRTUAL); -- fails, user-defined function +--INSERT INTO gtest12 VALUES (1, 10), (2, 20); +--GRANT SELECT (a, c), INSERT ON gtest12 TO regress_user11; SET ROLE regress_user11; SELECT a, b FROM gtest11; -- not allowed SELECT a, c FROM gtest11; -- allowed SELECT gf1(10); -- not allowed -INSERT INTO gtest12 VALUES (3, 30), (4, 40); -- allowed (does not actually invoke the function) -SELECT a, c FROM gtest12; -- currently not allowed because of function permissions, should arguably be allowed +--INSERT INTO gtest12 VALUES (3, 30), (4, 40); -- allowed (does not actually invoke the function) +--SELECT a, c FROM gtest12; -- currently not allowed because of function permissions, should arguably be allowed RESET ROLE; -DROP FUNCTION gf1(int); -- fail -DROP TABLE gtest11, gtest12; +--DROP FUNCTION gf1(int); -- fail +DROP TABLE gtest11; +--DROP TABLE gtest12; DROP FUNCTION gf1(int); DROP USER regress_user11; @@ -463,6 +464,9 @@ CREATE TABLE gtest24nn (a int, b gtestdomainnn GENERATED ALWAYS AS (a * 2) VIRTU --INSERT INTO gtest24nn (a) VALUES (4); -- ok --INSERT INTO gtest24nn (a) VALUES (NULL); -- error +-- using user-defined type not yet supported +CREATE TABLE gtest24xxx (a gtestdomain1, b gtestdomain1, c int GENERATED ALWAYS AS (greatest(a, b)) VIRTUAL); -- error + -- typed tables (currently not supported) CREATE TYPE gtest_type AS (f1 integer, f2 text, f3 bigint); CREATE TABLE gtest28 OF gtest_type (f1 WITH OPTIONS GENERATED ALWAYS AS (f2 *2) VIRTUAL); diff --git a/src/test/regress/sql/publication.sql b/src/test/regress/sql/publication.sql index 68001de4000fd..c9e309190dfa6 100644 --- a/src/test/regress/sql/publication.sql +++ b/src/test/regress/sql/publication.sql @@ -262,6 +262,9 @@ ALTER PUBLICATION testpub6 SET TABLES IN SCHEMA testpub_rf_schema2, TABLE testpu RESET client_min_messages; \dRp+ testpub6 -- fail - virtual generated column uses user-defined function +-- (Actually, this 
already fails at CREATE TABLE rather than at CREATE +-- PUBLICATION, but let's keep the test in case the former gets +-- relaxed sometime.) CREATE TABLE testpub_rf_tbl6 (id int PRIMARY KEY, x int, y int GENERATED ALWAYS AS (x * testpub_rf_func2()) VIRTUAL); CREATE PUBLICATION testpub7 FOR TABLE testpub_rf_tbl6 WHERE (y > 100); -- test that SET EXPRESSION is rejected, because it could affect a row filter @@ -276,7 +279,7 @@ DROP TABLE testpub_rf_tbl2; DROP TABLE testpub_rf_tbl3; DROP TABLE testpub_rf_tbl4; DROP TABLE testpub_rf_tbl5; -DROP TABLE testpub_rf_tbl6; +--DROP TABLE testpub_rf_tbl6; DROP TABLE testpub_rf_schema1.testpub_rf_tbl5; DROP TABLE testpub_rf_schema2.testpub_rf_tbl6; DROP SCHEMA testpub_rf_schema1; From 60dda7bbc45f30e2ba50ecf20dfb9d944a589a38 Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Wed, 25 Jun 2025 10:33:23 +0200 Subject: [PATCH 069/181] pg_createsubscriber: Rename option --remove to --clean After discussion, the name --remove was suboptimally chosen. --clean has more precedent in other PostgreSQL tools. Reviewed-by: Hayato Kuroda (Fujitsu) Discussion: https://www.postgresql.org/message-id/84be7ff3-2763-4c0f-ac1e-ca9862077f41@eisentraut.org --- doc/src/sgml/ref/pg_createsubscriber.sgml | 59 +++++++++---------- src/bin/pg_basebackup/pg_createsubscriber.c | 34 +++++------ .../t/040_pg_createsubscriber.pl | 6 +- 3 files changed, 49 insertions(+), 50 deletions(-) diff --git a/doc/src/sgml/ref/pg_createsubscriber.sgml b/doc/src/sgml/ref/pg_createsubscriber.sgml index 4b1d08d5f16da..bb9cc72576c4a 100644 --- a/doc/src/sgml/ref/pg_createsubscriber.sgml +++ b/doc/src/sgml/ref/pg_createsubscriber.sgml @@ -169,36 +169,6 @@ PostgreSQL documentation - - - - - - Remove all objects of the specified type from specified databases on the - target server. - - - - - - publications: - The FOR ALL TABLES publications established for this - subscriber are always removed; specifying this object type causes all - other publications replicated from the source server to be dropped as - well. - - - - - - The objects selected to be dropped are individually logged, including during - a . There is no opportunity to affect or stop the - dropping of the selected objects, so consider taking a backup of them - using pg_dump. - - - - @@ -259,6 +229,35 @@ PostgreSQL documentation + + + + + Drop all objects of the specified type from specified databases on the + target server. + + + + + + publications: + The FOR ALL TABLES publications established for this + subscriber are always dropped; specifying this object type causes all + other publications replicated from the source server to be dropped as + well. + + + + + + The objects selected to be dropped are individually logged, including during + a . There is no opportunity to affect or stop the + dropping of the selected objects, so consider taking a backup of them + using pg_dump. 
+ + + + diff --git a/src/bin/pg_basebackup/pg_createsubscriber.c b/src/bin/pg_basebackup/pg_createsubscriber.c index c43c0cbbba5a6..11f71c0380181 100644 --- a/src/bin/pg_basebackup/pg_createsubscriber.c +++ b/src/bin/pg_basebackup/pg_createsubscriber.c @@ -46,7 +46,7 @@ struct CreateSubscriberOptions SimpleStringList replslot_names; /* list of replication slot names */ int recovery_timeout; /* stop recovery after this time */ bool all_dbs; /* all option */ - SimpleStringList objecttypes_to_remove; /* list of object types to remove */ + SimpleStringList objecttypes_to_clean; /* list of object types to cleanup */ }; /* per-database publication/subscription info */ @@ -71,8 +71,8 @@ struct LogicalRepInfos { struct LogicalRepInfo *dbinfo; bool two_phase; /* enable-two-phase option */ - bits32 objecttypes_to_remove; /* flags indicating which object types - * to remove on subscriber */ + bits32 objecttypes_to_clean; /* flags indicating which object types + * to clean up on subscriber */ }; static void cleanup_objects_atexit(void); @@ -253,13 +253,13 @@ usage(void) printf(_(" -n, --dry-run dry run, just show what would be done\n")); printf(_(" -p, --subscriber-port=PORT subscriber port number (default %s)\n"), DEFAULT_SUB_PORT); printf(_(" -P, --publisher-server=CONNSTR publisher connection string\n")); - printf(_(" -R, --remove=OBJECTTYPE remove all objects of the specified type from specified\n" - " databases on the subscriber; accepts: \"%s\"\n"), "publications"); printf(_(" -s, --socketdir=DIR socket directory to use (default current dir.)\n")); printf(_(" -t, --recovery-timeout=SECS seconds to wait for recovery to end\n")); printf(_(" -T, --enable-two-phase enable two-phase commit for all subscriptions\n")); printf(_(" -U, --subscriber-username=NAME user name for subscriber connection\n")); printf(_(" -v, --verbose output verbose messages\n")); + printf(_(" --clean=OBJECTTYPE drop all objects of the specified type from specified\n" + " databases on the subscriber; accepts: \"%s\"\n"), "publications"); printf(_(" --config-file=FILENAME use specified main server configuration\n" " file when running target cluster\n")); printf(_(" --publication=NAME publication name\n")); @@ -1730,7 +1730,7 @@ static void check_and_drop_publications(PGconn *conn, struct LogicalRepInfo *dbinfo) { PGresult *res; - bool drop_all_pubs = dbinfos.objecttypes_to_remove & OBJECTTYPE_PUBLICATIONS; + bool drop_all_pubs = dbinfos.objecttypes_to_clean & OBJECTTYPE_PUBLICATIONS; Assert(conn != NULL); @@ -2026,7 +2026,6 @@ main(int argc, char **argv) {"dry-run", no_argument, NULL, 'n'}, {"subscriber-port", required_argument, NULL, 'p'}, {"publisher-server", required_argument, NULL, 'P'}, - {"remove", required_argument, NULL, 'R'}, {"socketdir", required_argument, NULL, 's'}, {"recovery-timeout", required_argument, NULL, 't'}, {"enable-two-phase", no_argument, NULL, 'T'}, @@ -2038,6 +2037,7 @@ main(int argc, char **argv) {"publication", required_argument, NULL, 2}, {"replication-slot", required_argument, NULL, 3}, {"subscription", required_argument, NULL, 4}, + {"clean", required_argument, NULL, 5}, {NULL, 0, NULL, 0} }; @@ -2109,7 +2109,7 @@ main(int argc, char **argv) get_restricted_token(); - while ((c = getopt_long(argc, argv, "ad:D:np:P:R:s:t:TU:v", + while ((c = getopt_long(argc, argv, "ad:D:np:P:s:t:TU:v", long_options, &option_index)) != -1) { switch (c) @@ -2139,12 +2139,6 @@ main(int argc, char **argv) case 'P': opt.pub_conninfo_str = pg_strdup(optarg); break; - case 'R': - if 
(!simple_string_list_member(&opt.objecttypes_to_remove, optarg)) - simple_string_list_append(&opt.objecttypes_to_remove, optarg); - else - pg_fatal("object type \"%s\" specified more than once for -R/--remove", optarg); - break; case 's': opt.socket_dir = pg_strdup(optarg); canonicalize_path(opt.socket_dir); @@ -2191,6 +2185,12 @@ main(int argc, char **argv) else pg_fatal("subscription \"%s\" specified more than once for --subscription", optarg); break; + case 5: + if (!simple_string_list_member(&opt.objecttypes_to_clean, optarg)) + simple_string_list_append(&opt.objecttypes_to_clean, optarg); + else + pg_fatal("object type \"%s\" specified more than once for --clean", optarg); + break; default: /* getopt_long already emitted a complaint */ pg_log_error_hint("Try \"%s --help\" for more information.", progname); @@ -2334,13 +2334,13 @@ main(int argc, char **argv) } /* Verify the object types specified for removal from the subscriber */ - for (SimpleStringListCell *cell = opt.objecttypes_to_remove.head; cell; cell = cell->next) + for (SimpleStringListCell *cell = opt.objecttypes_to_clean.head; cell; cell = cell->next) { if (pg_strcasecmp(cell->val, "publications") == 0) - dbinfos.objecttypes_to_remove |= OBJECTTYPE_PUBLICATIONS; + dbinfos.objecttypes_to_clean |= OBJECTTYPE_PUBLICATIONS; else { - pg_log_error("invalid object type \"%s\" specified for -R/--remove", cell->val); + pg_log_error("invalid object type \"%s\" specified for --clean", cell->val); pg_log_error_hint("The valid value is: \"%s\"", "publications"); exit(1); } diff --git a/src/bin/pg_basebackup/t/040_pg_createsubscriber.pl b/src/bin/pg_basebackup/t/040_pg_createsubscriber.pl index df4924023fdf2..229fef5b3b52b 100644 --- a/src/bin/pg_basebackup/t/040_pg_createsubscriber.pl +++ b/src/bin/pg_basebackup/t/040_pg_createsubscriber.pl @@ -331,7 +331,7 @@ sub generate_db $node_p->wait_for_replay_catchup($node_s); # Create user-defined publications, wait for streaming replication to sync them -# to the standby, then verify that '--remove' +# to the standby, then verify that '--clean' # removes them. $node_p->safe_psql( $db1, qq( @@ -446,7 +446,7 @@ sub generate_db # Run pg_createsubscriber on node S. --verbose is used twice # to show more information. 
# In passing, also test the --enable-two-phase option and -# --remove option +# --clean option command_ok( [ 'pg_createsubscriber', @@ -463,7 +463,7 @@ sub generate_db '--database' => $db1, '--database' => $db2, '--enable-two-phase', - '--remove' => 'publications', + '--clean' => 'publications', ], 'run pg_createsubscriber on node S'); From 62a47aea1d8d8ea36e63fe6dd3d9891452a3f968 Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Wed, 25 Jun 2025 12:44:03 +0200 Subject: [PATCH 070/181] doc: Some copy-editing around constraint validation and enforcement Author: Robert Treat Reviewed-by: jian he Discussion: https://www.postgresql.org/message-id/flat/CACJufxFo4yTwzbSZrP%2BzQiR6_M00skoZMFaUnNJCdY6he%3DuQfA%40mail.gmail.com --- doc/src/sgml/ref/alter_table.sgml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/doc/src/sgml/ref/alter_table.sgml b/doc/src/sgml/ref/alter_table.sgml index d63f3a621acc6..d16969916835d 100644 --- a/doc/src/sgml/ref/alter_table.sgml +++ b/doc/src/sgml/ref/alter_table.sgml @@ -460,8 +460,8 @@ WITH ( MODULUS numeric_literal, REM This form adds a new constraint to a table using the same constraint syntax as CREATE TABLE, plus the option NOT - VALID, which is currently only allowed for foreign key, - CHECK constraints and not-null constraints. + VALID, which is currently only allowed for foreign-key, + CHECK, and not-null constraints. @@ -469,7 +469,7 @@ WITH ( MODULUS numeric_literal, REM existing rows in the table satisfy the new constraint. But if the NOT VALID option is used, this potentially-lengthy scan is skipped. The constraint will still be - enforced against subsequent inserts or updates (that is, they'll fail + applied against subsequent inserts or updates (that is, they'll fail unless there is a matching row in the referenced table, in the case of foreign keys, or they'll fail unless the new row matches the specified check condition). But the @@ -591,7 +591,7 @@ WITH ( MODULUS numeric_literal, REM This form validates a foreign key, check, or not-null constraint that was previously created as NOT VALID, by scanning the table to ensure there are no rows for which the constraint is not - satisfied. If the constraint is not enforced, an error is thrown. + satisfied. If the constraint was set to NOT ENFORCED, an error is thrown. Nothing happens if the constraint is already marked valid. (See below for an explanation of the usefulness of this command.) @@ -1466,11 +1466,11 @@ WITH ( MODULUS numeric_literal, REM - Adding an enforced CHECK or NOT NULL + Adding a CHECK or NOT NULL constraint requires scanning the table to verify that existing rows meet the constraint, but does not require a table rewrite. If a CHECK - constraint is added as NOT ENFORCED, the validation will - not be performed. + constraint is added as NOT ENFORCED, no verification will + be performed. @@ -1485,7 +1485,7 @@ WITH ( MODULUS numeric_literal, REM - Scanning a large table to verify a new foreign key or check constraint + Scanning a large table to verify new foreign-key, check, or not-null constraints can take a long time, and other updates to the table are locked out until the ALTER TABLE ADD CONSTRAINT command is committed. 
The main purpose of the NOT VALID From 5069fef1cfae271ca62e254b16dc831145bc5a4f Mon Sep 17 00:00:00 2001 From: Richard Guo Date: Thu, 26 Jun 2025 12:17:12 +0900 Subject: [PATCH 071/181] Expand virtual generated columns for ALTER COLUMN TYPE For the subcommand ALTER COLUMN TYPE of the ALTER TABLE command, the USING expression may reference virtual generated columns. These columns must be expanded before the expression is fed through expression_planner and the expression-execution machinery. Failing to do so can result in incorrect rewrite decisions, and can also lead to "ERROR: unexpected virtual generated column reference". Reported-by: Alexander Lakhin Reviewed-by: jian he Discussion: https://postgr.es/m/b5f96b24-ccac-47fd-9e20-14681b894f36@gmail.com --- src/backend/commands/tablecmds.c | 3 ++ .../regress/expected/generated_virtual.out | 36 ++++++++++--------- src/test/regress/sql/generated_virtual.sql | 10 ++++-- 3 files changed, 30 insertions(+), 19 deletions(-) diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c index 074ddb6b9cd17..1c3ad74e7b9e9 100644 --- a/src/backend/commands/tablecmds.c +++ b/src/backend/commands/tablecmds.c @@ -14484,6 +14484,9 @@ ATPrepAlterColumnType(List **wqueue, /* Fix collations after all else */ assign_expr_collations(pstate, transform); + /* Expand virtual generated columns in the expr. */ + transform = expand_generated_columns_in_expr(transform, rel, 1); + /* Plan the expr now so we can accurately assess the need to rewrite. */ transform = (Node *) expression_planner((Expr *) transform); diff --git a/src/test/regress/expected/generated_virtual.out b/src/test/regress/expected/generated_virtual.out index 47cbd3a82fe2d..46713f06797e5 100644 --- a/src/test/regress/expected/generated_virtual.out +++ b/src/test/regress/expected/generated_virtual.out @@ -1479,7 +1479,8 @@ create table gtest32 ( a int primary key, b int generated always as (a * 2), c int generated always as (10 + 10), - d int generated always as (coalesce(a, 100)) + d int generated always as (coalesce(a, 100)), + e int ); insert into gtest32 values (1), (2); analyze gtest32; @@ -1563,41 +1564,44 @@ select t2.* from gtest32 t1 left join gtest32 t2 on false; QUERY PLAN ------------------------------------------------------ Nested Loop Left Join - Output: a, (a * 2), (20), (COALESCE(a, 100)) + Output: a, (a * 2), (20), (COALESCE(a, 100)), e Join Filter: false -> Seq Scan on generated_virtual_tests.gtest32 t1 - Output: t1.a, t1.b, t1.c, t1.d + Output: t1.a, t1.b, t1.c, t1.d, t1.e -> Result - Output: a, 20, COALESCE(a, 100) + Output: a, e, 20, COALESCE(a, 100) One-Time Filter: false (8 rows) select t2.* from gtest32 t1 left join gtest32 t2 on false; - a | b | c | d ----+---+---+--- - | | | - | | | + a | b | c | d | e +---+---+---+---+--- + | | | | + | | | | (2 rows) explain (verbose, costs off) -select * from gtest32 t group by grouping sets (a, b, c, d) having c = 20; +select * from gtest32 t group by grouping sets (a, b, c, d, e) having c = 20; QUERY PLAN ----------------------------------------------------- HashAggregate - Output: a, ((a * 2)), (20), (COALESCE(a, 100)) + Output: a, ((a * 2)), (20), (COALESCE(a, 100)), e Hash Key: t.a Hash Key: (t.a * 2) Hash Key: 20 Hash Key: COALESCE(t.a, 100) + Hash Key: t.e Filter: ((20) = 20) -> Seq Scan on generated_virtual_tests.gtest32 t - Output: a, (a * 2), 20, COALESCE(a, 100) -(9 rows) + Output: a, (a * 2), 20, COALESCE(a, 100), e +(10 rows) -select * from gtest32 t group by grouping sets (a, b, c, d) having c = 20; - a | b 
| c | d ----+---+----+--- - | | 20 | +select * from gtest32 t group by grouping sets (a, b, c, d, e) having c = 20; + a | b | c | d | e +---+---+----+---+--- + | | 20 | | (1 row) +-- Ensure that the virtual generated columns in ALTER COLUMN TYPE USING expression are expanded +alter table gtest32 alter column e type bigint using b; drop table gtest32; diff --git a/src/test/regress/sql/generated_virtual.sql b/src/test/regress/sql/generated_virtual.sql index c731d12376341..6fa986515b9e3 100644 --- a/src/test/regress/sql/generated_virtual.sql +++ b/src/test/regress/sql/generated_virtual.sql @@ -797,7 +797,8 @@ create table gtest32 ( a int primary key, b int generated always as (a * 2), c int generated always as (10 + 10), - d int generated always as (coalesce(a, 100)) + d int generated always as (coalesce(a, 100)), + e int ); insert into gtest32 values (1), (2); @@ -838,7 +839,10 @@ select t2.* from gtest32 t1 left join gtest32 t2 on false; select t2.* from gtest32 t1 left join gtest32 t2 on false; explain (verbose, costs off) -select * from gtest32 t group by grouping sets (a, b, c, d) having c = 20; -select * from gtest32 t group by grouping sets (a, b, c, d) having c = 20; +select * from gtest32 t group by grouping sets (a, b, c, d, e) having c = 20; +select * from gtest32 t group by grouping sets (a, b, c, d, e) having c = 20; + +-- Ensure that the virtual generated columns in ALTER COLUMN TYPE USING expression are expanded +alter table gtest32 alter column e type bigint using b; drop table gtest32; From eca624c6591be14a1a03f0b2d374043cdf276431 Mon Sep 17 00:00:00 2001 From: Dean Rasheed Date: Thu, 26 Jun 2025 09:36:05 +0100 Subject: [PATCH 072/181] doc: Fix indentation of MERGE synopsis. The convention in the documentation for other SQL commands is to indent continuation lines and sub-clauses in the "Synopsis" section by 4 spaces, so do the same for MERGE. Author: Dean Rasheed Reviewed-by: Nathan Bossart Discussion: https://postgr.es/m/CAEZATCV+9tR9+WM-SCcdBEZ3x7WVxUpADD5jX9WeGX97z4LCGA@mail.gmail.com Backpatch-through: 15 --- doc/src/sgml/ref/merge.sgml | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/doc/src/sgml/ref/merge.sgml b/doc/src/sgml/ref/merge.sgml index ecbcd8345d874..e76ebd157e52a 100644 --- a/doc/src/sgml/ref/merge.sgml +++ b/doc/src/sgml/ref/merge.sgml @@ -23,37 +23,37 @@ PostgreSQL documentation [ WITH with_query [, ...] ] MERGE INTO [ ONLY ] target_table_name [ * ] [ [ AS ] target_alias ] -USING data_source ON join_condition -when_clause [...] -[ RETURNING [ WITH ( { OLD | NEW } AS output_alias [, ...] ) ] - { * | output_expression [ [ AS ] output_name ] } [, ...] ] + USING data_source ON join_condition + when_clause [...] + [ RETURNING [ WITH ( { OLD | NEW } AS output_alias [, ...] ) ] + { * | output_expression [ [ AS ] output_name ] } [, ...] 
] where data_source is: -{ [ ONLY ] source_table_name [ * ] | ( source_query ) } [ [ AS ] source_alias ] + { [ ONLY ] source_table_name [ * ] | ( source_query ) } [ [ AS ] source_alias ] and when_clause is: -{ WHEN MATCHED [ AND condition ] THEN { merge_update | merge_delete | DO NOTHING } | - WHEN NOT MATCHED BY SOURCE [ AND condition ] THEN { merge_update | merge_delete | DO NOTHING } | - WHEN NOT MATCHED [ BY TARGET ] [ AND condition ] THEN { merge_insert | DO NOTHING } } + { WHEN MATCHED [ AND condition ] THEN { merge_update | merge_delete | DO NOTHING } | + WHEN NOT MATCHED BY SOURCE [ AND condition ] THEN { merge_update | merge_delete | DO NOTHING } | + WHEN NOT MATCHED [ BY TARGET ] [ AND condition ] THEN { merge_insert | DO NOTHING } } and merge_insert is: -INSERT [( column_name [, ...] )] -[ OVERRIDING { SYSTEM | USER } VALUE ] -{ VALUES ( { expression | DEFAULT } [, ...] ) | DEFAULT VALUES } + INSERT [( column_name [, ...] )] + [ OVERRIDING { SYSTEM | USER } VALUE ] + { VALUES ( { expression | DEFAULT } [, ...] ) | DEFAULT VALUES } and merge_update is: -UPDATE SET { column_name = { expression | DEFAULT } | - ( column_name [, ...] ) = [ ROW ] ( { expression | DEFAULT } [, ...] ) | - ( column_name [, ...] ) = ( sub-SELECT ) - } [, ...] + UPDATE SET { column_name = { expression | DEFAULT } | + ( column_name [, ...] ) = [ ROW ] ( { expression | DEFAULT } [, ...] ) | + ( column_name [, ...] ) = ( sub-SELECT ) + } [, ...] and merge_delete is: -DELETE + DELETE From 3ba9639e39ed8cf2d9dd30f6b8a3b3e9fffb9a64 Mon Sep 17 00:00:00 2001 From: Dean Rasheed Date: Thu, 26 Jun 2025 10:13:00 +0100 Subject: [PATCH 073/181] doc: Updates for RETURNING OLD/NEW. Fix a couple of sentences in the documentation that were missed in commit 80feb727c8. Author: Dean Rasheed Reviewed-by: Robert Treat Discussion: https://postgr.es/m/CAEZATCUcqADJuapZSjPf2b6hFJ6AGOUwefRvh8Ht3UZoqqw69Q@mail.gmail.com --- doc/src/sgml/ref/merge.sgml | 7 ++++--- doc/src/sgml/ref/update.sgml | 3 ++- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/doc/src/sgml/ref/merge.sgml b/doc/src/sgml/ref/merge.sgml index e76ebd157e52a..c2e181066a4e1 100644 --- a/doc/src/sgml/ref/merge.sgml +++ b/doc/src/sgml/ref/merge.sgml @@ -106,10 +106,11 @@ MERGE INTO [ ONLY ] target_table_namemerge_action() - function can be computed. When an INSERT or + function can be computed. By default, when an INSERT or UPDATE action is performed, the new values of the target - table's columns are used. When a DELETE is performed, - the old values of the target table's columns are used. The syntax of the + table's columns are used, and when a DELETE is performed, + the old values of the target table's columns are used, but it is also + possible to explicitly request old and new values. The syntax of the RETURNING list is identical to that of the output list of SELECT. diff --git a/doc/src/sgml/ref/update.sgml b/doc/src/sgml/ref/update.sgml index 12ec5ba070939..40cca06394636 100644 --- a/doc/src/sgml/ref/update.sgml +++ b/doc/src/sgml/ref/update.sgml @@ -57,7 +57,8 @@ UPDATE [ ONLY ] table_name [ * ] [ to compute and return value(s) based on each row actually updated. Any expression using the table's columns, and/or columns of other tables mentioned in FROM, can be computed. - The new (post-update) values of the table's columns are used. + By default, the new (post-update) values of the table's columns are used, + but it is also possible to request the old (pre-update) values. 
The syntax of the RETURNING list is identical to that of the output list of SELECT. From 81ce602d48e8b9cbc3c3a7d5b9d9ddbea7789c02 Mon Sep 17 00:00:00 2001 From: Fujii Masao Date: Thu, 26 Jun 2025 20:25:34 +0900 Subject: [PATCH 074/181] Make CREATE TABLE LIKE copy comments on NOT NULL constraints when requested. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 14e87ffa5c5 introduced support for adding comments to NOT NULL constraints. However, CREATE TABLE LIKE INCLUDING COMMENTS did not copy these comments to the new table. This was an oversight in that commit. This commit corrects the behavior by ensuring that CREATE TABLE LIKE also copies the comments on NOT NULL constraints when INCLUDING COMMENTS is specified. Author: Jian He Co-authored-by: Álvaro Herrera Reviewed-by: Fujii Masao Discussion: https://postgr.es/m/127debef-e558-4784-9e24-0d5eaf91e2d1@oss.nttdata.com --- src/backend/parser/parse_utilcmd.c | 22 ++++++++++++++ .../regress/expected/create_table_like.out | 30 ++++++++++++++++--- src/test/regress/sql/create_table_like.sql | 16 ++++++++-- 3 files changed, 62 insertions(+), 6 deletions(-) diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c index 62015431fdf1a..afcf54169c3b3 100644 --- a/src/backend/parser/parse_utilcmd.c +++ b/src/backend/parser/parse_utilcmd.c @@ -1279,6 +1279,28 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla lst = RelationGetNotNullConstraints(RelationGetRelid(relation), false, true); cxt->nnconstraints = list_concat(cxt->nnconstraints, lst); + + /* Copy comments on not-null constraints */ + if (table_like_clause->options & CREATE_TABLE_LIKE_COMMENTS) + { + foreach_node(Constraint, nnconstr, lst) + { + if ((comment = GetComment(get_relation_constraint_oid(RelationGetRelid(relation), + nnconstr->conname, false), + ConstraintRelationId, + 0)) != NULL) + { + CommentStmt *stmt = makeNode(CommentStmt); + + stmt->objtype = OBJECT_TABCONSTRAINT; + stmt->object = (Node *) list_make3(makeString(cxt->relation->schemaname), + makeString(cxt->relation->relname), + makeString(nnconstr->conname)); + stmt->comment = comment; + cxt->alist = lappend(cxt->alist, stmt); + } + } + } } /* diff --git a/src/test/regress/expected/create_table_like.out b/src/test/regress/expected/create_table_like.out index bf34289e9842b..29a779c2e9072 100644 --- a/src/test/regress/expected/create_table_like.out +++ b/src/test/regress/expected/create_table_like.out @@ -332,9 +332,10 @@ COMMENT ON CONSTRAINT ctlt1_a_check ON ctlt1 IS 't1_a_check'; COMMENT ON INDEX ctlt1_pkey IS 'index pkey'; COMMENT ON INDEX ctlt1_b_key IS 'index b_key'; ALTER TABLE ctlt1 ALTER COLUMN a SET STORAGE MAIN; -CREATE TABLE ctlt2 (c text); +CREATE TABLE ctlt2 (c text NOT NULL); ALTER TABLE ctlt2 ALTER COLUMN c SET STORAGE EXTERNAL; COMMENT ON COLUMN ctlt2.c IS 'C'; +COMMENT ON CONSTRAINT ctlt2_c_not_null ON ctlt2 IS 't2_c_not_null'; CREATE TABLE ctlt3 (a text CHECK (length(a) < 5), c text CHECK (length(c) < 7)); ALTER TABLE ctlt3 ALTER COLUMN c SET STORAGE EXTERNAL; ALTER TABLE ctlt3 ALTER COLUMN a SET STORAGE MAIN; @@ -351,9 +352,10 @@ CREATE TABLE ctlt12_storage (LIKE ctlt1 INCLUDING STORAGE, LIKE ctlt2 INCLUDING --------+------+-----------+----------+---------+----------+--------------+------------- a | text | | not null | | main | | b | text | | | | extended | | - c | text | | | | external | | + c | text | | not null | | external | | Not-null constraints: "ctlt1_a_not_null" NOT NULL "a" + "ctlt2_c_not_null"
NOT NULL "c" CREATE TABLE ctlt12_comments (LIKE ctlt1 INCLUDING COMMENTS, LIKE ctlt2 INCLUDING COMMENTS); \d+ ctlt12_comments @@ -362,9 +364,16 @@ CREATE TABLE ctlt12_comments (LIKE ctlt1 INCLUDING COMMENTS, LIKE ctlt2 INCLUDIN --------+------+-----------+----------+---------+----------+--------------+------------- a | text | | not null | | extended | | A b | text | | | | extended | | B - c | text | | | | extended | | C + c | text | | not null | | extended | | C Not-null constraints: "ctlt1_a_not_null" NOT NULL "a" + "ctlt2_c_not_null" NOT NULL "c" + +SELECT conname, description FROM pg_description, pg_constraint c WHERE classoid = 'pg_constraint'::regclass AND objoid = c.oid AND c.conrelid = 'ctlt12_comments'::regclass; + conname | description +------------------+--------------- + ctlt2_c_not_null | t2_c_not_null +(1 row) CREATE TABLE ctlt1_inh (LIKE ctlt1 INCLUDING CONSTRAINTS INCLUDING COMMENTS) INHERITS (ctlt1); NOTICE: merging column "a" with inherited definition @@ -529,7 +538,9 @@ NOTICE: drop cascades to table inhe -- LIKE must respect NO INHERIT property of constraints CREATE TABLE noinh_con_copy (a int CHECK (a > 0) NO INHERIT, b int not null, c int not null no inherit); -CREATE TABLE noinh_con_copy1 (LIKE noinh_con_copy INCLUDING CONSTRAINTS); +COMMENT ON CONSTRAINT noinh_con_copy_b_not_null ON noinh_con_copy IS 'not null b'; +COMMENT ON CONSTRAINT noinh_con_copy_c_not_null ON noinh_con_copy IS 'not null c no inherit'; +CREATE TABLE noinh_con_copy1 (LIKE noinh_con_copy INCLUDING CONSTRAINTS INCLUDING COMMENTS); \d+ noinh_con_copy1 Table "public.noinh_con_copy1" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description @@ -543,6 +554,17 @@ Not-null constraints: "noinh_con_copy_b_not_null" NOT NULL "b" "noinh_con_copy_c_not_null" NOT NULL "c" NO INHERIT +SELECT conname, description +FROM pg_description, pg_constraint c +WHERE classoid = 'pg_constraint'::regclass +AND objoid = c.oid AND c.conrelid = 'noinh_con_copy1'::regclass +ORDER BY conname COLLATE "C"; + conname | description +---------------------------+----------------------- + noinh_con_copy_b_not_null | not null b + noinh_con_copy_c_not_null | not null c no inherit +(2 rows) + -- fail, as partitioned tables don't allow NO INHERIT constraints CREATE TABLE noinh_con_copy1_parted (LIKE noinh_con_copy INCLUDING ALL) PARTITION BY LIST (a); diff --git a/src/test/regress/sql/create_table_like.sql b/src/test/regress/sql/create_table_like.sql index 6e21722aaeb95..bf8702116a74b 100644 --- a/src/test/regress/sql/create_table_like.sql +++ b/src/test/regress/sql/create_table_like.sql @@ -143,9 +143,10 @@ COMMENT ON INDEX ctlt1_pkey IS 'index pkey'; COMMENT ON INDEX ctlt1_b_key IS 'index b_key'; ALTER TABLE ctlt1 ALTER COLUMN a SET STORAGE MAIN; -CREATE TABLE ctlt2 (c text); +CREATE TABLE ctlt2 (c text NOT NULL); ALTER TABLE ctlt2 ALTER COLUMN c SET STORAGE EXTERNAL; COMMENT ON COLUMN ctlt2.c IS 'C'; +COMMENT ON CONSTRAINT ctlt2_c_not_null ON ctlt2 IS 't2_c_not_null'; CREATE TABLE ctlt3 (a text CHECK (length(a) < 5), c text CHECK (length(c) < 7)); ALTER TABLE ctlt3 ALTER COLUMN c SET STORAGE EXTERNAL; @@ -162,6 +163,7 @@ CREATE TABLE ctlt12_storage (LIKE ctlt1 INCLUDING STORAGE, LIKE ctlt2 INCLUDING \d+ ctlt12_storage CREATE TABLE ctlt12_comments (LIKE ctlt1 INCLUDING COMMENTS, LIKE ctlt2 INCLUDING COMMENTS); \d+ ctlt12_comments +SELECT conname, description FROM pg_description, pg_constraint c WHERE classoid = 'pg_constraint'::regclass AND objoid = c.oid AND c.conrelid = 'ctlt12_comments'::regclass; CREATE 
TABLE ctlt1_inh (LIKE ctlt1 INCLUDING CONSTRAINTS INCLUDING COMMENTS) INHERITS (ctlt1); \d+ ctlt1_inh SELECT description FROM pg_description, pg_constraint c WHERE classoid = 'pg_constraint'::regclass AND objoid = c.oid AND c.conrelid = 'ctlt1_inh'::regclass; @@ -197,9 +199,19 @@ DROP TABLE ctlt1, ctlt2, ctlt3, ctlt4, ctlt12_storage, ctlt12_comments, ctlt1_in -- LIKE must respect NO INHERIT property of constraints CREATE TABLE noinh_con_copy (a int CHECK (a > 0) NO INHERIT, b int not null, c int not null no inherit); -CREATE TABLE noinh_con_copy1 (LIKE noinh_con_copy INCLUDING CONSTRAINTS); + +COMMENT ON CONSTRAINT noinh_con_copy_b_not_null ON noinh_con_copy IS 'not null b'; +COMMENT ON CONSTRAINT noinh_con_copy_c_not_null ON noinh_con_copy IS 'not null c no inherit'; + +CREATE TABLE noinh_con_copy1 (LIKE noinh_con_copy INCLUDING CONSTRAINTS INCLUDING COMMENTS); \d+ noinh_con_copy1 +SELECT conname, description +FROM pg_description, pg_constraint c +WHERE classoid = 'pg_constraint'::regclass +AND objoid = c.oid AND c.conrelid = 'noinh_con_copy1'::regclass +ORDER BY conname COLLATE "C"; + -- fail, as partitioned tables don't allow NO INHERIT constraints CREATE TABLE noinh_con_copy1_parted (LIKE noinh_con_copy INCLUDING ALL) PARTITION BY LIST (a); From 47fb87563bf3cca2244840241dde2eb93830559b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=81lvaro=20Herrera?= Date: Thu, 26 Jun 2025 18:24:12 +0200 Subject: [PATCH 075/181] pg_dump: include comments on valid not-null constraints, too MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We were failing to collect comments for not-null constraints that are dumped inline with the table definition (i.e., valid ones), because they aren't represented by a separately dumpable object. Fix by creating separate TocEntries for the comments. Co-Authored-By: Jian He Co-Authored-By: Álvaro Herrera Reported-By: Fujii Masao Reviewed-By: Fujii Masao Discussion: https://postgr.es/m/d50ff977-c728-4e9e-8488-fc2688e08754@oss.nttdata.com --- doc/src/sgml/ref/pg_dump.sgml | 2 +- src/bin/pg_dump/pg_dump.c | 92 ++++++++++++++++++++--- src/bin/pg_dump/pg_dump.h | 1 + src/bin/pg_dump/t/002_pg_dump.pl | 32 +++++++- src/test/regress/expected/constraints.out | 2 + src/test/regress/sql/constraints.sql | 3 + 6 files changed, 118 insertions(+), 14 deletions(-) diff --git a/doc/src/sgml/ref/pg_dump.sgml b/doc/src/sgml/ref/pg_dump.sgml index 0d9270116549a..edbb377a5eda1 100644 --- a/doc/src/sgml/ref/pg_dump.sgml +++ b/doc/src/sgml/ref/pg_dump.sgml @@ -1281,7 +1281,7 @@ PostgreSQL documentation materialized views, and foriegn tables. Post-data items include definitions of indexes, triggers, rules, statistics for indexes, and constraints other than validated check - constraints. + and not-null constraints. Pre-data items include all other data definition items.
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c index db944ec223071..1937997ea674d 100644 --- a/src/bin/pg_dump/pg_dump.c +++ b/src/bin/pg_dump/pg_dump.c @@ -350,7 +350,9 @@ static void buildMatViewRefreshDependencies(Archive *fout); static void getTableDataFKConstraints(void); static void determineNotNullFlags(Archive *fout, PGresult *res, int r, TableInfo *tbinfo, int j, - int i_notnull_name, int i_notnull_invalidoid, + int i_notnull_name, + int i_notnull_comment, + int i_notnull_invalidoid, int i_notnull_noinherit, int i_notnull_islocal, PQExpBuffer *invalidnotnulloids); @@ -9006,6 +9008,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables) int i_attalign; int i_attislocal; int i_notnull_name; + int i_notnull_comment; int i_notnull_noinherit; int i_notnull_islocal; int i_notnull_invalidoid; @@ -9089,7 +9092,8 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables) /* * Find out any NOT NULL markings for each column. In 18 and up we read - * pg_constraint to obtain the constraint name. notnull_noinherit is set + * pg_constraint to obtain the constraint name, and for valid constraints + * also pg_description to obtain its comment. notnull_noinherit is set * according to the NO INHERIT property. For versions prior to 18, we * store an empty string as the name when a constraint is marked as * attnotnull (this cues dumpTableSchema to print the NOT NULL clause @@ -9097,7 +9101,8 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables) * * For invalid constraints, we need to store their OIDs for processing * elsewhere, so we bring the pg_constraint.oid value when the constraint - * is invalid, and NULL otherwise. + * is invalid, and NULL otherwise. Their comments are handled not here + * but by collectComments, because they're their own dumpable object. * * We track in notnull_islocal whether the constraint was defined directly * in this table or via an ancestor, for binary upgrade. flagInhAttrs @@ -9107,6 +9112,8 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables) if (fout->remoteVersion >= 180000) appendPQExpBufferStr(q, "co.conname AS notnull_name,\n" + "CASE WHEN co.convalidated THEN pt.description" + " ELSE NULL END AS notnull_comment,\n" "CASE WHEN NOT co.convalidated THEN co.oid " "ELSE NULL END AS notnull_invalidoid,\n" "co.connoinherit AS notnull_noinherit,\n" @@ -9114,6 +9121,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables) else appendPQExpBufferStr(q, "CASE WHEN a.attnotnull THEN '' ELSE NULL END AS notnull_name,\n" + "NULL AS notnull_comment,\n" "NULL AS notnull_invalidoid,\n" "false AS notnull_noinherit,\n" "a.attislocal AS notnull_islocal,\n"); @@ -9157,15 +9165,16 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables) /* * In versions 18 and up, we need pg_constraint for explicit NOT NULL - * entries. Also, we need to know if the NOT NULL for each column is - * backing a primary key. + * entries and pg_description to get their comments. 
*/ if (fout->remoteVersion >= 180000) appendPQExpBufferStr(q, " LEFT JOIN pg_catalog.pg_constraint co ON " "(a.attrelid = co.conrelid\n" " AND co.contype = 'n' AND " - "co.conkey = array[a.attnum])\n"); + "co.conkey = array[a.attnum])\n" + " LEFT JOIN pg_catalog.pg_description pt ON " + "(pt.classoid = co.tableoid AND pt.objoid = co.oid)\n"); appendPQExpBufferStr(q, "WHERE a.attnum > 0::pg_catalog.int2\n" @@ -9189,6 +9198,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables) i_attalign = PQfnumber(res, "attalign"); i_attislocal = PQfnumber(res, "attislocal"); i_notnull_name = PQfnumber(res, "notnull_name"); + i_notnull_comment = PQfnumber(res, "notnull_comment"); i_notnull_invalidoid = PQfnumber(res, "notnull_invalidoid"); i_notnull_noinherit = PQfnumber(res, "notnull_noinherit"); i_notnull_islocal = PQfnumber(res, "notnull_islocal"); @@ -9257,6 +9267,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables) tbinfo->attfdwoptions = (char **) pg_malloc(numatts * sizeof(char *)); tbinfo->attmissingval = (char **) pg_malloc(numatts * sizeof(char *)); tbinfo->notnull_constrs = (char **) pg_malloc(numatts * sizeof(char *)); + tbinfo->notnull_comment = (char **) pg_malloc(numatts * sizeof(char *)); tbinfo->notnull_invalid = (bool *) pg_malloc(numatts * sizeof(bool)); tbinfo->notnull_noinh = (bool *) pg_malloc(numatts * sizeof(bool)); tbinfo->notnull_islocal = (bool *) pg_malloc(numatts * sizeof(bool)); @@ -9288,11 +9299,14 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables) determineNotNullFlags(fout, res, r, tbinfo, j, i_notnull_name, + i_notnull_comment, i_notnull_invalidoid, i_notnull_noinherit, i_notnull_islocal, &invalidnotnulloids); + tbinfo->notnull_comment[j] = PQgetisnull(res, r, i_notnull_comment) ? + NULL : pg_strdup(PQgetvalue(res, r, i_notnull_comment)); tbinfo->attoptions[j] = pg_strdup(PQgetvalue(res, r, i_attoptions)); tbinfo->attcollation[j] = atooid(PQgetvalue(res, r, i_attcollation)); tbinfo->attcompression[j] = *(PQgetvalue(res, r, i_attcompression)); @@ -9704,8 +9718,9 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables) * 4) The column has a constraint with a known name; in that case * notnull_constrs carries that name and dumpTableSchema will print * "CONSTRAINT the_name NOT NULL". However, if the name is the default - * (table_column_not_null), there's no need to print that name in the dump, - * so notnull_constrs is set to the empty string and it behaves as case 2. + * (table_column_not_null) and there's no comment on the constraint, + * there's no need to print that name in the dump, so notnull_constrs + * is set to the empty string and it behaves as case 2. * * In a child table that inherits from a parent already containing NOT NULL * constraints and the columns in the child don't have their own NOT NULL @@ -9732,6 +9747,7 @@ static void determineNotNullFlags(Archive *fout, PGresult *res, int r, TableInfo *tbinfo, int j, int i_notnull_name, + int i_notnull_comment, int i_notnull_invalidoid, int i_notnull_noinherit, int i_notnull_islocal, @@ -9805,11 +9821,13 @@ determineNotNullFlags(Archive *fout, PGresult *res, int r, { /* * In binary upgrade of inheritance child tables, must have a - * constraint name that we can UPDATE later. + * constraint name that we can UPDATE later; same if there's a + * comment on the constraint. 
*/ - if (dopt->binary_upgrade && - !tbinfo->ispartition && - !tbinfo->notnull_islocal) + if ((dopt->binary_upgrade && + !tbinfo->ispartition && + !tbinfo->notnull_islocal) || + !PQgetisnull(res, r, i_notnull_comment)) { tbinfo->notnull_constrs[j] = pstrdup(PQgetvalue(res, r, i_notnull_name)); @@ -17686,6 +17704,56 @@ dumpTableSchema(Archive *fout, const TableInfo *tbinfo) if (tbinfo->dobj.dump & DUMP_COMPONENT_SECLABEL) dumpTableSecLabel(fout, tbinfo, reltypename); + /* + * Dump comments for not-null constraints that aren't to be dumped + * separately (those are processed by collectComments/dumpComment). + */ + if (!fout->dopt->no_comments && dopt->dumpSchema && + fout->remoteVersion >= 180000) + { + PQExpBuffer comment = NULL; + PQExpBuffer tag = NULL; + + for (j = 0; j < tbinfo->numatts; j++) + { + if (tbinfo->notnull_constrs[j] != NULL && + tbinfo->notnull_comment[j] != NULL) + { + if (comment == NULL) + { + comment = createPQExpBuffer(); + tag = createPQExpBuffer(); + } + else + { + resetPQExpBuffer(comment); + resetPQExpBuffer(tag); + } + + appendPQExpBuffer(comment, "COMMENT ON CONSTRAINT %s ON %s IS ", + fmtId(tbinfo->notnull_constrs[j]), qualrelname); + appendStringLiteralAH(comment, tbinfo->notnull_comment[j], fout); + appendPQExpBufferStr(comment, ";\n"); + + appendPQExpBuffer(tag, "CONSTRAINT %s ON %s", + fmtId(tbinfo->notnull_constrs[j]), qrelname); + + ArchiveEntry(fout, nilCatalogId, createDumpId(), + ARCHIVE_OPTS(.tag = tag->data, + .namespace = tbinfo->dobj.namespace->dobj.name, + .owner = tbinfo->rolname, + .description = "COMMENT", + .section = SECTION_NONE, + .createStmt = comment->data, + .deps = &(tbinfo->dobj.dumpId), + .nDeps = 1)); + } + } + + destroyPQExpBuffer(comment); + destroyPQExpBuffer(tag); + } + /* Dump comments on inlined table constraints */ for (j = 0; j < tbinfo->ncheck; j++) { diff --git a/src/bin/pg_dump/pg_dump.h b/src/bin/pg_dump/pg_dump.h index 7417eab6aefa6..39eef1d6617f4 100644 --- a/src/bin/pg_dump/pg_dump.h +++ b/src/bin/pg_dump/pg_dump.h @@ -365,6 +365,7 @@ typedef struct _tableInfo * there isn't one on this column. 
If * empty string, unnamed constraint * (pre-v17) */ + char **notnull_comment; /* comment thereof */ bool *notnull_invalid; /* true for NOT NULL NOT VALID */ bool *notnull_noinh; /* NOT NULL is NO INHERIT */ bool *notnull_islocal; /* true if NOT NULL has local definition */ diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl index 386e21e0c596a..e1cfa99874ec4 100644 --- a/src/bin/pg_dump/t/002_pg_dump.pl +++ b/src/bin/pg_dump/t/002_pg_dump.pl @@ -1191,7 +1191,9 @@ ) INHERITS (dump_test.test_table_nn, dump_test.test_table_nn_2); ALTER TABLE dump_test.test_table_nn ADD CONSTRAINT nn NOT NULL col1 NOT VALID; ALTER TABLE dump_test.test_table_nn_chld1 VALIDATE CONSTRAINT nn; - ALTER TABLE dump_test.test_table_nn_chld2 VALIDATE CONSTRAINT nn;', + ALTER TABLE dump_test.test_table_nn_chld2 VALIDATE CONSTRAINT nn; + COMMENT ON CONSTRAINT nn ON dump_test.test_table_nn IS \'nn comment is valid\'; + COMMENT ON CONSTRAINT nn ON dump_test.test_table_nn_chld2 IS \'nn_chld2 comment is valid\';', regexp => qr/^ \QALTER TABLE dump_test.test_table_nn\E \n^\s+ \QADD CONSTRAINT nn NOT NULL col1 NOT VALID;\E @@ -1205,6 +1207,34 @@ }, }, + # This constraint is invalid therefore it goes in SECTION_POST_DATA + 'COMMENT ON CONSTRAINT ON test_table_nn' => { + regexp => qr/^ + \QCOMMENT ON CONSTRAINT nn ON dump_test.test_table_nn IS\E + /xm, + like => { + %full_runs, %dump_test_schema_runs, section_post_data => 1, + }, + unlike => { + exclude_dump_test_schema => 1, + only_dump_measurement => 1, + }, + }, + + # This constraint is valid therefore it goes in SECTION_PRE_DATA + 'COMMENT ON CONSTRAINT ON test_table_chld2' => { + regexp => qr/^ + \QCOMMENT ON CONSTRAINT nn ON dump_test.test_table_nn_chld2 IS\E + /xm, + like => { + %full_runs, %dump_test_schema_runs, section_pre_data => 1, + }, + unlike => { + exclude_dump_test_schema => 1, + only_dump_measurement => 1, + }, + }, + 'CONSTRAINT NOT NULL / NOT VALID (child1)' => { regexp => qr/^ \QCREATE TABLE dump_test.test_table_nn_chld1 (\E\n diff --git a/src/test/regress/expected/constraints.out b/src/test/regress/expected/constraints.out index ad6aaab738538..b5592617d9755 100644 --- a/src/test/regress/expected/constraints.out +++ b/src/test/regress/expected/constraints.out @@ -1659,6 +1659,8 @@ EXECUTE get_nnconstraint_info('{constr_parent3, constr_child3}'); constr_parent3 | constr_parent3_a_not_null | t | t | 0 (2 rows) +COMMENT ON CONSTRAINT constr_parent2_a_not_null ON constr_parent2 IS 'this constraint is invalid'; +COMMENT ON CONSTRAINT constr_parent2_a_not_null ON constr_child2 IS 'this constraint is valid'; DEALLOCATE get_nnconstraint_info; -- end NOT NULL NOT VALID -- Comments diff --git a/src/test/regress/sql/constraints.sql b/src/test/regress/sql/constraints.sql index 337baab7ced93..12668f0e0ce0f 100644 --- a/src/test/regress/sql/constraints.sql +++ b/src/test/regress/sql/constraints.sql @@ -997,6 +997,9 @@ create table constr_parent3 (a int not null); create table constr_child3 () inherits (constr_parent2, constr_parent3); EXECUTE get_nnconstraint_info('{constr_parent3, constr_child3}'); +COMMENT ON CONSTRAINT constr_parent2_a_not_null ON constr_parent2 IS 'this constraint is invalid'; +COMMENT ON CONSTRAINT constr_parent2_a_not_null ON constr_child2 IS 'this constraint is valid'; + DEALLOCATE get_nnconstraint_info; -- end NOT NULL NOT VALID From 48c80aba7538d7c515d0c89f4d11f88974fee851 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=81lvaro=20Herrera?= Date: Thu, 26 Jun 2025 18:25:05 +0200 Subject: [PATCH 076/181] docs: fix typo --- 
doc/src/sgml/ref/pg_dump.sgml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/src/sgml/ref/pg_dump.sgml b/doc/src/sgml/ref/pg_dump.sgml index edbb377a5eda1..2ae084b5fa6fc 100644 --- a/doc/src/sgml/ref/pg_dump.sgml +++ b/doc/src/sgml/ref/pg_dump.sgml @@ -1278,7 +1278,7 @@ PostgreSQL documentation The data section contains actual table data, large-object contents, sequence values, and statistics for tables, - materialized views, and foriegn tables. + materialized views, and foreign tables. Post-data items include definitions of indexes, triggers, rules, statistics for indexes, and constraints other than validated check and not-null constraints. From a3994ec6acb27545300ce1e336e4d119d8000ba9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=81lvaro=20Herrera?= Date: Thu, 26 Jun 2025 18:33:48 +0200 Subject: [PATCH 077/181] Fix typo in comment Introduced by c2da1a5d6325 Reported-by: Michael Paquier Discussion: https://postgr.es/m/aFt4qeRwrV-3qNix@paquier.xyz --- contrib/pg_stat_statements/pg_stat_statements.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c index 5597fcaaa053d..e7857f81ec057 100644 --- a/contrib/pg_stat_statements/pg_stat_statements.c +++ b/contrib/pg_stat_statements/pg_stat_statements.c @@ -2844,9 +2844,9 @@ generate_normalized_query(JumbleState *jstate, const char *query, /* * If we have an external param at this location, but no lists are * being squashed across the query, then we skip here; this will make - * us print print the characters found in the original query that - * represent the parameter in the next iteration (or after the loop is - * done), which is a bit odd but seems to work okay in most cases. + * us print the characters found in the original query that represent + * the parameter in the next iteration (or after the loop is done), + * which is a bit odd but seems to work okay in most cases. */ if (jstate->clocations[i].extern_param && !jstate->has_squashed_lists) continue; From 060f420a03a8d8186423e7d64302b72e01365c20 Mon Sep 17 00:00:00 2001 From: Melanie Plageman Date: Thu, 26 Jun 2025 14:25:45 -0400 Subject: [PATCH 078/181] Simplify vacuum VM update logging counters We can simplify the VM counters added in dc6acfd910b8 to lazy_vacuum_heap_page() and lazy_scan_new_or_empty(). We won't invoke lazy_vacuum_heap_page() unless there are dead line pointers, so we know the page can't be all-visible. In lazy_scan_new_or_empty(), we only update the VM if the page-level hint PD_ALL_VISIBLE is clear, and the VM bit cannot be set if the page-level bit is clear, because a subsequent page update would fail to clear the visibility map bit. Simplify the logic for determining which log counters to increment based on this knowledge. Doing so is worthwhile because the old logic was confusing and misguided.
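(Illustrative sketch, not part of the patch: a standalone C model of the simplification, using hypothetical counter names rather than the real vacuum code. Because both call sites guarantee the page was not yet all-visible, the counters can be bumped unconditionally instead of re-checking the previous VM bits.)

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* hypothetical stand-ins for the vacuum logging counters */
static long vm_new_visible_pages;
static long vm_new_visible_frozen_pages;

static void
count_vm_set(bool page_was_all_visible, bool all_frozen)
{
	/* callers guarantee the page was not all-visible beforehand */
	assert(!page_was_all_visible);

	/* so every bit just set is newly set; no old-bits check needed */
	vm_new_visible_pages++;
	if (all_frozen)
		vm_new_visible_frozen_pages++;
}

int
main(void)
{
	count_vm_set(false, true);
	printf("visible=%ld frozen=%ld\n",
		   vm_new_visible_pages, vm_new_visible_frozen_pages);
	return 0;
}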
Author: Melanie Plageman Reviewed-by: Nazir Bilal Yavuz Reviewed-by: Masahiko Sawada Discussion: https://postgr.es/m/flat/CAAKRu_a9w_n2mwY%3DG4LjfWTvRTJtjbfvnYAKi4WjO8QXHHrA0g%40mail.gmail.com --- src/backend/access/heap/vacuumlazy.c | 53 +++++++++------------------- 1 file changed, 16 insertions(+), 37 deletions(-) diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c index 09416450af962..8a42e17aec210 100644 --- a/src/backend/access/heap/vacuumlazy.c +++ b/src/backend/access/heap/vacuumlazy.c @@ -1872,8 +1872,6 @@ lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, BlockNumber blkno, */ if (!PageIsAllVisible(page)) { - uint8 old_vmbits; - START_CRIT_SECTION(); /* mark buffer dirty before writing a WAL record */ @@ -1893,24 +1891,16 @@ lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, BlockNumber blkno, log_newpage_buffer(buf, true); PageSetAllVisible(page); - old_vmbits = visibilitymap_set(vacrel->rel, blkno, buf, - InvalidXLogRecPtr, - vmbuffer, InvalidTransactionId, - VISIBILITYMAP_ALL_VISIBLE | - VISIBILITYMAP_ALL_FROZEN); + visibilitymap_set(vacrel->rel, blkno, buf, + InvalidXLogRecPtr, + vmbuffer, InvalidTransactionId, + VISIBILITYMAP_ALL_VISIBLE | + VISIBILITYMAP_ALL_FROZEN); END_CRIT_SECTION(); - /* - * If the page wasn't already set all-visible and/or all-frozen in - * the VM, count it as newly set for logging. - */ - if ((old_vmbits & VISIBILITYMAP_ALL_VISIBLE) == 0) - { - vacrel->vm_new_visible_pages++; - vacrel->vm_new_visible_frozen_pages++; - } - else if ((old_vmbits & VISIBILITYMAP_ALL_FROZEN) == 0) - vacrel->vm_new_frozen_pages++; + /* Count the newly all-frozen pages for logging */ + vacrel->vm_new_visible_pages++; + vacrel->vm_new_visible_frozen_pages++; } freespace = PageGetHeapFreeSpace(page); @@ -2915,7 +2905,6 @@ lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer, if (heap_page_is_all_visible(vacrel, buffer, &visibility_cutoff_xid, &all_frozen)) { - uint8 old_vmbits; uint8 flags = VISIBILITYMAP_ALL_VISIBLE; if (all_frozen) @@ -2925,25 +2914,15 @@ lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer, } PageSetAllVisible(page); - old_vmbits = visibilitymap_set(vacrel->rel, blkno, buffer, - InvalidXLogRecPtr, - vmbuffer, visibility_cutoff_xid, - flags); + visibilitymap_set(vacrel->rel, blkno, buffer, + InvalidXLogRecPtr, + vmbuffer, visibility_cutoff_xid, + flags); - /* - * If the page wasn't already set all-visible and/or all-frozen in the - * VM, count it as newly set for logging. - */ - if ((old_vmbits & VISIBILITYMAP_ALL_VISIBLE) == 0) - { - vacrel->vm_new_visible_pages++; - if (all_frozen) - vacrel->vm_new_visible_frozen_pages++; - } - - else if ((old_vmbits & VISIBILITYMAP_ALL_FROZEN) == 0 && - all_frozen) - vacrel->vm_new_frozen_pages++; + /* Count the newly set VM page for logging */ + vacrel->vm_new_visible_pages++; + if (all_frozen) + vacrel->vm_new_visible_frozen_pages++; } /* Revert to the previous phase information for error traceback */ From 483f7246f39b3af250fed1e613d962b85b568861 Mon Sep 17 00:00:00 2001 From: Melanie Plageman Date: Thu, 26 Jun 2025 15:03:48 -0400 Subject: [PATCH 079/181] Remove unused check in heap_xlog_insert() 8e03eb92e9a reverted commit 39b66a91bd, which allowed freezing in the heap_insert() code path, but forgot to remove the corresponding check in heap_xlog_insert(). This code is extraneous but not harmful. However, cleaning it up makes it very clear that, as of now, we do not support any freezing of pages in the heap_insert() path.
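(Illustrative sketch, not part of the patch: a standalone C model of the cleanup pattern, with a hypothetical flag value. Once an invariant guarantees a flag can never reach a code path, the dead handling can be replaced by an assertion that documents the invariant.)

#include <assert.h>

#define ALL_FROZEN_SET (1 << 5)		/* hypothetical flag bit */

static void
redo_insert(unsigned int flags)
{
	/* no freezing happens in this code path, so assert rather than handle */
	assert((flags & ALL_FROZEN_SET) == 0);

	/* ... replay work continues without any all-frozen branch ... */
}

int
main(void)
{
	redo_insert(0);
	return 0;
}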
Author: Melanie Plageman Reviewed-by: Tomas Vondra Discussion: https://postgr.es/m/flat/CAAKRu_Zp4Pi-t51OFWm1YZ-cctDfBhHCMZ%3DEx6PKxv0o8y2GvA%40mail.gmail.com Backpatch-through: 14 --- src/backend/access/heap/heapam_xlog.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/backend/access/heap/heapam_xlog.c b/src/backend/access/heap/heapam_xlog.c index 30f4c2d3c6719..eb4bd3d6ae3a3 100644 --- a/src/backend/access/heap/heapam_xlog.c +++ b/src/backend/access/heap/heapam_xlog.c @@ -438,6 +438,9 @@ heap_xlog_insert(XLogReaderState *record) ItemPointerSetBlockNumber(&target_tid, blkno); ItemPointerSetOffsetNumber(&target_tid, xlrec->offnum); + /* No freezing in the heap_insert() code path */ + Assert(!(xlrec->flags & XLH_INSERT_ALL_FROZEN_SET)); + /* * The visibility map may need to be fixed even if the heap page is * already up-to-date. @@ -508,10 +511,6 @@ heap_xlog_insert(XLogReaderState *record) if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED) PageClearAllVisible(page); - /* XLH_INSERT_ALL_FROZEN_SET implies that all tuples are visible */ - if (xlrec->flags & XLH_INSERT_ALL_FROZEN_SET) - PageSetAllVisible(page); - MarkBufferDirty(buffer); } if (BufferIsValid(buffer)) From 95e12d4d9b228855af8a2a34ca28c33924d4edd1 Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Thu, 26 Jun 2025 22:02:16 +0200 Subject: [PATCH 080/181] Correct misleading error messages Commit 7d6d2c4bbd7 dropped opcintype from the index AM strategy translation API. But some error messages about failed lookups still mentioned it, even though it was not used for the lookup. Fix by removing opcintype from the error messages as well. --- src/backend/commands/indexcmds.c | 4 ++-- src/backend/commands/tablecmds.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c index f2898fee5fcd5..6f753ab6d7a0d 100644 --- a/src/backend/commands/indexcmds.c +++ b/src/backend/commands/indexcmds.c @@ -2469,8 +2469,8 @@ GetOperatorFromCompareType(Oid opclass, Oid rhstype, CompareType cmptype, cmptype == COMPARE_EQ ? errmsg("could not identify an equality operator for type %s", format_type_be(opcintype)) : cmptype == COMPARE_OVERLAP ? errmsg("could not identify an overlaps operator for type %s", format_type_be(opcintype)) : cmptype == COMPARE_CONTAINED_BY ? errmsg("could not identify a contained-by operator for type %s", format_type_be(opcintype)) : 0, - errdetail("Could not translate compare type %d for operator family \"%s\", input type %s, access method \"%s\".", - cmptype, get_opfamily_name(opfamily, false), format_type_be(opcintype), get_am_name(amid))); + errdetail("Could not translate compare type %d for operator family \"%s\" of access method \"%s\".", - cmptype, get_opfamily_name(opfamily, false), get_am_name(amid))); /* * We parameterize rhstype so foreign keys can ask for a <@ operator diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c index 1c3ad74e7b9e9..e2b94c8c6098b 100644 --- a/src/backend/commands/tablecmds.c +++ b/src/backend/commands/tablecmds.c @@ -10330,8 +10330,8 @@ ATAddForeignKeyConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel, for_overlaps ?
errmsg("could not identify an overlaps operator for foreign key") : errmsg("could not identify an equality operator for foreign key"), - errdetail("Could not translate compare type %d for operator family \"%s\", input type %s, access method \"%s\".", - cmptype, get_opfamily_name(opfamily, false), format_type_be(opcintype), get_am_name(amid))); + errdetail("Could not translate compare type %d for operator family \"%s\" of access method \"%s\".", + cmptype, get_opfamily_name(opfamily, false), get_am_name(amid))); /* * There had better be a primary equality operator for the index. From 7fb3c38e7d7d12a742e1e7600879570251e1886a Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Thu, 26 Jun 2025 22:13:53 +0200 Subject: [PATCH 081/181] libpq: Message style improvements --- src/interfaces/libpq/fe-connect.c | 2 +- src/interfaces/libpq/fe-protocol3.c | 7 ++++--- src/interfaces/libpq/fe-secure-openssl.c | 4 ++-- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/src/interfaces/libpq/fe-connect.c b/src/interfaces/libpq/fe-connect.c index ccb01aad36109..51a9c41658455 100644 --- a/src/interfaces/libpq/fe-connect.c +++ b/src/interfaces/libpq/fe-connect.c @@ -2141,7 +2141,7 @@ pqConnectOptions2(PGconn *conn) if (conn->min_pversion > conn->max_pversion) { conn->status = CONNECTION_BAD; - libpq_append_conn_error(conn, "min_protocol_version is greater than max_protocol_version"); + libpq_append_conn_error(conn, "\"%s\" is greater than \"%s\"", "min_protocol_version", "max_protocol_version"); return false; } diff --git a/src/interfaces/libpq/fe-protocol3.c b/src/interfaces/libpq/fe-protocol3.c index beb1c889aad73..1599de757d130 100644 --- a/src/interfaces/libpq/fe-protocol3.c +++ b/src/interfaces/libpq/fe-protocol3.c @@ -1434,7 +1434,7 @@ pqGetNegotiateProtocolVersion3(PGconn *conn) /* 3.1 never existed, we went straight from 3.0 to 3.2 */ if (their_version == PG_PROTOCOL(3, 1)) { - libpq_append_conn_error(conn, "received invalid protocol negotiation message: server requests downgrade to non-existent 3.1 protocol version"); + libpq_append_conn_error(conn, "received invalid protocol negotiation message: server requested downgrade to non-existent 3.1 protocol version"); goto failure; } @@ -1452,9 +1452,10 @@ pqGetNegotiateProtocolVersion3(PGconn *conn) if (their_version < conn->min_pversion) { - libpq_append_conn_error(conn, "server only supports protocol version %d.%d, but min_protocol_version was set to %d.%d", + libpq_append_conn_error(conn, "server only supports protocol version %d.%d, but \"%s\" was set to %d.%d", PG_PROTOCOL_MAJOR(their_version), PG_PROTOCOL_MINOR(their_version), + "min_protocol_version", PG_PROTOCOL_MAJOR(conn->min_pversion), PG_PROTOCOL_MINOR(conn->min_pversion)); @@ -1476,7 +1477,7 @@ pqGetNegotiateProtocolVersion3(PGconn *conn) } if (strncmp(conn->workBuffer.data, "_pq_.", 5) != 0) { - libpq_append_conn_error(conn, "received invalid protocol negotiation message: server reported unsupported parameter name without a _pq_. 
prefix (\"%s\")", conn->workBuffer.data); + libpq_append_conn_error(conn, "received invalid protocol negotiation message: server reported unsupported parameter name without a \"%s\" prefix (\"%s\")", "_pq_.", conn->workBuffer.data); goto failure; } libpq_append_conn_error(conn, "received invalid protocol negotiation message: server reported an unsupported parameter that was not requested (\"%s\")", conn->workBuffer.data); diff --git a/src/interfaces/libpq/fe-secure-openssl.c b/src/interfaces/libpq/fe-secure-openssl.c index 78f9e84eb353b..b08b3a6901b77 100644 --- a/src/interfaces/libpq/fe-secure-openssl.c +++ b/src/interfaces/libpq/fe-secure-openssl.c @@ -711,7 +711,7 @@ SSL_CTX_keylog_cb(const SSL *ssl, const char *line) if (fd == -1) { - libpq_append_conn_error(conn, "could not open ssl keylog file \"%s\": %s", + libpq_append_conn_error(conn, "could not open SSL key logging file \"%s\": %s", conn->sslkeylogfile, pg_strerror(errno)); return; } @@ -719,7 +719,7 @@ SSL_CTX_keylog_cb(const SSL *ssl, const char *line) /* line is guaranteed by OpenSSL to be NUL terminated */ rc = write(fd, line, strlen(line)); if (rc < 0) - libpq_append_conn_error(conn, "could not write to ssl keylog file \"%s\": %s", + libpq_append_conn_error(conn, "could not write to SSL key logging file \"%s\": %s", conn->sslkeylogfile, pg_strerror(errno)); else rc = write(fd, "\n", 1); From 94e2e150ec72a3b37e3847be99c4aca3320c38f9 Mon Sep 17 00:00:00 2001 From: Michael Paquier Date: Fri, 27 Jun 2025 09:31:23 +0900 Subject: [PATCH 082/181] Correct list of files in src/backend/lib/README binaryheap.c and stringinfo.c have been moved to src/common/ by commits 5af0263afd7b and 26aaf97b683d, respectively, and the README patched here still mentioned these two files as available in src/backend/lib/. Author: Aleksander Alekseev Discussion: https://postgr.es/m/CAJ7c6TPg-=tC+fzq0tGTtmL7r79-aWeCmpwAyQiGu0N+sKGj8Q@mail.gmail.com --- src/backend/lib/README | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/backend/lib/README b/src/backend/lib/README index f2fb591237dba..c28cbe356f0b3 100644 --- a/src/backend/lib/README +++ b/src/backend/lib/README @@ -1,8 +1,6 @@ This directory contains a general purpose data structures, for use anywhere in the backend: -binaryheap.c - a binary heap - bipartite_match.c - Hopcroft-Karp maximum cardinality algorithm for bipartite graphs bloomfilter.c - probabilistic, space-efficient set membership testing @@ -21,8 +19,6 @@ pairingheap.c - a pairing heap rbtree.c - a red-black tree -stringinfo.c - an extensible string type - Aside from the inherent characteristics of the data structures, there are a few practical differences between the binary heap and the pairing heap. The From 7195c804bd12f47a9f1b2df9c2e1794bb04c5987 Mon Sep 17 00:00:00 2001 From: Alexander Korotkov Date: Fri, 27 Jun 2025 11:49:00 +0300 Subject: [PATCH 083/181] Fix CheckPointReplicationSlots() with max_replication_slots == 0 ca307d5cec90 made CheckPointReplicationSlots() unconditionally call ReplicationSlotsComputeRequiredLSN(). It causes an assertion trap when max_replication_slots equals 0. This commit makes CheckPointReplicationSlots() call ReplicationSlotsComputeRequiredLSN() only when at least one slot gets its last_saved_restart_lsn updated. That avoids the assertion trap and also saves some cycles when no slot has its last_saved_restart_lsn updated. Based on ideas from Dilip Kumar and Hayato Kuroda .
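(Illustrative sketch, not part of the patch: a standalone C model of the pattern, with hypothetical types and names. Track during the save loop whether anything actually changed, and run the expensive recomputation once at the end only if so.)

#include <stdbool.h>
#include <stdio.h>

typedef struct
{
	long	saved_restart_lsn;	/* value already persisted */
	long	restart_lsn;		/* current in-memory value */
} ModelSlot;

static void
model_checkpoint(ModelSlot *slots, int n)
{
	bool	updated = false;

	for (int i = 0; i < n; i++)
	{
		if (slots[i].saved_restart_lsn != slots[i].restart_lsn)
			updated = true;
		slots[i].saved_restart_lsn = slots[i].restart_lsn;	/* "save to disk" */
	}

	/* recompute the oldest required LSN only if some slot advanced */
	if (updated)
		printf("recomputing required LSN\n");
}

int
main(void)
{
	ModelSlot	slots[2] = {{10, 10}, {10, 20}};

	model_checkpoint(slots, 2);
	return 0;
}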
Reported-by: Zhijie Hou Discussion: https://postgr.es/m/OS0PR01MB5716BB506AF934376FF3A8BB947BA%40OS0PR01MB5716.jpnprd01.prod.outlook.com --- src/backend/replication/slot.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/src/backend/replication/slot.c b/src/backend/replication/slot.c index c11e588d63221..f9fec50ae883f 100644 --- a/src/backend/replication/slot.c +++ b/src/backend/replication/slot.c @@ -2079,6 +2079,7 @@ void CheckPointReplicationSlots(bool is_shutdown) { int i; + bool last_saved_restart_lsn_updated = false; elog(DEBUG1, "performing replication slot checkpoint"); @@ -2123,15 +2124,23 @@ CheckPointReplicationSlots(bool is_shutdown) SpinLockRelease(&s->mutex); } + /* + * Track if we're going to update slot's last_saved_restart_lsn. We + * need this to know if we need to recompute the required LSN. + */ + if (s->last_saved_restart_lsn != s->data.restart_lsn) + last_saved_restart_lsn_updated = true; + SaveSlotToPath(s, path, LOG); } LWLockRelease(ReplicationSlotAllocationLock); /* - * Recompute the required LSN as SaveSlotToPath() updated - * last_saved_restart_lsn for slots. + * Recompute the required LSN if SaveSlotToPath() updated + * last_saved_restart_lsn for any slot. */ - ReplicationSlotsComputeRequiredLSN(); + if (last_saved_restart_lsn_updated) + ReplicationSlotsComputeRequiredLSN(); } /* From bbccf7ecb363e50ae9d9aa71d0e7c6d49ee0bb06 Mon Sep 17 00:00:00 2001 From: Nathan Bossart Date: Fri, 27 Jun 2025 13:37:26 -0500 Subject: [PATCH 084/181] Use correct DatumGet*() function in test_shm_mq_main(). This is purely cosmetic, as dsm_attach() interprets its argument as a dsm_handle (i.e., an unsigned integer), but we might as well fix it. Oversight in commit 4db3744f1f. Author: Jianghua Yang Reviewed-by: Tom Lane Discussion: https://postgr.es/m/CAAZLFmRxkUD5jRs0W3K%3DUe4_ZS%2BRcAb0PCE1S0vVJBn3sWH2UQ%40mail.gmail.com Backpatch-through: 13 --- src/test/modules/test_shm_mq/worker.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/modules/test_shm_mq/worker.c b/src/test/modules/test_shm_mq/worker.c index 96cd304dbbc83..c1d321b69a427 100644 --- a/src/test/modules/test_shm_mq/worker.c +++ b/src/test/modules/test_shm_mq/worker.c @@ -77,7 +77,7 @@ test_shm_mq_main(Datum main_arg) * exit, which is fine. If there were a ResourceOwner, it would acquire * ownership of the mapping, but we have no need for that. 
*/ - seg = dsm_attach(DatumGetInt32(main_arg)); + seg = dsm_attach(DatumGetUInt32(main_arg)); if (seg == NULL) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), From 50fd428b2b9cb036c9c5982b56443d7e28119707 Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Sat, 28 Jun 2025 19:18:06 +0200 Subject: [PATCH 085/181] Message style improvements --- src/backend/access/heap/vacuumlazy.c | 2 +- src/backend/catalog/heap.c | 2 +- src/backend/commands/matview.c | 3 ++- src/backend/commands/publicationcmds.c | 4 ++-- src/backend/commands/subscriptioncmds.c | 6 +++--- src/backend/commands/tablecmds.c | 11 ++++++----- src/backend/libpq/be-secure-openssl.c | 4 ++-- src/backend/replication/logical/launcher.c | 2 +- src/backend/replication/logical/slotsync.c | 4 ++-- src/backend/replication/pgoutput/pgoutput.c | 2 +- src/backend/tcop/backend_startup.c | 6 +++--- src/backend/utils/adt/formatting.c | 5 +++-- src/backend/utils/misc/guc_tables.c | 4 ++-- src/test/regress/expected/generated_virtual.out | 4 ++-- src/test/regress/expected/inherit.out | 4 ++-- src/test/regress/expected/matview.out | 2 +- src/test/regress/expected/publication.out | 5 +++-- src/test/regress/expected/without_overlaps.out | 4 ++-- src/test/subscription/t/024_add_drop_pub.pl | 2 +- 19 files changed, 40 insertions(+), 36 deletions(-) diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c index 8a42e17aec210..4111a8996b5a1 100644 --- a/src/backend/access/heap/vacuumlazy.c +++ b/src/backend/access/heap/vacuumlazy.c @@ -1428,7 +1428,7 @@ lazy_scan_heap(LVRelState *vacrel) */ if (vacrel->eager_scan_max_fails_per_region > 0) ereport(vacrel->verbose ? INFO : DEBUG2, - (errmsg("disabling eager scanning after freezing %u eagerly scanned blocks of \"%s.%s.%s\"", + (errmsg("disabling eager scanning after freezing %u eagerly scanned blocks of relation \"%s.%s.%s\"", orig_eager_scan_success_limit, vacrel->dbname, vacrel->relnamespace, vacrel->relname))); diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c index 649d3966e8e21..fd6537567ea27 100644 --- a/src/backend/catalog/heap.c +++ b/src/backend/catalog/heap.c @@ -3006,7 +3006,7 @@ AddRelationNotNullConstraints(Relation rel, List *constraints, if (constr->is_no_inherit) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("cannot define not-null constraint on column \"%s\" with NO INHERIT", + errmsg("cannot define not-null constraint with NO INHERIT on column \"%s\"", strVal(linitial(constr->keys))), errdetail("The column has an inherited not-null constraint."))); diff --git a/src/backend/commands/matview.c b/src/backend/commands/matview.c index 27c2cb26ef5f3..188e26f0e6e29 100644 --- a/src/backend/commands/matview.c +++ b/src/backend/commands/matview.c @@ -835,7 +835,8 @@ refresh_by_match_merge(Oid matviewOid, Oid tempOid, Oid relowner, if (!foundUniqueIndex) ereport(ERROR, errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("could not find suitable unique index on materialized view")); + errmsg("could not find suitable unique index on materialized view \"%s\"", + RelationGetRelationName(matviewRel))); appendStringInfoString(&querybuf, " AND newdata.* OPERATOR(pg_catalog.*=) mv.*) " diff --git a/src/backend/commands/publicationcmds.c b/src/backend/commands/publicationcmds.c index 0b23d94c38e20..1bf7eaae5b362 100644 --- a/src/backend/commands/publicationcmds.c +++ b/src/backend/commands/publicationcmds.c @@ -2130,8 +2130,8 @@ defGetGeneratedColsOption(DefElem *def) ereport(ERROR, errcode(ERRCODE_SYNTAX_ERROR), - errmsg("%s 
requires a \"none\" or \"stored\" value", - def->defname)); + errmsg("invalid value for publication parameter \"%s\": \"%s\"", def->defname, sval), + errdetail("Valid values are \"%s\" and \"%s\".", "none", "stored")); return PUBLISH_GENCOLS_NONE; /* keep compiler quiet */ } diff --git a/src/backend/commands/subscriptioncmds.c b/src/backend/commands/subscriptioncmds.c index 4aec73bcc6bbc..4ff246cd94321 100644 --- a/src/backend/commands/subscriptioncmds.c +++ b/src/backend/commands/subscriptioncmds.c @@ -1267,7 +1267,7 @@ AlterSubscription(ParseState *pstate, AlterSubscriptionStmt *stmt, IsSet(opts.specified_opts, SUBOPT_SLOT_NAME)) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("slot_name and two_phase cannot be altered at the same time"))); + errmsg("\"slot_name\" and \"two_phase\" cannot be altered at the same time"))); /* * Note that workers may still survive even if the @@ -1283,7 +1283,7 @@ AlterSubscription(ParseState *pstate, AlterSubscriptionStmt *stmt, if (logicalrep_workers_find(subid, true, true)) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("cannot alter two_phase when logical replication worker is still running"), + errmsg("cannot alter \"two_phase\" when logical replication worker is still running"), errhint("Try again after some time."))); /* @@ -1297,7 +1297,7 @@ AlterSubscription(ParseState *pstate, AlterSubscriptionStmt *stmt, LookupGXactBySubid(subid)) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("cannot disable two_phase when prepared transactions are present"), + errmsg("cannot disable \"two_phase\" when prepared transactions exist"), errhint("Resolve these transactions and try again."))); /* Change system catalog accordingly */ diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c index e2b94c8c6098b..991bc946ffc44 100644 --- a/src/backend/commands/tablecmds.c +++ b/src/backend/commands/tablecmds.c @@ -8609,7 +8609,7 @@ ATExecSetExpression(AlteredTableInfo *tab, Relation rel, const char *colName, rel->rd_att->constr && rel->rd_att->constr->num_check > 0) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("ALTER TABLE / SET EXPRESSION is not supported for virtual generated columns on tables with check constraints"), + errmsg("ALTER TABLE / SET EXPRESSION is not supported for virtual generated columns in tables with check constraints"), errdetail("Column \"%s\" of relation \"%s\" is a virtual generated column.", colName, RelationGetRelationName(rel)))); @@ -8627,7 +8627,7 @@ ATExecSetExpression(AlteredTableInfo *tab, Relation rel, const char *colName, GetRelationPublications(RelationGetRelid(rel)) != NIL) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("ALTER TABLE / SET EXPRESSION is not supported for virtual generated columns on tables that are part of a publication"), + errmsg("ALTER TABLE / SET EXPRESSION is not supported for virtual generated columns in tables that are part of a publication"), errdetail("Column \"%s\" of relation \"%s\" is a virtual generated column.", colName, RelationGetRelationName(rel)))); @@ -10189,7 +10189,7 @@ ATAddForeignKeyConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel, if (pk_has_without_overlaps && !with_period) ereport(ERROR, errcode(ERRCODE_INVALID_FOREIGN_KEY), - errmsg("foreign key must use PERIOD when referencing a primary using WITHOUT OVERLAPS")); + errmsg("foreign key must use PERIOD when referencing a primary key using WITHOUT OVERLAPS")); /* * Now we can check permissions. 
@@ -12913,8 +12913,9 @@ ATExecValidateConstraint(List **wqueue, Relation rel, char *constrName, con->contype != CONSTRAINT_NOTNULL) ereport(ERROR, errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("constraint \"%s\" of relation \"%s\" is not a foreign key, check, or not-null constraint", - constrName, RelationGetRelationName(rel))); + errmsg("cannot validate constraint \"%s\" of relation \"%s\"", + constrName, RelationGetRelationName(rel)), + errdetail("This operation is not supported for this type of constraint.")); if (!con->conenforced) ereport(ERROR, diff --git a/src/backend/libpq/be-secure-openssl.c b/src/backend/libpq/be-secure-openssl.c index 64ff3ce3d6a7a..c8b63ef824900 100644 --- a/src/backend/libpq/be-secure-openssl.c +++ b/src/backend/libpq/be-secure-openssl.c @@ -1436,10 +1436,10 @@ initialize_ecdh(SSL_CTX *context, bool isServerStart) */ ereport(isServerStart ? FATAL : LOG, errcode(ERRCODE_CONFIG_FILE_ERROR), - errmsg("failed to set group names specified in ssl_groups: %s", + errmsg("could not set group names specified in ssl_groups: %s", SSLerrmessageExt(ERR_get_error(), _("No valid groups found"))), - errhint("Ensure that each group name is spelled correctly and supported by the installed version of OpenSSL")); + errhint("Ensure that each group name is spelled correctly and supported by the installed version of OpenSSL.")); return false; } #endif diff --git a/src/backend/replication/logical/launcher.c b/src/backend/replication/logical/launcher.c index 14d8efbd25bf5..4aed0dfcebb24 100644 --- a/src/backend/replication/logical/launcher.c +++ b/src/backend/replication/logical/launcher.c @@ -341,7 +341,7 @@ logicalrep_worker_launch(LogicalRepWorkerType wtype, if (max_active_replication_origins == 0) ereport(ERROR, (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED), - errmsg("cannot start logical replication workers when \"max_active_replication_origins\"=0"))); + errmsg("cannot start logical replication workers when \"max_active_replication_origins\" is 0"))); /* * We need to do the modification of the shared memory under lock so that diff --git a/src/backend/replication/logical/slotsync.c b/src/backend/replication/logical/slotsync.c index f1dcbebfa1ae7..3ec3abfa3da60 100644 --- a/src/backend/replication/logical/slotsync.c +++ b/src/backend/replication/logical/slotsync.c @@ -213,7 +213,7 @@ update_local_synced_slot(RemoteSlot *remote_slot, Oid remote_dbid, ereport(slot->data.persistency == RS_TEMPORARY ? 
LOG : DEBUG1, errmsg("could not synchronize replication slot \"%s\"", remote_slot->name), - errdetail("Synchronization could lead to data loss as the remote slot needs WAL at LSN %X/%X and catalog xmin %u, but the standby has LSN %X/%X and catalog xmin %u.", + errdetail("Synchronization could lead to data loss, because the remote slot needs WAL at LSN %X/%X and catalog xmin %u, but the standby has LSN %X/%X and catalog xmin %u.", LSN_FORMAT_ARGS(remote_slot->restart_lsn), remote_slot->catalog_xmin, LSN_FORMAT_ARGS(slot->data.restart_lsn), @@ -593,7 +593,7 @@ update_and_persist_local_synced_slot(RemoteSlot *remote_slot, Oid remote_dbid) { ereport(LOG, errmsg("could not synchronize replication slot \"%s\"", remote_slot->name), - errdetail("Synchronization could lead to data loss as standby could not build a consistent snapshot to decode WALs at LSN %X/%X.", + errdetail("Synchronization could lead to data loss, because the standby could not build a consistent snapshot to decode WALs at LSN %X/%X.", LSN_FORMAT_ARGS(slot->data.restart_lsn))); return false; diff --git a/src/backend/replication/pgoutput/pgoutput.c b/src/backend/replication/pgoutput/pgoutput.c index 693a766e6d75f..082b4d9d32798 100644 --- a/src/backend/replication/pgoutput/pgoutput.c +++ b/src/backend/replication/pgoutput/pgoutput.c @@ -1789,7 +1789,7 @@ LoadPublications(List *pubnames) else ereport(WARNING, errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("skipped loading publication: %s", pubname), + errmsg("skipped loading publication \"%s\"", pubname), errdetail("The publication does not exist at this point in the WAL."), errhint("Create the publication if it does not exist.")); } diff --git a/src/backend/tcop/backend_startup.c b/src/backend/tcop/backend_startup.c index a7d1fec981f88..ad0af5edc1f21 100644 --- a/src/backend/tcop/backend_startup.c +++ b/src/backend/tcop/backend_startup.c @@ -881,7 +881,7 @@ ProcessCancelRequestPacket(Port *port, void *pkt, int pktlen) { ereport(COMMERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), - errmsg("invalid length of query cancel packet"))); + errmsg("invalid length of cancel request packet"))); return; } len = pktlen - offsetof(CancelRequestPacket, cancelAuthCode); @@ -889,7 +889,7 @@ ProcessCancelRequestPacket(Port *port, void *pkt, int pktlen) { ereport(COMMERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), - errmsg("invalid length of query cancel key"))); + errmsg("invalid length of cancel key in cancel request packet"))); return; } @@ -1077,7 +1077,7 @@ check_log_connections(char **newval, void **extra, GucSource source) if (!SplitIdentifierString(rawstring, ',', &elemlist)) { - GUC_check_errdetail("Invalid list syntax in parameter \"log_connections\"."); + GUC_check_errdetail("Invalid list syntax in parameter \"%s\".", "log_connections"); pfree(rawstring); list_free(elemlist); return false; diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c index 5bd1e01f7e463..1d05481181db7 100644 --- a/src/backend/utils/adt/formatting.c +++ b/src/backend/utils/adt/formatting.c @@ -3590,14 +3590,15 @@ DCH_from_char(FormatNode *node, const char *in, TmFromChar *out, if (matched < 2) ereturn(escontext,, (errcode(ERRCODE_INVALID_DATETIME_FORMAT), - errmsg("invalid input string for \"Y,YYY\""))); + errmsg("invalid value \"%s\" for \"%s\"", + s, "Y,YYY"))); /* years += (millennia * 1000); */ if (pg_mul_s32_overflow(millennia, 1000, &millennia) || pg_add_s32_overflow(years, millennia, &years)) ereturn(escontext,, (errcode(ERRCODE_DATETIME_FIELD_OVERFLOW), - 
errmsg("value for \"Y,YYY\" in source string is out of range"))); + errmsg("value for \"%s\" in source string is out of range", "Y,YYY"))); if (!from_char_set_int(&out->year, years, n, escontext)) return; diff --git a/src/backend/utils/misc/guc_tables.c b/src/backend/utils/misc/guc_tables.c index f04bfedb2fd10..511dc32d51921 100644 --- a/src/backend/utils/misc/guc_tables.c +++ b/src/backend/utils/misc/guc_tables.c @@ -1028,7 +1028,7 @@ struct config_bool ConfigureNamesBool[] = }, { {"enable_distinct_reordering", PGC_USERSET, QUERY_TUNING_METHOD, - gettext_noop("Enables reordering of DISTINCT pathkeys."), + gettext_noop("Enables reordering of DISTINCT keys."), NULL, GUC_EXPLAIN }, @@ -4837,7 +4837,7 @@ struct config_string ConfigureNamesString[] = { {"ssl_groups", PGC_SIGHUP, CONN_AUTH_SSL, gettext_noop("Sets the group(s) to use for Diffie-Hellman key exchange."), - gettext_noop("Multiple groups can be specified using colon-separated list."), + gettext_noop("Multiple groups can be specified using a colon-separated list."), GUC_SUPERUSER_ONLY }, &SSLECDHCurve, diff --git a/src/test/regress/expected/generated_virtual.out b/src/test/regress/expected/generated_virtual.out index 46713f06797e5..df704b5166fa3 100644 --- a/src/test/regress/expected/generated_virtual.out +++ b/src/test/regress/expected/generated_virtual.out @@ -634,10 +634,10 @@ INSERT INTO gtest20 (a) VALUES (30); -- violates constraint ERROR: new row for relation "gtest20" violates check constraint "gtest20_b_check" DETAIL: Failing row contains (30, virtual). ALTER TABLE gtest20 ALTER COLUMN b SET EXPRESSION AS (a * 100); -- violates constraint (currently not supported) -ERROR: ALTER TABLE / SET EXPRESSION is not supported for virtual generated columns on tables with check constraints +ERROR: ALTER TABLE / SET EXPRESSION is not supported for virtual generated columns in tables with check constraints DETAIL: Column "b" of relation "gtest20" is a virtual generated column. ALTER TABLE gtest20 ALTER COLUMN b SET EXPRESSION AS (a * 3); -- ok (currently not supported) -ERROR: ALTER TABLE / SET EXPRESSION is not supported for virtual generated columns on tables with check constraints +ERROR: ALTER TABLE / SET EXPRESSION is not supported for virtual generated columns in tables with check constraints DETAIL: Column "b" of relation "gtest20" is a virtual generated column. CREATE TABLE gtest20a (a int PRIMARY KEY, b int GENERATED ALWAYS AS (a * 2) VIRTUAL); INSERT INTO gtest20a (a) VALUES (10); diff --git a/src/test/regress/expected/inherit.out b/src/test/regress/expected/inherit.out index f9b0c415cfdcc..78dead65325e9 100644 --- a/src/test/regress/expected/inherit.out +++ b/src/test/regress/expected/inherit.out @@ -2281,7 +2281,7 @@ Inherits: pp1, create table cc3 (a2 int not null no inherit) inherits (cc1); NOTICE: moving and merging column "a2" with inherited definition DETAIL: User-specified column moved to the position of the inherited column. -ERROR: cannot define not-null constraint on column "a2" with NO INHERIT +ERROR: cannot define not-null constraint with NO INHERIT on column "a2" DETAIL: The column has an inherited not-null constraint. 
-- change NO INHERIT status of inherited constraint: no dice, it's inherited alter table cc2 add not null a2 no inherit; @@ -2530,7 +2530,7 @@ ERROR: conflicting NO INHERIT declaration for not-null constraint on column "a" CREATE TABLE inh_nn1 (a int not null); CREATE TABLE inh_nn2 (a int not null no inherit) INHERITS (inh_nn1); NOTICE: merging column "a" with inherited definition -ERROR: cannot define not-null constraint on column "a" with NO INHERIT +ERROR: cannot define not-null constraint with NO INHERIT on column "a" DETAIL: The column has an inherited not-null constraint. CREATE TABLE inh_nn3 (a int not null, b int, not null a no inherit); ERROR: conflicting NO INHERIT declaration for not-null constraint on column "a" diff --git a/src/test/regress/expected/matview.out b/src/test/regress/expected/matview.out index 54939ecc6b08a..c56c9fa3a2544 100644 --- a/src/test/regress/expected/matview.out +++ b/src/test/regress/expected/matview.out @@ -587,7 +587,7 @@ CREATE MATERIALIZED VIEW drop_idx_matview AS NOTICE: index "mvtest_drop_idx" does not exist, skipping CREATE UNIQUE INDEX mvtest_drop_idx ON drop_idx_matview (i); REFRESH MATERIALIZED VIEW CONCURRENTLY drop_idx_matview; -ERROR: could not find suitable unique index on materialized view +ERROR: could not find suitable unique index on materialized view "drop_idx_matview" DROP MATERIALIZED VIEW drop_idx_matview; -- clean up RESET search_path; -- make sure that create WITH NO DATA works via SPI diff --git a/src/test/regress/expected/publication.out b/src/test/regress/expected/publication.out index f1025fc0f198d..3a2eacd793f70 100644 --- a/src/test/regress/expected/publication.out +++ b/src/test/regress/expected/publication.out @@ -34,7 +34,8 @@ ERROR: conflicting or redundant options LINE 1: ...pub_xxx WITH (publish_generated_columns = stored, publish_ge... ^ CREATE PUBLICATION testpub_xxx WITH (publish_generated_columns = foo); -ERROR: publish_generated_columns requires a "none" or "stored" value +ERROR: invalid value for publication parameter "publish_generated_columns": "foo" +DETAIL: Valid values are "none" and "stored". \dRp List of publications Name | Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root @@ -539,7 +540,7 @@ SET client_min_messages = 'ERROR'; CREATE TABLE testpub_rf_tbl7 (id int PRIMARY KEY, x int, y int GENERATED ALWAYS AS (x * 111) VIRTUAL); CREATE PUBLICATION testpub8 FOR TABLE testpub_rf_tbl7 WHERE (y > 100); ALTER TABLE testpub_rf_tbl7 ALTER COLUMN y SET EXPRESSION AS (x * testpub_rf_func2()); -ERROR: ALTER TABLE / SET EXPRESSION is not supported for virtual generated columns on tables that are part of a publication +ERROR: ALTER TABLE / SET EXPRESSION is not supported for virtual generated columns in tables that are part of a publication DETAIL: Column "y" of relation "testpub_rf_tbl7" is a virtual generated column. 
RESET client_min_messages; DROP TABLE testpub_rf_tbl1; diff --git a/src/test/regress/expected/without_overlaps.out b/src/test/regress/expected/without_overlaps.out index ea607bed0a412..f3144bdc39c21 100644 --- a/src/test/regress/expected/without_overlaps.out +++ b/src/test/regress/expected/without_overlaps.out @@ -1426,7 +1426,7 @@ CREATE TABLE temporal_fk_rng2rng ( CONSTRAINT temporal_fk_rng2rng_fk FOREIGN KEY (parent_id, valid_at) REFERENCES temporal_rng (id, valid_at) ); -ERROR: foreign key must use PERIOD when referencing a primary using WITHOUT OVERLAPS +ERROR: foreign key must use PERIOD when referencing a primary key using WITHOUT OVERLAPS -- (parent_id, valid_at) REFERENCES (id, PERIOD valid_at) -- FOREIGN KEY part should specify PERIOD CREATE TABLE temporal_fk_rng2rng ( @@ -1900,7 +1900,7 @@ CREATE TABLE temporal_fk_mltrng2mltrng ( CONSTRAINT temporal_fk_mltrng2mltrng_fk FOREIGN KEY (parent_id, valid_at) REFERENCES temporal_mltrng (id, valid_at) ); -ERROR: foreign key must use PERIOD when referencing a primary using WITHOUT OVERLAPS +ERROR: foreign key must use PERIOD when referencing a primary key using WITHOUT OVERLAPS -- (parent_id, valid_at) REFERENCES (id, PERIOD valid_at) -- FOREIGN KEY part should specify PERIOD CREATE TABLE temporal_fk_mltrng2mltrng ( diff --git a/src/test/subscription/t/024_add_drop_pub.pl b/src/test/subscription/t/024_add_drop_pub.pl index e995d8b383901..5298d43197900 100644 --- a/src/test/subscription/t/024_add_drop_pub.pl +++ b/src/test/subscription/t/024_add_drop_pub.pl @@ -112,7 +112,7 @@ # Verify that a warning is logged. $node_publisher->wait_for_log( - qr/WARNING: ( [A-Z0-9]+:)? skipped loading publication: tap_pub_3/, $offset); + qr/WARNING: ( [A-Z0-9]+:)? skipped loading publication "tap_pub_3"/, $offset); $node_publisher->safe_psql('postgres', "CREATE PUBLICATION tap_pub_3 FOR TABLE tab_3"); From 6d12d5a433c9c8cbf92fc19afd2a3465f275564c Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Sun, 29 Jun 2025 17:02:35 +0200 Subject: [PATCH 086/181] pg_recvlogical: Rename --two-phase and --failover options. This commit renames the pg_recvlogical options --two-phase and --failover to --enable-two-phase and --enable-failover, respectively. The new names distinguish these enabling options from action options like --start and --create-slot, while clearly indicating their purpose to enable specific logical slot features. The option --failover is new in PostgreSQL 18 (commit cf2655a9029), so no compatibility break there. The option --two-phase has existed since PostgreSQL 15 (commit cda03cfed6b), so for compatibility we keep the old option name --two-phase around as deprecated. Also note that pg_createsubscriber has acquired an --enable-two-phase option, so this increases consistency across tools. 
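For illustration, a minimal sketch of the renamed invocations (shown as
alternatives against a database named postgres with an arbitrary slot name;
creating the same slot twice would of course fail):

$ pg_recvlogical -d postgres --slot=test --create-slot --two-phase
$ pg_recvlogical -d postgres --slot=test --create-slot --enable-two-phase
$ pg_recvlogical -d postgres --slot=test --create-slot --enable-failover

The first form is the deprecated spelling kept for compatibility; the second
is its new equivalent; the third enables slot synchronization and, being new
in 18, has no old spelling to preserve. Both enabling options are rejected
unless --create-slot is also given.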
Co-authored-by: Masahiko Sawada
Discussion: https://postgr.es/m/a28f66df-1354-4709-8d63-932ded4cac35@eisentraut.org
---
 doc/src/sgml/logicaldecoding.sgml             |  2 +-
 doc/src/sgml/ref/pg_recvlogical.sgml          |  9 +++++----
 src/bin/pg_basebackup/pg_recvlogical.c        | 20 ++++++++++---------
 src/bin/pg_basebackup/t/030_pg_recvlogical.pl |  4 ++--
 4 files changed, 19 insertions(+), 16 deletions(-)

diff --git a/doc/src/sgml/logicaldecoding.sgml b/doc/src/sgml/logicaldecoding.sgml
index 5c5957e0d37a1..fc288d691b9f6 100644
--- a/doc/src/sgml/logicaldecoding.sgml
+++ b/doc/src/sgml/logicaldecoding.sgml
@@ -169,7 +169,7 @@ COMMIT 693
 $ pg_recvlogical -d postgres --slot=test --drop-slot

 Example 2:
-$ pg_recvlogical -d postgres --slot=test --create-slot --two-phase
+$ pg_recvlogical -d postgres --slot=test --create-slot --enable-two-phase
 $ pg_recvlogical -d postgres --slot=test --start -f -
 ControlZ
 $ psql -d postgres -c "BEGIN;INSERT INTO data(data) VALUES('5');PREPARE TRANSACTION 'test';"
diff --git a/doc/src/sgml/ref/pg_recvlogical.sgml b/doc/src/sgml/ref/pg_recvlogical.sgml
index 63a45c7018a45..f68182266a9fa 100644
--- a/doc/src/sgml/ref/pg_recvlogical.sgml
+++ b/doc/src/sgml/ref/pg_recvlogical.sgml
@@ -79,8 +79,8 @@ PostgreSQL documentation
-        The <option>--two-phase</option> and <option>--failover</option> options
-        can be specified with <option>--create-slot</option>.
+        The <option>--enable-two-phase</option> and <option>--enable-failover</option>
+        options can be specified with <option>--create-slot</option>.
@@ -166,7 +166,7 @@ PostgreSQL documentation
-      <term><option>--failover</option></term>
+      <term><option>--enable-failover</option></term>
       <listitem>
        <para>
         Enables the slot to be synchronized to the standbys. This option may
@@ -300,7 +300,8 @@ PostgreSQL documentation
       <term><option>-t</option></term>
-      <term><option>--two-phase</option></term>
+      <term><option>--enable-two-phase</option></term>
+      <term><option>--two-phase</option> (deprecated)</term>
       <listitem>
        <para>
         Enables decoding of prepared transactions. This option may only be specified with
diff --git a/src/bin/pg_basebackup/pg_recvlogical.c b/src/bin/pg_basebackup/pg_recvlogical.c
index 4b4b545917d7d..fb7a6a1d05d8d 100644
--- a/src/bin/pg_basebackup/pg_recvlogical.c
+++ b/src/bin/pg_basebackup/pg_recvlogical.c
@@ -41,8 +41,8 @@ typedef enum
 /* Global Options */
 static char *outfile = NULL;
 static int	verbose = 0;
-static bool two_phase = false;
-static bool failover = false;
+static bool two_phase = false;	/* enable-two-phase option */
+static bool failover = false;	/* enable-failover option */
 static int	noloop = 0;
 static int	standby_message_timeout = 10 * 1000;	/* 10 sec = default */
 static int	fsync_interval = 10 * 1000; /* 10 sec = default */
@@ -89,9 +89,9 @@ usage(void)
 	printf(_("      --drop-slot        drop the replication slot (for the slot's name see --slot)\n"));
 	printf(_("      --start            start streaming in a replication slot (for the slot's name see --slot)\n"));
 	printf(_("\nOptions:\n"));
-	printf(_("  -E, --endpos=LSN       exit after receiving the specified LSN\n"));
-	printf(_("      --failover         enable replication slot synchronization to standby servers when\n"
+	printf(_("      --enable-failover  enable replication slot synchronization to standby servers when\n"
 			 "                         creating a replication slot\n"));
+	printf(_("  -E, --endpos=LSN       exit after receiving the specified LSN\n"));
 	printf(_("  -f, --file=FILE        receive log into this file, - for stdout\n"));
 	printf(_("  -F  --fsync-interval=SECS\n"
 			 "                         time between fsyncs to the output file (default: %d)\n"), (fsync_interval / 1000));
@@ -105,7 +105,8 @@ usage(void)
 	printf(_("  -s, --status-interval=SECS\n"
 			 "                         time between status packets sent to server (default: %d)\n"), (standby_message_timeout / 1000));
 	printf(_("  -S, --slot=SLOTNAME    name of the logical replication slot\n"));
-	printf(_("  -t, --two-phase        enable decoding of prepared transactions when creating a slot\n"));
+	printf(_("  -t, --enable-two-phase enable decoding of prepared transactions when creating a slot\n"));
+	printf(_("      --two-phase
(same as --enable-two-phase, deprecated)\n")); printf(_(" -v, --verbose output verbose messages\n")); printf(_(" -V, --version output version information, then exit\n")); printf(_(" -?, --help show this help, then exit\n")); @@ -698,9 +699,10 @@ main(int argc, char **argv) {"file", required_argument, NULL, 'f'}, {"fsync-interval", required_argument, NULL, 'F'}, {"no-loop", no_argument, NULL, 'n'}, - {"failover", no_argument, NULL, 5}, + {"enable-failover", no_argument, NULL, 5}, + {"enable-two-phase", no_argument, NULL, 't'}, + {"two-phase", no_argument, NULL, 't'}, /* deprecated */ {"verbose", no_argument, NULL, 'v'}, - {"two-phase", no_argument, NULL, 't'}, {"version", no_argument, NULL, 'V'}, {"help", no_argument, NULL, '?'}, /* connection options */ @@ -928,14 +930,14 @@ main(int argc, char **argv) { if (two_phase) { - pg_log_error("--two-phase may only be specified with --create-slot"); + pg_log_error("%s may only be specified with --create-slot", "--enable-two-phase"); pg_log_error_hint("Try \"%s --help\" for more information.", progname); exit(1); } if (failover) { - pg_log_error("--failover may only be specified with --create-slot"); + pg_log_error("%s may only be specified with --create-slot", "--enable-failover"); pg_log_error_hint("Try \"%s --help\" for more information.", progname); exit(1); } diff --git a/src/bin/pg_basebackup/t/030_pg_recvlogical.pl b/src/bin/pg_basebackup/t/030_pg_recvlogical.pl index c82e78847b382..5f46357e72ac7 100644 --- a/src/bin/pg_basebackup/t/030_pg_recvlogical.pl +++ b/src/bin/pg_basebackup/t/030_pg_recvlogical.pl @@ -110,7 +110,7 @@ '--dbname' => $node->connstr('postgres'), '--start', '--endpos' => $nextlsn, - '--two-phase', '--no-loop', + '--enable-two-phase', '--no-loop', '--file' => '-', ], 'incorrect usage'); @@ -142,7 +142,7 @@ '--slot' => 'test', '--dbname' => $node->connstr('postgres'), '--create-slot', - '--failover', + '--enable-failover', ], 'slot with failover created'); From 8319e5cb5493046e65d60da3cc17ab78c91749b1 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Sun, 29 Jun 2025 13:56:03 -0400 Subject: [PATCH 087/181] Obtain required table lock during cross-table constraint updates. Sometimes a table's constraint may depend on a column of another table, so that we have to update the constraint when changing the referenced column's type. We need to have lock on the constraint's table to do that. ATPostAlterTypeCleanup believed that this case was only possible for FOREIGN KEY constraints, but it's wrong at least for CHECK and EXCLUDE constraints; and in general, we'd probably need exclusive lock to alter any sort of constraint. So just remove the contype check and acquire lock for any other table. This prevents a "you don't have lock" assertion failure, though no ill effect is observed in production builds. We'll error out later anyway because we don't presently support physically altering column types within stored composite columns. But the catalog-munging is basically all there, so we may as well make that part work. 
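For concreteness, a sketch of the previously-failing sequence (mirroring the
regression test added below; the table and column names are arbitrary):

$ psql -d postgres <<'SQL'
-- atref's CHECK constraint references attbl's row type, so altering
-- attbl's column type must lock atref too (formerly only FK constraints
-- triggered that lock acquisition).
create table attbl (a int);
create table atref (b attbl check ((b).a is not null));
alter table attbl alter column a type numeric;
drop table attbl, atref;
SQL

In an assert-enabled build the ALTER previously tripped the "you don't have
lock" assertion; with the fix it fails cleanly with 'cannot alter table
"attbl" because column "atref.b" uses its row type'.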
Bug: #18970 Reported-by: Alexander Lakhin Diagnosed-by: jian he Author: Tom Lane Discussion: https://postgr.es/m/18970-a7d1cfe1f8d5d8d9@postgresql.org Backpatch-through: 13 --- src/backend/commands/tablecmds.c | 21 +++++++++++---------- src/test/regress/expected/alter_table.out | 7 +++++++ src/test/regress/sql/alter_table.sql | 9 +++++++++ 3 files changed, 27 insertions(+), 10 deletions(-) diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c index 991bc946ffc44..b8837f26cb4fd 100644 --- a/src/backend/commands/tablecmds.c +++ b/src/backend/commands/tablecmds.c @@ -15415,9 +15415,12 @@ ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab, LOCKMODE lockmode) /* * Re-parse the index and constraint definitions, and attach them to the * appropriate work queue entries. We do this before dropping because in - * the case of a FOREIGN KEY constraint, we might not yet have exclusive - * lock on the table the constraint is attached to, and we need to get - * that before reparsing/dropping. + * the case of a constraint on another table, we might not yet have + * exclusive lock on the table the constraint is attached to, and we need + * to get that before reparsing/dropping. (That's possible at least for + * FOREIGN KEY, CHECK, and EXCLUSION constraints; in non-FK cases it + * requires a dependency on the target table's composite type in the other + * table's constraint expressions.) * * We can't rely on the output of deparsing to tell us which relation to * operate on, because concurrent activity might have made the name @@ -15433,7 +15436,6 @@ ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab, LOCKMODE lockmode) Form_pg_constraint con; Oid relid; Oid confrelid; - char contype; bool conislocal; tup = SearchSysCache1(CONSTROID, ObjectIdGetDatum(oldId)); @@ -15450,7 +15452,6 @@ ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab, LOCKMODE lockmode) elog(ERROR, "could not identify relation associated with constraint %u", oldId); } confrelid = con->confrelid; - contype = con->contype; conislocal = con->conislocal; ReleaseSysCache(tup); @@ -15468,12 +15469,12 @@ ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab, LOCKMODE lockmode) continue; /* - * When rebuilding an FK constraint that references the table we're - * modifying, we might not yet have any lock on the FK's table, so get - * one now. We'll need AccessExclusiveLock for the DROP CONSTRAINT - * step, so there's no value in asking for anything weaker. + * When rebuilding another table's constraint that references the + * table we're modifying, we might not yet have any lock on the other + * table, so get one now. We'll need AccessExclusiveLock for the DROP + * CONSTRAINT step, so there's no value in asking for anything weaker. 
*/ - if (relid != tab->relid && contype == CONSTRAINT_FOREIGN) + if (relid != tab->relid) LockRelationOid(relid, AccessExclusiveLock); ATPostAlterTypeParse(oldId, relid, confrelid, diff --git a/src/test/regress/expected/alter_table.out b/src/test/regress/expected/alter_table.out index 476266e3f4b03..750efc042d8ee 100644 --- a/src/test/regress/expected/alter_table.out +++ b/src/test/regress/expected/alter_table.out @@ -4745,6 +4745,13 @@ alter table attbl alter column p1 set data type bigint; alter table atref alter column c1 set data type bigint; drop table attbl, atref; /* End test case for bug #17409 */ +/* Test case for bug #18970 */ +create table attbl(a int); +create table atref(b attbl check ((b).a is not null)); +alter table attbl alter column a type numeric; -- someday this should work +ERROR: cannot alter table "attbl" because column "atref.b" uses its row type +drop table attbl, atref; +/* End test case for bug #18970 */ -- Test that ALTER TABLE rewrite preserves a clustered index -- for normal indexes and indexes on constraints. create table alttype_cluster (a int); diff --git a/src/test/regress/sql/alter_table.sql b/src/test/regress/sql/alter_table.sql index 5ce9d1e429f81..41cff198e183c 100644 --- a/src/test/regress/sql/alter_table.sql +++ b/src/test/regress/sql/alter_table.sql @@ -3069,6 +3069,15 @@ drop table attbl, atref; /* End test case for bug #17409 */ +/* Test case for bug #18970 */ + +create table attbl(a int); +create table atref(b attbl check ((b).a is not null)); +alter table attbl alter column a type numeric; -- someday this should work +drop table attbl, atref; + +/* End test case for bug #18970 */ + -- Test that ALTER TABLE rewrite preserves a clustered index -- for normal indexes and indexes on constraints. create table alttype_cluster (a int); From 66e9df9f6ef50719b25ca63b60aad934e14f4a1c Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Sun, 29 Jun 2025 15:04:32 -0400 Subject: [PATCH 088/181] Fix some new issues with planning of PlaceHolderVars. In the wake of commit a16ef313f, we need to deal with more cases involving PlaceHolderVars in NestLoopParams than we did before. For one thing, a16ef313f was incorrect to suppose that we could rely on the required-outer relids of the lefthand path to decide placement of nestloop-parameter PHVs. As Richard Guo argued at the time, we must look at the required-outer relids of the join path itself. For another, we have to apply replace_nestloop_params() to such a PHV's expression, in case it contains references to values that will be supplied from NestLoopParams of higher-level nestloops. For another, we need to be more careful about the phnullingrels of the PHV than we were being. identify_current_nestloop_params only bothered to ensure that the phnullingrels didn't contain "too many" relids, but now it has to be exact, because setrefs.c will apply both NRM_SUBSET and NRM_SUPERSET checks in different places. We can compute the correct relids by determining the set of outer joins that should be able to null the PHV and then subtracting whatever's been applied at or below this join. Do the same for plain Vars, too. (This should make it possible to use NRM_EQUAL to process nestloop params in setrefs.c, but I won't risk making such a change in v18 now.) 
Lastly, if a nestloop parameter PHV was pulled up out of a subquery and it contains a subquery that was originally pushed down from this query level, then that will still be represented as a SubLink, because SS_process_sublinks won't recurse into outer PHVs, so it didn't get transformed during expression preprocessing in the subquery. We can substitute the version of the PHV's expression appearing in its PlaceHolderInfo to ensure that that preprocessing has happened. (Seems like this processing sequence could stand to be redesigned, but again, late in v18 development is not the time for that.) It's not very clear to me why the old have_dangerous_phv join-order restriction prevented us from seeing the last three of these problems. But given the lack of field complaints, it must have done so. Reported-by: Alexander Lakhin Author: Tom Lane Discussion: https://postgr.es/m/18953-1c9883a9d4afeb30@postgresql.org --- src/backend/optimizer/plan/createplan.c | 34 ++++- src/backend/optimizer/util/paramassign.c | 84 +++++++----- src/backend/optimizer/util/placeholder.c | 40 ++++++ src/include/optimizer/paramassign.h | 3 +- src/include/optimizer/placeholder.h | 2 + src/test/regress/expected/join.out | 158 +++++++++++++++++++++++ src/test/regress/sql/join.sql | 46 +++++++ 7 files changed, 330 insertions(+), 37 deletions(-) diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c index 8baf36ba4b791..0b61aef962c6d 100644 --- a/src/backend/optimizer/plan/createplan.c +++ b/src/backend/optimizer/plan/createplan.c @@ -4344,6 +4344,7 @@ create_nestloop_plan(PlannerInfo *root, NestLoop *join_plan; Plan *outer_plan; Plan *inner_plan; + Relids outerrelids; List *tlist = build_path_tlist(root, &best_path->jpath.path); List *joinrestrictclauses = best_path->jpath.joinrestrictinfo; List *joinclauses; @@ -4374,8 +4375,8 @@ create_nestloop_plan(PlannerInfo *root, outer_plan = create_plan_recurse(root, best_path->jpath.outerjoinpath, 0); /* For a nestloop, include outer relids in curOuterRels for inner side */ - root->curOuterRels = bms_union(root->curOuterRels, - best_path->jpath.outerjoinpath->parent->relids); + outerrelids = best_path->jpath.outerjoinpath->parent->relids; + root->curOuterRels = bms_union(root->curOuterRels, outerrelids); inner_plan = create_plan_recurse(root, best_path->jpath.innerjoinpath, 0); @@ -4415,7 +4416,8 @@ create_nestloop_plan(PlannerInfo *root, * node, and remove them from root->curOuterParams. */ nestParams = identify_current_nestloop_params(root, - best_path->jpath.outerjoinpath); + outerrelids, + PATH_REQ_OUTER((Path *) best_path)); /* * While nestloop parameters that are Vars had better be available from @@ -4423,32 +4425,50 @@ create_nestloop_plan(PlannerInfo *root, * that are PHVs won't be. In such cases we must add them to the * outer_plan's tlist, since the executor's NestLoopParam machinery * requires the params to be simple outer-Var references to that tlist. + * (This is cheating a little bit, because the outer path's required-outer + * relids might not be enough to allow evaluating such a PHV. But in + * practice, if we could have evaluated the PHV at the nestloop node, we + * can do so in the outer plan too.) 
*/ outer_tlist = outer_plan->targetlist; outer_parallel_safe = outer_plan->parallel_safe; foreach(lc, nestParams) { NestLoopParam *nlp = (NestLoopParam *) lfirst(lc); + PlaceHolderVar *phv; TargetEntry *tle; if (IsA(nlp->paramval, Var)) continue; /* nothing to do for simple Vars */ - if (tlist_member((Expr *) nlp->paramval, outer_tlist)) + /* Otherwise it must be a PHV */ + phv = castNode(PlaceHolderVar, nlp->paramval); + + if (tlist_member((Expr *) phv, outer_tlist)) continue; /* already available */ + /* + * It's possible that nestloop parameter PHVs selected to evaluate + * here contain references to surviving root->curOuterParams items + * (that is, they reference values that will be supplied by some + * higher-level nestloop). Those need to be converted to Params now. + * Note: it's safe to do this after the tlist_member() check, because + * equal() won't pay attention to phv->phexpr. + */ + phv->phexpr = (Expr *) replace_nestloop_params(root, + (Node *) phv->phexpr); + /* Make a shallow copy of outer_tlist, if we didn't already */ if (outer_tlist == outer_plan->targetlist) outer_tlist = list_copy(outer_tlist); /* ... and add the needed expression */ - tle = makeTargetEntry((Expr *) copyObject(nlp->paramval), + tle = makeTargetEntry((Expr *) copyObject(phv), list_length(outer_tlist) + 1, NULL, true); outer_tlist = lappend(outer_tlist, tle); /* ... and track whether tlist is (still) parallel-safe */ if (outer_parallel_safe) - outer_parallel_safe = is_parallel_safe(root, - (Node *) nlp->paramval); + outer_parallel_safe = is_parallel_safe(root, (Node *) phv); } if (outer_tlist != outer_plan->targetlist) outer_plan = change_plan_targetlist(outer_plan, outer_tlist, diff --git a/src/backend/optimizer/util/paramassign.c b/src/backend/optimizer/util/paramassign.c index 9836abf947995..4c13c5931b4c9 100644 --- a/src/backend/optimizer/util/paramassign.c +++ b/src/backend/optimizer/util/paramassign.c @@ -599,38 +599,31 @@ process_subquery_nestloop_params(PlannerInfo *root, List *subplan_params) } /* - * Identify any NestLoopParams that should be supplied by a NestLoop plan - * node with the specified lefthand input path. Remove them from the active - * root->curOuterParams list and return them as the result list. + * Identify any NestLoopParams that should be supplied by a NestLoop + * plan node with the specified lefthand rels and required-outer rels. + * Remove them from the active root->curOuterParams list and return + * them as the result list. * - * XXX Here we also hack up the returned Vars and PHVs so that they do not - * contain nullingrel sets exceeding what is available from the outer side. - * This is needed if we have applied outer join identity 3, - * (A leftjoin B on (Pab)) leftjoin C on (Pb*c) - * = A leftjoin (B leftjoin C on (Pbc)) on (Pab) - * and C contains lateral references to B. It's still safe to apply the - * identity, but the parser will have created those references in the form - * "b*" (i.e., with varnullingrels listing the A/B join), while what we will - * have available from the nestloop's outer side is just "b". We deal with - * that here by stripping the nullingrels down to what is available from the - * outer side according to leftrelids. - * - * That fixes matters for the case of forward application of identity 3. - * If the identity was applied in the reverse direction, we will have - * parameter Vars containing too few nullingrel bits rather than too many. 
- * Currently, that causes no problems because setrefs.c applies only a - * subset check to nullingrels in NestLoopParams, but we'd have to work - * harder if we ever want to tighten that check. This is all pretty annoying - * because it greatly weakens setrefs.c's cross-check, but the alternative + * Vars and PHVs appearing in the result list must have nullingrel sets + * that could validly appear in the lefthand rel's output. Ordinarily that + * would be true already, but if we have applied outer join identity 3, + * there could be more or fewer nullingrel bits in the nodes appearing in + * curOuterParams than are in the nominal leftrelids. We deal with that by + * forcing their nullingrel sets to include exactly the outer-join relids + * that appear in leftrelids and can null the respective Var or PHV. + * This fix is a bit ad-hoc and intellectually unsatisfactory, because it's + * essentially jumping to the conclusion that we've placed evaluation of + * the nestloop parameters correctly, and thus it defeats the intent of the + * subsequent nullingrel cross-checks in setrefs.c. But the alternative * seems to be to generate multiple versions of each laterally-parameterized * subquery, which'd be unduly expensive. */ List * -identify_current_nestloop_params(PlannerInfo *root, Path *leftpath) +identify_current_nestloop_params(PlannerInfo *root, + Relids leftrelids, + Relids outerrelids) { List *result; - Relids leftrelids = leftpath->parent->relids; - Relids outerrelids = PATH_REQ_OUTER(leftpath); Relids allleftrelids; ListCell *cell; @@ -661,25 +654,58 @@ identify_current_nestloop_params(PlannerInfo *root, Path *leftpath) bms_is_member(nlp->paramval->varno, leftrelids)) { Var *var = (Var *) nlp->paramval; + RelOptInfo *rel = root->simple_rel_array[var->varno]; root->curOuterParams = foreach_delete_current(root->curOuterParams, cell); - var->varnullingrels = bms_intersect(var->varnullingrels, + var->varnullingrels = bms_intersect(rel->nulling_relids, leftrelids); result = lappend(result, nlp); } else if (IsA(nlp->paramval, PlaceHolderVar)) { PlaceHolderVar *phv = (PlaceHolderVar *) nlp->paramval; - Relids eval_at = find_placeholder_info(root, phv)->ph_eval_at; + PlaceHolderInfo *phinfo = find_placeholder_info(root, phv); + Relids eval_at = phinfo->ph_eval_at; if (bms_is_subset(eval_at, allleftrelids) && bms_overlap(eval_at, leftrelids)) { root->curOuterParams = foreach_delete_current(root->curOuterParams, cell); - phv->phnullingrels = bms_intersect(phv->phnullingrels, - leftrelids); + + /* + * Deal with an edge case: if the PHV was pulled up out of a + * subquery and it contains a subquery that was originally + * pushed down from this query level, then that will still be + * represented as a SubLink, because SS_process_sublinks won't + * recurse into outer PHVs, so it didn't get transformed + * during expression preprocessing in the subquery. We need a + * version of the PHV that has a SubPlan, which we can get + * from the current query level's placeholder_list. This is + * quite grotty of course, but dealing with it earlier in the + * handling of subplan params would be just as grotty, and it + * might end up being a waste of cycles if we don't decide to + * treat the PHV as a NestLoopParam. (Perhaps that whole + * mechanism should be redesigned someday, but today is not + * that day.) 
+ */ + if (root->parse->hasSubLinks) + { + phv = copyObject(phinfo->ph_var); + + /* + * The ph_var will have empty nullingrels, but that + * doesn't matter since we're about to overwrite + * phv->phnullingrels. Other fields should be OK already. + */ + nlp->paramval = (Var *) phv; + } + + phv->phnullingrels = + bms_intersect(get_placeholder_nulling_relids(root, phinfo), + leftrelids); + result = lappend(result, nlp); } } diff --git a/src/backend/optimizer/util/placeholder.c b/src/backend/optimizer/util/placeholder.c index 41a4c81e94a75..e1cd00a72fbf7 100644 --- a/src/backend/optimizer/util/placeholder.c +++ b/src/backend/optimizer/util/placeholder.c @@ -545,3 +545,43 @@ contain_placeholder_references_walker(Node *node, return expression_tree_walker(node, contain_placeholder_references_walker, context); } + +/* + * Compute the set of outer-join relids that can null a placeholder. + * + * This is analogous to RelOptInfo.nulling_relids for Vars, but we compute it + * on-the-fly rather than saving it somewhere. Currently the value is needed + * at most once per query, so there's little value in doing otherwise. If it + * ever gains more widespread use, perhaps we should cache the result in + * PlaceHolderInfo. + */ +Relids +get_placeholder_nulling_relids(PlannerInfo *root, PlaceHolderInfo *phinfo) +{ + Relids result = NULL; + int relid = -1; + + /* + * Form the union of all potential nulling OJs for each baserel included + * in ph_eval_at. + */ + while ((relid = bms_next_member(phinfo->ph_eval_at, relid)) > 0) + { + RelOptInfo *rel = root->simple_rel_array[relid]; + + /* ignore the RTE_GROUP RTE */ + if (relid == root->group_rtindex) + continue; + + if (rel == NULL) /* must be an outer join */ + { + Assert(bms_is_member(relid, root->outer_join_rels)); + continue; + } + result = bms_add_members(result, rel->nulling_relids); + } + + /* Now remove any OJs already included in ph_eval_at, and we're done. */ + result = bms_del_members(result, phinfo->ph_eval_at); + return result; +} diff --git a/src/include/optimizer/paramassign.h b/src/include/optimizer/paramassign.h index d30d20de29922..bbf7214289bd5 100644 --- a/src/include/optimizer/paramassign.h +++ b/src/include/optimizer/paramassign.h @@ -30,7 +30,8 @@ extern Param *replace_nestloop_param_placeholdervar(PlannerInfo *root, extern void process_subquery_nestloop_params(PlannerInfo *root, List *subplan_params); extern List *identify_current_nestloop_params(PlannerInfo *root, - Path *leftpath); + Relids leftrelids, + Relids outerrelids); extern Param *generate_new_exec_param(PlannerInfo *root, Oid paramtype, int32 paramtypmod, Oid paramcollation); extern int assign_special_exec_param(PlannerInfo *root); diff --git a/src/include/optimizer/placeholder.h b/src/include/optimizer/placeholder.h index d351045e2e056..db92d8861babe 100644 --- a/src/include/optimizer/placeholder.h +++ b/src/include/optimizer/placeholder.h @@ -30,5 +30,7 @@ extern void add_placeholders_to_joinrel(PlannerInfo *root, RelOptInfo *joinrel, SpecialJoinInfo *sjinfo); extern bool contain_placeholder_references_to(PlannerInfo *root, Node *clause, int relid); +extern Relids get_placeholder_nulling_relids(PlannerInfo *root, + PlaceHolderInfo *phinfo); #endif /* PLACEHOLDER_H */ diff --git a/src/test/regress/expected/join.out b/src/test/regress/expected/join.out index c292f04fdbaab..390aabfb34b9a 100644 --- a/src/test/regress/expected/join.out +++ b/src/test/regress/expected/join.out @@ -4119,6 +4119,164 @@ select * from int8_tbl t1 (16 rows) rollback; +-- ... 
not that the initial replacement didn't have some bugs too +begin; +create temp table t(i int primary key); +explain (verbose, costs off) +select * from t t1 + left join (select 1 as x, * from t t2(i2)) t2ss on t1.i = t2ss.i2 + left join t t3(i3) on false + left join t t4(i4) on t4.i4 > t2ss.x; + QUERY PLAN +---------------------------------------------------------- + Nested Loop Left Join + Output: t1.i, (1), t2.i2, i3, t4.i4 + -> Nested Loop Left Join + Output: t1.i, t2.i2, (1), i3 + Join Filter: false + -> Hash Left Join + Output: t1.i, t2.i2, (1) + Inner Unique: true + Hash Cond: (t1.i = t2.i2) + -> Seq Scan on pg_temp.t t1 + Output: t1.i + -> Hash + Output: t2.i2, (1) + -> Seq Scan on pg_temp.t t2 + Output: t2.i2, 1 + -> Result + Output: i3 + One-Time Filter: false + -> Memoize + Output: t4.i4 + Cache Key: (1) + Cache Mode: binary + -> Index Only Scan using t_pkey on pg_temp.t t4 + Output: t4.i4 + Index Cond: (t4.i4 > (1)) +(25 rows) + +explain (verbose, costs off) +select * from + (select k from + (select i, coalesce(i, j) as k from + (select i from t union all select 0) + join (select 1 as j limit 1) on i = j) + right join (select 2 as x) on true + join (select 3 as y) on i is not null + ), + lateral (select k as kl limit 1); + QUERY PLAN +------------------------------------------------------------------- + Nested Loop + Output: COALESCE(t.i, (1)), ((COALESCE(t.i, (1)))) + -> Limit + Output: 1 + -> Result + Output: 1 + -> Nested Loop + Output: t.i, ((COALESCE(t.i, (1)))) + -> Result + Output: t.i, COALESCE(t.i, (1)) + -> Append + -> Index Only Scan using t_pkey on pg_temp.t + Output: t.i + Index Cond: (t.i = (1)) + -> Result + Output: 0 + One-Time Filter: ((1) = 0) + -> Limit + Output: ((COALESCE(t.i, (1)))) + -> Result + Output: (COALESCE(t.i, (1))) +(21 rows) + +rollback; +-- PHVs containing SubLinks are quite tricky to get right +explain (verbose, costs off) +select * +from int8_tbl i8 + inner join + (select (select true) as x + from int4_tbl i4, lateral (select i4.f1 as y limit 1) ss1 + where i4.f1 = 0) ss2 on true + right join (select false as z) ss3 on true, + lateral (select i8.q2 as q2l where x limit 1) ss4 +where i8.q2 = 123; + QUERY PLAN +---------------------------------------------------------------- + Nested Loop + Output: i8.q1, i8.q2, (InitPlan 1).col1, false, (i8.q2) + InitPlan 1 + -> Result + Output: true + InitPlan 2 + -> Result + Output: true + -> Seq Scan on public.int4_tbl i4 + Output: i4.f1 + Filter: (i4.f1 = 0) + -> Nested Loop + Output: i8.q1, i8.q2, (i8.q2) + -> Subquery Scan on ss1 + Output: ss1.y, (InitPlan 1).col1 + -> Limit + Output: NULL::integer + -> Result + Output: NULL::integer + -> Nested Loop + Output: i8.q1, i8.q2, (i8.q2) + -> Seq Scan on public.int8_tbl i8 + Output: i8.q1, i8.q2 + Filter: (i8.q2 = 123) + -> Limit + Output: (i8.q2) + -> Result + Output: i8.q2 + One-Time Filter: ((InitPlan 1).col1) +(29 rows) + +explain (verbose, costs off) +select * +from int8_tbl i8 + inner join + (select (select true) as x + from int4_tbl i4, lateral (select 1 as y limit 1) ss1 + where i4.f1 = 0) ss2 on true + right join (select false as z) ss3 on true, + lateral (select i8.q2 as q2l where x limit 1) ss4 +where i8.q2 = 123; + QUERY PLAN +---------------------------------------------------------------- + Nested Loop + Output: i8.q1, i8.q2, (InitPlan 1).col1, false, (i8.q2) + InitPlan 1 + -> Result + Output: true + InitPlan 2 + -> Result + Output: true + -> Limit + Output: NULL::integer + -> Result + Output: NULL::integer + -> Nested Loop + Output: i8.q1, 
i8.q2, (i8.q2) + -> Seq Scan on public.int4_tbl i4 + Output: i4.f1, (InitPlan 1).col1 + Filter: (i4.f1 = 0) + -> Nested Loop + Output: i8.q1, i8.q2, (i8.q2) + -> Seq Scan on public.int8_tbl i8 + Output: i8.q1, i8.q2 + Filter: (i8.q2 = 123) + -> Limit + Output: (i8.q2) + -> Result + Output: i8.q2 + One-Time Filter: ((InitPlan 1).col1) +(27 rows) + -- Test proper handling of appendrel PHVs during useless-RTE removal explain (costs off) select * from diff --git a/src/test/regress/sql/join.sql b/src/test/regress/sql/join.sql index 88d2204e4471d..f6e7070db656b 100644 --- a/src/test/regress/sql/join.sql +++ b/src/test/regress/sql/join.sql @@ -1361,6 +1361,52 @@ select * from int8_tbl t1 on true; rollback; +-- ... not that the initial replacement didn't have some bugs too +begin; +create temp table t(i int primary key); + +explain (verbose, costs off) +select * from t t1 + left join (select 1 as x, * from t t2(i2)) t2ss on t1.i = t2ss.i2 + left join t t3(i3) on false + left join t t4(i4) on t4.i4 > t2ss.x; + +explain (verbose, costs off) +select * from + (select k from + (select i, coalesce(i, j) as k from + (select i from t union all select 0) + join (select 1 as j limit 1) on i = j) + right join (select 2 as x) on true + join (select 3 as y) on i is not null + ), + lateral (select k as kl limit 1); + +rollback; + +-- PHVs containing SubLinks are quite tricky to get right +explain (verbose, costs off) +select * +from int8_tbl i8 + inner join + (select (select true) as x + from int4_tbl i4, lateral (select i4.f1 as y limit 1) ss1 + where i4.f1 = 0) ss2 on true + right join (select false as z) ss3 on true, + lateral (select i8.q2 as q2l where x limit 1) ss4 +where i8.q2 = 123; + +explain (verbose, costs off) +select * +from int8_tbl i8 + inner join + (select (select true) as x + from int4_tbl i4, lateral (select 1 as y limit 1) ss1 + where i4.f1 = 0) ss2 on true + right join (select false as z) ss3 on true, + lateral (select i8.q2 as q2l where x limit 1) ss4 +where i8.q2 = 123; + -- Test proper handling of appendrel PHVs during useless-RTE removal explain (costs off) select * from From 0ebd24255581837f9a5b189ef15147b769df116b Mon Sep 17 00:00:00 2001 From: Joe Conway Date: Sun, 29 Jun 2025 21:14:21 -0400 Subject: [PATCH 089/181] Run pgperltidy This is required before the creation of a new branch. pgindent is clean, as well as is reformat-dat-files. perltidy version is v20230309, as documented in pgindent's README. 
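For reference, a sketch of the invocation (assuming perltidy v20230309 is on
PATH and the command is run from the top of the source tree, per the pgindent
README; the wrapper's exact behavior may differ across branches):

$ perltidy --version            # expect v20230309
$ src/tools/pgindent/pgperltidy

The wrapper reformats the tree's Perl files against
src/tools/pgindent/perltidyrc, so the diff below is purely mechanical
whitespace and line-wrapping churn.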
--- contrib/amcheck/t/006_verify_gin.pl | 97 +++++++------------ src/bin/initdb/t/001_initdb.pl | 3 +- src/bin/pg_basebackup/t/030_pg_recvlogical.pl | 3 +- src/bin/pg_combinebackup/t/010_hardlink.pl | 92 +++++++++--------- src/bin/pg_dump/t/001_basic.pl | 9 +- src/bin/pg_dump/t/002_pg_dump.pl | 5 +- src/bin/pg_dump/t/006_pg_dumpall.pl | 53 +++++----- src/bin/pg_rewind/t/RewindTest.pm | 2 +- src/bin/pg_upgrade/t/004_subscription.pl | 6 +- src/bin/pg_upgrade/t/006_transfer_modes.pl | 28 ++++-- src/bin/scripts/t/100_vacuumdb.pl | 79 +++++++++++---- src/include/catalog/pg_collation.dat | 3 +- src/include/catalog/pg_proc.dat | 93 ++++++++---------- src/test/authentication/t/001_password.pl | 23 ++--- .../libpq_pipeline/t/001_libpq_pipeline.pl | 6 +- src/test/modules/test_aio/t/001_aio.pl | 46 +++++---- .../postmaster/t/002_connection_limits.pl | 3 +- .../t/040_standby_failover_slots_sync.pl | 3 +- .../recovery/t/048_vacuum_horizon_floor.pl | 54 ++++++----- src/test/ssl/t/SSL/Server.pm | 3 +- src/test/subscription/t/007_ddl.pl | 35 ++++--- src/test/subscription/t/013_partition.pl | 3 +- src/test/subscription/t/024_add_drop_pub.pl | 14 +-- src/test/subscription/t/035_conflicts.pl | 3 +- 24 files changed, 369 insertions(+), 297 deletions(-) diff --git a/contrib/amcheck/t/006_verify_gin.pl b/contrib/amcheck/t/006_verify_gin.pl index e540cd6606adf..5be0bee32183f 100644 --- a/contrib/amcheck/t/006_verify_gin.pl +++ b/contrib/amcheck/t/006_verify_gin.pl @@ -54,20 +54,17 @@ sub invalid_entry_order_leaf_page_test $node->stop; - my $blkno = 1; # root + my $blkno = 1; # root # produce wrong order by replacing aaaaa with ccccc - string_replace_block( - $relpath, - 'aaaaa', - 'ccccc', - $blkno - ); + string_replace_block($relpath, 'aaaaa', 'ccccc', $blkno); $node->start; - my ($result, $stdout, $stderr) = $node->psql('postgres', qq(SELECT gin_index_check('$indexname'))); - my $expected = "index \"$indexname\" has wrong tuple order on entry tree page, block 1, offset 2, rightlink 4294967295"; + my ($result, $stdout, $stderr) = + $node->psql('postgres', qq(SELECT gin_index_check('$indexname'))); + my $expected = + "index \"$indexname\" has wrong tuple order on entry tree page, block 1, offset 2, rightlink 4294967295"; like($stderr, qr/$expected/); } @@ -96,20 +93,17 @@ sub invalid_entry_order_inner_page_test $node->stop; - my $blkno = 1; # root + my $blkno = 1; # root # we have rrrrrrrrr... and tttttttttt... as keys in the root, so produce wrong order by replacing rrrrrrrrrr.... - string_replace_block( - $relpath, - 'rrrrrrrrrr', - 'zzzzzzzzzz', - $blkno - ); + string_replace_block($relpath, 'rrrrrrrrrr', 'zzzzzzzzzz', $blkno); $node->start; - my ($result, $stdout, $stderr) = $node->psql('postgres', qq(SELECT gin_index_check('$indexname'))); - my $expected = "index \"$indexname\" has wrong tuple order on entry tree page, block 1, offset 2, rightlink 4294967295"; + my ($result, $stdout, $stderr) = + $node->psql('postgres', qq(SELECT gin_index_check('$indexname'))); + my $expected = + "index \"$indexname\" has wrong tuple order on entry tree page, block 1, offset 2, rightlink 4294967295"; like($stderr, qr/$expected/); } @@ -129,7 +123,7 @@ sub invalid_entry_columns_order_test $node->stop; - my $blkno = 1; # root + my $blkno = 1; # root # mess column numbers # root items order before: (1,aaa), (2,bbb) @@ -139,26 +133,18 @@ sub invalid_entry_columns_order_test my $find = qr/($attrno_1)(.)(aaa)/s; my $replace = $attrno_2 . 
'$2$3'; - string_replace_block( - $relpath, - $find, - $replace, - $blkno - ); + string_replace_block($relpath, $find, $replace, $blkno); $find = qr/($attrno_2)(.)(bbb)/s; $replace = $attrno_1 . '$2$3'; - string_replace_block( - $relpath, - $find, - $replace, - $blkno - ); + string_replace_block($relpath, $find, $replace, $blkno); $node->start; - my ($result, $stdout, $stderr) = $node->psql('postgres', qq(SELECT gin_index_check('$indexname'))); - my $expected = "index \"$indexname\" has wrong tuple order on entry tree page, block 1, offset 2, rightlink 4294967295"; + my ($result, $stdout, $stderr) = + $node->psql('postgres', qq(SELECT gin_index_check('$indexname'))); + my $expected = + "index \"$indexname\" has wrong tuple order on entry tree page, block 1, offset 2, rightlink 4294967295"; like($stderr, qr/$expected/); } @@ -183,20 +169,17 @@ sub inconsistent_with_parent_key__parent_key_corrupted_test $node->stop; - my $blkno = 1; # root + my $blkno = 1; # root # we have nnnnnnnnnn... as parent key in the root, so replace it with something smaller then child's keys - string_replace_block( - $relpath, - 'nnnnnnnnnn', - 'aaaaaaaaaa', - $blkno - ); + string_replace_block($relpath, 'nnnnnnnnnn', 'aaaaaaaaaa', $blkno); $node->start; - my ($result, $stdout, $stderr) = $node->psql('postgres', qq(SELECT gin_index_check('$indexname'))); - my $expected = "index \"$indexname\" has inconsistent records on page 3 offset 3"; + my ($result, $stdout, $stderr) = + $node->psql('postgres', qq(SELECT gin_index_check('$indexname'))); + my $expected = + "index \"$indexname\" has inconsistent records on page 3 offset 3"; like($stderr, qr/$expected/); } @@ -221,20 +204,17 @@ sub inconsistent_with_parent_key__child_key_corrupted_test $node->stop; - my $blkno = 3; # leaf + my $blkno = 3; # leaf # we have nnnnnnnnnn... as parent key in the root, so replace child key with something bigger - string_replace_block( - $relpath, - 'nnnnnnnnnn', - 'pppppppppp', - $blkno - ); + string_replace_block($relpath, 'nnnnnnnnnn', 'pppppppppp', $blkno); $node->start; - my ($result, $stdout, $stderr) = $node->psql('postgres', qq(SELECT gin_index_check('$indexname'))); - my $expected = "index \"$indexname\" has inconsistent records on page 3 offset 3"; + my ($result, $stdout, $stderr) = + $node->psql('postgres', qq(SELECT gin_index_check('$indexname'))); + my $expected = + "index \"$indexname\" has inconsistent records on page 3 offset 3"; like($stderr, qr/$expected/); } @@ -254,24 +234,21 @@ sub inconsistent_with_parent_key__parent_key_corrupted_posting_tree_test $node->stop; - my $blkno = 2; # posting tree root + my $blkno = 2; # posting tree root # we have a posting tree for 'aaaaa' key with the root at 2nd block # and two leaf pages 3 and 4. replace 4th page's high key with (1,1) # so that there are tid's in leaf page that are larger then the new high key. my $find = pack('S*', 0, 4, 0) . 
'....'; my $replace = pack('S*', 0, 4, 0, 1, 1); - string_replace_block( - $relpath, - $find, - $replace, - $blkno - ); + string_replace_block($relpath, $find, $replace, $blkno); $node->start; - my ($result, $stdout, $stderr) = $node->psql('postgres', qq(SELECT gin_index_check('$indexname'))); - my $expected = "index \"$indexname\": tid exceeds parent's high key in postingTree leaf on block 4"; + my ($result, $stdout, $stderr) = + $node->psql('postgres', qq(SELECT gin_index_check('$indexname'))); + my $expected = + "index \"$indexname\": tid exceeds parent's high key in postingTree leaf on block 4"; like($stderr, qr/$expected/); } diff --git a/src/bin/initdb/t/001_initdb.pl b/src/bin/initdb/t/001_initdb.pl index 15dd10ce40a31..b7ef7ed8d06b7 100644 --- a/src/bin/initdb/t/001_initdb.pl +++ b/src/bin/initdb/t/001_initdb.pl @@ -76,7 +76,8 @@ 'checksums are enabled in control file'); command_ok([ 'initdb', '--sync-only', $datadir ], 'sync only'); -command_ok([ 'initdb', '--sync-only', '--no-sync-data-files', $datadir ], '--no-sync-data-files'); +command_ok([ 'initdb', '--sync-only', '--no-sync-data-files', $datadir ], + '--no-sync-data-files'); command_fails([ 'initdb', $datadir ], 'existing data directory'); if ($supports_syncfs) diff --git a/src/bin/pg_basebackup/t/030_pg_recvlogical.pl b/src/bin/pg_basebackup/t/030_pg_recvlogical.pl index 5f46357e72ac7..1b7a6f6f43fdd 100644 --- a/src/bin/pg_basebackup/t/030_pg_recvlogical.pl +++ b/src/bin/pg_basebackup/t/030_pg_recvlogical.pl @@ -147,7 +147,8 @@ 'slot with failover created'); my $result = $node->safe_psql('postgres', - "SELECT failover FROM pg_catalog.pg_replication_slots WHERE slot_name = 'test'"); + "SELECT failover FROM pg_catalog.pg_replication_slots WHERE slot_name = 'test'" +); is($result, 't', "failover is enabled for the new slot"); done_testing(); diff --git a/src/bin/pg_combinebackup/t/010_hardlink.pl b/src/bin/pg_combinebackup/t/010_hardlink.pl index a0ee419090cf6..4f92d6676bdef 100644 --- a/src/bin/pg_combinebackup/t/010_hardlink.pl +++ b/src/bin/pg_combinebackup/t/010_hardlink.pl @@ -56,7 +56,7 @@ '--pgdata' => $backup1path, '--no-sync', '--checkpoint' => 'fast', - '--wal-method' => 'none' + '--wal-method' => 'none' ], "full backup"); @@ -74,7 +74,7 @@ '--pgdata' => $backup2path, '--no-sync', '--checkpoint' => 'fast', - '--wal-method' => 'none', + '--wal-method' => 'none', '--incremental' => $backup1path . '/backup_manifest' ], "incremental backup"); @@ -112,45 +112,45 @@ # of the given data file. sub check_data_file { - my ($data_file, $last_segment_nlinks) = @_; - - my @data_file_segments = ($data_file); - - # Start checking for additional segments - my $segment_number = 1; - - while (1) - { - my $next_segment = $data_file . '.' . $segment_number; - - # If the file exists and is a regular file, add it to the list - if (-f $next_segment) - { - push @data_file_segments, $next_segment; - $segment_number++; - } - # Stop the loop if the file doesn't exist - else - { - last; - } - } - - # All segments of the given data file should contain 2 hard links, except - # for the last one, which should match the given number of links. 
- my $last_segment = pop @data_file_segments; - - for my $segment (@data_file_segments) - { - # Get the file's stat information of each segment - my $nlink_count = get_hard_link_count($segment); - ok($nlink_count == 2, "File '$segment' has 2 hard links"); - } - - # Get the file's stat information of the last segment - my $nlink_count = get_hard_link_count($last_segment); - ok($nlink_count == $last_segment_nlinks, - "File '$last_segment' has $last_segment_nlinks hard link(s)"); + my ($data_file, $last_segment_nlinks) = @_; + + my @data_file_segments = ($data_file); + + # Start checking for additional segments + my $segment_number = 1; + + while (1) + { + my $next_segment = $data_file . '.' . $segment_number; + + # If the file exists and is a regular file, add it to the list + if (-f $next_segment) + { + push @data_file_segments, $next_segment; + $segment_number++; + } + # Stop the loop if the file doesn't exist + else + { + last; + } + } + + # All segments of the given data file should contain 2 hard links, except + # for the last one, which should match the given number of links. + my $last_segment = pop @data_file_segments; + + for my $segment (@data_file_segments) + { + # Get the file's stat information of each segment + my $nlink_count = get_hard_link_count($segment); + ok($nlink_count == 2, "File '$segment' has 2 hard links"); + } + + # Get the file's stat information of the last segment + my $nlink_count = get_hard_link_count($last_segment); + ok($nlink_count == $last_segment_nlinks, + "File '$last_segment' has $last_segment_nlinks hard link(s)"); } @@ -159,11 +159,11 @@ sub check_data_file # that file. sub get_hard_link_count { - my ($file) = @_; + my ($file) = @_; - # Get file stats - my @stats = stat($file); - my $nlink = $stats[3]; # Number of hard links + # Get file stats + my @stats = stat($file); + my $nlink = $stats[3]; # Number of hard links - return $nlink; + return $nlink; } diff --git a/src/bin/pg_dump/t/001_basic.pl b/src/bin/pg_dump/t/001_basic.pl index 0be9f6dd538fd..c3c5fae11eaaf 100644 --- a/src/bin/pg_dump/t/001_basic.pl +++ b/src/bin/pg_dump/t/001_basic.pl @@ -240,17 +240,20 @@ command_fails_like( [ 'pg_restore', '--exclude-database=foo', '--globals-only', '-d', 'xxx' ], qr/\Qpg_restore: error: option --exclude-database cannot be used together with -g\/--globals-only\E/, - 'pg_restore: option --exclude-database cannot be used together with -g/--globals-only'); + 'pg_restore: option --exclude-database cannot be used together with -g/--globals-only' +); command_fails_like( [ 'pg_restore', '--exclude-database=foo', '-d', 'xxx', 'dumpdir' ], qr/\Qpg_restore: error: option --exclude-database can be used only when restoring an archive created by pg_dumpall\E/, - 'When option --exclude-database is used in pg_restore with dump of pg_dump'); + 'When option --exclude-database is used in pg_restore with dump of pg_dump' +); command_fails_like( [ 'pg_restore', '--globals-only', '-d', 'xxx', 'dumpdir' ], qr/\Qpg_restore: error: option -g\/--globals-only can be used only when restoring an archive created by pg_dumpall\E/, - 'When option --globals-only is not used in pg_restore with dump of pg_dump'); + 'When option --globals-only is not used in pg_restore with dump of pg_dump' +); # also fails for -r and -t, but it seems pointless to add more tests for those. 
command_fails_like( diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl index e1cfa99874ec4..2485d8f360e5a 100644 --- a/src/bin/pg_dump/t/002_pg_dump.pl +++ b/src/bin/pg_dump/t/002_pg_dump.pl @@ -368,7 +368,7 @@ '--data-only', '--superuser' => 'test_superuser', '--disable-triggers', - '--verbose', # no-op, just make sure it works + '--verbose', # no-op, just make sure it works 'postgres', ], }, @@ -810,8 +810,7 @@ dump_cmd => [ 'pg_dump', '--no-sync', "--file=$tempdir/no_schema.sql", '--no-schema', - '--with-statistics', - 'postgres', + '--with-statistics', 'postgres', ], },); diff --git a/src/bin/pg_dump/t/006_pg_dumpall.pl b/src/bin/pg_dump/t/006_pg_dumpall.pl index 0ea02a3a4a940..c274b777586ad 100644 --- a/src/bin/pg_dump/t/006_pg_dumpall.pl +++ b/src/bin/pg_dump/t/006_pg_dumpall.pl @@ -294,17 +294,17 @@ '--format' => 'directory', '--globals-only', '--file' => "$tempdir/dump_globals_only", - ], - restore_cmd => [ - 'pg_restore', '-C', '--globals-only', - '--format' => 'directory', - '--file' => "$tempdir/dump_globals_only.sql", - "$tempdir/dump_globals_only", - ], - like => qr/ + ], + restore_cmd => [ + 'pg_restore', '-C', '--globals-only', + '--format' => 'directory', + '--file' => "$tempdir/dump_globals_only.sql", + "$tempdir/dump_globals_only", + ], + like => qr/ ^\s*\QCREATE ROLE dumpall;\E\s*\n /xm - }, ); + },); # First execute the setup_sql foreach my $run (sort keys %pgdumpall_runs) @@ -339,7 +339,8 @@ # pg_restore --file output file. my $output_file = slurp_file("$tempdir/${run}.sql"); - if (!($pgdumpall_runs{$run}->{like}) && !($pgdumpall_runs{$run}->{unlike})) + if ( !($pgdumpall_runs{$run}->{like}) + && !($pgdumpall_runs{$run}->{unlike})) { die "missing \"like\" or \"unlike\" in test \"$run\""; } @@ -361,30 +362,38 @@ # Some negative test case with dump of pg_dumpall and restore using pg_restore # test case 1: when -C is not used in pg_restore with dump of pg_dumpall $node->command_fails_like( - [ 'pg_restore', - "$tempdir/format_custom", - '--format' => 'custom', - '--file' => "$tempdir/error_test.sql", ], - qr/\Qpg_restore: error: option -C\/--create must be specified when restoring an archive created by pg_dumpall\E/, - 'When -C is not used in pg_restore with dump of pg_dumpall'); + [ + 'pg_restore', + "$tempdir/format_custom", + '--format' => 'custom', + '--file' => "$tempdir/error_test.sql", + ], + qr/\Qpg_restore: error: option -C\/--create must be specified when restoring an archive created by pg_dumpall\E/, + 'When -C is not used in pg_restore with dump of pg_dumpall'); # test case 2: When --list option is used with dump of pg_dumpall $node->command_fails_like( - [ 'pg_restore', + [ + 'pg_restore', "$tempdir/format_custom", '-C', - '--format' => 'custom', '--list', - '--file' => "$tempdir/error_test.sql", ], + '--format' => 'custom', + '--list', + '--file' => "$tempdir/error_test.sql", + ], qr/\Qpg_restore: error: option -l\/--list cannot be used when restoring an archive created by pg_dumpall\E/, 'When --list is used in pg_restore with dump of pg_dumpall'); # test case 3: When non-exist database is given with -d option $node->command_fails_like( - [ 'pg_restore', + [ + 'pg_restore', "$tempdir/format_custom", '-C', '--format' => 'custom', - '-d' => 'dbpq', ], + '-d' => 'dbpq', + ], qr/\Qpg_restore: error: could not connect to database "dbpq"\E/, - 'When non-existent database is given with -d option in pg_restore with dump of pg_dumpall'); + 'When non-existent database is given with -d option in pg_restore with dump of pg_dumpall' +); 
$node->stop('fast'); diff --git a/src/bin/pg_rewind/t/RewindTest.pm b/src/bin/pg_rewind/t/RewindTest.pm index 3efab8317978a..b0234ebfaf218 100644 --- a/src/bin/pg_rewind/t/RewindTest.pm +++ b/src/bin/pg_rewind/t/RewindTest.pm @@ -285,7 +285,7 @@ sub run_pg_rewind # Check that pg_rewind with dbname and --write-recovery-conf # wrote the dbname in the generated primary_conninfo value. like(slurp_file("$primary_pgdata/postgresql.auto.conf"), - qr/dbname=postgres/m, 'recovery conf file sets dbname'); + qr/dbname=postgres/m, 'recovery conf file sets dbname'); # Check that standby.signal is here as recovery configuration # was requested. diff --git a/src/bin/pg_upgrade/t/004_subscription.pl b/src/bin/pg_upgrade/t/004_subscription.pl index c545abf65816e..e46f02c6cc612 100644 --- a/src/bin/pg_upgrade/t/004_subscription.pl +++ b/src/bin/pg_upgrade/t/004_subscription.pl @@ -53,7 +53,8 @@ $old_sub->stop; -$new_sub->append_conf('postgresql.conf', "max_active_replication_origins = 0"); +$new_sub->append_conf('postgresql.conf', + "max_active_replication_origins = 0"); # pg_upgrade will fail because the new cluster has insufficient # max_active_replication_origins. @@ -80,7 +81,8 @@ ); # Reset max_active_replication_origins -$new_sub->append_conf('postgresql.conf', "max_active_replication_origins = 10"); +$new_sub->append_conf('postgresql.conf', + "max_active_replication_origins = 10"); # Cleanup $publisher->safe_psql('postgres', "DROP PUBLICATION regress_pub1"); diff --git a/src/bin/pg_upgrade/t/006_transfer_modes.pl b/src/bin/pg_upgrade/t/006_transfer_modes.pl index 550a63fdf7d47..58fe8a8c7dcea 100644 --- a/src/bin/pg_upgrade/t/006_transfer_modes.pl +++ b/src/bin/pg_upgrade/t/006_transfer_modes.pl @@ -13,7 +13,8 @@ sub test_mode { my ($mode) = @_; - my $old = PostgreSQL::Test::Cluster->new('old', install_path => $ENV{oldinstall}); + my $old = + PostgreSQL::Test::Cluster->new('old', install_path => $ENV{oldinstall}); my $new = PostgreSQL::Test::Cluster->new('new'); # --swap can't be used to upgrade from versions older than 10, so just skip @@ -40,9 +41,11 @@ sub test_mode # Create a small variety of simple test objects on the old cluster. We'll # check that these reach the new version after upgrading. 
$old->start; - $old->safe_psql('postgres', "CREATE TABLE test1 AS SELECT generate_series(1, 100)"); + $old->safe_psql('postgres', + "CREATE TABLE test1 AS SELECT generate_series(1, 100)"); $old->safe_psql('postgres', "CREATE DATABASE testdb1"); - $old->safe_psql('testdb1', "CREATE TABLE test2 AS SELECT generate_series(200, 300)"); + $old->safe_psql('testdb1', + "CREATE TABLE test2 AS SELECT generate_series(200, 300)"); $old->safe_psql('testdb1', "VACUUM FULL test2"); $old->safe_psql('testdb1', "CREATE SEQUENCE testseq START 5432"); @@ -51,10 +54,15 @@ sub test_mode if (defined($ENV{oldinstall})) { my $tblspc = PostgreSQL::Test::Utils::tempdir_short(); - $old->safe_psql('postgres', "CREATE TABLESPACE test_tblspc LOCATION '$tblspc'"); - $old->safe_psql('postgres', "CREATE DATABASE testdb2 TABLESPACE test_tblspc"); - $old->safe_psql('postgres', "CREATE TABLE test3 TABLESPACE test_tblspc AS SELECT generate_series(300, 401)"); - $old->safe_psql('testdb2', "CREATE TABLE test4 AS SELECT generate_series(400, 502)"); + $old->safe_psql('postgres', + "CREATE TABLESPACE test_tblspc LOCATION '$tblspc'"); + $old->safe_psql('postgres', + "CREATE DATABASE testdb2 TABLESPACE test_tblspc"); + $old->safe_psql('postgres', + "CREATE TABLE test3 TABLESPACE test_tblspc AS SELECT generate_series(300, 401)" + ); + $old->safe_psql('testdb2', + "CREATE TABLE test4 AS SELECT generate_series(400, 502)"); } $old->stop; @@ -90,9 +98,11 @@ sub test_mode # tablespace. if (defined($ENV{oldinstall})) { - $result = $new->safe_psql('postgres', "SELECT COUNT(*) FROM test3"); + $result = + $new->safe_psql('postgres', "SELECT COUNT(*) FROM test3"); is($result, '102', "test3 data after pg_upgrade $mode"); - $result = $new->safe_psql('testdb2', "SELECT COUNT(*) FROM test4"); + $result = + $new->safe_psql('testdb2', "SELECT COUNT(*) FROM test4"); is($result, '103', "test4 data after pg_upgrade $mode"); } $new->stop; diff --git a/src/bin/scripts/t/100_vacuumdb.pl b/src/bin/scripts/t/100_vacuumdb.pl index 75ac24a7a5539..ff56a13b46bbb 100644 --- a/src/bin/scripts/t/100_vacuumdb.pl +++ b/src/bin/scripts/t/100_vacuumdb.pl @@ -238,62 +238,105 @@ 'cannot use option --all and a dbname as argument at the same time'); $node->safe_psql('postgres', - 'CREATE TABLE regression_vacuumdb_test AS select generate_series(1, 10) a, generate_series(2, 11) b;'); + 'CREATE TABLE regression_vacuumdb_test AS select generate_series(1, 10) a, generate_series(2, 11) b;' +); $node->issues_sql_like( - [ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ], + [ + 'vacuumdb', '--analyze-only', + '--missing-stats-only', '-t', + 'regression_vacuumdb_test', 'postgres' + ], qr/statement:\ ANALYZE/sx, '--missing-stats-only with missing stats'); $node->issues_sql_unlike( - [ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ], + [ + 'vacuumdb', '--analyze-only', + '--missing-stats-only', '-t', + 'regression_vacuumdb_test', 'postgres' + ], qr/statement:\ ANALYZE/sx, '--missing-stats-only with no missing stats'); $node->safe_psql('postgres', - 'CREATE INDEX regression_vacuumdb_test_idx ON regression_vacuumdb_test (mod(a, 2));'); + 'CREATE INDEX regression_vacuumdb_test_idx ON regression_vacuumdb_test (mod(a, 2));' +); $node->issues_sql_like( - [ 'vacuumdb', '--analyze-in-stages', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ], + [ + 'vacuumdb', '--analyze-in-stages', + '--missing-stats-only', '-t', + 'regression_vacuumdb_test', 'postgres' + ], 
qr/statement:\ ANALYZE/sx, '--missing-stats-only with missing index expression stats'); $node->issues_sql_unlike( - [ 'vacuumdb', '--analyze-in-stages', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ], + [ + 'vacuumdb', '--analyze-in-stages', + '--missing-stats-only', '-t', + 'regression_vacuumdb_test', 'postgres' + ], qr/statement:\ ANALYZE/sx, '--missing-stats-only with no missing index expression stats'); $node->safe_psql('postgres', - 'CREATE STATISTICS regression_vacuumdb_test_stat ON a, b FROM regression_vacuumdb_test;'); + 'CREATE STATISTICS regression_vacuumdb_test_stat ON a, b FROM regression_vacuumdb_test;' +); $node->issues_sql_like( - [ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ], + [ + 'vacuumdb', '--analyze-only', + '--missing-stats-only', '-t', + 'regression_vacuumdb_test', 'postgres' + ], qr/statement:\ ANALYZE/sx, '--missing-stats-only with missing extended stats'); $node->issues_sql_unlike( - [ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ], + [ + 'vacuumdb', '--analyze-only', + '--missing-stats-only', '-t', + 'regression_vacuumdb_test', 'postgres' + ], qr/statement:\ ANALYZE/sx, '--missing-stats-only with no missing extended stats'); $node->safe_psql('postgres', "CREATE TABLE regression_vacuumdb_child (a INT) INHERITS (regression_vacuumdb_test);\n" - . "INSERT INTO regression_vacuumdb_child VALUES (1, 2);\n" - . "ANALYZE regression_vacuumdb_child;\n"); + . "INSERT INTO regression_vacuumdb_child VALUES (1, 2);\n" + . "ANALYZE regression_vacuumdb_child;\n"); $node->issues_sql_like( - [ 'vacuumdb', '--analyze-in-stages', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ], + [ + 'vacuumdb', '--analyze-in-stages', + '--missing-stats-only', '-t', + 'regression_vacuumdb_test', 'postgres' + ], qr/statement:\ ANALYZE/sx, '--missing-stats-only with missing inherited stats'); $node->issues_sql_unlike( - [ 'vacuumdb', '--analyze-in-stages', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ], + [ + 'vacuumdb', '--analyze-in-stages', + '--missing-stats-only', '-t', + 'regression_vacuumdb_test', 'postgres' + ], qr/statement:\ ANALYZE/sx, '--missing-stats-only with no missing inherited stats'); $node->safe_psql('postgres', "CREATE TABLE regression_vacuumdb_parted (a INT) PARTITION BY LIST (a);\n" - . "CREATE TABLE regression_vacuumdb_part1 PARTITION OF regression_vacuumdb_parted FOR VALUES IN (1);\n" - . "INSERT INTO regression_vacuumdb_parted VALUES (1);\n" - . "ANALYZE regression_vacuumdb_part1;\n"); + . "CREATE TABLE regression_vacuumdb_part1 PARTITION OF regression_vacuumdb_parted FOR VALUES IN (1);\n" + . "INSERT INTO regression_vacuumdb_parted VALUES (1);\n" + . 
"ANALYZE regression_vacuumdb_part1;\n"); $node->issues_sql_like( - [ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_parted', 'postgres' ], + [ + 'vacuumdb', '--analyze-only', + '--missing-stats-only', '-t', + 'regression_vacuumdb_parted', 'postgres' + ], qr/statement:\ ANALYZE/sx, '--missing-stats-only with missing partition stats'); $node->issues_sql_unlike( - [ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_parted', 'postgres' ], + [ + 'vacuumdb', '--analyze-only', + '--missing-stats-only', '-t', + 'regression_vacuumdb_parted', 'postgres' + ], qr/statement:\ ANALYZE/sx, '--missing-stats-only with no missing partition stats'); diff --git a/src/include/catalog/pg_collation.dat b/src/include/catalog/pg_collation.dat index fb76c421931ea..d8b5d5d2d856b 100644 --- a/src/include/catalog/pg_collation.dat +++ b/src/include/catalog/pg_collation.dat @@ -33,7 +33,8 @@ descr => 'sorts by Unicode code point; Unicode and POSIX character semantics', collname => 'pg_c_utf8', collprovider => 'b', collencoding => '6', colllocale => 'C.UTF-8', collversion => '1' }, -{ oid => '9535', descr => 'sorts by Unicode code point; Unicode character semantics', +{ oid => '9535', + descr => 'sorts by Unicode code point; Unicode character semantics', collname => 'pg_unicode_fast', collprovider => 'b', collencoding => '6', colllocale => 'PG_UNICODE_FAST', collversion => '1' }, diff --git a/src/include/catalog/pg_proc.dat b/src/include/catalog/pg_proc.dat index d3d28a263fa99..4efc1bc499ab5 100644 --- a/src/include/catalog/pg_proc.dat +++ b/src/include/catalog/pg_proc.dat @@ -1190,14 +1190,14 @@ proname => 'bytea', proleakproof => 't', prorettype => 'bytea', proargtypes => 'int8', prosrc => 'int8_bytea' }, { oid => '8580', descr => 'convert bytea to int2', - proname => 'int2', prorettype => 'int2', - proargtypes => 'bytea', prosrc => 'bytea_int2' }, + proname => 'int2', prorettype => 'int2', proargtypes => 'bytea', + prosrc => 'bytea_int2' }, { oid => '8581', descr => 'convert bytea to int4', - proname => 'int4', prorettype => 'int4', - proargtypes => 'bytea', prosrc => 'bytea_int4' }, + proname => 'int4', prorettype => 'int4', proargtypes => 'bytea', + prosrc => 'bytea_int4' }, { oid => '8582', descr => 'convert bytea to int8', - proname => 'int8', prorettype => 'int8', - proargtypes => 'bytea', prosrc => 'bytea_int8' }, + proname => 'int8', prorettype => 'int8', proargtypes => 'bytea', + prosrc => 'bytea_int8' }, { oid => '449', descr => 'hash', proname => 'hashint2', prorettype => 'int4', proargtypes => 'int2', @@ -3597,7 +3597,8 @@ { oid => '8702', descr => 'gamma function', proname => 'gamma', prorettype => 'float8', proargtypes => 'float8', prosrc => 'dgamma' }, -{ oid => '8703', descr => 'natural logarithm of absolute value of gamma function', +{ oid => '8703', + descr => 'natural logarithm of absolute value of gamma function', proname => 'lgamma', prorettype => 'float8', proargtypes => 'float8', prosrc => 'dlgamma' }, @@ -9360,8 +9361,8 @@ proname => 'to_json', provolatile => 's', prorettype => 'json', proargtypes => 'anyelement', prosrc => 'to_json' }, { oid => '3261', descr => 'remove object fields with null values from json', - proname => 'json_strip_nulls', prorettype => 'json', proargtypes => 'json bool', - prosrc => 'json_strip_nulls' }, + proname => 'json_strip_nulls', prorettype => 'json', + proargtypes => 'json bool', prosrc => 'json_strip_nulls' }, { oid => '3947', proname => 'json_object_field', prorettype => 'json', @@ -9483,17 +9484,19 
@@ proname => 'uuid_hash_extended', prorettype => 'int8', proargtypes => 'uuid int8', prosrc => 'uuid_hash_extended' }, { oid => '3432', descr => 'generate random UUID', - proname => 'gen_random_uuid', provolatile => 'v', - prorettype => 'uuid', proargtypes => '', prosrc => 'gen_random_uuid' }, + proname => 'gen_random_uuid', provolatile => 'v', prorettype => 'uuid', + proargtypes => '', prosrc => 'gen_random_uuid' }, { oid => '9895', descr => 'generate UUID version 4', - proname => 'uuidv4', provolatile => 'v', - prorettype => 'uuid', proargtypes => '', prosrc => 'gen_random_uuid' }, + proname => 'uuidv4', provolatile => 'v', prorettype => 'uuid', + proargtypes => '', prosrc => 'gen_random_uuid' }, { oid => '9896', descr => 'generate UUID version 7', - proname => 'uuidv7', provolatile => 'v', - prorettype => 'uuid', proargtypes => '', prosrc => 'uuidv7' }, -{ oid => '9897', descr => 'generate UUID version 7 with a timestamp shifted by specified interval', - proname => 'uuidv7', provolatile => 'v', proargnames => '{shift}', - prorettype => 'uuid', proargtypes => 'interval', prosrc => 'uuidv7_interval' }, + proname => 'uuidv7', provolatile => 'v', prorettype => 'uuid', + proargtypes => '', prosrc => 'uuidv7' }, +{ oid => '9897', + descr => 'generate UUID version 7 with a timestamp shifted by specified interval', + proname => 'uuidv7', provolatile => 'v', prorettype => 'uuid', + proargtypes => 'interval', proargnames => '{shift}', + prosrc => 'uuidv7_interval' }, { oid => '6342', descr => 'extract timestamp from UUID', proname => 'uuid_extract_timestamp', proleakproof => 't', prorettype => 'timestamptz', proargtypes => 'uuid', @@ -10299,8 +10302,8 @@ prorettype => 'jsonb', proargtypes => '', prosrc => 'jsonb_build_object_noargs' }, { oid => '3262', descr => 'remove object fields with null values from jsonb', - proname => 'jsonb_strip_nulls', prorettype => 'jsonb', proargtypes => 'jsonb bool', - prosrc => 'jsonb_strip_nulls' }, + proname => 'jsonb_strip_nulls', prorettype => 'jsonb', + proargtypes => 'jsonb bool', prosrc => 'jsonb_strip_nulls' }, { oid => '3478', proname => 'jsonb_object_field', prorettype => 'jsonb', @@ -12508,34 +12511,22 @@ proargnames => '{summarized_tli,summarized_lsn,pending_lsn,summarizer_pid}', prosrc => 'pg_get_wal_summarizer_state' }, # Statistics Import -{ oid => '8459', - descr => 'restore statistics on relation', - proname => 'pg_restore_relation_stats', provolatile => 'v', proisstrict => 'f', - provariadic => 'any', - proparallel => 'u', prorettype => 'bool', - proargtypes => 'any', - proargnames => '{kwargs}', - proargmodes => '{v}', - prosrc => 'pg_restore_relation_stats' }, -{ oid => '9160', - descr => 'clear statistics on relation', - proname => 'pg_clear_relation_stats', provolatile => 'v', proisstrict => 'f', - proparallel => 'u', prorettype => 'void', - proargtypes => 'text text', - proargnames => '{schemaname,relname}', - prosrc => 'pg_clear_relation_stats' }, -{ oid => '8461', - descr => 'restore statistics on attribute', - proname => 'pg_restore_attribute_stats', provolatile => 'v', proisstrict => 'f', - provariadic => 'any', - proparallel => 'u', prorettype => 'bool', - proargtypes => 'any', - proargnames => '{kwargs}', - proargmodes => '{v}', - prosrc => 'pg_restore_attribute_stats' }, -{ oid => '9162', - descr => 'clear statistics on attribute', - proname => 'pg_clear_attribute_stats', provolatile => 'v', proisstrict => 'f', +{ oid => '8459', descr => 'restore statistics on relation', + proname => 'pg_restore_relation_stats', provariadic => 'any', + 
proisstrict => 'f', provolatile => 'v', proparallel => 'u', + prorettype => 'bool', proargtypes => 'any', proargmodes => '{v}', + proargnames => '{kwargs}', prosrc => 'pg_restore_relation_stats' }, +{ oid => '9160', descr => 'clear statistics on relation', + proname => 'pg_clear_relation_stats', proisstrict => 'f', provolatile => 'v', + proparallel => 'u', prorettype => 'void', proargtypes => 'text text', + proargnames => '{schemaname,relname}', prosrc => 'pg_clear_relation_stats' }, +{ oid => '8461', descr => 'restore statistics on attribute', + proname => 'pg_restore_attribute_stats', provariadic => 'any', + proisstrict => 'f', provolatile => 'v', proparallel => 'u', + prorettype => 'bool', proargtypes => 'any', proargmodes => '{v}', + proargnames => '{kwargs}', prosrc => 'pg_restore_attribute_stats' }, +{ oid => '9162', descr => 'clear statistics on attribute', + proname => 'pg_clear_attribute_stats', proisstrict => 'f', provolatile => 'v', proparallel => 'u', prorettype => 'void', proargtypes => 'text text text bool', proargnames => '{schemaname,relname,attname,inherited}', @@ -12544,13 +12535,13 @@ # GiST stratnum implementations { oid => '8047', descr => 'GiST support', proname => 'gist_translate_cmptype_common', prorettype => 'int2', - proargtypes => 'int4', - prosrc => 'gist_translate_cmptype_common' }, + proargtypes => 'int4', prosrc => 'gist_translate_cmptype_common' }, # AIO related functions { oid => '9200', descr => 'information about in-progress asynchronous IOs', proname => 'pg_get_aios', prorows => '100', proretset => 't', - provolatile => 'v', proparallel => 'r', prorettype => 'record', proargtypes => '', + provolatile => 'v', proparallel => 'r', prorettype => 'record', + proargtypes => '', proallargtypes => '{int4,int4,int8,text,text,int8,int8,text,int2,int4,text,text,bool,bool,bool}', proargmodes => '{o,o,o,o,o,o,o,o,o,o,o,o,o,o,o}', proargnames => '{pid,io_id,io_generation,state,operation,off,length,target,handle_data_len,raw_result,result,target_desc,f_sync,f_localmem,f_buffered}', diff --git a/src/test/authentication/t/001_password.pl b/src/test/authentication/t/001_password.pl index 37d96d95a1aeb..a16e9a563f3fd 100644 --- a/src/test/authentication/t/001_password.pl +++ b/src/test/authentication/t/001_password.pl @@ -79,39 +79,40 @@ sub test_conn # other tests are added to this file in the future $node->safe_psql('postgres', "CREATE DATABASE test_log_connections"); -my $log_connections = $node->safe_psql('test_log_connections', q(SHOW log_connections;)); +my $log_connections = + $node->safe_psql('test_log_connections', q(SHOW log_connections;)); is($log_connections, 'on', qq(check log connections has expected value 'on')); -$node->connect_ok('test_log_connections', +$node->connect_ok( + 'test_log_connections', qq(log_connections 'on' works as expected for backwards compatibility), log_like => [ qr/connection received/, qr/connection authenticated/, qr/connection authorized: user=\S+ database=test_log_connections/, ], - log_unlike => [ - qr/connection ready/, - ],); + log_unlike => [ qr/connection ready/, ],); -$node->safe_psql('test_log_connections', +$node->safe_psql( + 'test_log_connections', q[ALTER SYSTEM SET log_connections = receipt,authorization,setup_durations; SELECT pg_reload_conf();]); -$node->connect_ok('test_log_connections', +$node->connect_ok( + 'test_log_connections', q(log_connections with subset of specified options logs only those aspects), log_like => [ qr/connection received/, qr/connection authorized: user=\S+ database=test_log_connections/, 
qr/connection ready/, ], - log_unlike => [ - qr/connection authenticated/, - ],); + log_unlike => [ qr/connection authenticated/, ],); $node->safe_psql('test_log_connections', qq(ALTER SYSTEM SET log_connections = 'all'; SELECT pg_reload_conf();)); -$node->connect_ok('test_log_connections', +$node->connect_ok( + 'test_log_connections', qq(log_connections 'all' logs all available connection aspects), log_like => [ qr/connection received/, diff --git a/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl b/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl index 61524bdbd8f28..f967885307045 100644 --- a/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl +++ b/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl @@ -53,7 +53,8 @@ BEGIN $node->command_ok( [ 'libpq_pipeline', @extraargs, - $testname, $node->connstr('postgres') . " max_protocol_version=latest" + $testname, + $node->connstr('postgres') . " max_protocol_version=latest" ], "libpq_pipeline $testname"); @@ -76,7 +77,8 @@ BEGIN # test separately that it still works the old protocol version too. $node->command_ok( [ - 'libpq_pipeline', 'cancel', $node->connstr('postgres') . " max_protocol_version=3.0" + 'libpq_pipeline', 'cancel', + $node->connstr('postgres') . " max_protocol_version=3.0" ], "libpq_pipeline cancel with protocol 3.0"); diff --git a/src/test/modules/test_aio/t/001_aio.pl b/src/test/modules/test_aio/t/001_aio.pl index 4527c70785d34..82ffffc058f75 100644 --- a/src/test/modules/test_aio/t/001_aio.pl +++ b/src/test/modules/test_aio/t/001_aio.pl @@ -1123,7 +1123,8 @@ sub test_zero { # Create a corruption and then read the block without waiting for # completion. - $psql_a->query(qq( + $psql_a->query( + qq( SELECT modify_rel_block('tbl_zero', 1, corrupt_header=>true); SELECT read_rel_block_ll('tbl_zero', 1, wait_complete=>false, zero_on_error=>true) )); @@ -1133,7 +1134,8 @@ sub test_zero $psql_b, "$persistency: test completing read by other session doesn't generate warning", qq(SELECT count(*) > 0 FROM tbl_zero;), - qr/^t$/, qr/^$/); + qr/^t$/, + qr/^$/); } # Clean up @@ -1355,18 +1357,24 @@ sub test_ignore_checksum )); $psql->query_safe($invalidate_sql); - psql_like($io_method, $psql, + psql_like( + $io_method, + $psql, "reading block w/ wrong checksum with ignore_checksum_failure=off fails", - $count_sql, qr/^$/, qr/ERROR: invalid page in block/); + $count_sql, + qr/^$/, + qr/ERROR: invalid page in block/); $psql->query_safe("SET ignore_checksum_failure=on"); $psql->query_safe($invalidate_sql); - psql_like($io_method, $psql, - "reading block w/ wrong checksum with ignore_checksum_failure=off succeeds", - $count_sql, - qr/^$expect$/, - qr/WARNING: ignoring (checksum failure|\d checksum failures)/); + psql_like( + $io_method, + $psql, + "reading block w/ wrong checksum with ignore_checksum_failure=off succeeds", + $count_sql, + qr/^$expect$/, + qr/WARNING: ignoring (checksum failure|\d checksum failures)/); # Verify that ignore_checksum_failure=off works in multi-block reads @@ -1432,19 +1440,22 @@ sub test_ignore_checksum # file. 
$node->wait_for_log(qr/LOG: ignoring checksum failure in block 2/, - $log_location); + $log_location); ok(1, "$io_method: found information about checksum failure in block 2"); - $node->wait_for_log(qr/LOG: invalid page in block 3 of relation base.*; zeroing out page/, - $log_location); + $node->wait_for_log( + qr/LOG: invalid page in block 3 of relation base.*; zeroing out page/, + $log_location); ok(1, "$io_method: found information about invalid page in block 3"); - $node->wait_for_log(qr/LOG: invalid page in block 4 of relation base.*; zeroing out page/, - $log_location); + $node->wait_for_log( + qr/LOG: invalid page in block 4 of relation base.*; zeroing out page/, + $log_location); ok(1, "$io_method: found information about checksum failure in block 4"); - $node->wait_for_log(qr/LOG: invalid page in block 5 of relation base.*; zeroing out page/, - $log_location); + $node->wait_for_log( + qr/LOG: invalid page in block 5 of relation base.*; zeroing out page/, + $log_location); ok(1, "$io_method: found information about checksum failure in block 5"); @@ -1462,8 +1473,7 @@ sub test_ignore_checksum qq( SELECT read_rel_block_ll('tbl_cs_fail', 3, nblocks=>1, zero_on_error=>false);), qr/^$/, - qr/^psql::\d+: ERROR: invalid page in block 3 of relation/ - ); + qr/^psql::\d+: ERROR: invalid page in block 3 of relation/); psql_like( $io_method, diff --git a/src/test/postmaster/t/002_connection_limits.pl b/src/test/postmaster/t/002_connection_limits.pl index 6442500fc379a..4a7fb16261f86 100644 --- a/src/test/postmaster/t/002_connection_limits.pl +++ b/src/test/postmaster/t/002_connection_limits.pl @@ -68,7 +68,8 @@ sub connect_fails_wait my $log_location = -s $node->logfile; $node->connect_fails($connstr, $test_name, %params); - $node->wait_for_log(qr/DEBUG: (00000: )?client backend.*exited with exit code 1/, + $node->wait_for_log( + qr/DEBUG: (00000: )?client backend.*exited with exit code 1/, $log_location); ok(1, "$test_name: client backend process exited"); } diff --git a/src/test/recovery/t/040_standby_failover_slots_sync.pl b/src/test/recovery/t/040_standby_failover_slots_sync.pl index 9c8b49e942d88..2c61c51e914df 100644 --- a/src/test/recovery/t/040_standby_failover_slots_sync.pl +++ b/src/test/recovery/t/040_standby_failover_slots_sync.pl @@ -941,8 +941,7 @@ 'synced slot retained on the new primary'); # Commit the prepared transaction -$standby1->safe_psql('postgres', - "COMMIT PREPARED 'test_twophase_slotsync';"); +$standby1->safe_psql('postgres', "COMMIT PREPARED 'test_twophase_slotsync';"); $standby1->wait_for_catchup('regress_mysub1'); # Confirm that the prepared transaction is replicated to the subscriber diff --git a/src/test/recovery/t/048_vacuum_horizon_floor.pl b/src/test/recovery/t/048_vacuum_horizon_floor.pl index d48a6ef7e0f24..e56fce59d58ea 100644 --- a/src/test/recovery/t/048_vacuum_horizon_floor.pl +++ b/src/test/recovery/t/048_vacuum_horizon_floor.pl @@ -47,7 +47,7 @@ $node_primary->background_psql($test_db, on_error_stop => 1); # Long-running Primary Session B -my $psql_primaryB = +my $psql_primaryB = $node_primary->background_psql($test_db, on_error_stop => 1); # Our test relies on two rounds of index vacuuming for reasons elaborated @@ -81,7 +81,8 @@ # insert and delete enough rows that we force at least one round of index # vacuuming before getting to a dead tuple which was killed after the standby # is disconnected. 
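# (Editorial aside, not part of the patch: $psql_primaryA and $psql_primaryB
# above are PostgreSQL::Test::BackgroundPsql handles. A minimal sketch of how
# such a long-lived session is driven, using only calls that appear elsewhere
# in these tests; the SQL and the echo marker are illustrative assumptions.)
my $bg = $node_primary->background_psql($test_db, on_error_stop => 1);
$bg->query_safe("BEGIN;");    # run one statement, fail the test on error
$bg->query_until(
	qr/^marker$/m, qq[
	DECLARE c CURSOR FOR SELECT 1;
	\\echo marker
]);    # send statements, return once the session's output matches the pattern
$bg->quit;    # terminate the session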
-$node_primary->safe_psql($test_db, qq[ +$node_primary->safe_psql( + $test_db, qq[ CREATE TABLE ${table1}(col1 int) WITH (autovacuum_enabled=false, fillfactor=10); INSERT INTO $table1 VALUES(7); @@ -98,21 +99,24 @@ $node_primary->wait_for_catchup($node_replica, 'replay', $primary_lsn); # Test that the WAL receiver is up and running. -$node_replica->poll_query_until($test_db, qq[ - SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);] , 't'); +$node_replica->poll_query_until( + $test_db, qq[ + SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);], 't'); # Set primary_conninfo to something invalid on the replica and reload the # config. Once the config is reloaded, the startup process will force the WAL # receiver to restart and it will be unable to reconnect because of the # invalid connection information. -$node_replica->safe_psql($test_db, qq[ +$node_replica->safe_psql( + $test_db, qq[ ALTER SYSTEM SET primary_conninfo = ''; SELECT pg_reload_conf(); ]); # Wait until the WAL receiver has shut down and been unable to start up again. -$node_replica->poll_query_until($test_db, qq[ - SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);] , 'f'); +$node_replica->poll_query_until( + $test_db, qq[ + SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);], 'f'); # Now insert and update a tuple which will be visible to the vacuum on the # primary but which will have xmax newer than the oldest xmin on the standby @@ -123,7 +127,7 @@ UPDATE $table1 SET col1 = 100 WHERE col1 = 99; SELECT 'after_update'; ] - ); +); # Make sure the UPDATE finished like($res, qr/^after_update$/m, "UPDATE occurred on primary session A"); @@ -148,7 +152,7 @@ DECLARE $primary_cursor1 CURSOR FOR SELECT * FROM $table1 WHERE col1 = 7; FETCH $primary_cursor1; ] - ); +); is($res, 7, qq[Cursor query returned $res. Expected value 7.]); @@ -183,7 +187,8 @@ # just waiting on the lock to start vacuuming. We don't want the standby to # re-establish a connection to the primary and push the horizon back until # we've saved initial values in GlobalVisState and calculated OldestXmin. -$node_primary->poll_query_until($test_db, +$node_primary->poll_query_until( + $test_db, qq[ SELECT count(*) >= 1 FROM pg_stat_activity WHERE pid = $vacuum_pid @@ -192,8 +197,9 @@ 't'); # Ensure the WAL receiver is still not active on the replica. -$node_replica->poll_query_until($test_db, qq[ - SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);] , 'f'); +$node_replica->poll_query_until( + $test_db, qq[ + SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);], 'f'); # Allow the WAL receiver connection to re-establish. $node_replica->safe_psql( @@ -203,15 +209,17 @@ ]); # Ensure the new WAL receiver has connected. -$node_replica->poll_query_until($test_db, qq[ - SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);] , 't'); +$node_replica->poll_query_until( + $test_db, qq[ + SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);], 't'); # Once the WAL sender is shown on the primary, the replica should have # connected with the primary and pushed the horizon backward. Primary Session # A won't see that until the VACUUM FREEZE proceeds and does its first round # of index vacuuming. -$node_primary->poll_query_until($test_db, qq[ - SELECT EXISTS (SELECT * FROM pg_stat_replication);] , 't'); +$node_primary->poll_query_until( + $test_db, qq[ + SELECT EXISTS (SELECT * FROM pg_stat_replication);], 't'); # Move the cursor forward to the next 7. 
We inserted the 7 much later, so # advancing the cursor should allow vacuum to proceed vacuuming most pages of @@ -225,20 +233,21 @@ # Prevent the test from incorrectly passing by confirming that we did indeed # do a pass of index vacuuming. -$node_primary->poll_query_until($test_db, qq[ +$node_primary->poll_query_until( + $test_db, qq[ SELECT index_vacuum_count > 0 FROM pg_stat_progress_vacuum WHERE datname='$test_db' AND relid::regclass = '$table1'::regclass; - ] , 't'); + ], 't'); # Commit the transaction with the open cursor so that the VACUUM can finish. $psql_primaryB->query_until( - qr/^commit$/m, - qq[ + qr/^commit$/m, + qq[ COMMIT; \\echo commit ] - ); +); # VACUUM proceeds with pruning and does a visibility check on each tuple. In # older versions of Postgres, pruning found our final dead tuple @@ -252,7 +261,8 @@ # With the fix, VACUUM should finish successfully, incrementing the table # vacuum_count. -$node_primary->poll_query_until($test_db, +$node_primary->poll_query_until( + $test_db, qq[ SELECT vacuum_count > 0 FROM pg_stat_all_tables WHERE relname = '${table1}'; diff --git a/src/test/ssl/t/SSL/Server.pm b/src/test/ssl/t/SSL/Server.pm index 96f0f201e9c0b..efbd0dafaf60d 100644 --- a/src/test/ssl/t/SSL/Server.pm +++ b/src/test/ssl/t/SSL/Server.pm @@ -318,7 +318,8 @@ sub switch_server_cert $node->append_conf('sslconfig.conf', "ssl=on"); $node->append_conf('sslconfig.conf', $backend->set_server_cert(\%params)); # use lists of ECDH curves and cipher suites for syntax testing - $node->append_conf('sslconfig.conf', 'ssl_groups=X25519:prime256v1:secp521r1'); + $node->append_conf('sslconfig.conf', + 'ssl_groups=X25519:prime256v1:secp521r1'); $node->append_conf('sslconfig.conf', 'ssl_tls13_ciphers=TLS_AES_256_GCM_SHA384:TLS_AES_128_GCM_SHA256'); diff --git a/src/test/subscription/t/007_ddl.pl b/src/test/subscription/t/007_ddl.pl index 7d12bcbddb687..2a45fb13739b7 100644 --- a/src/test/subscription/t/007_ddl.pl +++ b/src/test/subscription/t/007_ddl.pl @@ -70,7 +70,8 @@ ); # Cleanup -$node_publisher->safe_psql('postgres', qq[ +$node_publisher->safe_psql( + 'postgres', qq[ DROP PUBLICATION mypub; SELECT pg_drop_replication_slot('mysub'); ]); @@ -86,32 +87,38 @@ sub test_swap my ($table_name, $pubname, $appname) = @_; # Confirms tuples can be replicated - $node_publisher->safe_psql('postgres', "INSERT INTO $table_name VALUES (1);"); + $node_publisher->safe_psql('postgres', + "INSERT INTO $table_name VALUES (1);"); $node_publisher->wait_for_catchup($appname); my $result = - $node_subscriber->safe_psql('postgres', "SELECT a FROM $table_name"); - is($result, qq(1), 'check replication worked well before renaming a publication'); + $node_subscriber->safe_psql('postgres', "SELECT a FROM $table_name"); + is($result, qq(1), + 'check replication worked well before renaming a publication'); # Swap the name of publications; $pubname <-> pub_empty - $node_publisher->safe_psql('postgres', qq[ + $node_publisher->safe_psql( + 'postgres', qq[ ALTER PUBLICATION $pubname RENAME TO tap_pub_tmp; ALTER PUBLICATION pub_empty RENAME TO $pubname; ALTER PUBLICATION tap_pub_tmp RENAME TO pub_empty; ]); # Insert the data again - $node_publisher->safe_psql('postgres', "INSERT INTO $table_name VALUES (2);"); + $node_publisher->safe_psql('postgres', + "INSERT INTO $table_name VALUES (2);"); $node_publisher->wait_for_catchup($appname); # Confirms the second tuple won't be replicated because $pubname does not # contain relations anymore. 
$result = - $node_subscriber->safe_psql('postgres', "SELECT a FROM $table_name ORDER BY a"); + $node_subscriber->safe_psql('postgres', + "SELECT a FROM $table_name ORDER BY a"); is($result, qq(1), 'check the tuple inserted after the RENAME was not replicated'); # Restore the name of publications because it can be called several times - $node_publisher->safe_psql('postgres', qq[ + $node_publisher->safe_psql( + 'postgres', qq[ ALTER PUBLICATION $pubname RENAME TO tap_pub_tmp; ALTER PUBLICATION pub_empty RENAME TO $pubname; ALTER PUBLICATION tap_pub_tmp RENAME TO pub_empty; @@ -124,7 +131,8 @@ sub test_swap $node_subscriber->safe_psql('postgres', $ddl); # Create publications and a subscription -$node_publisher->safe_psql('postgres', qq[ +$node_publisher->safe_psql( + 'postgres', qq[ CREATE PUBLICATION pub_empty; CREATE PUBLICATION pub_for_tab FOR TABLE test1; CREATE PUBLICATION pub_for_all_tables FOR ALL TABLES; @@ -139,19 +147,20 @@ sub test_swap # Switches a publication which includes all tables $node_subscriber->safe_psql('postgres', - "ALTER SUBSCRIPTION tap_sub SET PUBLICATION pub_for_all_tables;" -); + "ALTER SUBSCRIPTION tap_sub SET PUBLICATION pub_for_all_tables;"); $node_subscriber->wait_for_subscription_sync($node_publisher, 'tap_sub'); # Confirms RENAME command works well for ALL TABLES publication test_swap('test2', 'pub_for_all_tables', 'tap_sub'); # Cleanup -$node_publisher->safe_psql('postgres', qq[ +$node_publisher->safe_psql( + 'postgres', qq[ DROP PUBLICATION pub_empty, pub_for_tab, pub_for_all_tables; DROP TABLE test1, test2; ]); -$node_subscriber->safe_psql('postgres', qq[ +$node_subscriber->safe_psql( + 'postgres', qq[ DROP SUBSCRIPTION tap_sub; DROP TABLE test1, test2; ]); diff --git a/src/test/subscription/t/013_partition.pl b/src/test/subscription/t/013_partition.pl index 61b0cb4aa1ac1..4f78dd48815f0 100644 --- a/src/test/subscription/t/013_partition.pl +++ b/src/test/subscription/t/013_partition.pl @@ -51,8 +51,7 @@ ); # make a BRIN index to test aminsertcleanup logic in subscriber $node_subscriber1->safe_psql('postgres', - "CREATE INDEX tab1_c_brin_idx ON tab1 USING brin (c)" -); + "CREATE INDEX tab1_c_brin_idx ON tab1 USING brin (c)"); $node_subscriber1->safe_psql('postgres', "CREATE TABLE tab1_1 (b text, c text DEFAULT 'sub1_tab1', a int NOT NULL)" ); diff --git a/src/test/subscription/t/024_add_drop_pub.pl b/src/test/subscription/t/024_add_drop_pub.pl index 5298d43197900..b396abe559947 100644 --- a/src/test/subscription/t/024_add_drop_pub.pl +++ b/src/test/subscription/t/024_add_drop_pub.pl @@ -108,11 +108,12 @@ my $offset = -s $node_publisher->logfile; -$node_publisher->safe_psql('postgres',"INSERT INTO tab_3 values(1)"); +$node_publisher->safe_psql('postgres', "INSERT INTO tab_3 values(1)"); # Verify that a warning is logged. $node_publisher->wait_for_log( - qr/WARNING: ( [A-Z0-9]+:)? skipped loading publication "tap_pub_3"/, $offset); + qr/WARNING: ( [A-Z0-9]+:)? skipped loading publication "tap_pub_3"/, + $offset); $node_publisher->safe_psql('postgres', "CREATE PUBLICATION tap_pub_3 FOR TABLE tab_3"); @@ -128,10 +129,11 @@ # Verify that the insert operation gets replicated to subscriber after # publication is created. 
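# (Editorial aside, not part of the patch: the warning check above uses the
# common TAP idiom of recording the log size first, so that wait_for_log only
# searches output written after that point. A minimal sketch of the idiom;
# the log pattern shown is an illustrative assumption.)
my $offset = -s $node_publisher->logfile;    # remember the current log end
$node_publisher->safe_psql('postgres', "INSERT INTO tab_3 VALUES (1)");
$node_publisher->wait_for_log(
	qr/skipped loading publication/, $offset);    # scan only newer output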
-$result = $node_subscriber->safe_psql('postgres', - "SELECT * FROM tab_3"); -is($result, qq(1 -2), 'check that the incremental data is replicated after the publication is created'); +$result = $node_subscriber->safe_psql('postgres', "SELECT * FROM tab_3"); +is( $result, qq(1 +2), + 'check that the incremental data is replicated after the publication is created' +); # shutdown $node_subscriber->stop('fast'); diff --git a/src/test/subscription/t/035_conflicts.pl b/src/test/subscription/t/035_conflicts.pl index 2a7a8239a2966..d78a6bac16aeb 100644 --- a/src/test/subscription/t/035_conflicts.pl +++ b/src/test/subscription/t/035_conflicts.pl @@ -26,7 +26,8 @@ "CREATE TABLE conf_tab (a int PRIMARY KEY, b int UNIQUE, c int UNIQUE);"); $node_publisher->safe_psql('postgres', - "CREATE TABLE conf_tab_2 (a int PRIMARY KEY, b int UNIQUE, c int UNIQUE);"); + "CREATE TABLE conf_tab_2 (a int PRIMARY KEY, b int UNIQUE, c int UNIQUE);" +); # Create same table on subscriber $node_subscriber->safe_psql('postgres', From 9c5b9a280cb6089c011a01797868da83f97d0230 Mon Sep 17 00:00:00 2001 From: Joe Conway Date: Sun, 29 Jun 2025 21:43:39 -0400 Subject: [PATCH 090/181] Do pre-release housekeeping on catalog data. Run renumber_oids.pl to move high-numbered OIDs down, as per pre-beta tasks specified by RELEASE_CHANGES. For reference, the command was ./renumber_oids.pl --first-mapped-oid 8000 --target-oid 6300 This should have been done prior to beta1, but it was forgotten. This will ensure we get the correct numbering for beta2 onward. --- src/include/catalog/catversion.h | 2 +- src/include/catalog/pg_authid.dat | 2 +- src/include/catalog/pg_collation.dat | 2 +- src/include/catalog/pg_index.h | 2 +- src/include/catalog/pg_proc.dat | 162 +++++++++++++-------------- 5 files changed, 85 insertions(+), 85 deletions(-) diff --git a/src/include/catalog/catversion.h b/src/include/catalog/catversion.h index d63db42ed7b37..479629825f5b7 100644 --- a/src/include/catalog/catversion.h +++ b/src/include/catalog/catversion.h @@ -57,6 +57,6 @@ */ /* yyyymmddN */ -#define CATALOG_VERSION_NO 202506251 +#define CATALOG_VERSION_NO 202506291 #endif diff --git a/src/include/catalog/pg_authid.dat b/src/include/catalog/pg_authid.dat index eb4dab5c6aa77..c881c13adf1bc 100644 --- a/src/include/catalog/pg_authid.dat +++ b/src/include/catalog/pg_authid.dat @@ -99,7 +99,7 @@ rolcreaterole => 'f', rolcreatedb => 'f', rolcanlogin => 'f', rolreplication => 'f', rolbypassrls => 'f', rolconnlimit => '-1', rolpassword => '_null_', rolvaliduntil => '_null_' }, -{ oid => '8916', oid_symbol => 'ROLE_PG_SIGNAL_AUTOVACUUM_WORKER', +{ oid => '6392', oid_symbol => 'ROLE_PG_SIGNAL_AUTOVACUUM_WORKER', rolname => 'pg_signal_autovacuum_worker', rolsuper => 'f', rolinherit => 't', rolcreaterole => 'f', rolcreatedb => 'f', rolcanlogin => 'f', rolreplication => 'f', rolbypassrls => 'f', rolconnlimit => '-1', diff --git a/src/include/catalog/pg_collation.dat b/src/include/catalog/pg_collation.dat index d8b5d5d2d856b..8cfd09f03140e 100644 --- a/src/include/catalog/pg_collation.dat +++ b/src/include/catalog/pg_collation.dat @@ -33,7 +33,7 @@ descr => 'sorts by Unicode code point; Unicode and POSIX character semantics', collname => 'pg_c_utf8', collprovider => 'b', collencoding => '6', colllocale => 'C.UTF-8', collversion => '1' }, -{ oid => '9535', +{ oid => '6411', descr => 'sorts by Unicode code point; Unicode character semantics', collname => 'pg_unicode_fast', collprovider => 'b', collencoding => '6', colllocale => 'PG_UNICODE_FAST', collversion => '1' }, diff 
--git a/src/include/catalog/pg_index.h b/src/include/catalog/pg_index.h index 4392b9d221d5a..731d3938169e6 100644 --- a/src/include/catalog/pg_index.h +++ b/src/include/catalog/pg_index.h @@ -69,7 +69,7 @@ CATALOG(pg_index,2610,IndexRelationId) BKI_SCHEMA_MACRO */ typedef FormData_pg_index *Form_pg_index; -DECLARE_TOAST_WITH_MACRO(pg_index, 8149, 8150, PgIndexToastTable, PgIndexToastIndex); +DECLARE_TOAST_WITH_MACRO(pg_index, 6351, 6352, PgIndexToastTable, PgIndexToastIndex); DECLARE_INDEX(pg_index_indrelid_index, 2678, IndexIndrelidIndexId, pg_index, btree(indrelid oid_ops)); DECLARE_UNIQUE_INDEX_PKEY(pg_index_indexrelid_index, 2679, IndexRelidIndexId, pg_index, btree(indexrelid oid_ops)); diff --git a/src/include/catalog/pg_proc.dat b/src/include/catalog/pg_proc.dat index 4efc1bc499ab5..fb4f7f50350ad 100644 --- a/src/include/catalog/pg_proc.dat +++ b/src/include/catalog/pg_proc.dat @@ -1004,7 +1004,7 @@ { oid => '3129', descr => 'sort support', proname => 'btint2sortsupport', prorettype => 'void', proargtypes => 'internal', prosrc => 'btint2sortsupport' }, -{ oid => '9290', descr => 'skip support', +{ oid => '6402', descr => 'skip support', proname => 'btint2skipsupport', prorettype => 'void', proargtypes => 'internal', prosrc => 'btint2skipsupport' }, { oid => '351', descr => 'less-equal-greater', @@ -1013,7 +1013,7 @@ { oid => '3130', descr => 'sort support', proname => 'btint4sortsupport', prorettype => 'void', proargtypes => 'internal', prosrc => 'btint4sortsupport' }, -{ oid => '9291', descr => 'skip support', +{ oid => '6403', descr => 'skip support', proname => 'btint4skipsupport', prorettype => 'void', proargtypes => 'internal', prosrc => 'btint4skipsupport' }, { oid => '842', descr => 'less-equal-greater', @@ -1022,7 +1022,7 @@ { oid => '3131', descr => 'sort support', proname => 'btint8sortsupport', prorettype => 'void', proargtypes => 'internal', prosrc => 'btint8sortsupport' }, -{ oid => '9292', descr => 'skip support', +{ oid => '6404', descr => 'skip support', proname => 'btint8skipsupport', prorettype => 'void', proargtypes => 'internal', prosrc => 'btint8skipsupport' }, { oid => '354', descr => 'less-equal-greater', @@ -1043,7 +1043,7 @@ { oid => '3134', descr => 'sort support', proname => 'btoidsortsupport', prorettype => 'void', proargtypes => 'internal', prosrc => 'btoidsortsupport' }, -{ oid => '9293', descr => 'skip support', +{ oid => '6405', descr => 'skip support', proname => 'btoidskipsupport', prorettype => 'void', proargtypes => 'internal', prosrc => 'btoidskipsupport' }, { oid => '404', descr => 'less-equal-greater', @@ -1052,7 +1052,7 @@ { oid => '358', descr => 'less-equal-greater', proname => 'btcharcmp', proleakproof => 't', prorettype => 'int4', proargtypes => 'char char', prosrc => 'btcharcmp' }, -{ oid => '9294', descr => 'skip support', +{ oid => '6406', descr => 'skip support', proname => 'btcharskipsupport', prorettype => 'void', proargtypes => 'internal', prosrc => 'btcharskipsupport' }, { oid => '359', descr => 'less-equal-greater', @@ -1180,22 +1180,22 @@ proname => 'name', proleakproof => 't', prorettype => 'name', proargtypes => 'bpchar', prosrc => 'bpchar_name' }, -{ oid => '8577', descr => 'convert int2 to bytea', +{ oid => '6367', descr => 'convert int2 to bytea', proname => 'bytea', proleakproof => 't', prorettype => 'bytea', proargtypes => 'int2', prosrc => 'int2_bytea' }, -{ oid => '8578', descr => 'convert int4 to bytea', +{ oid => '6368', descr => 'convert int4 to bytea', proname => 'bytea', proleakproof => 't', prorettype => 'bytea', 
proargtypes => 'int4', prosrc => 'int4_bytea' }, -{ oid => '8579', descr => 'convert int8 to bytea', +{ oid => '6369', descr => 'convert int8 to bytea', proname => 'bytea', proleakproof => 't', prorettype => 'bytea', proargtypes => 'int8', prosrc => 'int8_bytea' }, -{ oid => '8580', descr => 'convert bytea to int2', +{ oid => '6370', descr => 'convert bytea to int2', proname => 'int2', prorettype => 'int2', proargtypes => 'bytea', prosrc => 'bytea_int2' }, -{ oid => '8581', descr => 'convert bytea to int4', +{ oid => '6371', descr => 'convert bytea to int4', proname => 'int4', prorettype => 'int4', proargtypes => 'bytea', prosrc => 'bytea_int4' }, -{ oid => '8582', descr => 'convert bytea to int8', +{ oid => '6372', descr => 'convert bytea to int8', proname => 'int8', prorettype => 'int8', proargtypes => 'bytea', prosrc => 'bytea_int8' }, @@ -1259,10 +1259,10 @@ { oid => '772', descr => 'hash', proname => 'hashvarlenaextended', prorettype => 'int8', proargtypes => 'internal int8', prosrc => 'hashvarlenaextended' }, -{ oid => '9708', descr => 'hash', +{ oid => '6413', descr => 'hash', proname => 'hashbytea', prorettype => 'int4', proargtypes => 'bytea', prosrc => 'hashbytea' }, -{ oid => '9709', descr => 'hash', +{ oid => '6414', descr => 'hash', proname => 'hashbyteaextended', prorettype => 'int8', proargtypes => 'bytea int8', prosrc => 'hashbyteaextended' }, { oid => '457', descr => 'hash', @@ -1301,34 +1301,34 @@ { oid => '781', descr => 'hash', proname => 'hashmacaddr8extended', prorettype => 'int8', proargtypes => 'macaddr8 int8', prosrc => 'hashmacaddr8extended' }, -{ oid => '9710', descr => 'hash', +{ oid => '6415', descr => 'hash', proname => 'hashdate', prorettype => 'int4', proargtypes => 'date', prosrc => 'hashdate' }, -{ oid => '9711', descr => 'hash', +{ oid => '6416', descr => 'hash', proname => 'hashdateextended', prorettype => 'int8', proargtypes => 'date int8', prosrc => 'hashdateextended' }, -{ oid => '9712', descr => 'hash', +{ oid => '6417', descr => 'hash', proname => 'hashbool', prorettype => 'int4', proargtypes => 'bool', prosrc => 'hashbool' }, -{ oid => '9713', descr => 'hash', +{ oid => '6418', descr => 'hash', proname => 'hashboolextended', prorettype => 'int8', proargtypes => 'bool int8', prosrc => 'hashboolextended' }, -{ oid => '9714', descr => 'hash', +{ oid => '6419', descr => 'hash', proname => 'hashxid', prorettype => 'int4', proargtypes => 'xid', prosrc => 'hashxid' }, -{ oid => '9715', descr => 'hash', +{ oid => '6420', descr => 'hash', proname => 'hashxidextended', prorettype => 'int8', proargtypes => 'xid int8', prosrc => 'hashxidextended' }, -{ oid => '9716', descr => 'hash', +{ oid => '6421', descr => 'hash', proname => 'hashxid8', prorettype => 'int4', proargtypes => 'xid8', prosrc => 'hashxid8' }, -{ oid => '9717', descr => 'hash', +{ oid => '6422', descr => 'hash', proname => 'hashxid8extended', prorettype => 'int8', proargtypes => 'xid8 int8', prosrc => 'hashxid8extended' }, -{ oid => '9718', descr => 'hash', +{ oid => '6423', descr => 'hash', proname => 'hashcid', prorettype => 'int4', proargtypes => 'cid', prosrc => 'hashcid' }, -{ oid => '9719', descr => 'hash', +{ oid => '6424', descr => 'hash', proname => 'hashcidextended', prorettype => 'int8', proargtypes => 'cid int8', prosrc => 'hashcidextended' }, @@ -1348,10 +1348,10 @@ proname => 'text_smaller', proleakproof => 't', prorettype => 'text', proargtypes => 'text text', prosrc => 'text_smaller' }, -{ oid => '8920', descr => 'larger of two', +{ oid => '6393', descr => 'larger of two', 
proname => 'bytea_larger', proleakproof => 't', prorettype => 'bytea', proargtypes => 'bytea bytea', prosrc => 'bytea_larger' }, -{ oid => '8921', descr => 'smaller of two', +{ oid => '6394', descr => 'smaller of two', proname => 'bytea_smaller', proleakproof => 't', prorettype => 'bytea', proargtypes => 'bytea bytea', prosrc => 'bytea_smaller' }, @@ -1533,7 +1533,7 @@ { oid => '6163', descr => 'number of set bits', proname => 'bit_count', prorettype => 'int8', proargtypes => 'bytea', prosrc => 'bytea_bit_count' }, -{ oid => '8694', descr => 'reverse bytea', +{ oid => '6382', descr => 'reverse bytea', proname => 'reverse', prorettype => 'bytea', proargtypes => 'bytea', prosrc => 'bytea_reverse' }, @@ -1638,7 +1638,7 @@ proname => 'array_append', prosupport => 'array_append_support', proisstrict => 'f', prorettype => 'anycompatiblearray', proargtypes => 'anycompatiblearray anycompatible', prosrc => 'array_append' }, -{ oid => '8680', descr => 'planner support for array_append', +{ oid => '6378', descr => 'planner support for array_append', proname => 'array_append_support', prorettype => 'internal', proargtypes => 'internal', prosrc => 'array_append_support' }, { oid => '379', descr => 'prepend element onto front of array', @@ -1646,7 +1646,7 @@ proisstrict => 'f', prorettype => 'anycompatiblearray', proargtypes => 'anycompatible anycompatiblearray', prosrc => 'array_prepend' }, -{ oid => '8681', descr => 'planner support for array_prepend', +{ oid => '6379', descr => 'planner support for array_prepend', proname => 'array_prepend_support', prorettype => 'internal', proargtypes => 'internal', prosrc => 'array_prepend_support' }, { oid => '383', @@ -1784,17 +1784,17 @@ { oid => '6216', descr => 'take samples from array', proname => 'array_sample', provolatile => 'v', prorettype => 'anyarray', proargtypes => 'anyarray int4', prosrc => 'array_sample' }, -{ oid => '8686', descr => 'reverse array', +{ oid => '6381', descr => 'reverse array', proname => 'array_reverse', prorettype => 'anyarray', proargtypes => 'anyarray', prosrc => 'array_reverse' }, -{ oid => '8810', descr => 'sort array', +{ oid => '6388', descr => 'sort array', proname => 'array_sort', prorettype => 'anyarray', proargtypes => 'anyarray', prosrc => 'array_sort' }, -{ oid => '8811', descr => 'sort array', +{ oid => '6389', descr => 'sort array', proname => 'array_sort', prorettype => 'anyarray', proargtypes => 'anyarray bool', proargnames => '{array,descending}', prosrc => 'array_sort_order' }, -{ oid => '8812', descr => 'sort array', +{ oid => '6390', descr => 'sort array', proname => 'array_sort', prorettype => 'anyarray', proargtypes => 'anyarray bool bool', proargnames => '{array,descending,nulls_first}', @@ -2315,7 +2315,7 @@ { oid => '3136', descr => 'sort support', proname => 'date_sortsupport', prorettype => 'void', proargtypes => 'internal', prosrc => 'date_sortsupport' }, -{ oid => '9295', descr => 'skip support', +{ oid => '6407', descr => 'skip support', proname => 'date_skipsupport', prorettype => 'void', proargtypes => 'internal', prosrc => 'date_skipsupport' }, { oid => '4133', descr => 'window RANGE support', @@ -3433,7 +3433,7 @@ proname => 'pg_sequence_last_value', provolatile => 'v', proparallel => 'u', prorettype => 'int8', proargtypes => 'regclass', prosrc => 'pg_sequence_last_value' }, -{ oid => '9876', descr => 'return sequence tuple, for use by pg_dump', +{ oid => '6427', descr => 'return sequence tuple, for use by pg_dump', proname => 'pg_get_sequence_data', provolatile => 'v', proparallel => 'u', 
prorettype => 'record', proargtypes => 'regclass', proallargtypes => '{regclass,int8,bool}', proargmodes => '{i,o,o}', @@ -3594,10 +3594,10 @@ proname => 'erfc', prorettype => 'float8', proargtypes => 'float8', prosrc => 'derfc' }, -{ oid => '8702', descr => 'gamma function', +{ oid => '6383', descr => 'gamma function', proname => 'gamma', prorettype => 'float8', proargtypes => 'float8', prosrc => 'dgamma' }, -{ oid => '8703', +{ oid => '6384', descr => 'natural logarithm of absolute value of gamma function', proname => 'lgamma', prorettype => 'float8', proargtypes => 'float8', prosrc => 'dlgamma' }, @@ -3689,7 +3689,7 @@ { oid => '872', descr => 'capitalize each word', proname => 'initcap', prorettype => 'text', proargtypes => 'text', prosrc => 'initcap' }, -{ oid => '9569', descr => 'fold case', +{ oid => '6412', descr => 'fold case', proname => 'casefold', prorettype => 'text', proargtypes => 'text', prosrc => 'casefold' }, { oid => '873', descr => 'left-pad string to length', @@ -4516,7 +4516,7 @@ { oid => '1693', descr => 'less-equal-greater', proname => 'btboolcmp', proleakproof => 't', prorettype => 'int4', proargtypes => 'bool bool', prosrc => 'btboolcmp' }, -{ oid => '9296', descr => 'skip support', +{ oid => '6408', descr => 'skip support', proname => 'btboolskipsupport', prorettype => 'void', proargtypes => 'internal', prosrc => 'btboolskipsupport' }, @@ -5451,17 +5451,17 @@ prorettype => 'bool', proargtypes => 'oid text', prosrc => 'has_any_column_privilege_id' }, -{ oid => '8048', +{ oid => '6348', descr => 'user privilege on large object by username, large object oid', proname => 'has_largeobject_privilege', procost => '10', provolatile => 's', prorettype => 'bool', proargtypes => 'name oid text', prosrc => 'has_largeobject_privilege_name_id' }, -{ oid => '8049', +{ oid => '6349', descr => 'current user privilege on large object by large object oid', proname => 'has_largeobject_privilege', procost => '10', provolatile => 's', prorettype => 'bool', proargtypes => 'oid text', prosrc => 'has_largeobject_privilege_id' }, -{ oid => '8050', +{ oid => '6350', descr => 'user privilege on large object by user oid, large object oid', proname => 'has_largeobject_privilege', procost => '10', provolatile => 's', prorettype => 'bool', proargtypes => 'oid oid text', @@ -5612,19 +5612,19 @@ proname => 'pg_stat_get_autoanalyze_count', provolatile => 's', proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', prosrc => 'pg_stat_get_autoanalyze_count' }, -{ oid => '8406', descr => 'total vacuum time, in milliseconds', +{ oid => '6358', descr => 'total vacuum time, in milliseconds', proname => 'pg_stat_get_total_vacuum_time', provolatile => 's', proparallel => 'r', prorettype => 'float8', proargtypes => 'oid', prosrc => 'pg_stat_get_total_vacuum_time' }, -{ oid => '8407', descr => 'total autovacuum time, in milliseconds', +{ oid => '6359', descr => 'total autovacuum time, in milliseconds', proname => 'pg_stat_get_total_autovacuum_time', provolatile => 's', proparallel => 'r', prorettype => 'float8', proargtypes => 'oid', prosrc => 'pg_stat_get_total_autovacuum_time' }, -{ oid => '8408', descr => 'total analyze time, in milliseconds', +{ oid => '6360', descr => 'total analyze time, in milliseconds', proname => 'pg_stat_get_total_analyze_time', provolatile => 's', proparallel => 'r', prorettype => 'float8', proargtypes => 'oid', prosrc => 'pg_stat_get_total_analyze_time' }, -{ oid => '8409', descr => 'total autoanalyze time, in milliseconds', +{ oid => '6361', descr => 'total autoanalyze 
time, in milliseconds', proname => 'pg_stat_get_total_autoanalyze_time', provolatile => 's', proparallel => 'r', prorettype => 'float8', proargtypes => 'oid', prosrc => 'pg_stat_get_total_autoanalyze_time' }, @@ -5901,12 +5901,12 @@ proname => 'pg_stat_get_db_sessions_killed', provolatile => 's', proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', prosrc => 'pg_stat_get_db_sessions_killed' }, -{ oid => '8403', +{ oid => '6355', descr => 'statistics: number of parallel workers planned to be launched by queries', proname => 'pg_stat_get_db_parallel_workers_to_launch', provolatile => 's', proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', prosrc => 'pg_stat_get_db_parallel_workers_to_launch' }, -{ oid => '8404', +{ oid => '6356', descr => 'statistics: number of parallel workers effectively launched by queries', proname => 'pg_stat_get_db_parallel_workers_launched', provolatile => 's', proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', @@ -5928,7 +5928,7 @@ proname => 'pg_stat_get_checkpointer_num_requested', provolatile => 's', proparallel => 'r', prorettype => 'int8', proargtypes => '', prosrc => 'pg_stat_get_checkpointer_num_requested' }, -{ oid => '8599', +{ oid => '6377', descr => 'statistics: number of checkpoints performed by the checkpointer', proname => 'pg_stat_get_checkpointer_num_performed', provolatile => 's', proparallel => 'r', prorettype => 'int8', proargtypes => '', @@ -5955,7 +5955,7 @@ proname => 'pg_stat_get_checkpointer_buffers_written', provolatile => 's', proparallel => 'r', prorettype => 'int8', proargtypes => '', prosrc => 'pg_stat_get_checkpointer_buffers_written' }, -{ oid => '8573', +{ oid => '6366', descr => 'statistics: number of SLRU buffers written during checkpoints and restartpoints', proname => 'pg_stat_get_checkpointer_slru_written', provolatile => 's', proparallel => 'r', prorettype => 'int8', proargtypes => '', @@ -6001,7 +6001,7 @@ proargnames => '{backend_type,object,context,reads,read_bytes,read_time,writes,write_bytes,write_time,writebacks,writeback_time,extends,extend_bytes,extend_time,hits,evictions,reuses,fsyncs,fsync_time,stats_reset}', prosrc => 'pg_stat_get_io' }, -{ oid => '8806', descr => 'statistics: backend IO statistics', +{ oid => '6386', descr => 'statistics: backend IO statistics', proname => 'pg_stat_get_backend_io', prorows => '5', proretset => 't', provolatile => 'v', proparallel => 'r', prorettype => 'record', proargtypes => 'int4', @@ -6017,7 +6017,7 @@ proargmodes => '{o,o,o,o,o}', proargnames => '{wal_records,wal_fpi,wal_bytes,wal_buffers_full,stats_reset}', prosrc => 'pg_stat_get_wal' }, -{ oid => '8037', descr => 'statistics: backend WAL activity', +{ oid => '6313', descr => 'statistics: backend WAL activity', proname => 'pg_stat_get_backend_wal', provolatile => 'v', proparallel => 'r', prorettype => 'record', proargtypes => 'int4', proallargtypes => '{int4,int8,int8,numeric,int8,timestamptz}', @@ -6156,7 +6156,7 @@ proname => 'pg_stat_reset_single_function_counters', provolatile => 'v', prorettype => 'void', proargtypes => 'oid', prosrc => 'pg_stat_reset_single_function_counters' }, -{ oid => '8807', descr => 'statistics: reset statistics for a single backend', +{ oid => '6387', descr => 'statistics: reset statistics for a single backend', proname => 'pg_stat_reset_backend_stats', provolatile => 'v', prorettype => 'void', proargtypes => 'int4', prosrc => 'pg_stat_reset_backend_stats' }, @@ -6370,10 +6370,10 @@ { oid => '3411', descr => 'hash', proname => 'timestamp_hash_extended', prorettype 
=> 'int8', proargtypes => 'timestamp int8', prosrc => 'timestamp_hash_extended' }, -{ oid => '9720', descr => 'hash', +{ oid => '6425', descr => 'hash', proname => 'timestamptz_hash', prorettype => 'int4', proargtypes => 'timestamptz', prosrc => 'timestamptz_hash' }, -{ oid => '9721', descr => 'hash', +{ oid => '6426', descr => 'hash', proname => 'timestamptz_hash_extended', prorettype => 'int8', proargtypes => 'timestamptz int8', prosrc => 'timestamptz_hash_extended' }, { oid => '2041', descr => 'intervals overlap?', @@ -6398,7 +6398,7 @@ { oid => '3137', descr => 'sort support', proname => 'timestamp_sortsupport', prorettype => 'void', proargtypes => 'internal', prosrc => 'timestamp_sortsupport' }, -{ oid => '9297', descr => 'skip support', +{ oid => '6409', descr => 'skip support', proname => 'timestamp_skipsupport', prorettype => 'void', proargtypes => 'internal', prosrc => 'timestamp_skipsupport' }, @@ -6594,7 +6594,7 @@ proname => 'pg_describe_object', provolatile => 's', prorettype => 'text', proargtypes => 'oid oid int4', prosrc => 'pg_describe_object' }, -{ oid => '8730', descr => 'get ACL for SQL object', +{ oid => '6385', descr => 'get ACL for SQL object', proname => 'pg_get_acl', provolatile => 's', prorettype => '_aclitem', proargtypes => 'oid oid int4', proargnames => '{classid,objid,objsubid}', prosrc => 'pg_get_acl' }, @@ -6793,7 +6793,7 @@ proargnames => '{rm_id, rm_name, rm_builtin}', prosrc => 'pg_get_wal_resource_managers' }, -{ oid => '8303', descr => 'get info about loaded modules', +{ oid => '6353', descr => 'get info about loaded modules', proname => 'pg_get_loaded_modules', prorows => '10', proretset => 't', provolatile => 'v', proparallel => 'r', prorettype => 'record', proargtypes => '', proallargtypes => '{text,text,text}', @@ -6993,7 +6993,7 @@ proname => 'max', prokind => 'a', proisstrict => 'f', prorettype => 'anyarray', proargtypes => 'anyarray', prosrc => 'aggregate_dummy' }, -{ oid => '8595', descr => 'maximum value of all record input values', +{ oid => '6373', descr => 'maximum value of all record input values', proname => 'max', prokind => 'a', proisstrict => 'f', prorettype => 'record', proargtypes => 'record', prosrc => 'aggregate_dummy' }, { oid => '2244', descr => 'maximum value of all bpchar input values', @@ -7011,7 +7011,7 @@ { oid => '5099', descr => 'maximum value of all xid8 input values', proname => 'max', prokind => 'a', proisstrict => 'f', prorettype => 'xid8', proargtypes => 'xid8', prosrc => 'aggregate_dummy' }, -{ oid => '8922', descr => 'maximum value of all bytea input values', +{ oid => '6395', descr => 'maximum value of all bytea input values', proname => 'max', prokind => 'a', proisstrict => 'f', prorettype => 'bytea', proargtypes => 'bytea', prosrc => 'aggregate_dummy' }, @@ -7069,7 +7069,7 @@ proname => 'min', prokind => 'a', proisstrict => 'f', prorettype => 'anyarray', proargtypes => 'anyarray', prosrc => 'aggregate_dummy' }, -{ oid => '8596', descr => 'minimum value of all record input values', +{ oid => '6374', descr => 'minimum value of all record input values', proname => 'min', prokind => 'a', proisstrict => 'f', prorettype => 'record', proargtypes => 'record', prosrc => 'aggregate_dummy' }, { oid => '2245', descr => 'minimum value of all bpchar input values', @@ -7087,7 +7087,7 @@ { oid => '5100', descr => 'minimum value of all xid8 input values', proname => 'min', prokind => 'a', proisstrict => 'f', prorettype => 'xid8', proargtypes => 'xid8', prosrc => 'aggregate_dummy' }, -{ oid => '8923', descr => 'minimum value of all 
bytea input values', +{ oid => '6396', descr => 'minimum value of all bytea input values', proname => 'min', prokind => 'a', proisstrict => 'f', prorettype => 'bytea', proargtypes => 'bytea', prosrc => 'aggregate_dummy' }, @@ -7950,10 +7950,10 @@ proargtypes => 'internal', prosrc => 'tsm_system_handler' }, # CRC variants -{ oid => '8571', descr => 'CRC-32 value', +{ oid => '6364', descr => 'CRC-32 value', proname => 'crc32', proleakproof => 't', prorettype => 'int8', proargtypes => 'bytea', prosrc => 'crc32_bytea' }, -{ oid => '8572', descr => 'CRC-32C value', +{ oid => '6365', descr => 'CRC-32C value', proname => 'crc32c', proleakproof => 't', prorettype => 'int8', proargtypes => 'bytea', prosrc => 'crc32c_bytea' }, @@ -8497,7 +8497,7 @@ proargmodes => '{o,o,o,o,o,o}', proargnames => '{name,statement,is_holdable,is_binary,is_scrollable,creation_time}', prosrc => 'pg_cursor' }, -{ oid => '9221', descr => 'get abbreviations from current timezone', +{ oid => '6401', descr => 'get abbreviations from current timezone', proname => 'pg_timezone_abbrevs_zone', prorows => '10', proretset => 't', provolatile => 's', prorettype => 'record', proargtypes => '', proallargtypes => '{text,interval,bool}', proargmodes => '{o,o,o}', @@ -8609,7 +8609,7 @@ prosupport => 'generate_series_numeric_support', proretset => 't', prorettype => 'numeric', proargtypes => 'numeric numeric', prosrc => 'generate_series_numeric' }, -{ oid => '8405', descr => 'planner support for generate_series', +{ oid => '6357', descr => 'planner support for generate_series', proname => 'generate_series_numeric_support', prorettype => 'internal', proargtypes => 'internal', prosrc => 'generate_series_numeric_support' }, { oid => '938', descr => 'non-persistent series generator', @@ -8629,7 +8629,7 @@ prorettype => 'timestamptz', proargtypes => 'timestamptz timestamptz interval text', prosrc => 'generate_series_timestamptz_at_zone' }, -{ oid => '8402', descr => 'planner support for generate_series', +{ oid => '6354', descr => 'planner support for generate_series', proname => 'generate_series_timestamp_support', prorettype => 'internal', proargtypes => 'internal', prosrc => 'generate_series_timestamp_support' }, @@ -9468,7 +9468,7 @@ { oid => '3300', descr => 'sort support', proname => 'uuid_sortsupport', prorettype => 'void', proargtypes => 'internal', prosrc => 'uuid_sortsupport' }, -{ oid => '9298', descr => 'skip support', +{ oid => '6410', descr => 'skip support', proname => 'uuid_skipsupport', prorettype => 'void', proargtypes => 'internal', prosrc => 'uuid_skipsupport' }, { oid => '2961', descr => 'I/O', @@ -9486,13 +9486,13 @@ { oid => '3432', descr => 'generate random UUID', proname => 'gen_random_uuid', provolatile => 'v', prorettype => 'uuid', proargtypes => '', prosrc => 'gen_random_uuid' }, -{ oid => '9895', descr => 'generate UUID version 4', +{ oid => '6428', descr => 'generate UUID version 4', proname => 'uuidv4', provolatile => 'v', prorettype => 'uuid', proargtypes => '', prosrc => 'gen_random_uuid' }, -{ oid => '9896', descr => 'generate UUID version 7', +{ oid => '6429', descr => 'generate UUID version 7', proname => 'uuidv7', provolatile => 'v', prorettype => 'uuid', proargtypes => '', prosrc => 'uuidv7' }, -{ oid => '9897', +{ oid => '6430', descr => 'generate UUID version 7 with a timestamp shifted by specified interval', proname => 'uuidv7', provolatile => 'v', prorettype => 'uuid', proargtypes => 'interval', proargnames => '{shift}', @@ -10654,10 +10654,10 @@ { oid => '2987', descr => 'less-equal-greater', proname 
=> 'btrecordcmp', prorettype => 'int4', proargtypes => 'record record', prosrc => 'btrecordcmp' }, -{ oid => '8597', descr => 'larger of two', +{ oid => '6375', descr => 'larger of two', proname => 'record_larger', prorettype => 'record', proargtypes => 'record record', prosrc => 'record_larger' }, -{ oid => '8598', descr => 'smaller of two', +{ oid => '6376', descr => 'smaller of two', proname => 'record_smaller', prorettype => 'record', proargtypes => 'record record', prosrc => 'record_smaller' }, @@ -10897,7 +10897,7 @@ { oid => '3870', descr => 'less-equal-greater', proname => 'range_cmp', prorettype => 'int4', proargtypes => 'anyrange anyrange', prosrc => 'range_cmp' }, -{ oid => '8849', descr => 'sort support', +{ oid => '6391', descr => 'sort support', proname => 'range_sortsupport', prorettype => 'void', proargtypes => 'internal', prosrc => 'range_sortsupport' }, { oid => '3871', @@ -12316,7 +12316,7 @@ proname => 'array_subscript_handler', prosupport => 'array_subscript_handler_support', prorettype => 'internal', proargtypes => 'internal', prosrc => 'array_subscript_handler' }, -{ oid => '8682', descr => 'planner support for array_subscript_handler', +{ oid => '6380', descr => 'planner support for array_subscript_handler', proname => 'array_subscript_handler_support', prorettype => 'internal', proargtypes => 'internal', prosrc => 'array_subscript_handler_support' }, { oid => '6180', descr => 'raw array subscripting support', @@ -12355,7 +12355,7 @@ provolatile => 'v', prorettype => 'record', proargtypes => '', proallargtypes => '{text,int8,timestamptz}', proargmodes => '{o,o,o}', proargnames => '{name,size,modification}', prosrc => 'pg_ls_waldir' }, -{ oid => '9220', descr => 'list of files in the pg_wal/summaries directory', +{ oid => '6400', descr => 'list of files in the pg_wal/summaries directory', proname => 'pg_ls_summariesdir', procost => '10', prorows => '20', proretset => 't', provolatile => 'v', prorettype => 'record', proargtypes => '', proallargtypes => '{text,int8,timestamptz}', @@ -12511,21 +12511,21 @@ proargnames => '{summarized_tli,summarized_lsn,pending_lsn,summarizer_pid}', prosrc => 'pg_get_wal_summarizer_state' }, # Statistics Import -{ oid => '8459', descr => 'restore statistics on relation', +{ oid => '6362', descr => 'restore statistics on relation', proname => 'pg_restore_relation_stats', provariadic => 'any', proisstrict => 'f', provolatile => 'v', proparallel => 'u', prorettype => 'bool', proargtypes => 'any', proargmodes => '{v}', proargnames => '{kwargs}', prosrc => 'pg_restore_relation_stats' }, -{ oid => '9160', descr => 'clear statistics on relation', +{ oid => '6397', descr => 'clear statistics on relation', proname => 'pg_clear_relation_stats', proisstrict => 'f', provolatile => 'v', proparallel => 'u', prorettype => 'void', proargtypes => 'text text', proargnames => '{schemaname,relname}', prosrc => 'pg_clear_relation_stats' }, -{ oid => '8461', descr => 'restore statistics on attribute', +{ oid => '6363', descr => 'restore statistics on attribute', proname => 'pg_restore_attribute_stats', provariadic => 'any', proisstrict => 'f', provolatile => 'v', proparallel => 'u', prorettype => 'bool', proargtypes => 'any', proargmodes => '{v}', proargnames => '{kwargs}', prosrc => 'pg_restore_attribute_stats' }, -{ oid => '9162', descr => 'clear statistics on attribute', +{ oid => '6398', descr => 'clear statistics on attribute', proname => 'pg_clear_attribute_stats', proisstrict => 'f', provolatile => 'v', proparallel => 'u', prorettype => 'void', 
proargtypes => 'text text text bool', @@ -12533,12 +12533,12 @@ prosrc => 'pg_clear_attribute_stats' }, # GiST stratnum implementations -{ oid => '8047', descr => 'GiST support', +{ oid => '6347', descr => 'GiST support', proname => 'gist_translate_cmptype_common', prorettype => 'int2', proargtypes => 'int4', prosrc => 'gist_translate_cmptype_common' }, # AIO related functions -{ oid => '9200', descr => 'information about in-progress asynchronous IOs', +{ oid => '6399', descr => 'information about in-progress asynchronous IOs', proname => 'pg_get_aios', prorows => '100', proretset => 't', provolatile => 'v', proparallel => 'r', prorettype => 'record', proargtypes => '', From 2652835d3efa003439ecc23d5fc3cf089c5952a6 Mon Sep 17 00:00:00 2001 From: Joe Conway Date: Sun, 29 Jun 2025 22:28:10 -0400 Subject: [PATCH 091/181] Stamp HEAD as 19devel. Let the hacking begin ... --- configure | 18 +- configure.ac | 2 +- doc/src/sgml/filelist.sgml | 2 +- doc/src/sgml/release-18.sgml | 4200 ---------------------------------- doc/src/sgml/release-19.sgml | 16 + doc/src/sgml/release.sgml | 2 +- meson.build | 2 +- src/tools/git_changelog | 1 + src/tools/version_stamp.pl | 2 +- 9 files changed, 31 insertions(+), 4214 deletions(-) delete mode 100644 doc/src/sgml/release-18.sgml create mode 100644 doc/src/sgml/release-19.sgml diff --git a/configure b/configure index 3d3d3db97a456..16ef5b58d1a87 100755 --- a/configure +++ b/configure @@ -1,6 +1,6 @@ #! /bin/sh # Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.69 for PostgreSQL 18beta1. +# Generated by GNU Autoconf 2.69 for PostgreSQL 19devel. # # Report bugs to . # @@ -582,8 +582,8 @@ MAKEFLAGS= # Identity of this package. PACKAGE_NAME='PostgreSQL' PACKAGE_TARNAME='postgresql' -PACKAGE_VERSION='18beta1' -PACKAGE_STRING='PostgreSQL 18beta1' +PACKAGE_VERSION='19devel' +PACKAGE_STRING='PostgreSQL 19devel' PACKAGE_BUGREPORT='pgsql-bugs@lists.postgresql.org' PACKAGE_URL='https://www.postgresql.org/' @@ -1468,7 +1468,7 @@ if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF -\`configure' configures PostgreSQL 18beta1 to adapt to many kinds of systems. +\`configure' configures PostgreSQL 19devel to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... @@ -1533,7 +1533,7 @@ fi if test -n "$ac_init_help"; then case $ac_init_help in - short | recursive ) echo "Configuration of PostgreSQL 18beta1:";; + short | recursive ) echo "Configuration of PostgreSQL 19devel:";; esac cat <<\_ACEOF @@ -1724,7 +1724,7 @@ fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF -PostgreSQL configure 18beta1 +PostgreSQL configure 19devel generated by GNU Autoconf 2.69 Copyright (C) 2012 Free Software Foundation, Inc. @@ -2477,7 +2477,7 @@ cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. -It was created by PostgreSQL $as_me 18beta1, which was +It was created by PostgreSQL $as_me 19devel, which was generated by GNU Autoconf 2.69. Invocation command line was $ $0 $@ @@ -20063,7 +20063,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. 
ac_log=" -This file was extended by PostgreSQL $as_me 18beta1, which was +This file was extended by PostgreSQL $as_me 19devel, which was generated by GNU Autoconf 2.69. Invocation command line was CONFIG_FILES = $CONFIG_FILES @@ -20134,7 +20134,7 @@ _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ -PostgreSQL config.status 18beta1 +PostgreSQL config.status 19devel configured by $0, generated by GNU Autoconf 2.69, with options \\"\$ac_cs_config\\" diff --git a/configure.ac b/configure.ac index 4b8335dc6138e..b3efc49c97a9d 100644 --- a/configure.ac +++ b/configure.ac @@ -17,7 +17,7 @@ dnl Read the Autoconf manual for details. dnl m4_pattern_forbid(^PGAC_)dnl to catch undefined macros -AC_INIT([PostgreSQL], [18beta1], [pgsql-bugs@lists.postgresql.org], [], [https://www.postgresql.org/]) +AC_INIT([PostgreSQL], [19devel], [pgsql-bugs@lists.postgresql.org], [], [https://www.postgresql.org/]) m4_if(m4_defn([m4_PACKAGE_VERSION]), [2.69], [], [m4_fatal([Autoconf version 2.69 is required. Untested combinations of 'autoconf' and PostgreSQL versions are not diff --git a/doc/src/sgml/filelist.sgml b/doc/src/sgml/filelist.sgml index fef9584f908ec..bcde3cfd0374a 100644 --- a/doc/src/sgml/filelist.sgml +++ b/doc/src/sgml/filelist.sgml @@ -180,7 +180,7 @@ - + diff --git a/doc/src/sgml/release-18.sgml b/doc/src/sgml/release-18.sgml deleted file mode 100644 index 66a6817a2be0f..0000000000000 --- a/doc/src/sgml/release-18.sgml +++ /dev/null @@ -1,4200 +0,0 @@ - - - - - Release 18 - - - Release date: - 2025-??-??, CURRENT AS OF 2025-06-20 - - - - Overview - - - PostgreSQL 18 contains many new features - and enhancements, including: - - - - - - - (to be completed) - - - - - - The above items and other new features of - PostgreSQL 18 are explained in more detail - in the sections below. - - - - - - - Migration to Version 18 - - - A dump/restore using or use of - or logical replication is required for - those wishing to migrate data from any previous release. See for general information on migrating to new - major releases. - - - - Version 18 contains a number of changes that may affect compatibility - with previous releases. Observe the following incompatibilities: - - - - - - - - - Change default to enable data checksums - (Greg Sabino Mullane) - § - - - - Checksums can be disabled with the - new initdb option - . - requires matching cluster checksum settings, so this new option can - be useful to upgrade non-checksum old clusters. - - - - - - - - Change time zone abbreviation handling (Tom Lane) - § - - - - The system will now favor the current session's time - zone abbreviations before checking the server variable - . Previously - timezone_abbreviations was checked first. - - - - - - - - Deprecate MD5 password - authentication (Nathan Bossart) - § - - - - Support for MD5 passwords will be removed in a future major - version release. and now emit deprecation warnings when - setting MD5 passwords. These warnings can be disabled by setting - the parameter to - off. - - - - - - - - Change and - to process the inheritance children of a parent (Michael Harris) - § - - - - The previous behavior can be performed by using the new - ONLY option. - - - - - - - - Prevent COPY FROM - from treating \. as an end-of-file marker when - reading CSV files (Daniel Vérité, Tom Lane) - § - § - - - - will still treat - \. as an end-of-file marker when reading - CSV files from STDIN. 
- Older psql clients connecting to - PostgreSQL 18 servers might experience \copy - problems. This release also enforces that \. - must appear alone on a line. - - - - - - - - Disallow unlogged partitioned tables (Michael Paquier) - § - - - - Previously ALTER TABLE SET - [UN]LOGGED did nothing, and the creation of an - unlogged partitioned table did not cause its children to be unlogged. - - - - - - - - Execute AFTER triggers as the role that was active when - trigger events were queued (Laurenz Albe) - § - - - - Previously such triggers were run as the role that was active at - trigger execution time (e.g., at ). - This is significant for cases where the role is changed between queue - time and transaction commit. - - - - - - - - Remove non-functional support for rule privileges in / (Fujii Masao) - § - - - - These have been non-functional since - PostgreSQL 8.2. - - - - - - - - Remove column pg_backend_memory_contexts.parent - (Melih Mutlu) - § - - - - This is no longer needed since - pg_backend_memory_contexts.path - was added. - - - - - - - - Change - pg_backend_memory_contexts.level - and pg_log_backend_memory_contexts() - to be one-based (Melih Mutlu, Atsushi Torikoshi, David Rowley, - Fujii Masao) - § - § - § - - - - These were previously zero-based. - - - - - - - - - Changes - - - Below you will find a detailed account of the changes between - PostgreSQL 18 and the previous major - release. - - - - Server - - - Optimizer - - - - - - - - Automatically remove some unnecessary table self-joins (Andrey - Lepikhov, Alexander Kuzmenkov, Alexander Korotkov, Alena Rybakina) - § - - - - This optimization can be disabled using server variable . - - - - - - - - Convert some IN (VALUES - ...) to x = ANY ... for better - optimizer statistics (Alena Rybakina, Andrei Lepikhov) - § - - - - - - - - Allow transforming OR-clauses - to arrays for faster index processing (Alexander Korotkov, Andrey - Lepikhov) - § - - - - - - - - Speed up the processing of INTERSECT, - EXCEPT, window aggregates, and view column aliases (Tom Lane, - David Rowley) - § - § - § - § - - - - - - - - Allow the keys of SELECT - DISTINCT to be internally reordered to avoid sorting - (Richard Guo) - § - - - - This optimization can be disabled using . - - - - - - - - Ignore GROUP BY - columns that are functionally dependent on other columns (Zhang - Mingli, Jian He, David Rowley) - § - - - - If a GROUP BY clause includes all columns of - a unique index, as well as other columns of the same table, those - other columns are redundant and can be dropped from the grouping. - This was already true for non-deferred primary keys. - - - - - - - - Allow some HAVING clauses - on GROUPING - SETS to be pushed to WHERE clauses - (Richard Guo) - § - § - § - § - - - - This allows earlier row filtering. This release also fixes some - GROUPING SETS queries that used to return - incorrect results. - - - - - - - - Improve row estimates for generate_series() - using numeric - and timestamp - values (David Rowley, Song Jinzhou) - § - § - - - - - - - - Allow the optimizer to use Right Semi Join plans - (Richard Guo) - § - - - - Semi-joins are used when needing to find if there is at least - one match. 
- - - - - - - - Allow merge joins to use incremental sorts - (Richard Guo) - § - - - - - - - - Improve the efficiency of planning queries accessing many partitions - (Ashutosh Bapat, Yuya Watari, David Rowley) - § - § - - - - - - - - Allow partitionwise - joins in more cases, and reduce its memory usage (Richard Guo, - Tom Lane, Ashutosh Bapat) - § - § - - - - - - - - Improve cost estimates of partition queries (Nikita Malakhov, - Andrei Lepikhov) - § - - - - - - - - Improve SQL-language - function plan caching (Alexander Pyhalov, Tom Lane) - § - § - - - - - - - - Improve handling of disabled optimizer features (Robert Haas) - § - - - - - - - - - Indexes - - - - - - - - Allow skip scans of btree indexes - (Peter Geoghegan) - § - § - - - - This allows multi-column btree indexes to be used by queries that - only equality-reference the second or later indexed columns. - - - - - - - - Allow non-btree unique indexes to be used as partition keys and in - materialized views (Mark Dilger) - § - § - - - - The index type must still support equality. - - - - - - - - Allow GIN indexes to - be created in parallel (Tomas Vondra, Matthias van de Meent) - § - - - - - - - - Allow values to be sorted to speed range-type GiST and btree - index builds (Bernd Helmle) - § - - - - - - - - - General Performance - - - - - - - - Add an asynchronous I/O subsystem (Andres Freund, Thomas Munro, - Nazir Bilal Yavuz, Melanie Plageman) - § - § - § - § - § - § - § - § - § - § - § - - - - This feature allows backends to queue multiple read requests, - which allows for more efficient sequential scans, bitmap - heap scans, vacuums, etc. This is enabled by server - variable , with server - variables and added to control it. - This also enables - and - values greater than zero for systems without - fadvise() support. The new system view pg_aios - shows the file handles being used for asynchronous I/O. - - - - - - - - Improve the locking performance of queries that access many relations - (Tomas Vondra) - § - - - - - - - - Improve the performance and reduce memory usage of hash joins and - GROUP BY - (David Rowley, Jeff Davis) - § - § - § - § - § - - - - This also improves hash set operations used by EXCEPT, and hash - lookups of subplan values. - - - - - - - - Allow normal vacuums to freeze some pages, even though they are - all-visible (Melanie Plageman) - § - § - - - - This reduces the overhead of later full-relation - freezing. The aggressiveness of this can be - controlled by server variable and per-table setting . Previously - vacuum never processed all-visible pages until freezing was required. - - - - - - - - Add server variable to control - file truncation during (Nathan Bossart, - Gurjeet Singh) - § - - - - A storage-level parameter with the same name and behavior already - existed. - - - - - - - - Increase server variables 's and 's default values to 16 - (Melanie Plageman) - § - § - - - - This more accurately reflects modern hardware. - - - - - - - - - Monitoring - - - - - - - - Increase the logging granularity of server variable (Melanie Plageman) - § - - - - This server variable was previously only boolean, which is still - supported. - - - - - - - - Add log_connections option to report the duration - of connection stages (Melanie Plageman) - § - - - - - - - - Add escape - %L to output the client IP - address (Greg Sabino Mullane) - § - - - - - - - - Add server variable to log - lock acquisition failures (Yuki Seino, Fujii Masao) - § - § - - - - Specifically it reports SELECT - ... NOWAIT lock failures. 
- - - - - - - - Modify pg_stat_all_tables - and its variants to report the time spent in , , and their - automatic variants (Sami Imseih) - § - - - - The new columns are total_vacuum_time, - total_autovacuum_time, - total_analyze_time, and - total_autoanalyze_time. - - - - - - - - Add delay time reporting to and (Bertrand Drouvot, Nathan Bossart) - § - § - - - - This information appears in the server log, the system views pg_stat_progress_vacuum - and pg_stat_progress_analyze, - and the output of and when in VERBOSE - mode; tracking must be enabled with the server variable . - - - - - - - - Add WAL, CPU, and average - read statistics output to ANALYZE VERBOSE - (Anthonin Bonnefoy) - § - § - - - - - - - - Add full WAL buffer count to - VACUUM/ANALYZE (VERBOSE) - and autovacuum log output (Bertrand Drouvot) - § - - - - - - - - Add per-backend I/O statistics reporting (Bertrand Drouvot) - § - § - - - - The statistics are accessed via pg_stat_get_backend_io(). - Per-backend I/O statistics can be cleared via pg_stat_reset_backend_stats(). - - - - - - - - Add pg_stat_io - columns to report I/O activity in bytes (Nazir Bilal Yavuz) - § - - - - The new columns are read_bytes, - write_bytes, and - extend_bytes. The - op_bytes column, which always equaled - BLCKSZ, - has been removed. - - - - - - - - Add WAL I/O activity rows to - pg_stat_io (Nazir Bilal Yavuz, Bertrand - Drouvot, Michael Paquier) - § - § - § - - - - This includes WAL receiver activity and a wait - event for such writes. - - - - - - - - - Change server variable - to control tracking WAL timing - in pg_stat_io instead of pg_stat_wal - (Bertrand Drouvot) - § - - - - - - - - Remove read/sync columns from pg_stat_wal - (Bertrand Drouvot) - § - § - - - - This removes columns wal_write, - wal_sync, - wal_write_time, and - wal_sync_time. - - - - - - - - Add function pg_stat_get_backend_wal() - to return per-backend WAL statistics (Bertrand - Drouvot) - § - - - - Per-backend WAL - statistics can be cleared via pg_stat_reset_backend_stats(). - - - - - - - - Add function pg_ls_summariesdir() - to specifically list the contents of PGDATA/pg_wal/summaries - (Yushi Ogiwara) - § - - - - - - - - Add column pg_stat_checkpointer.num_done - to report the number of completed checkpoints (Anton A. Melnikov) - § - - - - Columns num_timed and - num_requested count both completed and - skipped checkpoints. - - - - - - - - Add column - pg_stat_checkpointer.slru_written - to report SLRU buffers written (Nitin Jadhav) - § - - - - Also, modify the checkpoint server log message to report separate - shared buffer and SLRU buffer values. - - - - - - - - Add columns to pg_stat_database - to report parallel worker activity (Benoit Lobréau) - § - - - - The new columns are - parallel_workers_to_launch and - parallel_workers_launched. - - - - - - - - Have query id computation - of arrays consider only the first and last array elements (Dmitry - Dolgov, Sami Imseih) - § - § - - - - Jumbling is used by . - - - - - - - - Adjust query id computations to group together queries using the - same relation name (Michael Paquier, Sami Imseih) - § - - - - This is true even if the tables in different schemas have different - column names. 
- - - - - - - - Add column pg_backend_memory_contexts.type - to report the type of memory context (David Rowley) - § - - - - - - - - Add column - pg_backend_memory_contexts.path - to show memory context parents (Melih Mutlu) - § - - - - - - - - - Privileges - - - - - - - - Add function pg_get_acl() - to retrieve database access control details (Joel Jacobson) - § - § - - - - - - - - Add function has_largeobject_privilege() - to check large object privileges (Yugo Nagata) - § - - - - - - - - Allow to define - large object default privileges (Takatsuka Haruka, Yugo Nagata, - Laurenz Albe) - § - - - - - - - - Add predefined role pg_signal_autovacuum_worker - (Kirill Reshke) - § - - - - This allows sending signals to autovacuum workers. - - - - - - - - - Server Configuration - - - - - - - - Add support for the OAuth authentication - method (Jacob Champion, Daniel Gustafsson, Thomas Munro) - § - - - - This adds an oauth authentication method to pg_hba.conf, - libpq OAuth options, a server variable to load - token validation libraries, and a configure flag - to add the required compile-time libraries. - - - - - - - - Add server variable to allow - specification of multiple colon-separated TLSv1.3 cipher suites - (Erica Zhang, Daniel Gustafsson) - § - - - - - - - - Change server variable 's default - to include elliptic curve X25519 (Daniel Gustafsson, Jacob Champion) - § - - - - - - - - Rename server variable ssl_ecdh_curve to and allow multiple colon-separated - ECDH curves to be specified (Erica Zhang, - Daniel Gustafsson) - § - - - - The previous name still works. - - - - - - - - Make cancel request - keys 256 bits (Heikki Linnakangas, Jelte Fennema-Nio) - § - § - - - - This is only possible when the server and client support wire - protocol version 3.2, introduced in this release. - - - - - - - - Add server variable - to specify the maximum number of background workers (Nathan Bossart) - § - - - - With this variable set, - can be adjusted at runtime up to this maximum without a server - restart. - - - - - - - - Allow specification of the fixed number of dead tuples that will - trigger an autovacuum (Nathan - Bossart, Frédéric Yhuel) - § - - - - The server variable is . Percentages are - still used for triggering. - - - - - - - - Change server variable - to limit only files opened by a backend (Andres Freund) - § - - - - Previously files opened by the postmaster were also counted toward - this limit. - - - - - - - - Add server variable to - report the required number of semaphores (Nathan Bossart) - § - - - - This is useful for operating system configuration. - - - - - - - - Add server variable to - specify the location of extension control files (Peter Eisentraut, - Matheus Alcantara) - § - § - - - - - - - - - Streaming Replication and Recovery - - - - - - - - Allow inactive replication slots to be automatically invalided using - server variable - (Nisha Moond, Bharath Rupireddy) - § - - - - - - - - Add server variable to control the - maximum active replication origins (Euler Taveira) - § - - - - This was previously controlled by , but this new setting allows - a higher origin count in cases where fewer slots are required. - - - - - - - - - <link linkend="logical-replication">Logical Replication</link> - - - - - - - - Allow the values of generated - columns to be logically replicated (Shubham Khanna, Vignesh C, - Zhijie Hou, Shlok Kyal, Peter Smith) - § - § - § - § - - - - If the publication specifies a column list, all specified - columns, generated and non-generated, are published. 
- Without a specified column list, publication option - publish_generated_columns controls whether - generated columns are published. Previously generated columns - were not replicated and the subscriber had to compute - the values if possible; this is particularly useful for - non-PostgreSQL subscribers which lack - such a capability. - - - - - - - - Change the default streaming - option from off to parallel - (Vignesh C) - § - - - - - - - - Allow to change the - replication slot's two-phase commit behavior (Hayato Kuroda, Ajin - Cherian, Amit Kapila, Zhijie Hou) - § - § - - - - - - - - Log conflicts while - applying logical replication changes (Zhijie Hou, Nisha Moond) - § - § - § - § - § - - - - Also report in new columns of pg_stat_subscription_stats. - - - - - - - - - - - Utility Commands - - - - - - - - Allow generated - columns to be virtual, and make them the default (Peter - Eisentraut, Jian He, Richard Guo, Dean Rasheed) - § - § - § - - - - Virtual generated columns generate their values when the columns - are read, not written. The write behavior can still be specified - via the STORED option. - - - - - - - - Add OLD/NEW support to RETURNING in - DML queries (Dean Rasheed) - § - - - - Previously RETURNING only returned new values for - and , and old - values for ; - would return the appropriate value for the internal query executed. - This new syntax allows the RETURNING list of - INSERT/UPDATE/DELETE/MERGE - to explicitly return old and new values by using the special aliases - old and new. These aliases - can be renamed to avoid identifier conflicts. - - - - - - - - Allow foreign tables to be created like existing local tables - (Zhang Mingli) - § - - - - The syntax is CREATE - FOREIGN TABLE ... LIKE. - - - - - - - - Allow LIKE - with nondeterministic - collations (Peter Eisentraut) - § - - - - - - - - Allow text position search functions with nondeterministic collations - (Peter Eisentraut) - § - - - - These used to generate an error. - - - - - - - - Add builtin collation provider PG_UNICODE_FAST - (Jeff Davis) - § - - - - This locale supports case mapping, but sorts in code point order, - not natural language order. - - - - - - - - Allow and - to process partitioned tables without processing their children - (Michael Harris) - § - - - - This is enabled with the new ONLY option. This is - useful since autovacuum does not process partitioned tables, just - its children. - - - - - - - - Add functions to modify per-relation and per-column optimizer - statistics (Corey Huinker) - § - § - § - - - - The functions are pg_restore_relation_stats(), - pg_restore_attribute_stats(), - pg_clear_relation_stats(), and - pg_clear_attribute_stats(). - - - - - - - - - Add server variable to control - the file copying method (Nazir Bilal Yavuz) - § - - - - This controls whether CREATE DATABASE - ... STRATEGY=FILE_COPY and ALTER DATABASE ... SET - TABLESPACE uses file copy or clone. - - - - - - - <link linkend="ddl-constraints">Constraints</link> - - - - - - - - Allow the specification of non-overlapping PRIMARY - KEY and UNIQUE - constraints (Paul A. Jungwirth) - § - - - - This is specified by WITHOUT OVERLAPS on the - last specified column. - - - - - - - - Allow CHECK - and foreign - key constraints to be specified as NOT - ENFORCED (Amul Sul) - § - § - - - - This also adds column pg_constraint.conenforced. 
- - - - - - - - Require primary/foreign key - relationships to use either deterministic collations or the the - same nondeterministic collations (Peter Eisentraut) - § - - - - The restore of a , also used by , will fail if these requirements are not met; - schema changes must be made for these upgrade methods to succeed. - - - - - - - - Store column NOT - NULL specifications in pg_constraint - (Álvaro Herrera, Bernd Helmle) - § - - - - This allows names to be specified for NOT NULL - constraint. This also adds NOT NULL constraints - to foreign tables and NOT NULL inheritance - control to local tables. - - - - - - - - Allow to set the NOT - VALID attribute of NOT NULL constraints - (Rushabh Lathia, Jian He) - § - - - - - - - - Allow modification of the inheritability of NOT - NULL constraints (Suraj Kharage, Álvaro Herrera) - § - § - - - - The syntax is ALTER TABLE - ... ALTER CONSTRAINT ... [NO] INHERIT. - - - - - - - - Allow NOT VALID foreign key constraints on - partitioned tables (Amul Sul) - § - - - - - - - - Allow dropping - of constraints ONLY on partitioned tables - (Álvaro Herrera) - § - - - - This was previously erroneously prohibited. - - - - - - - - <xref linkend="sql-copy"/> - - - - - - - - Add REJECT_LIMIT to control the number of invalid - rows COPY FROM can ignore (Atsushi Torikoshi) - § - - - - This is available when ON_ERROR = 'ignore'. - - - - - - - - Allow COPY TO to copy rows from populated - materialized views (Jian He) - § - - - - - - - - Add COPY LOG_VERBOSITY level - silent to suppress log output of ignored rows - (Atsushi Torikoshi) - § - - - - This new level suppresses output for discarded input rows when - on_error = 'ignore'. - - - - - - - - Disallow COPY FREEZE on foreign tables (Nathan - Bossart) - § - - - - Previously, the COPY worked but the - FREEZE was ignored, so disallow this command. - - - - - - - - <xref linkend="sql-explain"/> - - - - - - - - Automatically include BUFFERS output in - EXPLAIN ANALYZE (Guillaume Lelarge, David Rowley) - § - - - - - - - - Add full WAL buffer count to EXPLAIN - (WAL) output (Bertrand Drouvot) - § - - - - - - - - In EXPLAIN ANALYZE, report the number of index - lookups used per index scan node (Peter Geoghegan) - § - - - - - - - - Modify EXPLAIN to output fractional row counts - (Ibrar Ahmed, Ilia Evdokimov, Robert Haas) - § - § - - - - - - - - Add memory and disk usage details to Material, - Window Aggregate, and common table expression - nodes to EXPLAIN output (David Rowley, Tatsuo - Ishii) - § - § - § - § - - - - - - - - - Add details about window function arguments to - EXPLAIN output (Tom Lane) - § - - - - - - - - Add Parallel Bitmap Heap Scan worker cache - statistics to EXPLAIN ANALYZE (David Geier, - Heikki Linnakangas, Donghang Lin, Alena Rybakina, David Rowley) - § - - - - - - - - Indicate disabled nodes in EXPLAIN ANALYZE output - (Robert Haas, David Rowley, Laurenz Albe) - § - § - § - - - - - - - - - - - Data Types - - - - - - - - Improve Unicode - full case mapping and conversion (Jeff Davis) - § - § - - - - This adds the ability to do conditional and title case mapping, - and case map single characters to multiple characters. - - - - - - - - Allow jsonb - null values to be cast to scalar types as - NULL (Tom Lane) - § - - - - Previously such casts generated an error. 
- - - - - - - - Add optional parameter to json{b}_strip_nulls - to allow removal of null array elements (Florents Tselai) - § - - - - - - - - Add function array_sort() - which sorts an array's first dimension (Junwang Zhao, Jian He) - § - - - - - - - - Add function array_reverse() - which reverses an array's first dimension (Aleksander Alekseev) - § - - - - - - - - Add function reverse() - to reverse bytea bytes (Aleksander Alekseev) - § - - - - - - - - Allow casting between integer types and bytea (Aleksander - Alekseev) - § - - - - The integer values are stored as bytea two's complement - values. - - - - - - - - Update Unicode data to Unicode 16.0.0 (Peter - Eisentraut) - § - - - - - - - - Add full text search stemming for Estonian - (Tom Lane) - § - - - - - - - - Improve the XML - error codes to more closely match the SQL standard - (Tom Lane) - § - - - - These errors are reported via SQLSTATE. - - - - - - - - - Functions - - - - - - - - Add function casefold() - to allow for more sophisticated case-insensitive matching (Jeff Davis) - § - - - - This allows more accurate comparisons, i.e., a character can have - multiple upper or lower case equivalents, or upper or lower case - conversion changes the number of characters. - - - - - - - - Allow MIN()/MAX() - aggregates on arrays and composite types (Aleksander Alekseev, - Marat Buharov) - § - § - - - - - - - - Add a WEEK option to EXTRACT() - (Tom Lane) - § - - - - - - - - Improve the output EXTRACT(QUARTER ...) for - negative values (Tom Lane) - § - - - - - - - - Add roman numeral support to to_number() - (Hunaid Sohail) - § - - - - This is accessed via the RN pattern. - - - - - - - - Add UUID - version 7 generation function uuidv7() - (Andrey Borodin) - § - - - - This UUID value is - temporally sortable. Function alias uuidv4() - has been added to explicitly generate version 4 UUIDs. - - - - - - - - Add functions crc32() - and crc32c() - to compute CRC values (Aleksander Alekseev) - § - - - - - - - - Add math functions gamma() - and lgamma() - (Dean Rasheed) - § - - - - - - - - Allow => syntax for named cursor arguments in - PL/pgSQL (Pavel Stehule) - § - - - - We previously only accepted :=. - - - - - - - - Allow regexp_match[es]()/regexp_like()/regexp_replace()/regexp_count()/regexp_instr()/regexp_substr()/regexp_split_to_table()/regexp_split_to_array() - to use named arguments (Jian He) - § - - - - - - - - - <xref linkend="libpq"/> - - - - - - - - Add function PQfullProtocolVersion() - to report the full, including minor, protocol version number (Jacob - Champion, Jelte Fennema-Nio) - § - - - - - - - - Add libpq connection parameters - and environment variables to - specify the minimum and maximum acceptable protocol version for - connections (Jelte Fennema-Nio) - § - § - - - - - - - - Add libpq function PQservice() - to return the connection service name (Michael Banck) - § - - - - - - - - Report changes to the client - (Alexander Kukushkin, Jelte Fennema-Nio, Tomas Vondra) - § - § - - - - - - - - Add PQtrace() output - for all message types, including authentication (Jelte Fennema-Nio) - § - § - § - § - § - - - - - - - - Add libpq connection parameter sslkeylogfile - which dumps out SSL key material (Abhishek Chanda, - Daniel Gustafsson) - § - - - - This is useful for debugging. - - - - - - - - Modify some libpq function signatures to use - int64_t (Thomas Munro) - § - - - - These previously used pg_int64, which is now - deprecated. 
- - - - - - - - - <xref linkend="app-psql"/> - - - - - - - - Allow psql to parse, bind, and close - named prepared statements (Anthonin Bonnefoy, Michael Paquier) - § - - - - This is accomplished with new commands \parse, - \bind_named, - and \close_prepared. - - - - - - - - Add psql backslash commands to allowing - issuance of pipeline queries (Anthonin Bonnefoy) - § - § - § - - - - The new commands are \startpipeline, - \syncpipeline, \sendpipeline, - \endpipeline, \flushrequest, - \flush, and \getresults. - - - - - - - - Allow adding pipeline status to the psql - prompt and add related state variables (Anthonin Bonnefoy) - § - - - - The new prompt character is %P and - the new psql variables are PIPELINE_SYNC_COUNT, - PIPELINE_COMMAND_COUNT, - and PIPELINE_RESULT_COUNT. - - - - - - - - Allow adding the connection service name to the - psql prompt or access it via - psql variable (Michael Banck) - § - - - - - - - - Add psql option to use expanded mode on - all list commands (Dean Rasheed) - § - - - - Adding backslash suffix x enables this. - - - - - - - - Change psql's to use tabular format - and include more information (Álvaro Herrera, Maiquel Grassi, - Hunaid Sohail) - § - - - - - - - - Add function's leakproof indicator - to psql's \df+, - \do+, \dAo+, and - \dC+ outputs (Yugo Nagata) - § - - - - - - - - Add access method details for partitioned relations in \dP+ - (Justin Pryzby) - § - - - - - - - - Add default_version - to the psql \dx - extension output (Magnus Hagander) - § - - - - - - - - Add psql variable to set the default \watch - wait time (Daniel Gustafsson) - § - - - - - - - - - Server Applications - - - - - - - - Change to default to enabling checksums - (Greg Sabino Mullane) - § - § - - - - The new initdb option - disables checksums. - - - - - - - - Add initdb option - to avoid syncing heap/index - files (Nathan Bossart) - § - - - - initdb option - is still available to avoid syncing any files. - - - - - - - - Add option - to compute only missing - optimizer statistics (Corey Huinker, Nathan Bossart) - § - § - - - - This option can only be used by - and . - - - - - - - - Add option - / to enable hard linking - (Israel Barth Rubio, Robert Haas) - § - - - - Only some files can be hard linked. This should not be used if the - backups will be used independently. - - - - - - - - Allow to verify tar-format - backups (Amul Sul) - § - - - - - - - - If 's - specifies a database name, use it in - output (Masahiko Sawada) - § - - - - - - - - Add option - to change the default - char signedness (Masahiko Sawada) - § - - - - - - - - <link - linkend="app-pgdump"><application>pg_dump</application></link>/<link - linkend="app-pg-dumpall"><application>pg_dumpall</application></link>/<link - linkend="app-pgrestore"><application>pg_restore</application></link> - - - - - - - - Allow to dump in the same output - formats as pg_dump supports (Mahendra - Singh Thalor, Andrew Dunstan) - § - - - - Also modify to handle such dumps. - Previously pg_dumpall only supported - text format. - - - - - - - - Add options - , , - and (Jeff Davis) - § - - - - - - - - Add pg_dump and option to - dump sequence data that would normally be excluded (Nathan Bossart) - § - § - - - - - - - - Add , , - and options - , , - , and - (Corey Huinker, Jeff Davis) - § - - - - - - - - Add option to disable row level - security policy processing in , - , - (Nikolay Samokhvalov) - § - - - - This is useful for migrating to systems with different policies. 
- - - - - - - - - <xref linkend="pgupgrade"/> - - - - - - - - Allow pg_upgrade to preserve optimizer - statistics (Corey Huinker, Jeff Davis, Nathan Bossart) - § - § - § - § - - - - Extended statistics are not preserved. Also add - pg_upgrade option - to disable statistics preservation. - - - - - - - - Allow pg_upgrade to process database - checks in parallel (Nathan Bossart) - § - § - § - § - § - § - § - § - § - § - § - - - - This is controlled by the existing option. - - - - - - - - Add pg_upgrade option - to swap directories rather than copy, clone, - or link files (Nathan Bossart) - § - - - - This mode is potentially the fastest. - - - - - - - - Add pg_upgrade option - to set the default - char signedness of new cluster (Masahiko Sawada) - § - § - - - - This is to handle cases where a - pre-PostgreSQL 18 cluster's default - CPU signedness does not match the new cluster. - - - - - - - - - Logical Replication Applications - - - - - - - - Add option - to create logical replicas for all databases - (Shubham Khanna) - § - - - - - - - - Add pg_createsubscriber option - to remove publications (Shubham Khanna) - § - - - - - - - - Add pg_createsubscriber option - to enable prepared transactions - (Shubham Khanna) - § - - - - - - - - Add option - to specify failover slots (Hayato Kuroda) - § - - - - - - - - Allow pg_recvlogical - to work without - (Hayato Kuroda) - § - - - - - - - - - - - Source Code - - - - - - - - Separate the loading and running of injection points - (Michael Paquier, Heikki Linnakangas) - § - § - - - - Injection points can now be created, but not run, via INJECTION_POINT_LOAD(), - and such injection points can be run via INJECTION_POINT_CACHED(). - - - - - - - - Support runtime arguments in injection points (Michael Paquier) - § - - - - - - - - Allow inline injection point test code with IS_INJECTION_POINT_ATTACHED() - (Heikki Linnakangas) - § - - - - - - - - Improve the performance of processing long JSON strings using - SIMD (Single Instruction Multiple Data) (David - Rowley) - § - - - - - - - - Speed up CRC32C calculations using x86 AVX-512 - instructions (Raghuveer Devulapalli, Paul Amonson) - § - - - - - - - - Add ARM Neon and SVE CPU - intrinsics for popcount (integer bit counting) (Chiranmoy - Bhattacharya, Devanga Susmitha, Rama Malladi) - § - § - - - - - - - - Improve the speed of numeric multiplication and division (Joel - Jacobson, Dean Rasheed) - § - § - § - § - - - - - - - - Add configure option - to enable NUMA awareness (Jakub Wartak, Bertrand - Drouvot) - § - § - § - - - - The function pg_numa_available() - reports on NUMA awareness, and system views pg_shmem_allocations_numa - and pg_buffercache_numa - which report on shared memory distribution across - NUMA nodes. - - - - - - - - Add TOAST table to pg_index - to allow for very large expression indexes (Nathan Bossart) - § - - - - - - - - Remove column pg_attribute.attcacheoff - (David Rowley) - § - - - - - - - - Add column pg_class.relallfrozen - (Melanie Plageman) - § - - - - - - - - Add amgettreeheight, - amconsistentequality, and - amconsistentordering to the index access method - API (Mark Dilger) - § - § - - - - - - - - Add GiST support function stratnum() - (Paul A. Jungwirth) - § - - - - - - - - Record the default CPU signedness of - char in - (Masahiko Sawada) - § - - - - - - - - Add support for Python "Limited API" in (Peter Eisentraut) - § - § - - - - This helps prevent problems caused by - Python 3.x version mismatches. 
- - - - - - - - Change the minimum supported Python - version to 3.6.8 (Jacob Champion) - § - - - - - - - - Remove support for OpenSSL versions older - than 1.1.1 (Daniel Gustafsson) - § - § - - - - - - - - If LLVM is enabled, require version 14 - or later (Thomas Munro) - § - - - - - - - - Add macro PG_MODULE_MAGIC_EXT - to allow extensions to report their name and version (Andrei Lepikhov) - § - - - - This information can be access via the new function pg_get_loaded_modules(). - - - - - - - - Document that SPI_connect()/SPI_connect_ext() - always returns success (SPI_OK_CONNECT) (Stepan - Neretin) - § - - - - Errors are always reported via ereport(). - - - - - - - - Add documentation - section about API and ABI - compatibility (David Wheeler, Peter Eisentraut) - § - - - - - - - - Remove the experimental designation of - Meson builds on Windows (Aleksander Alekseev) - § - - - - - - - - Remove configure options and - (Thomas Munro) - § - § - - - - Thirty-two-bit atomic operations are now required. - - - - - - - - Remove support for the - HPPA/PA-RISC architecture - (Tom Lane) - § - - - - - - - - - Additional Modules - - - - - - - - Add extension to inspect logical - snapshots (Bertrand Drouvot) - § - - - - - - - - Add extension which adds debug details - to EXPLAIN - output (Robert Haas) - § - - - - - - - - Add output columns to postgres_fdw_get_connections() - (Hayato Kuroda, Sagar Dilip Shedge) - § - § - § - § - - - - New output column used_in_xact indicates - if the foreign data wrapper is being used by a current transaction, - closed indicates if it is closed, - user_name indicates the user name, and - remote_backend_pid indicates the remote - backend process identifier. - - - - - - - - Allow SCRAM - authentication from the client to be passed to servers (Matheus Alcantara, Peter Eisentraut) - § - - - - This avoids storing postgres_fdw - authentication information in the database, and is - enabled with the postgres_fdw use_scram_passthrough - connection option. libpq uses new connection parameters - and . - - - - - - - - Allow SCRAM authentication from the client to be - passed to servers (Matheus Alcantara) - § - - - - - - - - Add on_error and log_verbosity - options to (Atsushi Torikoshi) - § - - - - These control how file_fdw handles and - reports invalid file rows. - - - - - - - - Add reject_limit to control the number of - invalid rows file_fdw can ignore (Atsushi - Torikoshi) - § - - - - This is active when ON_ERROR = 'ignore'. - - - - - - - - Add configurable variable min_password_length to - (Emanuele Musella, Maurizio Boriani) - § - - - - This controls the minimum password length. - - - - - - - - Have report the number of failed, retried, - or skipped transactions in per-script reports (Yugo Nagata) - § - - - - - - - - Add server variable weak - to control invalid check digit acceptance (Viktor Holmberg) - § - - - - This was previously only controlled by function isn_weak(). - - - - - - - - Allow values to be sorted to speed - index builds (Bernd Helmle, Andrey Borodin) - § - - - - - - - - Add check function gin_index_check() - to verify GIN indexes (Grigory Kryachko, Heikki - Linnakangas, Andrey Borodin) - § - - - - - - - - Add functions pg_buffercache_evict_relation() - and pg_buffercache_evict_all() - to evict unpinned shared buffers (Nazir Bilal Yavuz) - § - - - - The existing function pg_buffercache_evict() - now returns the buffer flush status. 
- - - - - - - - Allow extensions to install custom - options (Robert Haas, Sami Imseih) - § - § - § - - - - - - - - Allow extensions to use the server's cumulative statistics - API (Michael Paquier) - § - § - - - - - - - <xref linkend="pgstatstatements"/> - - - - - - - - Allow the queries of - and to be tracked by - pg_stat_statements (Anthonin Bonnefoy) - § - - - - They are also now assigned query ids. - - - - - - - - Allow the parameterization of values in - pg_stat_statements (Greg Sabino Mullane, - Michael Paquier) - § - - - - This reduces the bloat caused by SET statements - with differing constants. - - - - - - - - Add pg_stat_statements - columns to report parallel activity (Guillaume Lelarge) - § - - - - The new columns are - parallel_workers_to_launch and - parallel_workers_launched. - - - - - - - - Add - pg_stat_statements.wal_buffers_full - to report full WAL buffers (Bertrand Drouvot) - § - - - - - - - - - <xref linkend="pgcrypto"/> - - - - - - - - Add pgcrypto algorithms sha256crypt - and sha512crypt - (Bernd Helmle) - § - - - - - - - - Add CFB mode - to pgcrypto encryption and decryption - (Umar Hayat) - § - - - - - - - - Add function fips_mode() - to report the server's FIPS mode (Daniel - Gustafsson) - § - - - - - - - - Add pgcrypto server variable builtin_crypto_enabled - to allow disabling builtin non-FIPS mode - cryptographic functions (Daniel Gustafsson, Joe Conway) - § - - - - This is useful for guaranteeing FIPS mode behavior. - - - - - - - - - - - - Acknowledgments - - - The following individuals (in alphabetical order) have contributed - to this release as patch authors, committers, reviewers, testers, - or reporters of issues. - - - - (to be completed) - - - - diff --git a/doc/src/sgml/release-19.sgml b/doc/src/sgml/release-19.sgml new file mode 100644 index 0000000000000..8d242b5b28141 --- /dev/null +++ b/doc/src/sgml/release-19.sgml @@ -0,0 +1,16 @@ + + + + + Release 19 + + + Release date: + 2026-??-?? + + + + This is just a placeholder for now. + + + diff --git a/doc/src/sgml/release.sgml b/doc/src/sgml/release.sgml index cee577ff8d353..a659d382db95c 100644 --- a/doc/src/sgml/release.sgml +++ b/doc/src/sgml/release.sgml @@ -70,7 +70,7 @@ For new features, add links to the documentation sections. All the active branches have to be edited concurrently when doing that. --> -&release-18; +&release-19; Prior Releases diff --git a/meson.build b/meson.build index 6ffe7b4727556..36e168a1a2ace 100644 --- a/meson.build +++ b/meson.build @@ -8,7 +8,7 @@ project('postgresql', ['c'], - version: '18beta1', + version: '19devel', license: 'PostgreSQL', # We want < 0.56 for python 3.5 compatibility on old platforms. EPEL for diff --git a/src/tools/git_changelog b/src/tools/git_changelog index b8bd874f20858..dccf938685a3a 100755 --- a/src/tools/git_changelog +++ b/src/tools/git_changelog @@ -59,6 +59,7 @@ require IPC::Open2; # (We could get this from "git branches", but not worth the trouble.) # NB: master must be first! my @BRANCHES = qw(master + REL_18_STABLE REL_17_STABLE REL_16_STABLE REL_15_STABLE REL_14_STABLE REL_13_STABLE REL_12_STABLE REL_11_STABLE REL_10_STABLE REL9_6_STABLE REL9_5_STABLE REL9_4_STABLE REL9_3_STABLE REL9_2_STABLE REL9_1_STABLE REL9_0_STABLE diff --git a/src/tools/version_stamp.pl b/src/tools/version_stamp.pl index c3509474d83b2..a9d2d0910f3af 100755 --- a/src/tools/version_stamp.pl +++ b/src/tools/version_stamp.pl @@ -25,7 +25,7 @@ # Major version is hard-wired into the script. We update it when we branch # a new development version. 
-my $majorversion = 18; +my $majorversion = 19; # Validate argument and compute derived variables my $minor = shift; From 5ba00e175a4eaefa4dc38ea14c667bbeb13af305 Mon Sep 17 00:00:00 2001 From: Michael Paquier Date: Mon, 30 Jun 2025 13:56:31 +0900 Subject: [PATCH 092/181] Align log_line_prefix in CI and TAP tests with pg_regress.c log_line_prefix is changed to include "%b", the backend type, in the TAP test configuration. %v and %x are removed from the CI configuration, with the format around %b changed. The lack of backend type in the postgresql.conf set by Cluster.pm for the TAP test configuration had been bugging me, and began the discussion that led to this change. The change in the CI came up during that discussion, to become consistent with pg_regress.c; %v and %x are not that useful to have. Reviewed-by: Andres Freund Reviewed-by: Fujii Masao Reviewed-by: Daniel Gustafsson Reviewed-by: Tom Lane Discussion: https://postgr.es/m/aC0VaIWAXLgXcHVP@paquier.xyz --- src/test/perl/PostgreSQL/Test/Cluster.pm | 2 +- src/tools/ci/pg_ci_base.conf | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/test/perl/PostgreSQL/Test/Cluster.pm b/src/test/perl/PostgreSQL/Test/Cluster.pm index 1c11750ac1d07..49b2c86b29cbf 100644 --- a/src/test/perl/PostgreSQL/Test/Cluster.pm +++ b/src/test/perl/PostgreSQL/Test/Cluster.pm @@ -684,7 +684,7 @@ sub init print $conf "\n# Added by PostgreSQL::Test::Cluster.pm\n"; print $conf "fsync = off\n"; print $conf "restart_after_crash = off\n"; - print $conf "log_line_prefix = '%m [%p] %q%a '\n"; + print $conf "log_line_prefix = '%m %b[%p] %q%a '\n"; print $conf "log_statement = all\n"; print $conf "log_replication_commands = on\n"; print $conf "wal_retrieve_retry_interval = '500ms'\n"; diff --git a/src/tools/ci/pg_ci_base.conf b/src/tools/ci/pg_ci_base.conf index 9cec5c2910d80..695e0a0d6ec9e 100644 --- a/src/tools/ci/pg_ci_base.conf +++ b/src/tools/ci/pg_ci_base.conf @@ -10,5 +10,5 @@ log_autovacuum_min_duration = 0 log_checkpoints = true log_connections = all log_disconnections = true -log_line_prefix = '%m [%p][%b] %q[%a][%v:%x] ' +log_line_prefix = '%m %b[%p] %q%a ' log_lock_waits = true From 2252fcd4276cfeabae8786ab7c5a421dd674743e Mon Sep 17 00:00:00 2001 From: Michael Paquier Date: Mon, 30 Jun 2025 15:42:50 +0900 Subject: [PATCH 093/181] Rationalize handling of VacuumParams This commit refactors the vacuum routines that rely on VacuumParams, adding const markers where necessary to force a new policy in the code. This structure should not be passed by pointer, as it may be used across multiple relations, and its contents should never be updated. vacuum_rel() stands as an exception, as it touches the "index_cleanup" and "truncate" options. VacuumParams was introduced in 0d831389749a, and 661643dedad9 fixed a bug impacting VACUUM operating on multiple relations. The changes done in tableam.h break ABI compatibility, so this commit can only happen on HEAD.
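To make the new calling convention concrete, here is a minimal, self-contained C sketch (illustrative only, not PostgreSQL code; DemoParams and both function names are invented for the example) of passing an options struct by const value, with a mutable local copy taken by the one routine that legitimately needs to adjust the options, as vacuum_rel() does:

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-in for a parameter struct such as VacuumParams. */
    typedef struct DemoParams
    {
        int         options;
        bool        truncate;
    } DemoParams;

    /*
     * Passed by const value: the callee works on its own copy and the
     * compiler rejects any assignment to it, so a single struct can
     * safely drive the processing of many relations.
     */
    static void
    process_relation(const char *relname, const DemoParams params)
    {
        /* params.truncate = false;  would not compile: params is const */
        printf("%s: options=%d truncate=%d\n",
               relname, params.options, (int) params.truncate);
    }

    /*
     * A routine that must adjust the options works on a mutable local
     * copy, leaving the caller's struct untouched.
     */
    static void
    process_with_overrides(const char *relname, const DemoParams params)
    {
        DemoParams  local = params; /* private, mutable copy */

        local.truncate = false;     /* per-relation override */
        process_relation(relname, local);
    }

    int
    main(void)
    {
        const DemoParams params = {.options = 1, .truncate = true};

        process_relation("rel_a", params);
        process_with_overrides("rel_b", params);
        return 0;
    }

The point of the const-value convention is that misuse becomes a compile-time error rather than a shared-state bug, at the cost of copying a small struct on each call.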
Author: Shihao Zhong Co-authored-by: Michael Paquier Reviewed-by: Nathan Bossart Reviewed-by: Junwang Zhao Discussion: https://postgr.es/m/CAGRkXqTo+aK=GTy5pSc-9cy8H2F2TJvcrZ-zXEiNJj93np1UUw@mail.gmail.com --- src/backend/access/heap/vacuumlazy.c | 44 +++++----- src/backend/commands/analyze.c | 26 +++--- src/backend/commands/cluster.c | 2 +- src/backend/commands/vacuum.c | 118 ++++++++++++--------------- src/backend/postmaster/autovacuum.c | 2 +- src/include/access/heapam.h | 4 +- src/include/access/tableam.h | 6 +- src/include/commands/vacuum.h | 6 +- 8 files changed, 98 insertions(+), 110 deletions(-) diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c index 4111a8996b5a1..75979530897cd 100644 --- a/src/backend/access/heap/vacuumlazy.c +++ b/src/backend/access/heap/vacuumlazy.c @@ -423,7 +423,7 @@ typedef struct LVSavedErrInfo /* non-export function prototypes */ static void lazy_scan_heap(LVRelState *vacrel); static void heap_vacuum_eager_scan_setup(LVRelState *vacrel, - VacuumParams *params); + const VacuumParams params); static BlockNumber heap_vac_scan_next_block(ReadStream *stream, void *callback_private_data, void *per_buffer_data); @@ -485,7 +485,7 @@ static void restore_vacuum_error_info(LVRelState *vacrel, * vacuum options or for relfrozenxid/relminmxid advancement. */ static void -heap_vacuum_eager_scan_setup(LVRelState *vacrel, VacuumParams *params) +heap_vacuum_eager_scan_setup(LVRelState *vacrel, const VacuumParams params) { uint32 randseed; BlockNumber allvisible; @@ -504,7 +504,7 @@ heap_vacuum_eager_scan_setup(LVRelState *vacrel, VacuumParams *params) vacrel->eager_scan_remaining_successes = 0; /* If eager scanning is explicitly disabled, just return. */ - if (params->max_eager_freeze_failure_rate == 0) + if (params.max_eager_freeze_failure_rate == 0) return; /* @@ -581,11 +581,11 @@ heap_vacuum_eager_scan_setup(LVRelState *vacrel, VacuumParams *params) vacrel->next_eager_scan_region_start = randseed % EAGER_SCAN_REGION_SIZE; - Assert(params->max_eager_freeze_failure_rate > 0 && - params->max_eager_freeze_failure_rate <= 1); + Assert(params.max_eager_freeze_failure_rate > 0 && + params.max_eager_freeze_failure_rate <= 1); vacrel->eager_scan_max_fails_per_region = - params->max_eager_freeze_failure_rate * + params.max_eager_freeze_failure_rate * EAGER_SCAN_REGION_SIZE; /* @@ -612,7 +612,7 @@ heap_vacuum_eager_scan_setup(LVRelState *vacrel, VacuumParams *params) * and locked the relation. */ void -heap_vacuum_rel(Relation rel, VacuumParams *params, +heap_vacuum_rel(Relation rel, const VacuumParams params, BufferAccessStrategy bstrategy) { LVRelState *vacrel; @@ -634,9 +634,9 @@ heap_vacuum_rel(Relation rel, VacuumParams *params, ErrorContextCallback errcallback; char **indnames = NULL; - verbose = (params->options & VACOPT_VERBOSE) != 0; + verbose = (params.options & VACOPT_VERBOSE) != 0; instrument = (verbose || (AmAutoVacuumWorkerProcess() && - params->log_min_duration >= 0)); + params.log_min_duration >= 0)); if (instrument) { pg_rusage_init(&ru0); @@ -699,9 +699,9 @@ heap_vacuum_rel(Relation rel, VacuumParams *params, * The truncate param allows user to avoid attempting relation truncation, * though it can't force truncation to happen. 
*/ - Assert(params->index_cleanup != VACOPTVALUE_UNSPECIFIED); - Assert(params->truncate != VACOPTVALUE_UNSPECIFIED && - params->truncate != VACOPTVALUE_AUTO); + Assert(params.index_cleanup != VACOPTVALUE_UNSPECIFIED); + Assert(params.truncate != VACOPTVALUE_UNSPECIFIED && + params.truncate != VACOPTVALUE_AUTO); /* * While VacuumFailSafeActive is reset to false before calling this, we @@ -711,14 +711,14 @@ heap_vacuum_rel(Relation rel, VacuumParams *params, vacrel->consider_bypass_optimization = true; vacrel->do_index_vacuuming = true; vacrel->do_index_cleanup = true; - vacrel->do_rel_truncate = (params->truncate != VACOPTVALUE_DISABLED); - if (params->index_cleanup == VACOPTVALUE_DISABLED) + vacrel->do_rel_truncate = (params.truncate != VACOPTVALUE_DISABLED); + if (params.index_cleanup == VACOPTVALUE_DISABLED) { /* Force disable index vacuuming up-front */ vacrel->do_index_vacuuming = false; vacrel->do_index_cleanup = false; } - else if (params->index_cleanup == VACOPTVALUE_ENABLED) + else if (params.index_cleanup == VACOPTVALUE_ENABLED) { /* Force index vacuuming. Note that failsafe can still bypass. */ vacrel->consider_bypass_optimization = false; @@ -726,7 +726,7 @@ heap_vacuum_rel(Relation rel, VacuumParams *params, else { /* Default/auto, make all decisions dynamically */ - Assert(params->index_cleanup == VACOPTVALUE_AUTO); + Assert(params.index_cleanup == VACOPTVALUE_AUTO); } /* Initialize page counters explicitly (be tidy) */ @@ -789,7 +789,7 @@ heap_vacuum_rel(Relation rel, VacuumParams *params, */ vacrel->skippedallvis = false; skipwithvm = true; - if (params->options & VACOPT_DISABLE_PAGE_SKIPPING) + if (params.options & VACOPT_DISABLE_PAGE_SKIPPING) { /* * Force aggressive mode, and disable skipping blocks using the @@ -830,7 +830,7 @@ heap_vacuum_rel(Relation rel, VacuumParams *params, * is already dangerously old.) 
*/ lazy_check_wraparound_failsafe(vacrel); - dead_items_alloc(vacrel, params->nworkers); + dead_items_alloc(vacrel, params.nworkers); /* * Call lazy_scan_heap to perform all required heap pruning, index @@ -947,9 +947,9 @@ heap_vacuum_rel(Relation rel, VacuumParams *params, { TimestampTz endtime = GetCurrentTimestamp(); - if (verbose || params->log_min_duration == 0 || + if (verbose || params.log_min_duration == 0 || TimestampDifferenceExceeds(starttime, endtime, - params->log_min_duration)) + params.log_min_duration)) { long secs_dur; int usecs_dur; @@ -984,10 +984,10 @@ heap_vacuum_rel(Relation rel, VacuumParams *params, * Aggressiveness already reported earlier, in dedicated * VACUUM VERBOSE ereport */ - Assert(!params->is_wraparound); + Assert(!params.is_wraparound); msgfmt = _("finished vacuuming \"%s.%s.%s\": index scans: %d\n"); } - else if (params->is_wraparound) + else if (params.is_wraparound) { /* * While it's possible for a VACUUM to be both is_wraparound diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c index 4fffb76e55735..7111d5d5334f2 100644 --- a/src/backend/commands/analyze.c +++ b/src/backend/commands/analyze.c @@ -76,7 +76,7 @@ static BufferAccessStrategy vac_strategy; static void do_analyze_rel(Relation onerel, - VacuumParams *params, List *va_cols, + const VacuumParams params, List *va_cols, AcquireSampleRowsFunc acquirefunc, BlockNumber relpages, bool inh, bool in_outer_xact, int elevel); static void compute_index_stats(Relation onerel, double totalrows, @@ -107,7 +107,7 @@ static Datum ind_fetch_func(VacAttrStatsP stats, int rownum, bool *isNull); */ void analyze_rel(Oid relid, RangeVar *relation, - VacuumParams *params, List *va_cols, bool in_outer_xact, + const VacuumParams params, List *va_cols, bool in_outer_xact, BufferAccessStrategy bstrategy) { Relation onerel; @@ -116,7 +116,7 @@ analyze_rel(Oid relid, RangeVar *relation, BlockNumber relpages = 0; /* Select logging level */ - if (params->options & VACOPT_VERBOSE) + if (params.options & VACOPT_VERBOSE) elevel = INFO; else elevel = DEBUG2; @@ -138,8 +138,8 @@ analyze_rel(Oid relid, RangeVar *relation, * * Make sure to generate only logs for ANALYZE in this case. */ - onerel = vacuum_open_relation(relid, relation, params->options & ~(VACOPT_VACUUM), - params->log_min_duration >= 0, + onerel = vacuum_open_relation(relid, relation, params.options & ~(VACOPT_VACUUM), + params.log_min_duration >= 0, ShareUpdateExclusiveLock); /* leave if relation could not be opened or locked */ @@ -155,7 +155,7 @@ analyze_rel(Oid relid, RangeVar *relation, */ if (!vacuum_is_permitted_for_relation(RelationGetRelid(onerel), onerel->rd_rel, - params->options & ~VACOPT_VACUUM)) + params.options & ~VACOPT_VACUUM)) { relation_close(onerel, ShareUpdateExclusiveLock); return; @@ -227,7 +227,7 @@ analyze_rel(Oid relid, RangeVar *relation, else { /* No need for a WARNING if we already complained during VACUUM */ - if (!(params->options & VACOPT_VACUUM)) + if (!(params.options & VACOPT_VACUUM)) ereport(WARNING, (errmsg("skipping \"%s\" --- cannot analyze non-tables or special system tables", RelationGetRelationName(onerel)))); @@ -275,7 +275,7 @@ analyze_rel(Oid relid, RangeVar *relation, * appropriate acquirefunc for each child table. 
*/ static void -do_analyze_rel(Relation onerel, VacuumParams *params, +do_analyze_rel(Relation onerel, const VacuumParams params, List *va_cols, AcquireSampleRowsFunc acquirefunc, BlockNumber relpages, bool inh, bool in_outer_xact, int elevel) @@ -309,9 +309,9 @@ do_analyze_rel(Relation onerel, VacuumParams *params, PgStat_Counter startreadtime = 0; PgStat_Counter startwritetime = 0; - verbose = (params->options & VACOPT_VERBOSE) != 0; + verbose = (params.options & VACOPT_VERBOSE) != 0; instrument = (verbose || (AmAutoVacuumWorkerProcess() && - params->log_min_duration >= 0)); + params.log_min_duration >= 0)); if (inh) ereport(elevel, (errmsg("analyzing \"%s.%s\" inheritance tree", @@ -706,7 +706,7 @@ do_analyze_rel(Relation onerel, VacuumParams *params, * amvacuumcleanup() when called in ANALYZE-only mode. The only exception * among core index AMs is GIN/ginvacuumcleanup(). */ - if (!(params->options & VACOPT_VACUUM)) + if (!(params.options & VACOPT_VACUUM)) { for (ind = 0; ind < nindexes; ind++) { @@ -736,9 +736,9 @@ do_analyze_rel(Relation onerel, VacuumParams *params, { TimestampTz endtime = GetCurrentTimestamp(); - if (verbose || params->log_min_duration == 0 || + if (verbose || params.log_min_duration == 0 || TimestampDifferenceExceeds(starttime, endtime, - params->log_min_duration)) + params.log_min_duration)) { long delay_in_ms; WalUsage walusage; diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c index 54a08e4102e14..b55221d44cd00 100644 --- a/src/backend/commands/cluster.c +++ b/src/backend/commands/cluster.c @@ -917,7 +917,7 @@ copy_table_data(Relation NewHeap, Relation OldHeap, Relation OldIndex, bool verb * not to be aggressive about this. */ memset(¶ms, 0, sizeof(VacuumParams)); - vacuum_get_cutoffs(OldHeap, ¶ms, &cutoffs); + vacuum_get_cutoffs(OldHeap, params, &cutoffs); /* * FreezeXid will become the table's new relfrozenxid, and that mustn't go diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c index 02993d320dafc..733ef40ae7c52 100644 --- a/src/backend/commands/vacuum.c +++ b/src/backend/commands/vacuum.c @@ -124,7 +124,7 @@ static void vac_truncate_clog(TransactionId frozenXID, MultiXactId minMulti, TransactionId lastSaneFrozenXid, MultiXactId lastSaneMinMulti); -static bool vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params, +static bool vacuum_rel(Oid relid, RangeVar *relation, VacuumParams params, BufferAccessStrategy bstrategy); static double compute_parallel_delay(void); static VacOptValue get_vacoptval_from_boolean(DefElem *def); @@ -465,7 +465,7 @@ ExecVacuum(ParseState *pstate, VacuumStmt *vacstmt, bool isTopLevel) } /* Now go through the common routine */ - vacuum(vacstmt->rels, ¶ms, bstrategy, vac_context, isTopLevel); + vacuum(vacstmt->rels, params, bstrategy, vac_context, isTopLevel); /* Finally, clean up the vacuum memory context */ MemoryContextDelete(vac_context); @@ -494,7 +494,7 @@ ExecVacuum(ParseState *pstate, VacuumStmt *vacstmt, bool isTopLevel) * memory context that will not disappear at transaction commit. */ void -vacuum(List *relations, VacuumParams *params, BufferAccessStrategy bstrategy, +vacuum(List *relations, const VacuumParams params, BufferAccessStrategy bstrategy, MemoryContext vac_context, bool isTopLevel) { static bool in_vacuum = false; @@ -503,9 +503,7 @@ vacuum(List *relations, VacuumParams *params, BufferAccessStrategy bstrategy, volatile bool in_outer_xact, use_own_xacts; - Assert(params != NULL); - - stmttype = (params->options & VACOPT_VACUUM) ? 
"VACUUM" : "ANALYZE"; + stmttype = (params.options & VACOPT_VACUUM) ? "VACUUM" : "ANALYZE"; /* * We cannot run VACUUM inside a user transaction block; if we were inside @@ -515,7 +513,7 @@ vacuum(List *relations, VacuumParams *params, BufferAccessStrategy bstrategy, * * ANALYZE (without VACUUM) can run either way. */ - if (params->options & VACOPT_VACUUM) + if (params.options & VACOPT_VACUUM) { PreventInTransactionBlock(isTopLevel, stmttype); in_outer_xact = false; @@ -538,7 +536,7 @@ vacuum(List *relations, VacuumParams *params, BufferAccessStrategy bstrategy, * Build list of relation(s) to process, putting any new data in * vac_context for safekeeping. */ - if (params->options & VACOPT_ONLY_DATABASE_STATS) + if (params.options & VACOPT_ONLY_DATABASE_STATS) { /* We don't process any tables in this case */ Assert(relations == NIL); @@ -554,7 +552,7 @@ vacuum(List *relations, VacuumParams *params, BufferAccessStrategy bstrategy, List *sublist; MemoryContext old_context; - sublist = expand_vacuum_rel(vrel, vac_context, params->options); + sublist = expand_vacuum_rel(vrel, vac_context, params.options); old_context = MemoryContextSwitchTo(vac_context); newrels = list_concat(newrels, sublist); MemoryContextSwitchTo(old_context); @@ -562,7 +560,7 @@ vacuum(List *relations, VacuumParams *params, BufferAccessStrategy bstrategy, relations = newrels; } else - relations = get_all_vacuum_rels(vac_context, params->options); + relations = get_all_vacuum_rels(vac_context, params.options); /* * Decide whether we need to start/commit our own transactions. @@ -578,11 +576,11 @@ vacuum(List *relations, VacuumParams *params, BufferAccessStrategy bstrategy, * transaction block, and also in an autovacuum worker, use own * transactions so we can release locks sooner. */ - if (params->options & VACOPT_VACUUM) + if (params.options & VACOPT_VACUUM) use_own_xacts = true; else { - Assert(params->options & VACOPT_ANALYZE); + Assert(params.options & VACOPT_ANALYZE); if (AmAutoVacuumWorkerProcess()) use_own_xacts = true; else if (in_outer_xact) @@ -633,21 +631,13 @@ vacuum(List *relations, VacuumParams *params, BufferAccessStrategy bstrategy, { VacuumRelation *vrel = lfirst_node(VacuumRelation, cur); - if (params->options & VACOPT_VACUUM) + if (params.options & VACOPT_VACUUM) { - VacuumParams params_copy; - - /* - * vacuum_rel() scribbles on the parameters, so give it a copy - * to avoid affecting other relations. - */ - memcpy(¶ms_copy, params, sizeof(VacuumParams)); - - if (!vacuum_rel(vrel->oid, vrel->relation, ¶ms_copy, bstrategy)) + if (!vacuum_rel(vrel->oid, vrel->relation, params, bstrategy)) continue; } - if (params->options & VACOPT_ANALYZE) + if (params.options & VACOPT_ANALYZE) { /* * If using separate xacts, start one for analyze. Otherwise, @@ -711,8 +701,8 @@ vacuum(List *relations, VacuumParams *params, BufferAccessStrategy bstrategy, StartTransactionCommand(); } - if ((params->options & VACOPT_VACUUM) && - !(params->options & VACOPT_SKIP_DATABASE_STATS)) + if ((params.options & VACOPT_VACUUM) && + !(params.options & VACOPT_SKIP_DATABASE_STATS)) { /* * Update pg_database.datfrozenxid, and truncate pg_xact if possible. @@ -1110,7 +1100,7 @@ get_all_vacuum_rels(MemoryContext vac_context, int options) * minimum). 
*/ bool -vacuum_get_cutoffs(Relation rel, const VacuumParams *params, +vacuum_get_cutoffs(Relation rel, const VacuumParams params, struct VacuumCutoffs *cutoffs) { int freeze_min_age, @@ -1126,10 +1116,10 @@ vacuum_get_cutoffs(Relation rel, const VacuumParams *params, aggressiveMXIDCutoff; /* Use mutable copies of freeze age parameters */ - freeze_min_age = params->freeze_min_age; - multixact_freeze_min_age = params->multixact_freeze_min_age; - freeze_table_age = params->freeze_table_age; - multixact_freeze_table_age = params->multixact_freeze_table_age; + freeze_min_age = params.freeze_min_age; + multixact_freeze_min_age = params.multixact_freeze_min_age; + freeze_table_age = params.freeze_table_age; + multixact_freeze_table_age = params.multixact_freeze_table_age; /* Set pg_class fields in cutoffs */ cutoffs->relfrozenxid = rel->rd_rel->relfrozenxid; @@ -2006,7 +1996,7 @@ vac_truncate_clog(TransactionId frozenXID, * At entry and exit, we are not inside a transaction. */ static bool -vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params, +vacuum_rel(Oid relid, RangeVar *relation, VacuumParams params, BufferAccessStrategy bstrategy) { LOCKMODE lmode; @@ -2019,18 +2009,16 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params, int save_nestlevel; VacuumParams toast_vacuum_params; - Assert(params != NULL); - /* * This function scribbles on the parameters, so make a copy early to * avoid affecting the TOAST table (if we do end up recursing to it). */ - memcpy(&toast_vacuum_params, params, sizeof(VacuumParams)); + memcpy(&toast_vacuum_params, ¶ms, sizeof(VacuumParams)); /* Begin a transaction for vacuuming this relation */ StartTransactionCommand(); - if (!(params->options & VACOPT_FULL)) + if (!(params.options & VACOPT_FULL)) { /* * In lazy vacuum, we can set the PROC_IN_VACUUM flag, which lets @@ -2056,7 +2044,7 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params, */ LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE); MyProc->statusFlags |= PROC_IN_VACUUM; - if (params->is_wraparound) + if (params.is_wraparound) MyProc->statusFlags |= PROC_VACUUM_FOR_WRAPAROUND; ProcGlobal->statusFlags[MyProc->pgxactoff] = MyProc->statusFlags; LWLockRelease(ProcArrayLock); @@ -2080,12 +2068,12 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params, * vacuum, but just ShareUpdateExclusiveLock for concurrent vacuum. Either * way, we can be sure that no other backend is vacuuming the same table. */ - lmode = (params->options & VACOPT_FULL) ? + lmode = (params.options & VACOPT_FULL) ? AccessExclusiveLock : ShareUpdateExclusiveLock; /* open the relation and get the appropriate lock on it */ - rel = vacuum_open_relation(relid, relation, params->options, - params->log_min_duration >= 0, lmode); + rel = vacuum_open_relation(relid, relation, params.options, + params.log_min_duration >= 0, lmode); /* leave if relation could not be opened or locked */ if (!rel) @@ -2100,8 +2088,8 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params, * This is only safe to do because we hold a session lock on the main * relation that prevents concurrent deletion. 
*/ - if (OidIsValid(params->toast_parent)) - priv_relid = params->toast_parent; + if (OidIsValid(params.toast_parent)) + priv_relid = params.toast_parent; else priv_relid = RelationGetRelid(rel); @@ -2114,7 +2102,7 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params, */ if (!vacuum_is_permitted_for_relation(priv_relid, rel->rd_rel, - params->options & ~VACOPT_ANALYZE)) + params.options & ~VACOPT_ANALYZE)) { relation_close(rel, lmode); PopActiveSnapshot(); @@ -2185,7 +2173,7 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params, * Set index_cleanup option based on index_cleanup reloption if it wasn't * specified in VACUUM command, or when running in an autovacuum worker */ - if (params->index_cleanup == VACOPTVALUE_UNSPECIFIED) + if (params.index_cleanup == VACOPTVALUE_UNSPECIFIED) { StdRdOptIndexCleanup vacuum_index_cleanup; @@ -2196,23 +2184,23 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params, ((StdRdOptions *) rel->rd_options)->vacuum_index_cleanup; if (vacuum_index_cleanup == STDRD_OPTION_VACUUM_INDEX_CLEANUP_AUTO) - params->index_cleanup = VACOPTVALUE_AUTO; + params.index_cleanup = VACOPTVALUE_AUTO; else if (vacuum_index_cleanup == STDRD_OPTION_VACUUM_INDEX_CLEANUP_ON) - params->index_cleanup = VACOPTVALUE_ENABLED; + params.index_cleanup = VACOPTVALUE_ENABLED; else { Assert(vacuum_index_cleanup == STDRD_OPTION_VACUUM_INDEX_CLEANUP_OFF); - params->index_cleanup = VACOPTVALUE_DISABLED; + params.index_cleanup = VACOPTVALUE_DISABLED; } } #ifdef USE_INJECTION_POINTS - if (params->index_cleanup == VACOPTVALUE_AUTO) + if (params.index_cleanup == VACOPTVALUE_AUTO) INJECTION_POINT("vacuum-index-cleanup-auto", NULL); - else if (params->index_cleanup == VACOPTVALUE_DISABLED) + else if (params.index_cleanup == VACOPTVALUE_DISABLED) INJECTION_POINT("vacuum-index-cleanup-disabled", NULL); - else if (params->index_cleanup == VACOPTVALUE_ENABLED) + else if (params.index_cleanup == VACOPTVALUE_ENABLED) INJECTION_POINT("vacuum-index-cleanup-enabled", NULL); #endif @@ -2222,36 +2210,36 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params, */ if (rel->rd_options != NULL && ((StdRdOptions *) rel->rd_options)->vacuum_max_eager_freeze_failure_rate >= 0) - params->max_eager_freeze_failure_rate = + params.max_eager_freeze_failure_rate = ((StdRdOptions *) rel->rd_options)->vacuum_max_eager_freeze_failure_rate; /* * Set truncate option based on truncate reloption or GUC if it wasn't * specified in VACUUM command, or when running in an autovacuum worker */ - if (params->truncate == VACOPTVALUE_UNSPECIFIED) + if (params.truncate == VACOPTVALUE_UNSPECIFIED) { StdRdOptions *opts = (StdRdOptions *) rel->rd_options; if (opts && opts->vacuum_truncate_set) { if (opts->vacuum_truncate) - params->truncate = VACOPTVALUE_ENABLED; + params.truncate = VACOPTVALUE_ENABLED; else - params->truncate = VACOPTVALUE_DISABLED; + params.truncate = VACOPTVALUE_DISABLED; } else if (vacuum_truncate) - params->truncate = VACOPTVALUE_ENABLED; + params.truncate = VACOPTVALUE_ENABLED; else - params->truncate = VACOPTVALUE_DISABLED; + params.truncate = VACOPTVALUE_DISABLED; } #ifdef USE_INJECTION_POINTS - if (params->truncate == VACOPTVALUE_AUTO) + if (params.truncate == VACOPTVALUE_AUTO) INJECTION_POINT("vacuum-truncate-auto", NULL); - else if (params->truncate == VACOPTVALUE_DISABLED) + else if (params.truncate == VACOPTVALUE_DISABLED) INJECTION_POINT("vacuum-truncate-disabled", NULL); - else if (params->truncate == VACOPTVALUE_ENABLED) + else if (params.truncate == 
VACOPTVALUE_ENABLED) INJECTION_POINT("vacuum-truncate-enabled", NULL); #endif @@ -2261,9 +2249,9 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params, * automatically rebuilt by cluster_rel so we shouldn't recurse to it, * unless PROCESS_MAIN is disabled. */ - if ((params->options & VACOPT_PROCESS_TOAST) != 0 && - ((params->options & VACOPT_FULL) == 0 || - (params->options & VACOPT_PROCESS_MAIN) == 0)) + if ((params.options & VACOPT_PROCESS_TOAST) != 0 && + ((params.options & VACOPT_FULL) == 0 || + (params.options & VACOPT_PROCESS_MAIN) == 0)) toast_relid = rel->rd_rel->reltoastrelid; else toast_relid = InvalidOid; @@ -2286,16 +2274,16 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params, * table is required (e.g., PROCESS_TOAST is set), we force PROCESS_MAIN * to be set when we recurse to the TOAST table. */ - if (params->options & VACOPT_PROCESS_MAIN) + if (params.options & VACOPT_PROCESS_MAIN) { /* * Do the actual work --- either FULL or "lazy" vacuum */ - if (params->options & VACOPT_FULL) + if (params.options & VACOPT_FULL) { ClusterParams cluster_params = {0}; - if ((params->options & VACOPT_VERBOSE) != 0) + if ((params.options & VACOPT_VERBOSE) != 0) cluster_params.options |= CLUOPT_VERBOSE; /* VACUUM FULL is now a variant of CLUSTER; see cluster.c */ @@ -2342,7 +2330,7 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params, toast_vacuum_params.options |= VACOPT_PROCESS_MAIN; toast_vacuum_params.toast_parent = relid; - vacuum_rel(toast_relid, NULL, &toast_vacuum_params, bstrategy); + vacuum_rel(toast_relid, NULL, toast_vacuum_params, bstrategy); } /* diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c index 451fb90a610a7..9474095f271a1 100644 --- a/src/backend/postmaster/autovacuum.c +++ b/src/backend/postmaster/autovacuum.c @@ -3190,7 +3190,7 @@ autovacuum_do_vac_analyze(autovac_table *tab, BufferAccessStrategy bstrategy) rel_list = list_make1(rel); MemoryContextSwitchTo(old_context); - vacuum(rel_list, &tab->at_params, bstrategy, vac_context, true); + vacuum(rel_list, tab->at_params, bstrategy, vac_context, true); MemoryContextDelete(vac_context); } diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h index 3a9424c19c9ae..a2bd5a897f874 100644 --- a/src/include/access/heapam.h +++ b/src/include/access/heapam.h @@ -21,6 +21,7 @@ #include "access/skey.h" #include "access/table.h" /* for backward compatibility */ #include "access/tableam.h" +#include "commands/vacuum.h" #include "nodes/lockoptions.h" #include "nodes/primnodes.h" #include "storage/bufpage.h" @@ -396,9 +397,8 @@ extern void log_heap_prune_and_freeze(Relation relation, Buffer buffer, OffsetNumber *unused, int nunused); /* in heap/vacuumlazy.c */ -struct VacuumParams; extern void heap_vacuum_rel(Relation rel, - struct VacuumParams *params, BufferAccessStrategy bstrategy); + const VacuumParams params, BufferAccessStrategy bstrategy); /* in heap/heapam_visibility.c */ extern bool HeapTupleSatisfiesVisibility(HeapTuple htup, Snapshot snapshot, diff --git a/src/include/access/tableam.h b/src/include/access/tableam.h index 8713e12cbfb99..1c9e802a6b128 100644 --- a/src/include/access/tableam.h +++ b/src/include/access/tableam.h @@ -20,6 +20,7 @@ #include "access/relscan.h" #include "access/sdir.h" #include "access/xact.h" +#include "commands/vacuum.h" #include "executor/tuptable.h" #include "storage/read_stream.h" #include "utils/rel.h" @@ -36,7 +37,6 @@ extern PGDLLIMPORT bool synchronize_seqscans; struct BulkInsertStateData; struct 
IndexInfo; struct SampleScanState; -struct VacuumParams; struct ValidateIndexState; /* @@ -645,7 +645,7 @@ typedef struct TableAmRoutine * integrate with autovacuum's scheduling. */ void (*relation_vacuum) (Relation rel, - struct VacuumParams *params, + const VacuumParams params, BufferAccessStrategy bstrategy); /* @@ -1664,7 +1664,7 @@ table_relation_copy_for_cluster(Relation OldTable, Relation NewTable, * routine, even if (for ANALYZE) it is part of the same VACUUM command. */ static inline void -table_relation_vacuum(Relation rel, struct VacuumParams *params, +table_relation_vacuum(Relation rel, const VacuumParams params, BufferAccessStrategy bstrategy) { rel->rd_tableam->relation_vacuum(rel, params, bstrategy); diff --git a/src/include/commands/vacuum.h b/src/include/commands/vacuum.h index bc37a80dc74fa..14eeccbd71850 100644 --- a/src/include/commands/vacuum.h +++ b/src/include/commands/vacuum.h @@ -336,7 +336,7 @@ extern PGDLLIMPORT int64 parallel_vacuum_worker_delay_ns; /* in commands/vacuum.c */ extern void ExecVacuum(ParseState *pstate, VacuumStmt *vacstmt, bool isTopLevel); -extern void vacuum(List *relations, VacuumParams *params, +extern void vacuum(List *relations, const VacuumParams params, BufferAccessStrategy bstrategy, MemoryContext vac_context, bool isTopLevel); extern void vac_open_indexes(Relation relation, LOCKMODE lockmode, @@ -357,7 +357,7 @@ extern void vac_update_relstats(Relation relation, bool *frozenxid_updated, bool *minmulti_updated, bool in_outer_xact); -extern bool vacuum_get_cutoffs(Relation rel, const VacuumParams *params, +extern bool vacuum_get_cutoffs(Relation rel, const VacuumParams params, struct VacuumCutoffs *cutoffs); extern bool vacuum_xid_failsafe_check(const struct VacuumCutoffs *cutoffs); extern void vac_update_datfrozenxid(void); @@ -398,7 +398,7 @@ extern void parallel_vacuum_main(dsm_segment *seg, shm_toc *toc); /* in commands/analyze.c */ extern void analyze_rel(Oid relid, RangeVar *relation, - VacuumParams *params, List *va_cols, bool in_outer_xact, + const VacuumParams params, List *va_cols, bool in_outer_xact, BufferAccessStrategy bstrategy); extern bool std_typanalyze(VacAttrStats *stats); From c5c4fbb4d482b87c2a6c90337f3b657b2d0002ca Mon Sep 17 00:00:00 2001 From: Daniel Gustafsson Date: Mon, 30 Jun 2025 10:12:31 +0200 Subject: [PATCH 094/181] doc: Fix typo in pg_sync_replication_slots documentation Commit 1546e17f9d0 accidentally misspelled additionally as additionaly. Backpatch to v17 to match where the original commit was backpatched. Author: Daniel Gustafsson Backpatch-through: 17 --- doc/src/sgml/func.sgml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/src/sgml/func.sgml b/doc/src/sgml/func.sgml index 224d4fe5a9f95..298791858be30 100644 --- a/doc/src/sgml/func.sgml +++ b/doc/src/sgml/func.sgml @@ -29981,7 +29981,7 @@ postgres=# SELECT '0/0'::pg_lsn + pd.segment_number * ps.setting::int + :offset logical decoding and must be dropped after promotion. See for details. Note that this function is primarily intended for testing and - debugging purposes and should be used with caution. Additionaly, + debugging purposes and should be used with caution. 
Additionally, this function cannot be executed if sync_replication_slots is enabled and the slotsync From 2e640a0fa224e4233220252b360efd33c98b3e90 Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Mon, 30 Jun 2025 10:32:26 +0200 Subject: [PATCH 095/181] doc: Some copy-editing around prefix operators When postfix operators were dropped in 1ed6b8956, the CREATE OPERATOR docs were not updated to make the RIGHTARG argument mandatory in the grammar. While at it, make the RIGHTARG docs more concise. Also, the operator docs mentioned "infix" in the introduction, while using "binary" everywhere else. Author: Christoph Berg Discussion: https://www.postgresql.org/message-id/flat/aAtpbnQphv4LWAye@msg.df7cb.de --- doc/src/sgml/ref/create_operator.sgml | 6 +++--- doc/src/sgml/xoper.sgml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/src/sgml/ref/create_operator.sgml b/doc/src/sgml/ref/create_operator.sgml index 3553d36454185..d2ffb1b2a500f 100644 --- a/doc/src/sgml/ref/create_operator.sgml +++ b/doc/src/sgml/ref/create_operator.sgml @@ -23,7 +23,7 @@ PostgreSQL documentation CREATE OPERATOR name ( {FUNCTION|PROCEDURE} = function_name - [, LEFTARG = left_type ] [, RIGHTARG = right_type ] + [, LEFTARG = left_type ] , RIGHTARG = right_type [, COMMUTATOR = com_op ] [, NEGATOR = neg_op ] [, RESTRICT = res_proc ] [, JOIN = join_proc ] [, HASHES ] [, MERGES ] @@ -88,8 +88,8 @@ CREATE OPERATOR name ( For binary operators, both LEFTARG and - RIGHTARG must be defined. For prefix operators only - RIGHTARG should be defined. + RIGHTARG must be defined. For prefix operators, only + RIGHTARG must be defined. The function_name function must have been previously defined using CREATE FUNCTION and must be defined to accept the correct number diff --git a/doc/src/sgml/xoper.sgml b/doc/src/sgml/xoper.sgml index 954a90d77d0ed..853b07a9f1489 100644 --- a/doc/src/sgml/xoper.sgml +++ b/doc/src/sgml/xoper.sgml @@ -21,7 +21,7 @@ PostgreSQL supports prefix - and infix operators. Operators can be + and binary (or infix) operators. Operators can be overloaded;overloadingoperators that is, the same operator name can be used for different operators that have different numbers and types of operands. When a query is From 3431e3e4aa3a33e8411f15e76c284cdd4c54ca28 Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Mon, 30 Jun 2025 10:45:08 +0200 Subject: [PATCH 096/181] pgbench: Use standard option handling test routines Run the program_XXX tests instead of pgbench's own tests. This ensures consistency with the test suites of other programs and enforces common policies, such as help line length. Author: Hayato Kuroda Reviewed-by: Fujii Masao Discussion: https://www.postgresql.org/message-id/flat/OSCPR01MB14966247015B7E3D8D340D022F56FA@OSCPR01MB14966.jpnprd01.prod.outlook.com --- src/bin/pgbench/t/002_pgbench_no_server.pl | 18 +++--------------- 1 file changed, 3 insertions(+), 15 deletions(-) diff --git a/src/bin/pgbench/t/002_pgbench_no_server.pl b/src/bin/pgbench/t/002_pgbench_no_server.pl index f975c73dd758a..2cc59cc8140c3 100644 --- a/src/bin/pgbench/t/002_pgbench_no_server.pl +++ b/src/bin/pgbench/t/002_pgbench_no_server.pl @@ -233,21 +233,9 @@ sub pgbench_scripts 'pgbench option error: ' . $name); } -# Help -pgbench( - '--help', 0, - [ - qr{benchmarking tool for PostgreSQL}, - qr{Usage}, - qr{Initialization options:}, - qr{Common options:}, - qr{Report bugs to} - ], - [qr{^$}], - 'pgbench help'); - -# Version -pgbench('-V', 0, [qr{^pgbench .PostgreSQL.
}], [qr{^$}], 'pgbench version'); +program_help_ok('pgbench'); +program_version_ok('pgbench'); +program_options_handling_ok('pgbench'); # list of builtins pgbench( From 960135114629bc89da0dd1d839541098c7e6401a Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Mon, 30 Jun 2025 11:28:11 +0200 Subject: [PATCH 097/181] doc: explain pgstatindex fragmentation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It was quite hard to guess what leaf_fragmentation meant without looking at pgstattuple's code. This patch aims to give the user a better idea of what it means. Author: Frédéric Yhuel Author: Laurenz Albe Reviewed-by: Bertrand Drouvot Reviewed-by: Benoit Lobréau Discussion: https://postgr.es/m/bf110561-f774-4957-a890-bb6fab6804e0%40dalibo.com Discussion: https://postgr.es/m/4c5dee3a-8381-4e0f-b882-d1bd950e8972@dalibo.com --- doc/src/sgml/pgstattuple.sgml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/doc/src/sgml/pgstattuple.sgml b/doc/src/sgml/pgstattuple.sgml index 4071da4ed941a..c747a5818ab7d 100644 --- a/doc/src/sgml/pgstattuple.sgml +++ b/doc/src/sgml/pgstattuple.sgml @@ -270,6 +270,15 @@ leaf_fragmentation | 0 page than is accounted for by internal_pages + leaf_pages + empty_pages + deleted_pages, because it also includes the index's metapage. + avg_leaf_density is the fraction of the index size that + is taken up by user data. Since indexes have a default fillfactor of 90, + this should be around 90 for newly built indexes of non-negligible size, + but usually deteriorates over time. + leaf_fragmentation represents a measure of disorder. + A higher leaf_fragmentation indicates that the + physical order of the index leaf pages increasingly deviates from their + logical order. This can have a significant impact if a large part + of the index is read from disk. From a4c10de9291291bce3dd2b81bd8b5f0b98649244 Mon Sep 17 00:00:00 2001 From: Fujii Masao Date: Mon, 30 Jun 2025 18:36:24 +0900 Subject: [PATCH 098/181] psql: Improve tab completion for COPY command. Previously, tab completion for COPY only suggested plain tables and partitioned tables, even though materialized views are also valid for COPY TO (since commit 534874fac0b), and foreign tables are valid for COPY FROM. This commit enhances tab completion for COPY to also include materialized views and foreign tables. Views with INSTEAD OF INSERT triggers are supported with COPY FROM but rarely used, so plain views are intentionally excluded from completion. Author: jian he Co-authored-by: Fujii Masao Reviewed-by: Kirill Reshke Reviewed-by: David G. Johnston Discussion: https://postgr.es/m/CACJufxFxnSkikp+GormAGHcMTX1YH2HRXW1+3dJM9w7yY9hdsg@mail.gmail.com --- src/bin/psql/tab-complete.in.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/src/bin/psql/tab-complete.in.c b/src/bin/psql/tab-complete.in.c index 908eef97c6e28..8c2ea0b95870a 100644 --- a/src/bin/psql/tab-complete.in.c +++ b/src/bin/psql/tab-complete.in.c @@ -889,6 +889,14 @@ static const SchemaQuery Query_for_list_of_analyzables = { .result = "c.relname", }; +/* + * Relations supporting COPY TO/FROM are currently almost the same as + * those supporting ANALYZE. Although views with INSTEAD OF INSERT triggers + * can be used with COPY FROM, they are rarely used for this purpose, + * so plain views are intentionally excluded from this tab completion.
+ */ +#define Query_for_list_of_tables_for_copy Query_for_list_of_analyzables + /* Relations supporting index creation */ static const SchemaQuery Query_for_list_of_indexables = { .catname = "pg_catalog.pg_class c", @@ -3255,7 +3263,7 @@ match_previous_words(int pattern_id, * backslash command). */ else if (Matches("COPY|\\copy")) - COMPLETE_WITH_SCHEMA_QUERY_PLUS(Query_for_list_of_tables, "("); + COMPLETE_WITH_SCHEMA_QUERY_PLUS(Query_for_list_of_tables_for_copy, "("); /* Complete COPY ( with legal query commands */ else if (Matches("COPY|\\copy", "(")) COMPLETE_WITH("SELECT", "TABLE", "VALUES", "INSERT INTO", "UPDATE", "DELETE FROM", "MERGE INTO", "WITH"); From a6a4641252ed166ba187d7fbe0504ddb5a5f0e33 Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Mon, 30 Jun 2025 11:38:18 +0200 Subject: [PATCH 099/181] Fix whitespace --- src/tools/git_changelog | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tools/git_changelog b/src/tools/git_changelog index dccf938685a3a..c25e399a87f5d 100755 --- a/src/tools/git_changelog +++ b/src/tools/git_changelog @@ -59,7 +59,7 @@ require IPC::Open2; # (We could get this from "git branches", but not worth the trouble.) # NB: master must be first! my @BRANCHES = qw(master - REL_18_STABLE + REL_18_STABLE REL_17_STABLE REL_16_STABLE REL_15_STABLE REL_14_STABLE REL_13_STABLE REL_12_STABLE REL_11_STABLE REL_10_STABLE REL9_6_STABLE REL9_5_STABLE REL9_4_STABLE REL9_3_STABLE REL9_2_STABLE REL9_1_STABLE REL9_0_STABLE From cc2ac0e6f99e4efc3ae5710010ff35e646990a60 Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Mon, 30 Jun 2025 12:00:00 +0200 Subject: [PATCH 100/181] Remove unused #include's in src/backend/utils/adt/* Author: Aleksander Alekseev Reviewed-by: Tom Lane Discussion: https://postgr.es/m/CAJ7c6TOowVbR-0NEvvDm6a_mag18krR0XJ2FKrc9DHXj7hFRtQ%40mail.gmail.com --- src/backend/utils/adt/network.c | 2 -- src/backend/utils/adt/network_spgist.c | 1 - src/backend/utils/adt/pg_locale.c | 1 - src/backend/utils/adt/pg_locale_builtin.c | 1 - src/backend/utils/adt/ri_triggers.c | 2 -- src/backend/utils/adt/selfuncs.c | 1 - 6 files changed, 8 deletions(-) diff --git a/src/backend/utils/adt/network.c b/src/backend/utils/adt/network.c index f03fcc1147bb0..9fd211b2d4576 100644 --- a/src/backend/utils/adt/network.c +++ b/src/backend/utils/adt/network.c @@ -12,8 +12,6 @@ #include #include -#include "access/stratnum.h" -#include "catalog/pg_opfamily.h" #include "catalog/pg_type.h" #include "common/hashfn.h" #include "common/ip.h" diff --git a/src/backend/utils/adt/network_spgist.c b/src/backend/utils/adt/network_spgist.c index a84747d927586..602276a35c3ea 100644 --- a/src/backend/utils/adt/network_spgist.c +++ b/src/backend/utils/adt/network_spgist.c @@ -37,7 +37,6 @@ #include "catalog/pg_type.h" #include "utils/fmgrprotos.h" #include "utils/inet.h" -#include "varatt.h" static int inet_spg_node_number(const inet *val, int commonbits); diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c index f5e31c433a0de..bf1afb24d7da9 100644 --- a/src/backend/utils/adt/pg_locale.c +++ b/src/backend/utils/adt/pg_locale.c @@ -41,7 +41,6 @@ #include "mb/pg_wchar.h" #include "miscadmin.h" #include "utils/builtins.h" -#include "utils/formatting.h" #include "utils/guc_hooks.h" #include "utils/lsyscache.h" #include "utils/memutils.h" diff --git a/src/backend/utils/adt/pg_locale_builtin.c b/src/backend/utils/adt/pg_locale_builtin.c index f51768830cd7b..ce4914a76a12e 100644 --- a/src/backend/utils/adt/pg_locale_builtin.c +++ 
b/src/backend/utils/adt/pg_locale_builtin.c @@ -18,7 +18,6 @@ #include "mb/pg_wchar.h" #include "miscadmin.h" #include "utils/builtins.h" -#include "utils/memutils.h" #include "utils/pg_locale.h" #include "utils/syscache.h" diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c index 6239900fa2892..059fc5ebf601a 100644 --- a/src/backend/utils/adt/ri_triggers.c +++ b/src/backend/utils/adt/ri_triggers.c @@ -30,7 +30,6 @@ #include "access/xact.h" #include "catalog/pg_collation.h" #include "catalog/pg_constraint.h" -#include "catalog/pg_proc.h" #include "commands/trigger.h" #include "executor/executor.h" #include "executor/spi.h" @@ -46,7 +45,6 @@ #include "utils/inval.h" #include "utils/lsyscache.h" #include "utils/memutils.h" -#include "utils/rangetypes.h" #include "utils/rel.h" #include "utils/rls.h" #include "utils/ruleutils.h" diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c index a96b1b9c0bc69..1e0f2de0336b0 100644 --- a/src/backend/utils/adt/selfuncs.c +++ b/src/backend/utils/adt/selfuncs.c @@ -103,7 +103,6 @@ #include "access/table.h" #include "access/tableam.h" #include "access/visibilitymap.h" -#include "catalog/pg_am.h" #include "catalog/pg_collation.h" #include "catalog/pg_operator.h" #include "catalog/pg_statistic.h" From 40a96cd1484fdf3ab57e8cb7b09767ec7a7f73b1 Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Mon, 30 Jun 2025 12:23:33 +0200 Subject: [PATCH 101/181] pgflex: propagate environment to flex subprocess MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Python's subprocess.run docs say that if the env argument is not None, it will be used "instead of the default behavior of inheriting the current process’ environment". However, the environment should be preserved, only adding FLEX_TMP_DIR to it. Author: Javier Maestro Discussion: https://www.postgresql.org/message-id/flat/CABvji06GUpmrTqqiCr6_F9vRL2-JUSVAh8ChgWa6k47FUCvYmA%40mail.gmail.com --- src/tools/pgflex | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/tools/pgflex b/src/tools/pgflex index 3986b06874e75..b8d9aa0086fbb 100755 --- a/src/tools/pgflex +++ b/src/tools/pgflex @@ -48,7 +48,7 @@ os.chdir(args.privatedir) # contents. Set FLEX_TMP_DIR to the target private directory to avoid # that. That environment variable isn't consulted on other platforms, so we # don't even need to make this conditional. -env = {'FLEX_TMP_DIR': args.privatedir} +os.environ['FLEX_TMP_DIR'] = args.privatedir # build flex invocation command = [args.flex, '-o', args.output_file] @@ -58,7 +58,7 @@ command += args.flex_flags command += [args.input_file] # create .c file from .l file -sp = subprocess.run(command, env=env) +sp = subprocess.run(command) if sp.returncode != 0: sys.exit(sp.returncode) From c3e28e9fd936b83dbb6dfb5003b6221d98f8469c Mon Sep 17 00:00:00 2001 From: Andrew Dunstan Date: Mon, 30 Jun 2025 09:49:31 -0400 Subject: [PATCH 102/181] Avoid uninitialized value error in TAP tests' Cluster->psql If the method is called in scalar context and we didn't pass in a stderr handle, one won't be created. However, some error paths assume that it exists, so in this case create a dummy stderr to avoid the resulting perl error. Per gripe from Oleg Tselebrovskiy and adapted from his patch. 
Discussion: https://postgr.es/m/378eac5de4b8ecb5be7bcdf2db9d2c4d@postgrespro.ru --- src/test/perl/PostgreSQL/Test/Cluster.pm | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/test/perl/PostgreSQL/Test/Cluster.pm b/src/test/perl/PostgreSQL/Test/Cluster.pm index 49b2c86b29cbf..301766d2ed93c 100644 --- a/src/test/perl/PostgreSQL/Test/Cluster.pm +++ b/src/test/perl/PostgreSQL/Test/Cluster.pm @@ -2199,6 +2199,14 @@ sub psql $ret = $?; }; my $exc_save = $@; + + # we need a dummy $stderr from here on, if we didn't collect it + if (! defined $stderr) + { + my $errtxt = ""; + $stderr = \$errtxt; + } + if ($exc_save) { From f20a347e1a613cfc9053e7bc3d254608ae968386 Mon Sep 17 00:00:00 2001 From: Andres Freund Date: Mon, 30 Jun 2025 10:20:14 -0400 Subject: [PATCH 103/181] aio: Fix reference to outdated name Reported-by: Antonin Houska Author: Antonin Houska Discussion: https://postgr.es/m/5250.1751266701@localhost Backpatch-through: 18, where da7226993fd4 introduced this --- src/include/storage/aio_types.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/include/storage/aio_types.h b/src/include/storage/aio_types.h index 181833660778e..afee85c787b44 100644 --- a/src/include/storage/aio_types.h +++ b/src/include/storage/aio_types.h @@ -107,7 +107,7 @@ typedef struct PgAioResult /* of type PgAioResultStatus, see above */ uint32 status:PGAIO_RESULT_STATUS_BITS; - /* meaning defined by callback->error */ + /* meaning defined by callback->report */ uint32 error_data:PGAIO_RESULT_ERROR_BITS; int32 result; From bd09f024a1bbdd7a7e2ca944595a9d4b6c90fb83 Mon Sep 17 00:00:00 2001 From: Nathan Bossart Date: Mon, 30 Jun 2025 15:38:54 -0500 Subject: [PATCH 104/181] Add new OID alias type regdatabase. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This provides a convenient way to look up a database's OID. For example, the query SELECT * FROM pg_shdepend WHERE dbid = (SELECT oid FROM pg_database WHERE datname = current_database()); can now be simplified to SELECT * FROM pg_shdepend WHERE dbid = current_database()::regdatabase; Like the regrole type, regdatabase has cluster-wide scope, so we disallow regdatabase constants from appearing in stored expressions. Bumps catversion.
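From backend C code, the same lookup can be reached through the new type's input function via the fmgr; a hedged sketch (lookup_database_oid is a hypothetical helper, not part of this patch):

#include "postgres.h"
#include "fmgr.h"
#include "utils/fmgrprotos.h"

/* hypothetical helper, not part of this patch */
static Oid
lookup_database_oid(const char *name)
{
	/* regdatabasein raises an error if no such database exists */
	return DatumGetObjectId(
		DirectFunctionCall1(regdatabasein, CStringGetDatum(name)));
}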
Author: Ian Lawrence Barwick Reviewed-by: Greg Sabino Mullane Reviewed-by: Jian He Reviewed-by: Fabrízio de Royes Mello Reviewed-by: Tom Lane Discussion: https://postgr.es/m/aBpjJhyHpM2LYcG0%40nathan --- contrib/postgres_fdw/deparse.c | 6 + doc/src/sgml/datatype.sgml | 15 +- doc/src/sgml/func.sgml | 17 +++ doc/src/sgml/ref/pgupgrade.sgml | 3 +- src/backend/bootstrap/bootstrap.c | 2 + src/backend/catalog/dependency.c | 11 ++ src/backend/utils/adt/regproc.c | 118 +++++++++++++++ src/backend/utils/adt/selfuncs.c | 2 + src/backend/utils/cache/catcache.c | 1 + src/bin/pg_upgrade/check.c | 1 + src/include/catalog/catversion.h | 2 +- src/include/catalog/pg_cast.dat | 14 ++ src/include/catalog/pg_proc.dat | 17 +++ src/include/catalog/pg_type.dat | 5 + src/test/regress/expected/regproc.out | 174 ++++++++++++++++++++++ src/test/regress/expected/type_sanity.out | 1 + src/test/regress/sql/regproc.sql | 38 +++++ src/test/regress/sql/type_sanity.sql | 1 + 18 files changed, 424 insertions(+), 4 deletions(-) diff --git a/contrib/postgres_fdw/deparse.c b/contrib/postgres_fdw/deparse.c index d9970dd675336..9351835b5e4f8 100644 --- a/contrib/postgres_fdw/deparse.c +++ b/contrib/postgres_fdw/deparse.c @@ -39,6 +39,7 @@ #include "catalog/pg_aggregate.h" #include "catalog/pg_authid.h" #include "catalog/pg_collation.h" +#include "catalog/pg_database.h" #include "catalog/pg_namespace.h" #include "catalog/pg_operator.h" #include "catalog/pg_opfamily.h" @@ -455,6 +456,11 @@ foreign_expr_walker(Node *node, AuthIdRelationId, fpinfo)) return false; break; + case REGDATABASEOID: + if (!is_shippable(DatumGetObjectId(c->constvalue), + DatabaseRelationId, fpinfo)) + return false; + break; } } diff --git a/doc/src/sgml/datatype.sgml b/doc/src/sgml/datatype.sgml index 09309ba0390b7..49a7c180a803e 100644 --- a/doc/src/sgml/datatype.sgml +++ b/doc/src/sgml/datatype.sgml @@ -4737,6 +4737,10 @@ INSERT INTO mytable VALUES(-1); -- fails regconfig + + regdatabase + + regdictionary @@ -4878,6 +4882,13 @@ SELECT * FROM pg_attribute english + + regdatabase + pg_database + database name + template1 + + regdictionary pg_ts_dict @@ -5049,8 +5060,8 @@ WHERE ... be dropped without first removing the default expression. The alternative of nextval('my_seq'::text) does not create a dependency. - (regrole is an exception to this property. Constants of this - type are not allowed in stored expressions.) + (regdatabase and regrole are exceptions to this + property. Constants of these types are not allowed in stored expressions.) diff --git a/doc/src/sgml/func.sgml b/doc/src/sgml/func.sgml index 298791858be30..126b8cfbad81e 100644 --- a/doc/src/sgml/func.sgml +++ b/doc/src/sgml/func.sgml @@ -26750,6 +26750,23 @@ SELECT currval(pg_get_serial_sequence('sometable', 'id')); + + + + to_regdatabase + + to_regdatabase ( text ) + regdatabase + + + Translates a textual database name to its OID. A similar result is + obtained by casting the string to type regdatabase (see + ); however, this function will return + NULL rather than throwing an error if the name is + not found. + + + diff --git a/doc/src/sgml/ref/pgupgrade.sgml b/doc/src/sgml/ref/pgupgrade.sgml index aeeed297437e6..5ddf3a8ae9257 100644 --- a/doc/src/sgml/ref/pgupgrade.sgml +++ b/doc/src/sgml/ref/pgupgrade.sgml @@ -1110,7 +1110,8 @@ psql --username=postgres --file=script.sql postgres regproc regprocedure - (regclass, regrole, and regtype can be upgraded.) + (regclass, regdatabase, regrole, and + regtype can be upgraded.) 
diff --git a/src/backend/bootstrap/bootstrap.c b/src/backend/bootstrap/bootstrap.c index 6db864892d0dd..fc8638c1b61b6 100644 --- a/src/backend/bootstrap/bootstrap.c +++ b/src/backend/bootstrap/bootstrap.c @@ -109,6 +109,8 @@ static const struct typinfo TypInfo[] = { F_REGROLEIN, F_REGROLEOUT}, {"regnamespace", REGNAMESPACEOID, 0, 4, true, TYPALIGN_INT, TYPSTORAGE_PLAIN, InvalidOid, F_REGNAMESPACEIN, F_REGNAMESPACEOUT}, + {"regdatabase", REGDATABASEOID, 0, 4, true, TYPALIGN_INT, TYPSTORAGE_PLAIN, InvalidOid, + F_REGDATABASEIN, F_REGDATABASEOUT}, {"text", TEXTOID, 0, -1, false, TYPALIGN_INT, TYPSTORAGE_EXTENDED, DEFAULT_COLLATION_OID, F_TEXTIN, F_TEXTOUT}, {"oid", OIDOID, 0, 4, true, TYPALIGN_INT, TYPSTORAGE_PLAIN, InvalidOid, diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c index 18316a3968bcf..7dded634eb810 100644 --- a/src/backend/catalog/dependency.c +++ b/src/backend/catalog/dependency.c @@ -1850,6 +1850,17 @@ find_expr_references_walker(Node *node, errmsg("constant of the type %s cannot be used here", "regrole"))); break; + + /* + * Dependencies for regdatabase would have to be shared among + * all databases, so explicitly inhibit having dependencies. + */ + case REGDATABASEOID: + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("constant of the type %s cannot be used here", + "regdatabase"))); + break; } } return false; diff --git a/src/backend/utils/adt/regproc.c b/src/backend/utils/adt/regproc.c index 5ee608a2b3921..b8bbe95e82eb8 100644 --- a/src/backend/utils/adt/regproc.c +++ b/src/backend/utils/adt/regproc.c @@ -30,6 +30,7 @@ #include "catalog/pg_ts_config.h" #include "catalog/pg_ts_dict.h" #include "catalog/pg_type.h" +#include "commands/dbcommands.h" #include "lib/stringinfo.h" #include "mb/pg_wchar.h" #include "miscadmin.h" @@ -1763,6 +1764,123 @@ regnamespacesend(PG_FUNCTION_ARGS) return oidsend(fcinfo); } +/* + * regdatabasein - converts database name to database OID + * + * We also accept a numeric OID, for symmetry with the output routine. + * + * '-' signifies unknown (OID 0). In all other cases, the input must + * match an existing pg_database entry. + */ +Datum +regdatabasein(PG_FUNCTION_ARGS) +{ + char *db_name_or_oid = PG_GETARG_CSTRING(0); + Node *escontext = fcinfo->context; + Oid result; + List *names; + + /* Handle "-" or numeric OID */ + if (parseDashOrOid(db_name_or_oid, &result, escontext)) + PG_RETURN_OID(result); + + /* The rest of this wouldn't work in bootstrap mode */ + if (IsBootstrapProcessingMode()) + elog(ERROR, "regdatabase values must be OIDs in bootstrap mode"); + + /* Normal case: see if the name matches any pg_database entry. */ + names = stringToQualifiedNameList(db_name_or_oid, escontext); + if (names == NIL) + PG_RETURN_NULL(); + + if (list_length(names) != 1) + ereturn(escontext, (Datum) 0, + (errcode(ERRCODE_INVALID_NAME), + errmsg("invalid name syntax"))); + + result = get_database_oid(strVal(linitial(names)), true); + + if (!OidIsValid(result)) + ereturn(escontext, (Datum) 0, + (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("database \"%s\" does not exist", + strVal(linitial(names))))); + + PG_RETURN_OID(result); +} + +/* + * to_regdatabase - converts database name to database OID + * + * If the name is not found, we return NULL.
+ */ +Datum +to_regdatabase(PG_FUNCTION_ARGS) +{ + char *db_name = text_to_cstring(PG_GETARG_TEXT_PP(0)); + Datum result; + ErrorSaveContext escontext = {T_ErrorSaveContext}; + + if (!DirectInputFunctionCallSafe(regdatabasein, db_name, + InvalidOid, -1, + (Node *) &escontext, + &result)) + PG_RETURN_NULL(); + PG_RETURN_DATUM(result); +} + +/* + * regdatabaseout - converts database OID to database name + */ +Datum +regdatabaseout(PG_FUNCTION_ARGS) +{ + Oid dboid = PG_GETARG_OID(0); + char *result; + + if (dboid == InvalidOid) + { + result = pstrdup("-"); + PG_RETURN_CSTRING(result); + } + + result = get_database_name(dboid); + + if (result) + { + /* pstrdup is not really necessary, but it avoids a compiler warning */ + result = pstrdup(quote_identifier(result)); + } + else + { + /* If OID doesn't match any database, return it numerically */ + result = (char *) palloc(NAMEDATALEN); + snprintf(result, NAMEDATALEN, "%u", dboid); + } + + PG_RETURN_CSTRING(result); +} + +/* + * regdatabaserecv - converts external binary format to regdatabase + */ +Datum +regdatabaserecv(PG_FUNCTION_ARGS) +{ + /* Exactly the same as oidrecv, so share code */ + return oidrecv(fcinfo); +} + +/* + * regdatabasesend - converts regdatabase to binary format + */ +Datum +regdatabasesend(PG_FUNCTION_ARGS) +{ + /* Exactly the same as oidsend, so share code */ + return oidsend(fcinfo); +} + /* * text_regclass: convert text to regclass * diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c index 1e0f2de0336b0..ce6a626eba283 100644 --- a/src/backend/utils/adt/selfuncs.c +++ b/src/backend/utils/adt/selfuncs.c @@ -4619,6 +4619,7 @@ convert_to_scalar(Datum value, Oid valuetypid, Oid collid, double *scaledvalue, case REGDICTIONARYOID: case REGROLEOID: case REGNAMESPACEOID: + case REGDATABASEOID: *scaledvalue = convert_numeric_to_scalar(value, valuetypid, &failure); *scaledlobound = convert_numeric_to_scalar(lobound, boundstypid, @@ -4751,6 +4752,7 @@ convert_numeric_to_scalar(Datum value, Oid typid, bool *failure) case REGDICTIONARYOID: case REGROLEOID: case REGNAMESPACEOID: + case REGDATABASEOID: /* we can treat OIDs as integers... 
*/ return (double) DatumGetObjectId(value); } diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c index 657648996c235..d1b25214376ed 100644 --- a/src/backend/utils/cache/catcache.c +++ b/src/backend/utils/cache/catcache.c @@ -317,6 +317,7 @@ GetCCHashEqFuncs(Oid keytype, CCHashFN *hashfunc, RegProcedure *eqfunc, CCFastEq case REGDICTIONARYOID: case REGROLEOID: case REGNAMESPACEOID: + case REGDATABASEOID: *hashfunc = int4hashfast; *fasteqfunc = int4eqfast; *eqfunc = F_OIDEQ; diff --git a/src/bin/pg_upgrade/check.c b/src/bin/pg_upgrade/check.c index 81865cd3e4859..fb063a2de4286 100644 --- a/src/bin/pg_upgrade/check.c +++ b/src/bin/pg_upgrade/check.c @@ -168,6 +168,7 @@ static DataTypesUsageChecks data_types_usage_checks[] = /* pg_class.oid is preserved, so 'regclass' is OK */ " 'regcollation', " " 'regconfig', " + /* pg_database.oid is preserved, so 'regdatabase' is OK */ " 'regdictionary', " " 'regnamespace', " " 'regoper', " diff --git a/src/include/catalog/catversion.h b/src/include/catalog/catversion.h index 479629825f5b7..ff9ffd9d47498 100644 --- a/src/include/catalog/catversion.h +++ b/src/include/catalog/catversion.h @@ -57,6 +57,6 @@ */ /* yyyymmddN */ -#define CATALOG_VERSION_NO 202506291 +#define CATALOG_VERSION_NO 202506301 #endif diff --git a/src/include/catalog/pg_cast.dat b/src/include/catalog/pg_cast.dat index ab46be606f03d..fbfd669587f07 100644 --- a/src/include/catalog/pg_cast.dat +++ b/src/include/catalog/pg_cast.dat @@ -281,6 +281,20 @@ castcontext => 'a', castmethod => 'f' }, { castsource => 'regnamespace', casttarget => 'int4', castfunc => '0', castcontext => 'a', castmethod => 'b' }, +{ castsource => 'oid', casttarget => 'regdatabase', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'regdatabase', casttarget => 'oid', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'int8', casttarget => 'regdatabase', castfunc => 'oid', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'int2', casttarget => 'regdatabase', castfunc => 'int4(int2)', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'int4', casttarget => 'regdatabase', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'regdatabase', casttarget => 'int8', castfunc => 'int8(oid)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'regdatabase', casttarget => 'int4', castfunc => '0', + castcontext => 'a', castmethod => 'b' }, # String category { castsource => 'text', casttarget => 'bpchar', castfunc => '0', diff --git a/src/include/catalog/pg_proc.dat b/src/include/catalog/pg_proc.dat index fb4f7f50350ad..d4650947c63a5 100644 --- a/src/include/catalog/pg_proc.dat +++ b/src/include/catalog/pg_proc.dat @@ -7455,6 +7455,17 @@ prorettype => 'regnamespace', proargtypes => 'text', prosrc => 'to_regnamespace' }, +{ oid => '8321', descr => 'I/O', + proname => 'regdatabasein', provolatile => 's', prorettype => 'regdatabase', + proargtypes => 'cstring', prosrc => 'regdatabasein' }, +{ oid => '8322', descr => 'I/O', + proname => 'regdatabaseout', provolatile => 's', prorettype => 'cstring', + proargtypes => 'regdatabase', prosrc => 'regdatabaseout' }, +{ oid => '8323', descr => 'convert database name to regdatabase', + proname => 'to_regdatabase', provolatile => 's', + prorettype => 'regdatabase', proargtypes => 'text', + prosrc => 'to_regdatabase' }, + { oid => '6210', descr => 'test whether string is valid input for data type', proname => 'pg_input_is_valid', provolatile => 's', prorettype 
=> 'bool', proargtypes => 'text text', prosrc => 'pg_input_is_valid' }, @@ -8313,6 +8324,12 @@ { oid => '4088', descr => 'I/O', proname => 'regnamespacesend', prorettype => 'bytea', proargtypes => 'regnamespace', prosrc => 'regnamespacesend' }, +{ oid => '8324', descr => 'I/O', + proname => 'regdatabaserecv', prorettype => 'regdatabase', + proargtypes => 'internal', prosrc => 'regdatabaserecv' }, +{ oid => '8325', descr => 'I/O', + proname => 'regdatabasesend', prorettype => 'bytea', + proargtypes => 'regdatabase', prosrc => 'regdatabasesend' }, { oid => '2456', descr => 'I/O', proname => 'bit_recv', prorettype => 'bit', proargtypes => 'internal oid int4', prosrc => 'bit_recv' }, diff --git a/src/include/catalog/pg_type.dat b/src/include/catalog/pg_type.dat index 6dca77e0a22f7..29e4ffffc9806 100644 --- a/src/include/catalog/pg_type.dat +++ b/src/include/catalog/pg_type.dat @@ -399,6 +399,11 @@ typinput => 'regnamespacein', typoutput => 'regnamespaceout', typreceive => 'regnamespacerecv', typsend => 'regnamespacesend', typalign => 'i' }, +{ oid => '8326', array_type_oid => '8327', descr => 'registered database', + typname => 'regdatabase', typlen => '4', typbyval => 't', typcategory => 'N', + typinput => 'regdatabasein', typoutput => 'regdatabaseout', + typreceive => 'regdatabaserecv', typsend => 'regdatabasesend', + typalign => 'i' }, # uuid { oid => '2950', array_type_oid => '2951', descr => 'UUID', diff --git a/src/test/regress/expected/regproc.out b/src/test/regress/expected/regproc.out index 97b917502cabb..84c84aef4207f 100644 --- a/src/test/regress/expected/regproc.out +++ b/src/test/regress/expected/regproc.out @@ -192,6 +192,18 @@ SELECT regnamespace('"pg_catalog"'); pg_catalog (1 row) +SELECT regdatabase('template1'); + regdatabase +------------- + template1 +(1 row) + +SELECT regdatabase('"template1"'); + regdatabase +------------- + template1 +(1 row) + SELECT to_regrole('regress_regrole_test'); to_regrole ---------------------- @@ -216,6 +228,132 @@ SELECT to_regnamespace('"pg_catalog"'); pg_catalog (1 row) +SELECT to_regdatabase('template1'); + to_regdatabase +---------------- + template1 +(1 row) + +SELECT to_regdatabase('"template1"'); + to_regdatabase +---------------- + template1 +(1 row) + +-- special "single dash" case +SELECT regproc('-')::oid; + regproc +--------- + 0 +(1 row) + +SELECT regprocedure('-')::oid; + regprocedure +-------------- + 0 +(1 row) + +SELECT regclass('-')::oid; + regclass +---------- + 0 +(1 row) + +SELECT regcollation('-')::oid; + regcollation +-------------- + 0 +(1 row) + +SELECT regtype('-')::oid; + regtype +--------- + 0 +(1 row) + +SELECT regconfig('-')::oid; + regconfig +----------- + 0 +(1 row) + +SELECT regdictionary('-')::oid; + regdictionary +--------------- + 0 +(1 row) + +SELECT regrole('-')::oid; + regrole +--------- + 0 +(1 row) + +SELECT regnamespace('-')::oid; + regnamespace +-------------- + 0 +(1 row) + +SELECT regdatabase('-')::oid; + regdatabase +------------- + 0 +(1 row) + +SELECT to_regproc('-')::oid; + to_regproc +------------ + 0 +(1 row) + +SELECT to_regprocedure('-')::oid; + to_regprocedure +----------------- + 0 +(1 row) + +SELECT to_regclass('-')::oid; + to_regclass +------------- + 0 +(1 row) + +SELECT to_regcollation('-')::oid; + to_regcollation +----------------- + 0 +(1 row) + +SELECT to_regtype('-')::oid; + to_regtype +------------ + 0 +(1 row) + +SELECT to_regrole('-')::oid; + to_regrole +------------ + 0 +(1 row) + +SELECT to_regnamespace('-')::oid; + to_regnamespace +----------------- + 0 +(1 row) + +SELECT 
to_regdatabase('-')::oid; + to_regdatabase +---------------- + 0 +(1 row) + +-- constant cannot be used here +CREATE TABLE regrole_test (rolid OID DEFAULT 'regress_regrole_test'::regrole); +ERROR: constant of the type regrole cannot be used here +CREATE TABLE regdatabase_test (datid OID DEFAULT 'template1'::regdatabase); +ERROR: constant of the type regdatabase cannot be used here /* If objects don't exist, raise errors. */ DROP ROLE regress_regrole_test; -- without schemaname @@ -305,6 +443,18 @@ SELECT regnamespace('foo.bar'); ERROR: invalid name syntax LINE 1: SELECT regnamespace('foo.bar'); ^ +SELECT regdatabase('Nonexistent'); +ERROR: database "nonexistent" does not exist +LINE 1: SELECT regdatabase('Nonexistent'); + ^ +SELECT regdatabase('"Nonexistent"'); +ERROR: database "Nonexistent" does not exist +LINE 1: SELECT regdatabase('"Nonexistent"'); + ^ +SELECT regdatabase('foo.bar'); +ERROR: invalid name syntax +LINE 1: SELECT regdatabase('foo.bar'); + ^ /* If objects don't exist, return NULL with no error. */ -- without schemaname SELECT to_regoper('||//'); @@ -447,6 +597,24 @@ SELECT to_regnamespace('foo.bar'); (1 row) +SELECT to_regdatabase('Nonexistent'); + to_regdatabase +---------------- + +(1 row) + +SELECT to_regdatabase('"Nonexistent"'); + to_regdatabase +---------------- + +(1 row) + +SELECT to_regdatabase('foo.bar'); + to_regdatabase +---------------- + +(1 row) + -- Test to_regtypemod SELECT to_regtypemod('text'); to_regtypemod @@ -569,6 +737,12 @@ SELECT * FROM pg_input_error_info('no_such_type', 'regtype'); type "no_such_type" does not exist | | | 42704 (1 row) +SELECT * FROM pg_input_error_info('Nonexistent', 'regdatabase'); + message | detail | hint | sql_error_code +---------------------------------------+--------+------+---------------- + database "nonexistent" does not exist | | | 42704 +(1 row) + -- Some cases that should be soft errors, but are not yet SELECT * FROM pg_input_error_info('incorrect type name syntax', 'regtype'); ERROR: syntax error at or near "type" diff --git a/src/test/regress/expected/type_sanity.out b/src/test/regress/expected/type_sanity.out index dd0c52ab08b50..943e56506bf1b 100644 --- a/src/test/regress/expected/type_sanity.out +++ b/src/test/regress/expected/type_sanity.out @@ -711,6 +711,7 @@ CREATE TABLE tab_core_types AS SELECT 'regtype'::regtype type, 'pg_monitor'::regrole, 'pg_class'::regclass::oid, + 'template1'::regdatabase, '(1,1)'::tid, '2'::xid, '3'::cid, '10:20:10,14,15'::txid_snapshot, '10:20:10,14,15'::pg_snapshot, diff --git a/src/test/regress/sql/regproc.sql b/src/test/regress/sql/regproc.sql index 232289ac39823..cfec8f8c754a2 100644 --- a/src/test/regress/sql/regproc.sql +++ b/src/test/regress/sql/regproc.sql @@ -47,11 +47,42 @@ SELECT regrole('regress_regrole_test'); SELECT regrole('"regress_regrole_test"'); SELECT regnamespace('pg_catalog'); SELECT regnamespace('"pg_catalog"'); +SELECT regdatabase('template1'); +SELECT regdatabase('"template1"'); SELECT to_regrole('regress_regrole_test'); SELECT to_regrole('"regress_regrole_test"'); SELECT to_regnamespace('pg_catalog'); SELECT to_regnamespace('"pg_catalog"'); +SELECT to_regdatabase('template1'); +SELECT to_regdatabase('"template1"'); + +-- special "single dash" case + +SELECT regproc('-')::oid; +SELECT regprocedure('-')::oid; +SELECT regclass('-')::oid; +SELECT regcollation('-')::oid; +SELECT regtype('-')::oid; +SELECT regconfig('-')::oid; +SELECT regdictionary('-')::oid; +SELECT regrole('-')::oid; +SELECT regnamespace('-')::oid; +SELECT regdatabase('-')::oid; + +SELECT 
to_regproc('-')::oid; +SELECT to_regprocedure('-')::oid; +SELECT to_regclass('-')::oid; +SELECT to_regcollation('-')::oid; +SELECT to_regtype('-')::oid; +SELECT to_regrole('-')::oid; +SELECT to_regnamespace('-')::oid; +SELECT to_regdatabase('-')::oid; + +-- constant cannot be used here + +CREATE TABLE regrole_test (rolid OID DEFAULT 'regress_regrole_test'::regrole); +CREATE TABLE regdatabase_test (datid OID DEFAULT 'template1'::regdatabase); /* If objects don't exist, raise errors. */ @@ -88,6 +119,9 @@ SELECT regrole('foo.bar'); SELECT regnamespace('Nonexistent'); SELECT regnamespace('"Nonexistent"'); SELECT regnamespace('foo.bar'); +SELECT regdatabase('Nonexistent'); +SELECT regdatabase('"Nonexistent"'); +SELECT regdatabase('foo.bar'); /* If objects don't exist, return NULL with no error. */ @@ -122,6 +156,9 @@ SELECT to_regrole('foo.bar'); SELECT to_regnamespace('Nonexistent'); SELECT to_regnamespace('"Nonexistent"'); SELECT to_regnamespace('foo.bar'); +SELECT to_regdatabase('Nonexistent'); +SELECT to_regdatabase('"Nonexistent"'); +SELECT to_regdatabase('foo.bar'); -- Test to_regtypemod SELECT to_regtypemod('text'); @@ -147,6 +184,7 @@ SELECT * FROM pg_input_error_info('ng_catalog.abs(numeric)', 'regprocedure'); SELECT * FROM pg_input_error_info('ng_catalog.abs(numeric', 'regprocedure'); SELECT * FROM pg_input_error_info('regress_regrole_test', 'regrole'); SELECT * FROM pg_input_error_info('no_such_type', 'regtype'); +SELECT * FROM pg_input_error_info('Nonexistent', 'regdatabase'); -- Some cases that should be soft errors, but are not yet SELECT * FROM pg_input_error_info('incorrect type name syntax', 'regtype'); diff --git a/src/test/regress/sql/type_sanity.sql b/src/test/regress/sql/type_sanity.sql index c94dd83d3061c..df795759bb4cb 100644 --- a/src/test/regress/sql/type_sanity.sql +++ b/src/test/regress/sql/type_sanity.sql @@ -539,6 +539,7 @@ CREATE TABLE tab_core_types AS SELECT 'regtype'::regtype type, 'pg_monitor'::regrole, 'pg_class'::regclass::oid, + 'template1'::regdatabase, '(1,1)'::tid, '2'::xid, '3'::cid, '10:20:10,14,15'::txid_snapshot, '10:20:10,14,15'::pg_snapshot, From 999f172ded2bae7efbd8bf1dd6f823095395493f Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Mon, 30 Jun 2025 16:59:36 -0400 Subject: [PATCH 105/181] De-reserve keywords EXECUTE and STRICT in PL/pgSQL. On close inspection, there does not seem to be a strong reason why these should be fully-reserved keywords. I guess they just escaped consideration in previous attempts to minimize PL/pgSQL's list of reserved words. 
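The places in the grammar that still need to recognize these words (for
example RETURN QUERY EXECUTE, OPEN ... FOR EXECUTE, and INTO STRICT) now
go through tok_is_keyword(), which accepts the word whether the scanner
classified it as the keyword token or as a plain identifier.  A hedged
sketch of that idea follows; the T_WORD branch and the field names used
here are illustrative assumptions, not the committed helper:

	static bool
	tok_is_keyword(int token, YYSTYPE *lval,
				   int kw_token, const char *kw_str)
	{
		if (token == kw_token)
			return true;		/* scanner saw the keyword itself */
		/* assumed: an unreserved word can come back as an identifier */
		if (token == T_WORD && !lval->word.quoted &&
			strcmp(lval->word.ident, kw_str) == 0)
			return true;
		return false;
	}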
Author: Tom Lane Reviewed-by: Pavel Stehule Discussion: https://postgr.es/m/2185258.1745617445@sss.pgh.pa.us --- src/pl/plpgsql/src/expected/plpgsql_misc.out | 14 ++++++++++++++ src/pl/plpgsql/src/pl_gram.y | 13 +++++++++---- src/pl/plpgsql/src/pl_reserved_kwlist.h | 2 -- src/pl/plpgsql/src/pl_scanner.c | 2 +- src/pl/plpgsql/src/pl_unreserved_kwlist.h | 2 ++ src/pl/plpgsql/src/sql/plpgsql_misc.sql | 13 +++++++++++++ 6 files changed, 39 insertions(+), 7 deletions(-) diff --git a/src/pl/plpgsql/src/expected/plpgsql_misc.out b/src/pl/plpgsql/src/expected/plpgsql_misc.out index a6511df08ec9f..7bb4f432e7daf 100644 --- a/src/pl/plpgsql/src/expected/plpgsql_misc.out +++ b/src/pl/plpgsql/src/expected/plpgsql_misc.out @@ -65,3 +65,17 @@ do $$ declare x public.foo%rowtype; begin end $$; ERROR: relation "public.foo" does not exist CONTEXT: compilation of PL/pgSQL function "inline_code_block" near line 1 do $$ declare x public.misc_table%rowtype; begin end $$; +-- Test handling of an unreserved keyword as a variable name +-- and record field name. +do $$ +declare + execute int; + r record; +begin + execute := 10; + raise notice 'execute = %', execute; + select 1 as strict into r; + raise notice 'r.strict = %', r.strict; +end $$; +NOTICE: execute = 10 +NOTICE: r.strict = 1 diff --git a/src/pl/plpgsql/src/pl_gram.y b/src/pl/plpgsql/src/pl_gram.y index 5612e66d0239d..7b672ea5179a6 100644 --- a/src/pl/plpgsql/src/pl_gram.y +++ b/src/pl/plpgsql/src/pl_gram.y @@ -1368,7 +1368,8 @@ for_control : for_variable K_IN int tok = yylex(&yylval, &yylloc, yyscanner); int tokloc = yylloc; - if (tok == K_EXECUTE) + if (tok_is_keyword(tok, &yylval, + K_EXECUTE, "execute")) { /* EXECUTE means it's a dynamic FOR loop */ PLpgSQL_stmt_dynfors *new; @@ -2135,7 +2136,8 @@ stmt_open : K_OPEN cursor_variable yyerror(&yylloc, NULL, yyscanner, "syntax error, expected \"FOR\""); tok = yylex(&yylval, &yylloc, yyscanner); - if (tok == K_EXECUTE) + if (tok_is_keyword(tok, &yylval, + K_EXECUTE, "execute")) { int endtoken; @@ -2536,6 +2538,7 @@ unreserved_keyword : | K_ERRCODE | K_ERROR | K_EXCEPTION + | K_EXECUTE | K_EXIT | K_FETCH | K_FIRST @@ -2581,6 +2584,7 @@ unreserved_keyword : | K_SLICE | K_SQLSTATE | K_STACKED + | K_STRICT | K_TABLE | K_TABLE_NAME | K_TYPE @@ -3514,7 +3518,8 @@ make_return_query_stmt(int location, YYSTYPE *yylvalp, YYLTYPE *yyllocp, yyscan_ new->stmtid = ++plpgsql_curr_compile->nstatements; /* check for RETURN QUERY EXECUTE */ - if ((tok = yylex(yylvalp, yyllocp, yyscanner)) != K_EXECUTE) + tok = yylex(yylvalp, yyllocp, yyscanner); + if (!tok_is_keyword(tok, yylvalp, K_EXECUTE, "execute")) { /* ordinary static query */ plpgsql_push_back_token(tok, yylvalp, yyllocp, yyscanner); @@ -3597,7 +3602,7 @@ read_into_target(PLpgSQL_variable **target, bool *strict, YYSTYPE *yylvalp, YYLT *strict = false; tok = yylex(yylvalp, yyllocp, yyscanner); - if (strict && tok == K_STRICT) + if (strict && tok_is_keyword(tok, yylvalp, K_STRICT, "strict")) { *strict = true; tok = yylex(yylvalp, yyllocp, yyscanner); diff --git a/src/pl/plpgsql/src/pl_reserved_kwlist.h b/src/pl/plpgsql/src/pl_reserved_kwlist.h index ce7b0c9d33121..f3ef2cbd8d7dc 100644 --- a/src/pl/plpgsql/src/pl_reserved_kwlist.h +++ b/src/pl/plpgsql/src/pl_reserved_kwlist.h @@ -33,7 +33,6 @@ PG_KEYWORD("case", K_CASE) PG_KEYWORD("declare", K_DECLARE) PG_KEYWORD("else", K_ELSE) PG_KEYWORD("end", K_END) -PG_KEYWORD("execute", K_EXECUTE) PG_KEYWORD("for", K_FOR) PG_KEYWORD("foreach", K_FOREACH) PG_KEYWORD("from", K_FROM) @@ -44,7 +43,6 @@ PG_KEYWORD("loop", K_LOOP) 
PG_KEYWORD("not", K_NOT) PG_KEYWORD("null", K_NULL) PG_KEYWORD("or", K_OR) -PG_KEYWORD("strict", K_STRICT) PG_KEYWORD("then", K_THEN) PG_KEYWORD("to", K_TO) PG_KEYWORD("using", K_USING) diff --git a/src/pl/plpgsql/src/pl_scanner.c b/src/pl/plpgsql/src/pl_scanner.c index d08187dafcb4c..19825e5c71810 100644 --- a/src/pl/plpgsql/src/pl_scanner.c +++ b/src/pl/plpgsql/src/pl_scanner.c @@ -53,7 +53,7 @@ IdentifierLookup plpgsql_IdentifierLookup = IDENTIFIER_LOOKUP_NORMAL; * We try to avoid reserving more keywords than we have to; but there's * little point in not reserving a word if it's reserved in the core grammar. * Currently, the following words are reserved here but not in the core: - * BEGIN BY DECLARE EXECUTE FOREACH IF LOOP STRICT WHILE + * BEGIN BY DECLARE FOREACH IF LOOP WHILE */ /* ScanKeywordList lookup data for PL/pgSQL keywords */ diff --git a/src/pl/plpgsql/src/pl_unreserved_kwlist.h b/src/pl/plpgsql/src/pl_unreserved_kwlist.h index 98f99ec470cf4..b48c5a645ffaf 100644 --- a/src/pl/plpgsql/src/pl_unreserved_kwlist.h +++ b/src/pl/plpgsql/src/pl_unreserved_kwlist.h @@ -58,6 +58,7 @@ PG_KEYWORD("elsif", K_ELSIF) PG_KEYWORD("errcode", K_ERRCODE) PG_KEYWORD("error", K_ERROR) PG_KEYWORD("exception", K_EXCEPTION) +PG_KEYWORD("execute", K_EXECUTE) PG_KEYWORD("exit", K_EXIT) PG_KEYWORD("fetch", K_FETCH) PG_KEYWORD("first", K_FIRST) @@ -103,6 +104,7 @@ PG_KEYWORD("scroll", K_SCROLL) PG_KEYWORD("slice", K_SLICE) PG_KEYWORD("sqlstate", K_SQLSTATE) PG_KEYWORD("stacked", K_STACKED) +PG_KEYWORD("strict", K_STRICT) PG_KEYWORD("table", K_TABLE) PG_KEYWORD("table_name", K_TABLE_NAME) PG_KEYWORD("type", K_TYPE) diff --git a/src/pl/plpgsql/src/sql/plpgsql_misc.sql b/src/pl/plpgsql/src/sql/plpgsql_misc.sql index d3a7f703a758d..103a20bf8820c 100644 --- a/src/pl/plpgsql/src/sql/plpgsql_misc.sql +++ b/src/pl/plpgsql/src/sql/plpgsql_misc.sql @@ -37,3 +37,16 @@ do $$ declare x foo.bar%rowtype; begin end $$; do $$ declare x foo.bar.baz%rowtype; begin end $$; do $$ declare x public.foo%rowtype; begin end $$; do $$ declare x public.misc_table%rowtype; begin end $$; + +-- Test handling of an unreserved keyword as a variable name +-- and record field name. +do $$ +declare + execute int; + r record; +begin + execute := 10; + raise notice 'execute = %', execute; + select 1 as strict into r; + raise notice 'r.strict = %', r.strict; +end $$; From 0836683a8977cac07d8cbdd0462f8a3e7e32565f Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Mon, 30 Jun 2025 17:06:39 -0400 Subject: [PATCH 106/181] Improve error report for PL/pgSQL reserved word used as a field name. The current code in resolve_column_ref (dating to commits 01f7d2990 and fe24d7816) believes that not finding a RECFIELD datum is a can't-happen case, in consequence of which I didn't spend a whole lot of time considering what to do if it did happen. But it turns out that it *can* happen if the would-be field name is a fully-reserved PL/pgSQL keyword. Change the error message to describe that situation, and add a test case demonstrating it. This might need further refinement if anyone can find other ways to trigger a failure here; but without an example it's not clear what other error to throw. 
Author: Tom Lane Reviewed-by: Pavel Stehule Discussion: https://postgr.es/m/2185258.1745617445@sss.pgh.pa.us --- src/pl/plpgsql/src/expected/plpgsql_misc.out | 22 ++++++++++++++++++++ src/pl/plpgsql/src/pl_comp.c | 19 ++++++++++------- src/pl/plpgsql/src/sql/plpgsql_misc.sql | 16 ++++++++++++++ 3 files changed, 50 insertions(+), 7 deletions(-) diff --git a/src/pl/plpgsql/src/expected/plpgsql_misc.out b/src/pl/plpgsql/src/expected/plpgsql_misc.out index 7bb4f432e7daf..ffb377f5f54ff 100644 --- a/src/pl/plpgsql/src/expected/plpgsql_misc.out +++ b/src/pl/plpgsql/src/expected/plpgsql_misc.out @@ -79,3 +79,25 @@ begin end $$; NOTICE: execute = 10 NOTICE: r.strict = 1 +-- Test handling of a reserved keyword as a record field name. +do $$ declare r record; +begin + select 1 as x, 2 as foreach into r; + raise notice 'r.x = %', r.x; + raise notice 'r.foreach = %', r.foreach; -- fails +end $$; +NOTICE: r.x = 1 +ERROR: field name "foreach" is a reserved key word +LINE 1: r.foreach + ^ +HINT: Use double quotes to quote it. +QUERY: r.foreach +CONTEXT: PL/pgSQL function inline_code_block line 5 at RAISE +do $$ declare r record; +begin + select 1 as x, 2 as foreach into r; + raise notice 'r.x = %', r.x; + raise notice 'r."foreach" = %', r."foreach"; -- ok +end $$; +NOTICE: r.x = 1 +NOTICE: r."foreach" = 2 diff --git a/src/pl/plpgsql/src/pl_comp.c b/src/pl/plpgsql/src/pl_comp.c index b80c59447fb57..ee961425a5b7e 100644 --- a/src/pl/plpgsql/src/pl_comp.c +++ b/src/pl/plpgsql/src/pl_comp.c @@ -1211,17 +1211,22 @@ resolve_column_ref(ParseState *pstate, PLpgSQL_expr *expr, } /* - * We should not get here, because a RECFIELD datum should - * have been built at parse time for every possible qualified - * reference to fields of this record. But if we do, handle - * it like field-not-found: throw error or return NULL. + * Ideally we'd never get here, because a RECFIELD datum + * should have been built at parse time for every qualified + * reference to a field of this record that appears in the + * source text. However, plpgsql_yylex will not build such a + * datum unless the field name lexes as token type IDENT. + * Hence, if the would-be field name is a PL/pgSQL reserved + * word, we lose. Assume that that's what happened and tell + * the user to quote it, unless the caller prefers we just + * return NULL. */ if (error_if_no_field) ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("record \"%s\" has no field \"%s\"", - (nnames_field == 1) ? name1 : name2, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("field name \"%s\" is a reserved key word", colname), + errhint("Use double quotes to quote it."), parser_errposition(pstate, cref->location))); } break; diff --git a/src/pl/plpgsql/src/sql/plpgsql_misc.sql b/src/pl/plpgsql/src/sql/plpgsql_misc.sql index 103a20bf8820c..0bc39fcf3257c 100644 --- a/src/pl/plpgsql/src/sql/plpgsql_misc.sql +++ b/src/pl/plpgsql/src/sql/plpgsql_misc.sql @@ -50,3 +50,19 @@ begin select 1 as strict into r; raise notice 'r.strict = %', r.strict; end $$; + +-- Test handling of a reserved keyword as a record field name. 
+
+do $$ declare r record;
+begin
+  select 1 as x, 2 as foreach into r;
+  raise notice 'r.x = %', r.x;
+  raise notice 'r.foreach = %', r.foreach; -- fails
+end $$;
+
+do $$ declare r record;
+begin
+  select 1 as x, 2 as foreach into r;
+  raise notice 'r.x = %', r.x;
+  raise notice 'r."foreach" = %', r."foreach"; -- ok
+end $$;

From 2e947217474c15c7fd9011d1ab2b0d4657b3eae2 Mon Sep 17 00:00:00 2001
From: Michael Paquier
Date: Tue, 1 Jul 2025 08:57:05 +0900
Subject: [PATCH 107/181] Improve error handling of libxml2 calls in xml.c

This commit fixes some defects in the backend's xml.c, found upon
inspection of the internals of libxml2:
- xmlEncodeSpecialChars() can fail on malloc(), returning NULL back to
the caller.  xmltext() assumed that this could never happen.  As in
other code paths, a TRY/CATCH block is added there; it also covers the
fact that cstring_to_text_with_len() could fail a memory allocation,
in which case the backend would fail to free the buffer allocated by
xmlEncodeSpecialChars().
- Some libxml2 routines called in xmlelement() can return NULL, like
xmlAddChildList() or xmlTextWriterStartElement().  Dedicated errors are
added for them.
- xml_xmlnodetoxmltype() missed that xmlXPathCastNodeToString() can
fail on an allocation failure.  In this case, the call can just be
moved to the existing TRY/CATCH block.

All these code paths would cause the server to crash.  As this is
unlikely to be a problem in practice, no backpatch is done.  Jim and I
caught these defects; we are not sure who scored the most.

The contrib module xml2/ has similar defects, which will be addressed
in a separate change.

Reported-by: Jim Jones
Reviewed-by: Jim Jones
Discussion: https://postgr.es/m/aEEingzOta_S_Nu7@paquier.xyz
---
 src/backend/utils/adt/xml.c | 78 +++++++++++++++++++++++++++++--------
 1 file changed, 62 insertions(+), 16 deletions(-)

diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c
index a4150bff2eaea..2bd39b6ac4b09 100644
--- a/src/backend/utils/adt/xml.c
+++ b/src/backend/utils/adt/xml.c
@@ -529,14 +529,36 @@ xmltext(PG_FUNCTION_ARGS)
 #ifdef USE_LIBXML
 	text	   *arg = PG_GETARG_TEXT_PP(0);
 	text	   *result;
-	xmlChar    *xmlbuf = NULL;
+	volatile xmlChar *xmlbuf = NULL;
+	PgXmlErrorContext *xmlerrcxt;
+
+	/* We gotta spin up some error handling. */
+	xmlerrcxt = pg_xml_init(PG_XML_STRICTNESS_ALL);
 
-	xmlbuf = xmlEncodeSpecialChars(NULL, xml_text2xmlChar(arg));
+	PG_TRY();
+	{
+		xmlbuf = xmlEncodeSpecialChars(NULL, xml_text2xmlChar(arg));
 
-	Assert(xmlbuf);
+		if (xmlbuf == NULL || xmlerrcxt->err_occurred)
+			xml_ereport(xmlerrcxt, ERROR, ERRCODE_OUT_OF_MEMORY,
+						"could not allocate xmlChar");
+
+		result = cstring_to_text_with_len((const char *) xmlbuf,
+										  xmlStrlen((const xmlChar *) xmlbuf));
+	}
+	PG_CATCH();
+	{
+		if (xmlbuf)
+			xmlFree((xmlChar *) xmlbuf);
+
+		pg_xml_done(xmlerrcxt, true);
+		PG_RE_THROW();
+	}
+	PG_END_TRY();
+
+	xmlFree((xmlChar *) xmlbuf);
+	pg_xml_done(xmlerrcxt, false);
 
-	result = cstring_to_text_with_len((const char *) xmlbuf, xmlStrlen(xmlbuf));
-	xmlFree(xmlbuf);
 	PG_RETURN_XML_P(result);
 #else
 	NO_XML_SUPPORT();
@@ -770,7 +792,10 @@ xmltotext_with_options(xmltype *data, XmlOptionType xmloption_arg, bool indent)
 		if (oldroot != NULL)
 			xmlFreeNode(oldroot);
 
-		xmlAddChildList(root, content_nodes);
+		if (xmlAddChildList(root, content_nodes) == NULL ||
+			xmlerrcxt->err_occurred)
+			xml_ereport(xmlerrcxt, ERROR, ERRCODE_INTERNAL_ERROR,
+						"could not append xml node list");
 
 		/*
 		 * We use this node to insert newlines in the dump.  Note: in at
@@ -931,7 +956,10 @@ xmlelement(XmlExpr *xexpr,
 		xml_ereport(xmlerrcxt, ERROR, ERRCODE_OUT_OF_MEMORY,
 					"could not allocate xmlTextWriter");
 
-	xmlTextWriterStartElement(writer, (xmlChar *) xexpr->name);
+	if (xmlTextWriterStartElement(writer, (xmlChar *) xexpr->name) < 0 ||
+		xmlerrcxt->err_occurred)
+		xml_ereport(xmlerrcxt, ERROR, ERRCODE_INTERNAL_ERROR,
+					"could not start xml element");
 
 	forboth(arg, named_arg_strings, narg, xexpr->arg_names)
 	{
@@ -939,19 +967,30 @@ xmlelement(XmlExpr *xexpr,
 		char	   *argname = strVal(lfirst(narg));
 
 		if (str)
-			xmlTextWriterWriteAttribute(writer,
-										(xmlChar *) argname,
-										(xmlChar *) str);
+		{
+			if (xmlTextWriterWriteAttribute(writer,
+											(xmlChar *) argname,
+											(xmlChar *) str) < 0 ||
+				xmlerrcxt->err_occurred)
+				xml_ereport(xmlerrcxt, ERROR, ERRCODE_INTERNAL_ERROR,
+							"could not write xml attribute");
+		}
 	}
 
 	foreach(arg, arg_strings)
 	{
 		char	   *str = (char *) lfirst(arg);
 
-		xmlTextWriterWriteRaw(writer, (xmlChar *) str);
+		if (xmlTextWriterWriteRaw(writer, (xmlChar *) str) < 0 ||
+			xmlerrcxt->err_occurred)
+			xml_ereport(xmlerrcxt, ERROR, ERRCODE_INTERNAL_ERROR,
+						"could not write raw xml text");
 	}
 
-	xmlTextWriterEndElement(writer);
+	if (xmlTextWriterEndElement(writer) < 0 ||
+		xmlerrcxt->err_occurred)
+		xml_ereport(xmlerrcxt, ERROR, ERRCODE_INTERNAL_ERROR,
+					"could not end xml element");
 
 	/* we MUST do this now to flush data out to the buffer ... */
 	xmlFreeTextWriter(writer);
@@ -4220,20 +4259,27 @@ xml_xmlnodetoxmltype(xmlNodePtr cur, PgXmlErrorContext *xmlerrcxt)
 	}
 	else
 	{
-		xmlChar    *str;
+		volatile xmlChar *str = NULL;
 
-		str = xmlXPathCastNodeToString(cur);
 		PG_TRY();
 		{
+			char	   *escaped;
+
+			str = xmlXPathCastNodeToString(cur);
+			if (str == NULL || xmlerrcxt->err_occurred)
+				xml_ereport(xmlerrcxt, ERROR, ERRCODE_OUT_OF_MEMORY,
+							"could not allocate xmlChar");
+
 			/* Here we rely on XML having the same representation as TEXT */
-			char	   *escaped = escape_xml((char *) str);
+			escaped = escape_xml((char *) str);
 
 			result = (xmltype *) cstring_to_text(escaped);
 			pfree(escaped);
 		}
 		PG_FINALLY();
 		{
-			xmlFree(str);
+			if (str)
+				xmlFree((xmlChar *) str);
 		}
 		PG_END_TRY();
 	}

From a3df0d43d93789777fd06bb7ffa8cdc1f06d63c3 Mon Sep 17 00:00:00 2001
From: Michael Paquier
Date: Tue, 1 Jul 2025 09:41:42 +0900
Subject: [PATCH 108/181] Fix typo in system_views.sql's definition of pg_stat_activity

backend_xmin used a lowercase 's' instead of an uppercase 'S' like the
other attributes.  This is harmless, but let's be consistent.

Issue introduced in dd1a3bccca24.

Author: Daisuke Higuchi
Discussion: https://postgr.es/m/CAEVT6c8M39cqWje-df39wWr0KWcDgGKd5fMvQo84zvCXKoEL9Q@mail.gmail.com
---
 src/backend/catalog/system_views.sql | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/backend/catalog/system_views.sql b/src/backend/catalog/system_views.sql
index 08f780a2e6382..e5dbbe61b811a 100644
--- a/src/backend/catalog/system_views.sql
+++ b/src/backend/catalog/system_views.sql
@@ -895,7 +895,7 @@ CREATE VIEW pg_stat_activity AS
             S.wait_event,
             S.state,
             S.backend_xid,
-            s.backend_xmin,
+            S.backend_xmin,
             S.query_id,
             S.query,
             S.backend_type

From c67989789cec3953effca4e01dff834abff9116a Mon Sep 17 00:00:00 2001
From: Amit Langote
Date: Tue, 1 Jul 2025 13:13:48 +0900
Subject: [PATCH 109/181] Fix typos in comments

Commit 19d8e2308bc added enum values with the prefix TU_, but a few
comments still referred to TUUI_, which was used in development
versions of the patches committed as 19d8e2308bc.
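For reference, a hedged sketch of the enum in question; the
authoritative definition and its comments live in
src/include/access/tableam.h, and the comments below are paraphrases:

	typedef enum TU_UpdateIndexes
	{
		TU_None,		/* no index updates are required */
		TU_All,			/* all indexes must be updated */
		TU_Summarizing	/* only summarizing indexes need updates */
	} TU_UpdateIndexes;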
Author: Yugo Nagata
Discussion: https://postgr.es/m/20250701110216.8ac8a9e4c6f607f1d954f44a@sraoss.co.jp
Backpatch-through: 16
---
 src/backend/executor/execIndexing.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/backend/executor/execIndexing.c b/src/backend/executor/execIndexing.c
index bdf862b24062e..ca33a854278ed 100644
--- a/src/backend/executor/execIndexing.c
+++ b/src/backend/executor/execIndexing.c
@@ -279,7 +279,7 @@ ExecCloseIndices(ResultRelInfo *resultRelInfo)
 	 * executor is performing an UPDATE that could not use an
 	 * optimization like heapam's HOT (in more general terms a
 	 * call to table_tuple_update() took place and set
-	 * 'update_indexes' to TUUI_All).  Receiving this hint makes
+	 * 'update_indexes' to TU_All).  Receiving this hint makes
 	 * us consider if we should pass down the 'indexUnchanged'
 	 * hint in turn.  That's something that we figure out for
 	 * each index_insert() call iff 'update' is true.
@@ -290,7 +290,7 @@ ExecCloseIndices(ResultRelInfo *resultRelInfo)
 	 * HOT has been applied and any updated columns are indexed
 	 * only by summarizing indexes (or in more general terms a
 	 * call to table_tuple_update() took place and set
-	 * 'update_indexes' to TUUI_Summarizing).  We can (and must)
+	 * 'update_indexes' to TU_Summarizing).  We can (and must)
 	 * therefore only update the indexes that have
 	 * 'amsummarizing' = true.
 	 *

From 732061150b004385810e522f8629f5bf91d977b7 Mon Sep 17 00:00:00 2001
From: Michael Paquier
Date: Tue, 1 Jul 2025 15:48:32 +0900
Subject: [PATCH 110/181] xml2: Improve error handling of libxml2 calls

The contrib module xml2/ has always been fuzzy with the cleanup of the
memory allocated by the calls internal to libxml2, even though there
are APIs in place giving a lot of control over the error behavior, all
located in the backend's xml.c.

The code paths fixed in this commit address multiple defects, while
sanitizing the code:
- In xpath.c, several allocations are done by libxml2 for
xpath_workspace, whose memory cleanup could be overlooked as it relied
on a single TRY/CATCH block inside pgxml_xpath().  workspace->res is
allocated by libxml2, and may end up never being freed upon a failure
outside of a TRY area.  This code is refactored so that the TRY/CATCH
block of pgxml_xpath() is moved one level higher into its callers,
which are responsible for cleaning up the contents of a workspace on
failure.  cleanup_workspace() now requires a volatile workspace,
enforcing as a rule that a TRY/CATCH block be used.
- Several calls, like xmlStrdup(), xmlXPathNewContext(),
xmlXPathCtxtCompile(), etc. can return NULL on failure (for most of
them, allocation failures).  These calls did not check for failures,
or missed that pg_xml_error_occurred() should be called to check
whether an error is already on the stack.
- Some memory allocated by libxml2 calls was freed in an incorrect
way, "resstr" in xslt_process() being one example.

The class of errors fixed here covers problems that are unlikely to
happen in practice, so no backpatch is done.  The changes ended up
being rather invasive, so it is perhaps not a bad thing to be
conservative and to keep these changes only on HEAD anyway.
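Every caller of pgxml_xpath() now follows the same shape.  An abridged
sketch of the pattern applied below (not a verbatim excerpt of any one
caller):

	volatile xpath_workspace *workspace = NULL;
	PgXmlErrorContext *xmlerrcxt;

	xmlerrcxt = pgxml_parser_init(PG_XML_STRICTNESS_LEGACY);
	PG_TRY();
	{
		workspace = pgxml_xpath(document, xpath, xmlerrcxt);
		/* ... consume workspace->res here ... */
	}
	PG_CATCH();
	{
		if (workspace)
			cleanup_workspace(workspace);
		pg_xml_done(xmlerrcxt, true);
		PG_RE_THROW();
	}
	PG_END_TRY();

	cleanup_workspace(workspace);
	pg_xml_done(xmlerrcxt, false);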
Author: Michael Paquier Reported-by: Karavaev Alexey Reviewed-by: Jim Jones Reviewed-by: Tom Lane Discussion: https://postgr.es/m/18943-2f2a04ab03904598@postgresql.org --- contrib/xml2/xpath.c | 421 ++++++++++++++++++++++++++------------- contrib/xml2/xslt_proc.c | 26 ++- 2 files changed, 296 insertions(+), 151 deletions(-) diff --git a/contrib/xml2/xpath.c b/contrib/xml2/xpath.c index 23d3f332dbaa7..3f733405ec6db 100644 --- a/contrib/xml2/xpath.c +++ b/contrib/xml2/xpath.c @@ -51,10 +51,10 @@ static text *pgxml_result_to_text(xmlXPathObjectPtr res, xmlChar *toptag, static xmlChar *pgxml_texttoxmlchar(text *textstring); -static xmlXPathObjectPtr pgxml_xpath(text *document, xmlChar *xpath, - xpath_workspace *workspace); +static xpath_workspace *pgxml_xpath(text *document, xmlChar *xpath, + PgXmlErrorContext *xmlerrcxt); -static void cleanup_workspace(xpath_workspace *workspace); +static void cleanup_workspace(volatile xpath_workspace *workspace); /* @@ -89,18 +89,40 @@ xml_encode_special_chars(PG_FUNCTION_ARGS) { text *tin = PG_GETARG_TEXT_PP(0); text *tout; - xmlChar *ts, - *tt; + volatile xmlChar *tt = NULL; + PgXmlErrorContext *xmlerrcxt; + + xmlerrcxt = pg_xml_init(PG_XML_STRICTNESS_ALL); + + PG_TRY(); + { + xmlChar *ts; - ts = pgxml_texttoxmlchar(tin); + ts = pgxml_texttoxmlchar(tin); + + tt = xmlEncodeSpecialChars(NULL, ts); + if (tt == NULL || pg_xml_error_occurred(xmlerrcxt)) + xml_ereport(xmlerrcxt, ERROR, ERRCODE_OUT_OF_MEMORY, + "could not allocate xmlChar"); + pfree(ts); + + tout = cstring_to_text((char *) tt); + } + PG_CATCH(); + { + if (tt != NULL) + xmlFree((xmlChar *) tt); - tt = xmlEncodeSpecialChars(NULL, ts); + pg_xml_done(xmlerrcxt, true); - pfree(ts); + PG_RE_THROW(); + } + PG_END_TRY(); - tout = cstring_to_text((char *) tt); + if (tt != NULL) + xmlFree((xmlChar *) tt); - xmlFree(tt); + pg_xml_done(xmlerrcxt, false); PG_RETURN_TEXT_P(tout); } @@ -122,62 +144,90 @@ pgxmlNodeSetToText(xmlNodeSetPtr nodeset, xmlChar *septagname, xmlChar *plainsep) { - xmlBufferPtr buf; + volatile xmlBufferPtr buf = NULL; xmlChar *result; int i; + PgXmlErrorContext *xmlerrcxt; - buf = xmlBufferCreate(); + /* spin some error handling */ + xmlerrcxt = pg_xml_init(PG_XML_STRICTNESS_ALL); - if ((toptagname != NULL) && (xmlStrlen(toptagname) > 0)) - { - xmlBufferWriteChar(buf, "<"); - xmlBufferWriteCHAR(buf, toptagname); - xmlBufferWriteChar(buf, ">"); - } - if (nodeset != NULL) + PG_TRY(); { - for (i = 0; i < nodeset->nodeNr; i++) - { - if (plainsep != NULL) - { - xmlBufferWriteCHAR(buf, - xmlXPathCastNodeToString(nodeset->nodeTab[i])); + buf = xmlBufferCreate(); - /* If this isn't the last entry, write the plain sep. 
*/ - if (i < (nodeset->nodeNr) - 1) - xmlBufferWriteChar(buf, (char *) plainsep); - } - else + if (buf == NULL || pg_xml_error_occurred(xmlerrcxt)) + xml_ereport(xmlerrcxt, ERROR, ERRCODE_OUT_OF_MEMORY, + "could not allocate xmlBuffer"); + + if ((toptagname != NULL) && (xmlStrlen(toptagname) > 0)) + { + xmlBufferWriteChar(buf, "<"); + xmlBufferWriteCHAR(buf, toptagname); + xmlBufferWriteChar(buf, ">"); + } + if (nodeset != NULL) + { + for (i = 0; i < nodeset->nodeNr; i++) { - if ((septagname != NULL) && (xmlStrlen(septagname) > 0)) + if (plainsep != NULL) { - xmlBufferWriteChar(buf, "<"); - xmlBufferWriteCHAR(buf, septagname); - xmlBufferWriteChar(buf, ">"); - } - xmlNodeDump(buf, - nodeset->nodeTab[i]->doc, - nodeset->nodeTab[i], - 1, 0); + xmlBufferWriteCHAR(buf, + xmlXPathCastNodeToString(nodeset->nodeTab[i])); - if ((septagname != NULL) && (xmlStrlen(septagname) > 0)) + /* If this isn't the last entry, write the plain sep. */ + if (i < (nodeset->nodeNr) - 1) + xmlBufferWriteChar(buf, (char *) plainsep); + } + else { - xmlBufferWriteChar(buf, ""); + if ((septagname != NULL) && (xmlStrlen(septagname) > 0)) + { + xmlBufferWriteChar(buf, "<"); + xmlBufferWriteCHAR(buf, septagname); + xmlBufferWriteChar(buf, ">"); + } + xmlNodeDump(buf, + nodeset->nodeTab[i]->doc, + nodeset->nodeTab[i], + 1, 0); + + if ((septagname != NULL) && (xmlStrlen(septagname) > 0)) + { + xmlBufferWriteChar(buf, ""); + } } } } - } - if ((toptagname != NULL) && (xmlStrlen(toptagname) > 0)) + if ((toptagname != NULL) && (xmlStrlen(toptagname) > 0)) + { + xmlBufferWriteChar(buf, ""); + } + + result = xmlStrdup(buf->content); + if (result == NULL || pg_xml_error_occurred(xmlerrcxt)) + xml_ereport(xmlerrcxt, ERROR, ERRCODE_OUT_OF_MEMORY, + "could not allocate result"); + } + PG_CATCH(); { - xmlBufferWriteChar(buf, ""); + if (buf) + xmlBufferFree(buf); + + pg_xml_done(xmlerrcxt, true); + + PG_RE_THROW(); } - result = xmlStrdup(buf->content); + PG_END_TRY(); + xmlBufferFree(buf); + pg_xml_done(xmlerrcxt, false); + return result; } @@ -208,16 +258,29 @@ xpath_nodeset(PG_FUNCTION_ARGS) xmlChar *septag = pgxml_texttoxmlchar(PG_GETARG_TEXT_PP(3)); xmlChar *xpath; text *xpres; - xmlXPathObjectPtr res; - xpath_workspace workspace; + volatile xpath_workspace *workspace; + PgXmlErrorContext *xmlerrcxt; xpath = pgxml_texttoxmlchar(xpathsupp); + xmlerrcxt = pgxml_parser_init(PG_XML_STRICTNESS_LEGACY); - res = pgxml_xpath(document, xpath, &workspace); + PG_TRY(); + { + workspace = pgxml_xpath(document, xpath, xmlerrcxt); + xpres = pgxml_result_to_text(workspace->res, toptag, septag, NULL); + } + PG_CATCH(); + { + if (workspace) + cleanup_workspace(workspace); - xpres = pgxml_result_to_text(res, toptag, septag, NULL); + pg_xml_done(xmlerrcxt, true); + PG_RE_THROW(); + } + PG_END_TRY(); - cleanup_workspace(&workspace); + cleanup_workspace(workspace); + pg_xml_done(xmlerrcxt, false); pfree(xpath); @@ -240,16 +303,29 @@ xpath_list(PG_FUNCTION_ARGS) xmlChar *plainsep = pgxml_texttoxmlchar(PG_GETARG_TEXT_PP(2)); xmlChar *xpath; text *xpres; - xmlXPathObjectPtr res; - xpath_workspace workspace; + volatile xpath_workspace *workspace; + PgXmlErrorContext *xmlerrcxt; xpath = pgxml_texttoxmlchar(xpathsupp); + xmlerrcxt = pgxml_parser_init(PG_XML_STRICTNESS_LEGACY); - res = pgxml_xpath(document, xpath, &workspace); + PG_TRY(); + { + workspace = pgxml_xpath(document, xpath, xmlerrcxt); + xpres = pgxml_result_to_text(workspace->res, NULL, NULL, plainsep); + } + PG_CATCH(); + { + if (workspace) + cleanup_workspace(workspace); - xpres = 
pgxml_result_to_text(res, NULL, NULL, plainsep); + pg_xml_done(xmlerrcxt, true); + PG_RE_THROW(); + } + PG_END_TRY(); - cleanup_workspace(&workspace); + cleanup_workspace(workspace); + pg_xml_done(xmlerrcxt, false); pfree(xpath); @@ -269,8 +345,8 @@ xpath_string(PG_FUNCTION_ARGS) xmlChar *xpath; int32 pathsize; text *xpres; - xmlXPathObjectPtr res; - xpath_workspace workspace; + volatile xpath_workspace *workspace; + PgXmlErrorContext *xmlerrcxt; pathsize = VARSIZE_ANY_EXHDR(xpathsupp); @@ -286,11 +362,25 @@ xpath_string(PG_FUNCTION_ARGS) xpath[pathsize + 7] = ')'; xpath[pathsize + 8] = '\0'; - res = pgxml_xpath(document, xpath, &workspace); + xmlerrcxt = pgxml_parser_init(PG_XML_STRICTNESS_LEGACY); + + PG_TRY(); + { + workspace = pgxml_xpath(document, xpath, xmlerrcxt); + xpres = pgxml_result_to_text(workspace->res, NULL, NULL, NULL); + } + PG_CATCH(); + { + if (workspace) + cleanup_workspace(workspace); - xpres = pgxml_result_to_text(res, NULL, NULL, NULL); + pg_xml_done(xmlerrcxt, true); + PG_RE_THROW(); + } + PG_END_TRY(); - cleanup_workspace(&workspace); + cleanup_workspace(workspace); + pg_xml_done(xmlerrcxt, false); pfree(xpath); @@ -308,24 +398,38 @@ xpath_number(PG_FUNCTION_ARGS) text *document = PG_GETARG_TEXT_PP(0); text *xpathsupp = PG_GETARG_TEXT_PP(1); /* XPath expression */ xmlChar *xpath; - float4 fRes; - xmlXPathObjectPtr res; - xpath_workspace workspace; + float4 fRes = 0.0; + bool isNull = false; + volatile xpath_workspace *workspace = NULL; + PgXmlErrorContext *xmlerrcxt; xpath = pgxml_texttoxmlchar(xpathsupp); + xmlerrcxt = pgxml_parser_init(PG_XML_STRICTNESS_LEGACY); - res = pgxml_xpath(document, xpath, &workspace); - - pfree(xpath); + PG_TRY(); + { + workspace = pgxml_xpath(document, xpath, xmlerrcxt); + pfree(xpath); - if (res == NULL) - PG_RETURN_NULL(); + if (workspace->res == NULL) + isNull = true; + else + fRes = xmlXPathCastToNumber(workspace->res); + } + PG_CATCH(); + { + if (workspace) + cleanup_workspace(workspace); - fRes = xmlXPathCastToNumber(res); + pg_xml_done(xmlerrcxt, true); + PG_RE_THROW(); + } + PG_END_TRY(); - cleanup_workspace(&workspace); + cleanup_workspace(workspace); + pg_xml_done(xmlerrcxt, false); - if (xmlXPathIsNaN(fRes)) + if (isNull || xmlXPathIsNaN(fRes)) PG_RETURN_NULL(); PG_RETURN_FLOAT4(fRes); @@ -341,21 +445,34 @@ xpath_bool(PG_FUNCTION_ARGS) text *xpathsupp = PG_GETARG_TEXT_PP(1); /* XPath expression */ xmlChar *xpath; int bRes; - xmlXPathObjectPtr res; - xpath_workspace workspace; + volatile xpath_workspace *workspace = NULL; + PgXmlErrorContext *xmlerrcxt; xpath = pgxml_texttoxmlchar(xpathsupp); + xmlerrcxt = pgxml_parser_init(PG_XML_STRICTNESS_LEGACY); - res = pgxml_xpath(document, xpath, &workspace); - - pfree(xpath); + PG_TRY(); + { + workspace = pgxml_xpath(document, xpath, xmlerrcxt); + pfree(xpath); - if (res == NULL) - PG_RETURN_BOOL(false); + if (workspace->res == NULL) + bRes = 0; + else + bRes = xmlXPathCastToBoolean(workspace->res); + } + PG_CATCH(); + { + if (workspace) + cleanup_workspace(workspace); - bRes = xmlXPathCastToBoolean(res); + pg_xml_done(xmlerrcxt, true); + PG_RE_THROW(); + } + PG_END_TRY(); - cleanup_workspace(&workspace); + cleanup_workspace(workspace); + pg_xml_done(xmlerrcxt, false); PG_RETURN_BOOL(bRes); } @@ -364,62 +481,44 @@ xpath_bool(PG_FUNCTION_ARGS) /* Core function to evaluate XPath query */ -static xmlXPathObjectPtr -pgxml_xpath(text *document, xmlChar *xpath, xpath_workspace *workspace) +static xpath_workspace * +pgxml_xpath(text *document, xmlChar *xpath, PgXmlErrorContext *xmlerrcxt) { 
int32 docsize = VARSIZE_ANY_EXHDR(document); - PgXmlErrorContext *xmlerrcxt; xmlXPathCompExprPtr comppath; + xpath_workspace *workspace = (xpath_workspace *) + palloc0(sizeof(xpath_workspace)); workspace->doctree = NULL; workspace->ctxt = NULL; workspace->res = NULL; - xmlerrcxt = pgxml_parser_init(PG_XML_STRICTNESS_LEGACY); - - PG_TRY(); + workspace->doctree = xmlReadMemory((char *) VARDATA_ANY(document), + docsize, NULL, NULL, + XML_PARSE_NOENT); + if (workspace->doctree != NULL) { - workspace->doctree = xmlReadMemory((char *) VARDATA_ANY(document), - docsize, NULL, NULL, - XML_PARSE_NOENT); - if (workspace->doctree != NULL) - { - workspace->ctxt = xmlXPathNewContext(workspace->doctree); - workspace->ctxt->node = xmlDocGetRootElement(workspace->doctree); - - /* compile the path */ - comppath = xmlXPathCtxtCompile(workspace->ctxt, xpath); - if (comppath == NULL) - xml_ereport(xmlerrcxt, ERROR, ERRCODE_INVALID_ARGUMENT_FOR_XQUERY, - "XPath Syntax Error"); + workspace->ctxt = xmlXPathNewContext(workspace->doctree); + workspace->ctxt->node = xmlDocGetRootElement(workspace->doctree); - /* Now evaluate the path expression. */ - workspace->res = xmlXPathCompiledEval(comppath, workspace->ctxt); + /* compile the path */ + comppath = xmlXPathCtxtCompile(workspace->ctxt, xpath); + if (comppath == NULL || pg_xml_error_occurred(xmlerrcxt)) + xml_ereport(xmlerrcxt, ERROR, ERRCODE_INVALID_ARGUMENT_FOR_XQUERY, + "XPath Syntax Error"); - xmlXPathFreeCompExpr(comppath); - } - } - PG_CATCH(); - { - cleanup_workspace(workspace); - - pg_xml_done(xmlerrcxt, true); + /* Now evaluate the path expression. */ + workspace->res = xmlXPathCompiledEval(comppath, workspace->ctxt); - PG_RE_THROW(); + xmlXPathFreeCompExpr(comppath); } - PG_END_TRY(); - if (workspace->res == NULL) - cleanup_workspace(workspace); - - pg_xml_done(xmlerrcxt, false); - - return workspace->res; + return workspace; } /* Clean up after processing the result of pgxml_xpath() */ static void -cleanup_workspace(xpath_workspace *workspace) +cleanup_workspace(volatile xpath_workspace *workspace) { if (workspace->res) xmlXPathFreeObject(workspace->res); @@ -438,34 +537,59 @@ pgxml_result_to_text(xmlXPathObjectPtr res, xmlChar *septag, xmlChar *plainsep) { - xmlChar *xpresstr; + volatile xmlChar *xpresstr = NULL; + PgXmlErrorContext *xmlerrcxt; text *xpres; if (res == NULL) return NULL; - switch (res->type) - { - case XPATH_NODESET: - xpresstr = pgxmlNodeSetToText(res->nodesetval, - toptag, - septag, plainsep); - break; + /* spin some error handling */ + xmlerrcxt = pg_xml_init(PG_XML_STRICTNESS_ALL); - case XPATH_STRING: - xpresstr = xmlStrdup(res->stringval); - break; + PG_TRY(); + { + switch (res->type) + { + case XPATH_NODESET: + xpresstr = pgxmlNodeSetToText(res->nodesetval, + toptag, + septag, plainsep); + break; + + case XPATH_STRING: + xpresstr = xmlStrdup(res->stringval); + if (xpresstr == NULL || pg_xml_error_occurred(xmlerrcxt)) + xml_ereport(xmlerrcxt, ERROR, ERRCODE_OUT_OF_MEMORY, + "could not allocate result"); + break; + + default: + elog(NOTICE, "unsupported XQuery result: %d", res->type); + xpresstr = xmlStrdup((const xmlChar *) ""); + if (xpresstr == NULL || pg_xml_error_occurred(xmlerrcxt)) + xml_ereport(xmlerrcxt, ERROR, ERRCODE_OUT_OF_MEMORY, + "could not allocate result"); + } - default: - elog(NOTICE, "unsupported XQuery result: %d", res->type); - xpresstr = xmlStrdup((const xmlChar *) ""); + /* Now convert this result back to text */ + xpres = cstring_to_text((char *) xpresstr); } + PG_CATCH(); + { + if (xpresstr != NULL) + 
xmlFree((xmlChar *) xpresstr); - /* Now convert this result back to text */ - xpres = cstring_to_text((char *) xpresstr); + pg_xml_done(xmlerrcxt, true); + + PG_RE_THROW(); + } + PG_END_TRY(); /* Free various storage */ - xmlFree(xpresstr); + xmlFree((xmlChar *) xpresstr); + + pg_xml_done(xmlerrcxt, false); return xpres; } @@ -648,11 +772,16 @@ xpath_table(PG_FUNCTION_ARGS) for (j = 0; j < numpaths; j++) { ctxt = xmlXPathNewContext(doctree); + if (ctxt == NULL || pg_xml_error_occurred(xmlerrcxt)) + xml_ereport(xmlerrcxt, + ERROR, ERRCODE_OUT_OF_MEMORY, + "could not allocate XPath context"); + ctxt->node = xmlDocGetRootElement(doctree); /* compile the path */ comppath = xmlXPathCtxtCompile(ctxt, xpaths[j]); - if (comppath == NULL) + if (comppath == NULL || pg_xml_error_occurred(xmlerrcxt)) xml_ereport(xmlerrcxt, ERROR, ERRCODE_INVALID_ARGUMENT_FOR_XQUERY, "XPath Syntax Error"); @@ -671,6 +800,10 @@ xpath_table(PG_FUNCTION_ARGS) rownr < res->nodesetval->nodeNr) { resstr = xmlXPathCastNodeToString(res->nodesetval->nodeTab[rownr]); + if (resstr == NULL || pg_xml_error_occurred(xmlerrcxt)) + xml_ereport(xmlerrcxt, + ERROR, ERRCODE_OUT_OF_MEMORY, + "could not allocate result"); had_values = true; } else @@ -680,11 +813,19 @@ xpath_table(PG_FUNCTION_ARGS) case XPATH_STRING: resstr = xmlStrdup(res->stringval); + if (resstr == NULL || pg_xml_error_occurred(xmlerrcxt)) + xml_ereport(xmlerrcxt, + ERROR, ERRCODE_OUT_OF_MEMORY, + "could not allocate result"); break; default: elog(NOTICE, "unsupported XQuery result: %d", res->type); resstr = xmlStrdup((const xmlChar *) ""); + if (resstr == NULL || pg_xml_error_occurred(xmlerrcxt)) + xml_ereport(xmlerrcxt, + ERROR, ERRCODE_OUT_OF_MEMORY, + "could not allocate result"); } /* diff --git a/contrib/xml2/xslt_proc.c b/contrib/xml2/xslt_proc.c index b720d89f754ae..c8e7dd45ed5b4 100644 --- a/contrib/xml2/xslt_proc.c +++ b/contrib/xml2/xslt_proc.c @@ -58,7 +58,7 @@ xslt_process(PG_FUNCTION_ARGS) volatile xsltSecurityPrefsPtr xslt_sec_prefs = NULL; volatile xsltTransformContextPtr xslt_ctxt = NULL; volatile int resstat = -1; - xmlChar *resstr = NULL; + volatile xmlChar *resstr = NULL; int reslen = 0; if (fcinfo->nargs == 3) @@ -86,7 +86,7 @@ xslt_process(PG_FUNCTION_ARGS) VARSIZE_ANY_EXHDR(doct), NULL, NULL, XML_PARSE_NOENT); - if (doctree == NULL) + if (doctree == NULL || pg_xml_error_occurred(xmlerrcxt)) xml_ereport(xmlerrcxt, ERROR, ERRCODE_INVALID_XML_DOCUMENT, "error parsing XML document"); @@ -95,14 +95,14 @@ xslt_process(PG_FUNCTION_ARGS) VARSIZE_ANY_EXHDR(ssheet), NULL, NULL, XML_PARSE_NOENT); - if (ssdoc == NULL) + if (ssdoc == NULL || pg_xml_error_occurred(xmlerrcxt)) xml_ereport(xmlerrcxt, ERROR, ERRCODE_INVALID_XML_DOCUMENT, "error parsing stylesheet as XML document"); /* After this call we need not free ssdoc separately */ stylesheet = xsltParseStylesheetDoc(ssdoc); - if (stylesheet == NULL) + if (stylesheet == NULL || pg_xml_error_occurred(xmlerrcxt)) xml_ereport(xmlerrcxt, ERROR, ERRCODE_INVALID_ARGUMENT_FOR_XQUERY, "failed to parse stylesheet"); @@ -137,11 +137,15 @@ xslt_process(PG_FUNCTION_ARGS) restree = xsltApplyStylesheetUser(stylesheet, doctree, params, NULL, NULL, xslt_ctxt); - if (restree == NULL) + if (restree == NULL || pg_xml_error_occurred(xmlerrcxt)) xml_ereport(xmlerrcxt, ERROR, ERRCODE_INVALID_ARGUMENT_FOR_XQUERY, "failed to apply stylesheet"); - resstat = xsltSaveResultToString(&resstr, &reslen, restree, stylesheet); + resstat = xsltSaveResultToString((xmlChar **) &resstr, &reslen, + restree, stylesheet); + + if (resstat >= 0) + 
result = cstring_to_text_with_len((char *) resstr, reslen); } PG_CATCH(); { @@ -155,6 +159,8 @@ xslt_process(PG_FUNCTION_ARGS) xsltFreeStylesheet(stylesheet); if (doctree != NULL) xmlFreeDoc(doctree); + if (resstr != NULL) + xmlFree((xmlChar *) resstr); xsltCleanupGlobals(); pg_xml_done(xmlerrcxt, true); @@ -170,17 +176,15 @@ xslt_process(PG_FUNCTION_ARGS) xmlFreeDoc(doctree); xsltCleanupGlobals(); + if (resstr) + xmlFree((xmlChar *) resstr); + pg_xml_done(xmlerrcxt, false); /* XXX this is pretty dubious, really ought to throw error instead */ if (resstat < 0) PG_RETURN_NULL(); - result = cstring_to_text_with_len((char *) resstr, reslen); - - if (resstr) - xmlFree(resstr); - PG_RETURN_TEXT_P(result); #else /* !USE_LIBXSLT */ From 8fd9bb1d9654c59d40613232ad964e9a648e4202 Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Tue, 1 Jul 2025 09:36:33 +0200 Subject: [PATCH 111/181] Enable MSVC conforming preprocessor Switch MSVC to use the conforming preprocessor, using the /Zc:preprocessor option. This allows us to drop the alternative implementation of VA_ARGS_NARGS() for the previous "traditional" preprocessor. This also prepares the way for enabling C11 mode in the future, which enables the conforming preprocessor by default. This now requires Visual Studio 2019. The installation documentation is adjusted accordingly. Discussion: https://www.postgresql.org/message-id/flat/01a69441-af54-4822-891b-ca28e05b215a%40eisentraut.org --- doc/src/sgml/installation.sgml | 8 ++------ meson.build | 4 ++++ src/include/c.h | 19 ------------------- 3 files changed, 6 insertions(+), 25 deletions(-) diff --git a/doc/src/sgml/installation.sgml b/doc/src/sgml/installation.sgml index de19f3ad92952..cb53530cc4fa8 100644 --- a/doc/src/sgml/installation.sgml +++ b/doc/src/sgml/installation.sgml @@ -3847,17 +3847,13 @@ make: *** [postgres] Error 1 Both 32-bit and 64-bit builds are possible with the Microsoft Compiler suite. 32-bit PostgreSQL builds are possible with - Visual Studio 2015 to + Visual Studio 2019 to Visual Studio 2022, as well as standalone Windows SDK releases 10 and above. 64-bit PostgreSQL builds are supported with Microsoft Windows SDK version 10 and above or - Visual Studio 2015 and above. + Visual Studio 2019 and above.