Code review for recent slot.c changes. · postgrespro/postgres_cluster@5c21ad0 · GitHub
Commit 5c21ad0

Code review for recent slot.c changes.

1 parent df1a699 · commit 5c21ad0
1 file changed: +22 −26 lines


src/backend/replication/slot.c

Lines changed: 22 additions & 26 deletions
@@ -410,7 +410,7 @@ ReplicationSlotRelease(void)
  * Cleanup all temporary slots created in current session.
  */
 void
-ReplicationSlotCleanup()
+ReplicationSlotCleanup(void)
 {
 	int			i;
 
@@ -802,12 +802,12 @@ ReplicationSlotsCountDBSlots(Oid dboid, int *nslots, int *nactive)
  * pg_database oid for the database to prevent creation of new slots on the db
  * or replay from existing slots.
  *
- * This routine isn't as efficient as it could be - but we don't drop databases
- * often, especially databases with lots of slots.
- *
  * Another session that concurrently acquires an existing slot on the target DB
  * (most likely to drop it) may cause this function to ERROR. If that happens
  * it may have dropped some but not all slots.
+ *
+ * This routine isn't as efficient as it could be - but we don't drop
+ * databases often, especially databases with lots of slots.
  */
 void
 ReplicationSlotsDropDBSlots(Oid dboid)
@@ -822,7 +822,7 @@ ReplicationSlotsDropDBSlots(Oid dboid)
 	for (i = 0; i < max_replication_slots; i++)
 	{
 		ReplicationSlot *s;
-		NameData	slotname;
+		char	   *slotname;
 		int			active_pid;
 
 		s = &ReplicationSlotCtl->replication_slots[i];
@@ -839,10 +839,10 @@ ReplicationSlotsDropDBSlots(Oid dboid)
 		if (s->data.database != dboid)
 			continue;
 
-		/* Claim the slot, as if ReplicationSlotAcquire()ing. */
+		/* acquire slot, so ReplicationSlotDropAcquired can be reused */
 		SpinLockAcquire(&s->mutex);
-		strncpy(NameStr(slotname), NameStr(s->data.name), NAMEDATALEN);
-		NameStr(slotname)[NAMEDATALEN-1] = '\0';
+		/* can't change while ReplicationSlotControlLock is held */
+		slotname = NameStr(s->data.name);
 		active_pid = s->active_pid;
 		if (active_pid == 0)
 		{
@@ -852,36 +852,32 @@ ReplicationSlotsDropDBSlots(Oid dboid)
 		SpinLockRelease(&s->mutex);
 
 		/*
-		 * We might fail here if the slot was active. Even though we hold an
-		 * exclusive lock on the database object a logical slot for that DB can
-		 * still be active if it's being dropped by a backend connected to
-		 * another DB or is otherwise acquired.
+		 * Even though we hold an exclusive lock on the database object a
+		 * logical slot for that DB can still be active, e.g. if it's
+		 * concurrently being dropped by a backend connected to another DB.
 		 *
-		 * It's an unlikely race that'll only arise from concurrent user action,
-		 * so we'll just bail out.
+		 * That's fairly unlikely in practice, so we'll just bail out.
 		 */
 		if (active_pid)
-			elog(ERROR, "replication slot %s is in use by pid %d",
-				 NameStr(slotname), active_pid);
+			ereport(ERROR,
+					(errcode(ERRCODE_OBJECT_IN_USE),
+					 errmsg("replication slot \"%s\" is active for PID %d",
+							slotname, active_pid)));
 
 		/*
-		 * To avoid largely duplicating ReplicationSlotDropAcquired() or
-		 * complicating it with already_locked flags for ProcArrayLock,
-		 * ReplicationSlotControlLock and ReplicationSlotAllocationLock, we
-		 * just release our ReplicationSlotControlLock to drop the slot.
+		 * To avoid duplicating ReplicationSlotDropAcquired() and to avoid
+		 * holding ReplicationSlotControlLock over filesystem operations,
+		 * release ReplicationSlotControlLock and use
+		 * ReplicationSlotDropAcquired.
 		 *
-		 * For safety we'll restart our scan from the beginning each
-		 * time we release the lock.
+		 * As that means the set of slots could change, restart scan from the
+		 * beginning each time we release the lock.
 		 */
 		LWLockRelease(ReplicationSlotControlLock);
 		ReplicationSlotDropAcquired();
 		goto restart;
 	}
 	LWLockRelease(ReplicationSlotControlLock);
-
-	/* recompute limits once after all slots are dropped */
-	ReplicationSlotsComputeRequiredXmin(false);
-	ReplicationSlotsComputeRequiredLSN();
 }
 
 