Reduce the number of semaphores used under --disable-spinlocks. · postgrespro/postgres@daa7527 · GitHub

Commit daa7527

Reduce the number of semaphores used under --disable-spinlocks.
Instead of allocating a semaphore from the operating system for every spinlock, allocate a fixed number of semaphores (by default, 1024) from the operating system and multiplex all the spinlocks that get created onto them. This could self-deadlock if a process attempted to acquire more than one spinlock at a time, but since processes aren't supposed to execute anything other than short stretches of straight-line code while holding a spinlock, that shouldn't happen.

One motivation for this change is that, with the introduction of dynamic shared memory, it may be desirable to create spinlocks that last for less than the lifetime of the server. Without this change, attempting to use such facilities under --disable-spinlocks would quickly exhaust any supply of available semaphores. Quite apart from that, it's desirable to contain the quantity of semaphores needed to run the server simply on convenience grounds, since using too many may make it harder to get PostgreSQL running on a new platform, which is mostly the point of --disable-spinlocks in the first place.

Patch by me; review by Tom Lane.
1 parent 3739e5a commit daa7527
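
For orientation, here is a minimal, self-contained sketch of the multiplexing scheme, using plain POSIX semaphores instead of PostgreSQL's PGSemaphore layer. Every identifier in it (emu_slock_t, emu_sema_pool, NUM_EMU_SEMAPHORES, and so on) is hypothetical and exists only for this illustration; the patch itself works with slock_t, SpinlockSemaArray, and NUM_SPINLOCK_SEMAPHORES, as shown in the diffs below.

    /*
     * Illustrative sketch only: an emulated spinlock is just an index into a
     * fixed pool of semaphores, assigned round-robin at initialization time.
     */
    #include <semaphore.h>

    #define NUM_EMU_SEMAPHORES 1024      /* analogous to NUM_SPINLOCK_SEMAPHORES */

    typedef int emu_slock_t;             /* an emulated spinlock is just an index */

    static sem_t emu_sema_pool[NUM_EMU_SEMAPHORES];

    /* Create the fixed pool once, at startup.  (The patch places the real pool
     * in shared memory so every backend maps the same semaphores.) */
    static void
    emu_pool_init(void)
    {
        for (int i = 0; i < NUM_EMU_SEMAPHORES; ++i)
            sem_init(&emu_sema_pool[i], 0, 1);   /* binary semaphore, initially free */
    }

    /* Hand each new "spinlock" a pool slot round-robin, as s_init_lock_sema() does. */
    static void
    emu_slock_init(emu_slock_t *lock)
    {
        static int counter = 0;

        *lock = (++counter) % NUM_EMU_SEMAPHORES;
    }

    /* Test-and-set: returns 0 on success, 1 if the slot is already held,
     * mirroring the TAS convention used by tas_sema(). */
    static int
    emu_slock_tas(emu_slock_t *lock)
    {
        return sem_trywait(&emu_sema_pool[*lock]) == 0 ? 0 : 1;
    }

    static void
    emu_slock_release(emu_slock_t *lock)
    {
        sem_post(&emu_sema_pool[*lock]);
    }

The point of the pool is that the number of semaphores requested from the operating system is now fixed at the pool size, no matter how many spinlocks are created or destroyed over the server's lifetime.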

File tree: 7 files changed (+74, -23 lines), all under src/

src/backend/postmaster/postmaster.c

Lines changed: 9 additions & 0 deletions

@@ -471,6 +471,9 @@ typedef struct
     slock_t    *ShmemLock;
     VariableCache ShmemVariableCache;
     Backend    *ShmemBackendArray;
+#ifndef HAVE_SPINLOCKS
+    PGSemaphore SpinlockSemaArray;
+#endif
     LWLock     *LWLockArray;
     slock_t    *ProcStructLock;
     PROC_HDR   *ProcGlobal;
@@ -5626,6 +5629,9 @@ save_backend_variables(BackendParameters *param, Port *port,
     param->ShmemVariableCache = ShmemVariableCache;
     param->ShmemBackendArray = ShmemBackendArray;

+#ifndef HAVE_SPINLOCKS
+    param->SpinlockSemaArray = SpinlockSemaArray;
+#endif
     param->LWLockArray = LWLockArray;
     param->ProcStructLock = ProcStructLock;
     param->ProcGlobal = ProcGlobal;
@@ -5854,6 +5860,9 @@ restore_backend_variables(BackendParameters *param, Port *port)
     ShmemVariableCache = param->ShmemVariableCache;
     ShmemBackendArray = param->ShmemBackendArray;

+#ifndef HAVE_SPINLOCKS
+    SpinlockSemaArray = param->SpinlockSemaArray;
+#endif
     LWLockArray = param->LWLockArray;
     ProcStructLock = param->ProcStructLock;
     ProcGlobal = param->ProcGlobal;

src/backend/storage/ipc/ipci.c

Lines changed: 1 addition & 0 deletions

@@ -105,6 +105,7 @@ CreateSharedMemoryAndSemaphores(bool makePrivate, int port)
      * need to be so careful during the actual allocation phase.
      */
     size = 100000;
+    size = add_size(size, SpinlockSemaSize());
     size = add_size(size, hash_estimate_size(SHMEM_INDEX_SIZE,
                                              sizeof(ShmemIndexEnt)));
     size = add_size(size, BufferShmemSize());

src/backend/storage/ipc/shmem.c

Lines changed: 18 additions & 3 deletions

@@ -116,9 +116,24 @@ InitShmemAllocation(void)
     Assert(shmhdr != NULL);

     /*
-     * Initialize the spinlock used by ShmemAlloc.  We have to do the space
-     * allocation the hard way, since obviously ShmemAlloc can't be called
-     * yet.
+     * If spinlocks are disabled, initialize emulation layer.  We have to do
+     * the space allocation the hard way, since obviously ShmemAlloc can't be
+     * called yet.
+     */
+#ifndef HAVE_SPINLOCKS
+    {
+        PGSemaphore spinsemas;
+
+        spinsemas = (PGSemaphore) (((char *) shmhdr) + shmhdr->freeoffset);
+        shmhdr->freeoffset += MAXALIGN(SpinlockSemaSize());
+        SpinlockSemaInit(spinsemas);
+        Assert(shmhdr->freeoffset <= shmhdr->totalsize);
+    }
+#endif
+
+    /*
+     * Initialize the spinlock used by ShmemAlloc; we have to do this the hard
+     * way, too, for the same reasons as above.
      */
     ShmemLock = (slock_t *) (((char *) shmhdr) + shmhdr->freeoffset);
     shmhdr->freeoffset += MAXALIGN(sizeof(slock_t));

src/backend/storage/lmgr/spin.c

Lines changed: 31 additions & 19 deletions

@@ -29,6 +29,18 @@
 #include "storage/spin.h"


+PGSemaphore SpinlockSemaArray;
+
+/*
+ * Report the amount of shared memory needed to store semaphores for spinlock
+ * support.
+ */
+Size
+SpinlockSemaSize(void)
+{
+    return SpinlockSemas() * sizeof(PGSemaphoreData);
+}
+
 #ifdef HAVE_SPINLOCKS

 /*
@@ -52,22 +64,20 @@ SpinlockSemas(void)
 int
 SpinlockSemas(void)
 {
-    int         nsemas;
-
-    /*
-     * It would be cleaner to distribute this logic into the affected modules,
-     * similar to the way shmem space estimation is handled.
-     *
-     * For now, though, there are few enough users of spinlocks that we just
-     * keep the knowledge here.
-     */
-    nsemas = NumLWLocks();      /* one for each lwlock */
-    nsemas += NBuffers;         /* one for each buffer header */
-    nsemas += max_wal_senders;  /* one for each wal sender process */
-    nsemas += num_xloginsert_slots;     /* one for each WAL insertion slot */
-    nsemas += 30;               /* plus a bunch for other small-scale use */
-
-    return nsemas;
+    return NUM_SPINLOCK_SEMAPHORES;
+}
+
+/*
+ * Initialize semaphores.
+ */
+extern void
+SpinlockSemaInit(PGSemaphore spinsemas)
+{
+    int         i;
+
+    for (i = 0; i < NUM_SPINLOCK_SEMAPHORES; ++i)
+        PGSemaphoreCreate(&spinsemas[i]);
+    SpinlockSemaArray = spinsemas;
 }

 /*
@@ -77,13 +87,15 @@ SpinlockSemas(void)
 void
 s_init_lock_sema(volatile slock_t *lock)
 {
-    PGSemaphoreCreate((PGSemaphore) lock);
+    static int  counter = 0;
+
+    *lock = (++counter) % NUM_SPINLOCK_SEMAPHORES;
 }

 void
 s_unlock_sema(volatile slock_t *lock)
 {
-    PGSemaphoreUnlock((PGSemaphore) lock);
+    PGSemaphoreUnlock(&SpinlockSemaArray[*lock]);
 }

 bool
@@ -98,7 +110,7 @@ int
 tas_sema(volatile slock_t *lock)
 {
     /* Note that TAS macros return 0 if *success* */
-    return !PGSemaphoreTryLock((PGSemaphore) lock);
+    return !PGSemaphoreTryLock(&SpinlockSemaArray[*lock]);
 }

 #endif   /* !HAVE_SPINLOCKS */
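
The round-robin assignment in s_init_lock_sema() above is also the source of the self-deadlock caveat in the commit message: once more spinlocks have been initialized than there are semaphores in the pool, two distinct spinlocks can carry the same index, and a process that already holds one of them can never acquire the other. Continuing the hypothetical sketch from the top of the page (illustrative names, not PostgreSQL code):

    /*
     * Illustrative only: suppose *first == *second, i.e. both emulated
     * spinlocks were assigned the same pool slot k.  Holding the first and
     * then spinning on the second can never succeed, because the only way
     * slot k becomes free again is for this same process to release it.
     */
    static void
    self_deadlock_example(emu_slock_t *first, emu_slock_t *second)
    {
        while (emu_slock_tas(first) != 0)
            ;                        /* succeeds: slot k was free */

        while (emu_slock_tas(second) != 0)
            ;                        /* spins forever: slot k is held by us */

        emu_slock_release(second);
        emu_slock_release(first);
    }

This is why the commit message stresses that a process is only supposed to hold one spinlock at a time, and only across short stretches of straight-line code.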

src/include/pg_config_manual.h

Lines changed: 8 additions & 0 deletions

@@ -56,6 +56,14 @@
  */
 #define NUM_USER_DEFINED_LWLOCKS 4

+/*
+ * When we don't have native spinlocks, we use semaphores to simulate them.
+ * Decreasing this value reduces consumption of OS resources; increasing it
+ * may improve performance, but supplying a real spinlock implementation is
+ * probably far better.
+ */
+#define NUM_SPINLOCK_SEMAPHORES 1024
+
 /*
  * Define this if you want to allow the lo_import and lo_export SQL
  * functions to be executed by ordinary users.  By default these
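
Because NUM_SPINLOCK_SEMAPHORES lives in pg_config_manual.h alongside the other hand-editable settings, a platform that is short on kernel semaphores could presumably be built with a smaller pool, trading some performance for fewer OS resources as the new comment describes. A hypothetical local edit, not part of the patch:

    /* Hypothetical local edit to pg_config_manual.h: shrink the emulation
     * pool to economize on OS semaphores, per the comment above. */
    #define NUM_SPINLOCK_SEMAPHORES 128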

src/include/storage/s_lock.h

Lines changed: 1 addition & 1 deletion

@@ -915,7 +915,7 @@ spin_delay(void)
  * to fall foul of kernel limits on number of semaphores, so don't use this
  * unless you must!  The subroutines appear in spin.c.
  */
-typedef PGSemaphoreData slock_t;
+typedef int slock_t;

 extern bool s_lock_free_sema(volatile slock_t *lock);
 extern void s_unlock_sema(volatile slock_t *lock);

src/include/storage/spin.h

Lines changed: 6 additions & 0 deletions

@@ -69,5 +69,11 @@


 extern int SpinlockSemas(void);
+extern Size SpinlockSemaSize(void);
+
+#ifndef HAVE_SPINLOCKS
+extern void SpinlockSemaInit(PGSemaphore);
+extern PGSemaphore SpinlockSemaArray;
+#endif

 #endif   /* SPIN_H */
