  *
  * num_backend_writes is used to count the number of buffer writes performed
  * by user backend processes. This counter should be wide enough that it
- * can't overflow during a single bgwriter cycle. num_backend_fsync
+ * can't overflow during a single processing cycle. num_backend_fsync
  * counts the subset of those writes that also had to do their own fsync,
  * because the background writer failed to absorb their request.
  *
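(A minimal standalone sketch of the counting scheme described in the hunk above, using hypothetical names; in the real code these counters live in the shared-memory struct and are updated under the communication lock, not in a local struct like this.)

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the shared counters the comment describes. */
typedef struct BackendWriteStats
{
    uint32_t    num_backend_writes; /* all buffer writes done by backends */
    uint32_t    num_backend_fsync;  /* subset that had to do their own fsync */
} BackendWriteStats;

/*
 * Hypothetical backend write path: every write is counted, and the fsync
 * counter is bumped only when the forwarded fsync request was not absorbed.
 */
static void
backend_write_buffer(BackendWriteStats *stats, bool fsync_request_absorbed)
{
    stats->num_backend_writes++;
    if (!fsync_request_absorbed)
        stats->num_backend_fsync++;
}

int
main(void)
{
    BackendWriteStats stats = {0, 0};

    backend_write_buffer(&stats, true);     /* request absorbed: write only */
    backend_write_buffer(&stats, false);    /* backend had to fsync itself */
    printf("writes=%u fsyncs=%u\n",
           stats.num_backend_writes, stats.num_backend_fsync);
    return 0;
}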
@@ -892,7 +892,7 @@ BgWriterShmemInit(void)
  * since the last one (implied by CHECKPOINT_IS_SHUTDOWN or
  * CHECKPOINT_END_OF_RECOVERY).
  * CHECKPOINT_WAIT: wait for completion before returning (otherwise,
- * just signal bgwriter to do it, and return).
+ * just signal checkpointer to do it, and return).
  * CHECKPOINT_CAUSE_XLOG: checkpoint is requested due to xlog filling.
  * (This affects logging, and in particular enables CheckPointWarning.)
  */
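(For reference, a hedged example of calling this API. RequestCheckpoint and the CHECKPOINT_* flags are the real names documented above; the wrapper function and the header path are assumptions.)

#include "postgres.h"
#include "postmaster/bgwriter.h"    /* assumed location of RequestCheckpoint() */

/*
 * Ask for an immediate, forced checkpoint and wait for it to complete --
 * roughly the combination of flags the SQL CHECKPOINT command requests.
 */
static void
request_immediate_checkpoint(void)
{
    RequestCheckpoint(CHECKPOINT_IMMEDIATE | CHECKPOINT_FORCE | CHECKPOINT_WAIT);
}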
@@ -928,7 +928,7 @@ RequestCheckpoint(int flags)
 	/*
 	 * Atomically set the request flags, and take a snapshot of the counters.
 	 * When we see ckpt_started > old_started, we know the flags we set here
-	 * have been seen by bgwriter.
+	 * have been seen by checkpointer.
 	 *
 	 * Note that we OR the flags with any existing flags, to avoid overriding
 	 * a "stronger" request by another backend. The flag senses must be
@@ -943,7 +943,7 @@ RequestCheckpoint(int flags)
 	SpinLockRelease(&bgs->ckpt_lck);
 
 	/*
-	 * Send signal to request checkpoint. It's possible that the bgwriter
+	 * Send signal to request checkpoint. It's possible that the checkpointer
 	 * hasn't started yet, or is in process of restarting, so we will retry a
 	 * few times if needed. Also, if not told to wait for the checkpoint to
 	 * occur, we consider failure to send the signal to be nonfatal and merely
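(A paraphrased sketch of the pattern these two hunks describe, not the verbatim source: the new flags are OR'd in and the counters snapshotted under the same spinlock, so a later check of ckpt_started against the snapshot proves the checkpointer has seen these flags. The local variable names and the ckpt_failed field are assumptions beyond what the hunks show.)

	SpinLockAcquire(&bgs->ckpt_lck);
	old_failed = bgs->ckpt_failed;		/* snapshot for later error reporting */
	old_started = bgs->ckpt_started;	/* snapshot to detect "request seen" */
	bgs->ckpt_flags |= flags;			/* OR in, never overwrite another backend's request */
	SpinLockRelease(&bgs->ckpt_lck);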
@@ -1027,10 +1027,10 @@ RequestCheckpoint(int flags)
 
 /*
  * ForwardFsyncRequest
- * Forward a file-fsync request from a backend to the bgwriter
+ * Forward a file-fsync request from a backend to the checkpointer
  *
 * Whenever a backend is compelled to write directly to a relation
- * (which should be seldom, if the bgwriter is getting its job done),
+ * (which should be seldom, if the checkpointer is getting its job done),
  * the backend calls this routine to pass over knowledge that the relation
  * is dirty and must be fsync'd before next checkpoint. We also use this
  * opportunity to count such writes for statistical purposes.
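(A hypothetical caller following the pattern described above; the real caller lives in the md.c/smgr layer. The segment variable, error message, and FileSync fallback here are illustrative assumptions, but a false return from ForwardFsyncRequest is the case the num_backend_fsync counter tracks.)

	/*
	 * Try to hand the fsync work to the checkpointer; if the request cannot
	 * be forwarded, fsync the file ourselves.
	 */
	if (!ForwardFsyncRequest(rnode, forknum, segno))
	{
		if (FileSync(seg->mdfd_vfd) < 0)
			ereport(ERROR,
					(errcode_for_file_access(),
					 errmsg("could not fsync file: %m")));
	}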
@@ -1041,7 +1041,7 @@ RequestCheckpoint(int flags)
  * see for details.)
  *
  * To avoid holding the lock for longer than necessary, we normally write
- * to the requests[] queue without checking for duplicates. The bgwriter
+ * to the requests[] queue without checking for duplicates. The checkpointer
  * will have to eliminate dups internally anyway. However, if we discover
  * that the queue is full, we make a pass over the entire queue to compact
  * it. This is somewhat expensive, but the alternative is for the backend
@@ -1060,7 +1060,7 @@ ForwardFsyncRequest(RelFileNodeBackend rnode, ForkNumber forknum,
 		return false;			/* probably shouldn't even get here */
 
 	if (am_checkpointer)
-		elog(ERROR, "ForwardFsyncRequest must not be called in bgwriter");
+		elog(ERROR, "ForwardFsyncRequest must not be called in checkpointer");
 
 	LWLockAcquire(BgWriterCommLock, LW_EXCLUSIVE);
 
@@ -1132,7 +1132,7 @@ CompactCheckpointerRequestQueue()
 	ctl.keysize = sizeof(BgWriterRequest);
 	ctl.entrysize = sizeof(struct BgWriterSlotMapping);
 	ctl.hash = tag_hash;
-	htab = hash_create("CompactBgwriterRequestQueue",
+	htab = hash_create("CompactCheckpointerRequestQueue",
 					   BgWriterShmem->num_requests,
 					   &ctl,
 					   HASH_ELEM | HASH_FUNCTION);
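(A paraphrased sketch of how the compaction pass can use this hash table; BgWriterShmem, BgWriterRequest, and htab appear in the hunks above, while skip_slot and num_skipped are assumptions, and the real code's choice of which duplicate slot to drop may differ. Each request is entered into the table, and any slot whose key was already present is marked as a duplicate so the survivors can be slid down afterwards.)

	for (n = 0; n < BgWriterShmem->num_requests; n++)
	{
		BgWriterRequest *request = &BgWriterShmem->requests[n];
		bool		found;

		hash_search(htab, (void *) request, HASH_ENTER, &found);
		if (found)
		{
			skip_slot[n] = true;	/* duplicate of an earlier request */
			num_skipped++;
		}
	}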