@@ -2163,26 +2163,31 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
2163
2163
*/
2164
2164
heaptup = heap_prepare_insert (relation , tup , xid , cid , options );
2165
2165
2166
+ /*
2167
+ * Find buffer to insert this tuple into. If the page is all visible,
2168
+ * this will also pin the requisite visibility map page.
2169
+ */
2170
+ buffer = RelationGetBufferForTuple (relation , heaptup -> t_len ,
2171
+ InvalidBuffer , options , bistate ,
2172
+ & vmbuffer , NULL );
2173
+
2166
2174
/*
2167
2175
* We're about to do the actual insert -- but check for conflict first, to
2168
2176
* avoid possibly having to roll back work we've just done.
2169
2177
*
2178
+ * This is safe without a recheck as long as there is no possibility of
2179
+ * another process scanning the page between this check and the insert
2180
+ * being visible to the scan (i.e., an exclusive buffer content lock is
2181
+ * continuously held from this point until the tuple insert is visible).
2182
+ *
2170
2183
* For a heap insert, we only need to check for table-level SSI locks. Our
2171
2184
* new tuple can't possibly conflict with existing tuple locks, and heap
2172
2185
* page locks are only consolidated versions of tuple locks; they do not
2173
- * lock "gaps" as index page locks do. So we don't need to identify a
2174
- * buffer before making the call.
2186
+ * lock "gaps" as index page locks do. So we don't need to specify a
2187
+ * buffer when making the call, which makes for a faster check.
2175
2188
*/
2176
2189
CheckForSerializableConflictIn (relation , NULL , InvalidBuffer );
2177
2190
2178
- /*
2179
- * Find buffer to insert this tuple into. If the page is all visible,
2180
- * this will also pin the requisite visibility map page.
2181
- */
2182
- buffer = RelationGetBufferForTuple (relation , heaptup -> t_len ,
2183
- InvalidBuffer , options , bistate ,
2184
- & vmbuffer , NULL );
2185
-
2186
2191
/* NO EREPORT(ERROR) from here till changes are logged */
2187
2192
START_CRIT_SECTION ();
2188
2193
@@ -2436,13 +2441,26 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
2436
2441
2437
2442
/*
2438
2443
* We're about to do the actual inserts -- but check for conflict first,
2439
- * to avoid possibly having to roll back work we've just done.
2444
+ * to minimize the possibility of having to roll back work we've just
2445
+ * done.
2440
2446
*
2441
- * For a heap insert, we only need to check for table-level SSI locks. Our
2442
- * new tuple can't possibly conflict with existing tuple locks, and heap
2447
+ * A check here does not definitively prevent a serialization anomaly;
2448
+ * that check MUST be done at least past the point of acquiring an
2449
+ * exclusive buffer content lock on every buffer that will be affected,
2450
+ * and MAY be done after all inserts are reflected in the buffers and
2451
+ * those locks are released; otherwise there is a race condition. Since
2452
+ * multiple buffers can be locked and unlocked in the loop below, and it
2453
+ * would not be feasible to identify and lock all of those buffers before
2454
+ * the loop, we must do a final check at the end.
2455
+ *
2456
+ * The check here could be omitted with no loss of correctness; it is
2457
+ * present strictly as an optimization.
2458
+ *
2459
+ * For heap inserts, we only need to check for table-level SSI locks. Our
2460
+ * new tuples can't possibly conflict with existing tuple locks, and heap
2443
2461
* page locks are only consolidated versions of tuple locks; they do not
2444
- * lock "gaps" as index page locks do. So we don't need to identify a
2445
- * buffer before making the call.
2462
+ * lock "gaps" as index page locks do. So we don't need to specify a
2463
+ * buffer when making the call, which makes for a faster check .
2446
2464
*/
2447
2465
CheckForSerializableConflictIn (relation , NULL , InvalidBuffer );
2448
2466
@@ -2621,6 +2639,22 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
2621
2639
ndone += nthispage ;
2622
2640
}
2623
2641
2642
+ /*
2643
+ * We're done with the actual inserts. Check for conflicts again, to
2644
+ * ensure that all rw-conflicts in to these inserts are detected. Without
2645
+ * this final check, a sequential scan of the heap may have locked the
2646
+ * table after the "before" check, missing one opportunity to detect the
2647
+ * conflict, and then scanned the table before the new tuples were there,
2648
+ * missing the other chance to detect the conflict.
2649
+ *
2650
+ * For heap inserts, we only need to check for table-level SSI locks. Our
2651
+ * new tuples can't possibly conflict with existing tuple locks, and heap
2652
+ * page locks are only consolidated versions of tuple locks; they do not
2653
+ * lock "gaps" as index page locks do. So we don't need to specify a
2654
+ * buffer when making the call.
2655
+ */
2656
+ CheckForSerializableConflictIn (relation , NULL , InvalidBuffer );
2657
+
2624
2658
/*
2625
2659
* If tuples are cachable, mark them for invalidation from the caches in
2626
2660
* case we abort. Note it is OK to do this after releasing the buffer,
@@ -2934,6 +2968,11 @@ heap_delete(Relation relation, ItemPointer tid,
2934
2968
/*
2935
2969
* We're about to do the actual delete -- check for conflict first, to
2936
2970
* avoid possibly having to roll back work we've just done.
2971
+ *
2972
+ * This is safe without a recheck as long as there is no possibility of
2973
+ * another process scanning the page between this check and the delete
2974
+ * being visible to the scan (i.e., an exclusive buffer content lock is
2975
+ * continuously held from this point until the tuple delete is visible).
2937
2976
*/
2938
2977
CheckForSerializableConflictIn (relation , & tp , buffer );
2939
2978
@@ -3561,12 +3600,6 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
3561
3600
goto l2 ;
3562
3601
}
3563
3602
3564
- /*
3565
- * We're about to do the actual update -- check for conflict first, to
3566
- * avoid possibly having to roll back work we've just done.
3567
- */
3568
- CheckForSerializableConflictIn (relation , & oldtup , buffer );
3569
-
3570
3603
/* Fill in transaction status data */
3571
3604
3572
3605
/*
@@ -3755,14 +3788,20 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
3755
3788
}
3756
3789
3757
3790
/*
3758
- * We're about to create the new tuple -- check for conflict first, to
3791
+ * We're about to do the actual update -- check for conflict first, to
3759
3792
* avoid possibly having to roll back work we've just done.
3760
3793
*
3761
- * NOTE: For a tuple insert, we only need to check for table locks, since
3762
- * predicate locking at the index level will cover ranges for anything
3763
- * except a table scan. Therefore, only provide the relation.
3794
+ * This is safe without a recheck as long as there is no possibility of
3795
+ * another process scanning the pages between this check and the update
3796
+ * being visible to the scan (i.e., exclusive buffer content lock(s) are
3797
+ * continuously held from this point until the tuple update is visible).
3798
+ *
3799
+ * For the new tuple the only check needed is at the relation level, but
3800
+ * since both tuples are in the same relation and the check for oldtup
3801
+ * will include checking the relation level, there is no benefit to a
3802
+ * separate check for the new tuple.
3764
3803
*/
3765
- CheckForSerializableConflictIn (relation , NULL , InvalidBuffer );
3804
+ CheckForSerializableConflictIn (relation , & oldtup , buffer );
3766
3805
3767
3806
/*
3768
3807
* At this point newbuf and buffer are both pinned and locked, and newbuf
0 commit comments