@@ -42,9 +42,9 @@ private DefaultRetryPolicy() {
     }
 
     /**
-     * Defines whether to retry and at which consistency level on a read timeout.
+     * {@inheritDoc}
      * <p/>
-     * This method triggers a maximum of one retry, and only if enough
+     * This implementation triggers a maximum of one retry, and only if enough
      * replicas had responded to the read request but data was not retrieved
      * amongst those. Indeed, that case usually means that enough replica
      * are alive to satisfy the consistency but the coordinator picked a
@@ -53,15 +53,6 @@ private DefaultRetryPolicy() {
      * timeout the dead replica will likely have been detected as dead and
      * the retry has a high chance of success.
      *
-     * @param statement the original query that timed out.
-     * @param cl the original consistency level of the read that timed out.
-     * @param requiredResponses the number of responses that were required to
-     * achieve the requested consistency level.
-     * @param receivedResponses the number of responses that had been received
-     * by the time the timeout exception was raised.
-     * @param dataRetrieved whether actual data (by opposition to data checksum)
-     * was present in the received responses.
-     * @param nbRetry the number of retries already performed for this operation.
      * @return {@code RetryDecision.retry(cl)} if no retry attempt has yet been tried and
      * {@code receivedResponses >= requiredResponses && !dataRetrieved}, {@code RetryDecision.rethrow()} otherwise.
      */
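For context: the retained @return clause above fully specifies the read-timeout rule. Below is a minimal sketch of a conforming method body, reconstructed from that contract alone; the body itself is not part of this diff, and the signature is taken from the next hunk header.

    @Override
    public RetryDecision onReadTimeout(Statement statement, ConsistencyLevel cl, int requiredResponses, int receivedResponses, boolean dataRetrieved, int nbRetry) {
        // Maximum of one retry: a second read timeout is propagated to the caller.
        if (nbRetry != 0)
            return RetryDecision.rethrow();
        // Enough replicas answered but none returned actual data: a retry will
        // likely reach a replica that can serve the data, so retry at the same CL.
        return receivedResponses >= requiredResponses && !dataRetrieved
                ? RetryDecision.retry(cl)
                : RetryDecision.rethrow();
    }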
@@ -74,9 +65,9 @@ public RetryDecision onReadTimeout(Statement statement, ConsistencyLevel cl, int
     }
 
     /**
-     * Defines whether to retry and at which consistency level on a write timeout.
+     * {@inheritDoc}
      * <p/>
-     * This method triggers a maximum of one retry, and only in the case of
+     * This implementation triggers a maximum of one retry, and only in the case of
      * a {@code WriteType.BATCH_LOG} write. The reasoning for the retry in
      * that case is that write to the distributed batch log is tried by the
      * coordinator of the write against a small subset of all the nodes alive
@@ -86,14 +77,6 @@ public RetryDecision onReadTimeout(Statement statement, ConsistencyLevel cl, int
      * nodes will likely have been detected as dead and the retry has thus a
      * high chance of success.
      *
-     * @param statement the original query that timed out.
-     * @param cl the original consistency level of the write that timed out.
-     * @param writeType the type of the write that timed out.
-     * @param requiredAcks the number of acknowledgments that were required to
-     * achieve the requested consistency level.
-     * @param receivedAcks the number of acknowledgments that had been received
-     * by the time the timeout exception was raised.
-     * @param nbRetry the number of retry already performed for this operation.
      * @return {@code RetryDecision.retry(cl)} if no retry attempt has yet been tried and
      * {@code writeType == WriteType.BATCH_LOG}, {@code RetryDecision.rethrow()} otherwise.
      */
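For context: combined with the two return statements visible in the next hunk, the write-timeout contract above reduces to a method of this shape. This is a sketch; the nbRetry guard is implied by the "maximum of one retry" wording rather than shown in the diff.

    @Override
    public RetryDecision onWriteTimeout(Statement statement, ConsistencyLevel cl, WriteType writeType, int requiredAcks, int receivedAcks, int nbRetry) {
        // Maximum of one retry: give up on the second write timeout.
        if (nbRetry != 0)
            return RetryDecision.rethrow();
        // Only batch-log writes are retried; the coordinator may simply have been
        // unlucky when picking batch-log candidates the first time around.
        return writeType == WriteType.BATCH_LOG ? RetryDecision.retry(cl) : RetryDecision.rethrow();
    }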
@@ -103,37 +86,27 @@ public RetryDecision onWriteTimeout(Statement statement, ConsistencyLevel cl, Wr
             return RetryDecision.rethrow();
 
         // If the batch log write failed, retry the operation as this might just be we were unlucky at picking candidates
+        // JAVA-764: testing the write type automatically filters out serial consistency levels, as these always have WriteType.CAS.
         return writeType == WriteType.BATCH_LOG ? RetryDecision.retry(cl) : RetryDecision.rethrow();
     }
 
     /**
-     * Defines whether to retry and at which consistency level on an
-     * unavailable exception.
+     * {@inheritDoc}
      * <p/>
-     * This method triggers a retry iff no retry has been executed before
-     * (nbRetry == 0), with
-     * {@link RetryPolicy.RetryDecision#tryNextHost(ConsistencyLevel) RetryDecision.tryNextHost(cl)},
-     * otherwise it throws an exception. The retry will be processed on the next host
-     * in the query plan according to the current Load Balancing Policy.
-     * Where retrying on the same host in the event of an Unavailable exception
-     * has almost no chance of success, if the first replica tried happens to
-     * be "network" isolated from all the other nodes but can still answer to
-     * the client, it makes sense to retry the query on another node.
-     *
-     * @param statement the original query for which the consistency level cannot
-     * be achieved.
-     * @param cl the original consistency level for the operation.
-     * @param requiredReplica the number of replica that should have been
-     * (known) alive for the operation to be attempted.
-     * @param aliveReplica the number of replica that were know to be alive by
-     * the coordinator of the operation.
-     * @param nbRetry the number of retry already performed for this operation.
-     * @return {@code RetryDecision.rethrow()}.
+     * This implementation does the following:
+     * <ul>
+     * <li>if this is the first retry ({@code nbRetry == 0}), it triggers a retry on the next host in the query plan
+     * with the same consistency level ({@link RetryPolicy.RetryDecision#tryNextHost(ConsistencyLevel) RetryDecision.tryNextHost(null)}).
+     * The rationale is that the first coordinator might have been network-isolated from all other nodes (thinking
+     * they're down), but still able to communicate with the client; in that case, retrying on the same host has almost
+     * no chance of success, but moving to the next host might solve the issue.</li>
+     * <li>otherwise, the exception is rethrown.</li>
+     * </ul>
      */
     @Override
     public RetryDecision onUnavailable(Statement statement, ConsistencyLevel cl, int requiredReplica, int aliveReplica, int nbRetry) {
         return (nbRetry == 0)
-            ? RetryDecision.tryNextHost(cl)
+            ? RetryDecision.tryNextHost(null)
             : RetryDecision.rethrow();
     }
 
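Note that tryNextHost(null) keeps the statement's current consistency level instead of forcing one, which is what the new Javadoc means by "the same consistency level". To observe these decisions in practice, the policy can be wrapped in a LoggingRetryPolicy when building the Cluster; a usage sketch, with a placeholder contact point:

    import com.datastax.driver.core.Cluster;
    import com.datastax.driver.core.policies.DefaultRetryPolicy;
    import com.datastax.driver.core.policies.LoggingRetryPolicy;

    // Install the default policy; the logging wrapper logs every retry decision,
    // making the tryNextHost(null) behavior above easy to observe.
    Cluster cluster = Cluster.builder()
            .addContactPoint("127.0.0.1") // placeholder address
            .withRetryPolicy(new LoggingRetryPolicy(DefaultRetryPolicy.INSTANCE))
            .build();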