@@ -20963,12 +20963,14 @@ func DivNoNan(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
// scattered onto an existing tensor (as opposed to a zero-tensor). If the memory
// for the existing tensor cannot be re-used, a copy is made and updated.
//
- // If `indices` contains duplicates, then their updates are accumulated (summed) .
+ // If `indices` contains duplicates, then we pick the last update for the index .
//
- // **WARNING**: The order in which updates are applied is nondeterministic, so the
- // output will be nondeterministic if `indices` contains duplicates -- because
- // of some numerical approximation issues, numbers summed in different order
- // may yield different results.
+ // If an out of bound index is found, an error is returned.
+ //
+ // **WARNING**: There are some GPU specific semantics for this operation.
+ // - If an out of bound index is found, the index is ignored.
+ // - The order in which updates are applied is nondeterministic, so the output
+ // will be nondeterministic if `indices` contains duplicates.
//
// `indices` is an integer tensor containing indices into a new tensor of shape
// `shape`. The last dimension of `indices` can be at most the rank of `shape`:
@@ -21028,8 +21030,6 @@ func DivNoNan(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
// [1 1 1 1]
// [1 1 1 1]]]
//
- // Note that on CPU, if an out of bound index is found, an error is returned.
- // On GPU, if an out of bound index is found, the index is ignored.
//
// Arguments:
// tensor: Tensor to copy/update.
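For readers unfamiliar with the generated wrappers, here is a minimal sketch of exercising the scatter semantics documented in this diff from the Go bindings. It assumes the op whose doc comment is being changed is in the tensor-scatter family and is exposed as op.TensorScatterUpdate; the wrapper name and the expected output in the final comment are inferred from the doc text shown above, not from a verified run.

package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()

	// Tensor to copy/update: four ones, echoing the [1 1 1 1] rows in the doc example.
	tensor := op.Const(s.SubScope("tensor"), []float32{1, 1, 1, 1})
	// indices has shape [3, 1]; index 0 appears twice to exercise the duplicate rule.
	indices := op.Const(s.SubScope("indices"), [][]int32{{0}, {0}, {2}})
	updates := op.Const(s.SubScope("updates"), []float32{10, 30, 20})

	// Assumed wrapper for the op documented above: (scope, tensor, indices, updates).
	out := op.TensorScatterUpdate(s, tensor, indices, updates)

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()

	// Per the updated doc comment: on CPU an out-of-bound index would surface as an
	// error from Run; on GPU it would be silently ignored.
	res, err := sess.Run(nil, []tf.Output{out}, nil)
	if err != nil {
		panic(err)
	}
	// With duplicate indices, the last update for index 0 (30) is picked,
	// so the expected value is [30 1 20 1]; on GPU the order is nondeterministic.
	fmt.Println(res[0].Value())
}

The duplicate entry for index 0 is what the changed doc lines are about: the old text promised summation, while the new text picks the last update and moves the out-of-bound behaviour into the GPU-specific warning.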