8000 Enable clang-tidy warnings on aten/src/ATen/cuda/*.cpp (#134547) · pytorch/pytorch@a38e4c5 · GitHub
[go: up one dir, main page]

Skip to content

Commit a38e4c5

Browse files
cyyever authored and pytorchmergebot committed
Enable clang-tidy warnings on aten/src/ATen/cuda/*.cpp (#134547)
Fixes #ISSUE_NUMBER Pull Request resolved: #134547 Approved by: https://github.com/ezyang
1 parent f276da7 commit a38e4c5

File tree

3 files changed

+6
-6
lines changed

3 files changed

+6
-6
lines changed

.lintrunner.toml

+1-1
Original file line numberDiff line numberDiff line change
@@ -195,6 +195,7 @@ include_patterns = [
195195
# and excluding most sub-directories for now.
196196
'aten/src/ATen/*.h',
197197
'aten/src/ATen/*.cpp',
198+
'aten/src/ATen/cuda/*.cpp',
198199
'aten/src/ATen/cpu/*.h',
199200
'aten/src/ATen/cpu/*.cpp',
200201
'aten/src/ATen/core/*.h',
@@ -224,7 +225,6 @@ exclude_patterns = [
224225
# CUDA files are also excluded.
225226
'**/fb/**',
226227
'**/*pb.h',
227-
'aten/**/cuda/*pp',
228228
'c10/xpu/**/*.h',
229229
'c10/xpu/**/*.cpp',
230230
'c10/cuda/CUDAAlgorithm.h',

aten/src/ATen/cuda/CUDAGraph.cpp

+4-4
Original file line numberDiff line numberDiff line change
@@ -160,7 +160,7 @@ void CUDAGraph::capture_end() {
160160

161161
c10::cuda::CUDACachingAllocator::endAllocateToPool(capture_dev_, mempool_id_);
162162

163-
TORCH_CHECK(graph_ != NULL, "Invalid capture.");
163+
TORCH_CHECK(graph_ != nullptr, "Invalid capture.");
164164
has_graph_ = true;
165165

166166
// In typical graph usage some tensors (e.g. the tensors used for graph IO) are not freed
@@ -175,7 +175,7 @@ void CUDAGraph::capture_end() {
175175
// cudaGraphInstantiateWithFlags
176176
// https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__GRAPH.html#group__CUDART__GRAPH_1ga2c652a24ba93e52b99a47bec0888233
177177
#if (defined(CUDA_VERSION) && CUDA_VERSION >= 11040)
178-
int version;
178+
int version = 0;
179179
AT_CUDA_CHECK(cudaDriverGetVersion(&version));
180180
if (version < 11040) {
181181
#endif
@@ -203,7 +203,7 @@ void CUDAGraph::capture_end() {
203203
}
204204

205205
size_t numCUDAGraphNodes = 0;
206-
AT_CUDA_CHECK(cudaGraphGetNodes(graph_, NULL, &numCUDAGraphNodes));
206+
AT_CUDA_CHECK(cudaGraphGetNodes(graph_, nullptr, &numCUDAGraphNodes));
207207
if (numCUDAGraphNodes == 0) {
208208
TORCH_WARN("The CUDA Graph is empty. This usually means that the graph was ",
209209
"attempted to be captured on wrong device or stream.");
@@ -233,7 +233,7 @@ void CUDAGraph::replay() {
233233
// graph_exec_ may be replayed in any stream.
234234
AT_CUDA_CHECK(cudaGraphLaunch(graph_exec_, at::cuda::getCurrentCUDAStream()));
235235

236-
int version;
236+
int version = 0;
237237
AT_CUDA_CHECK(cudaDriverGetVersion(&version));
238238
if (version < 11040) {
239239
// Workaround for bug in libcuda.so that causes replayed graphs with

aten/src/ATen/cuda/CUDAGraph.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -82,7 +82,7 @@ struct TORCH_CUDA_CPP_API CUDAGraph {
8282
// in a capture to run on the same device, but this is a limitation of CUDAGraph,
8383
// not CUDA itself. We can straightforwardly modify CUDAGraph to support multi-device
8484
// captures if needed.
85-
int capture_dev_;
85+
int capture_dev_{};
8686
};
8787

8888
} // namespace cuda

0 commit comments

Comments
 (0)
0