fix test_set_per_process_memory_fraction failure caused by knock-on effect of test_out_of_memory_retry · pytorch/pytorch@c85cb9c · GitHub
[go: up one dir, main page]

Skip to content

Commit c85cb9c

Browse files
committed
fix test_set_per_process_memory_fraction failure caused by knock-on effect of test_out_of_memory_retry
1 parent c33ce86 commit c85cb9c

File tree

2 files changed

+5
-2
lines changed

2 files changed

+5
-2
lines changed

c10/cuda/CUDACachingAllocator.cpp

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -91,8 +91,6 @@ namespace Native {
9191
* notifyCaptureDestroy.
9292
*/
9393

94-
namespace {
95-
9694
constexpr size_t kMinBlockSize =
9795
512; // all sizes are rounded to at least 512 bytes
9896
constexpr size_t kSmallSize = 1048576; // largest "small" allocation is 1 MiB

test/test_cuda.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -378,6 +378,7 @@ def test_out_of_memory(self):
378378
self.assertTrue((tensor == 1).all())
379379

380380
def test_out_of_memory_retry(self):
381+
torch.cuda.empty_cache()
381382
total_memory = torch.cuda.get_device_properties(0).total_memory
382383
oom_regex = "would exceed allowed memory" if TEST_CUDAMALLOCASYNC else \
383384
"Tried to allocate"
@@ -387,6 +388,10 @@ def test_out_of_memory_retry(self):
387388
b = torch.empty(size, dtype=torch.int8, device='cuda')
388389
del a
389390
b = torch.empty(size, dtype=torch.int8, device='cuda')
391+
del b
392+
# We used a lot of memory here, clean up so we don't affect other tests too much
393+
torch.cuda.empty_cache()
394+
torch.cuda.reset_peak_memory_stats()
390395

391396
def test_set_per_process_memory_fraction(self):
392397
# test invalid fraction value.

0 commit comments

Comments (0)