[Accelerator] Fix Python typing in accelerator by cyyever · Pull Request #152394 · pytorch/pytorch · GitHub
[go: up one dir, main page]

Skip to content

[Accelerator] Fix Python typing in accelerator #152394

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 8 commits into from
Closed
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 6 additions & 7 deletions torch/accelerator/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
This package introduces support for the current :ref:`accelerator<accelerators>` in python.
"""

from typing import Literal, Optional
from typing import Optional
from typing_extensions import deprecated

import torch
Expand Down Expand Up @@ -33,7 +33,7 @@ def device_count() -> int:
If there is no available accelerators, return 0.

.. note:: This API delegates to the device-specific version of `device_count`.
On CUDA, this API will NOT posion fork if NVML discovery succeeds.
On CUDA, this API will NOT poison fork if NVML discovery succeeds.
Otherwise, it will. For more details, see :ref:`multiprocessing-poison-fork-note`.
"""
acc = current_accelerator()
Expand Down Expand Up @@ -129,7 +129,7 @@ def set_device_index(device: _device_t, /) -> None:

.. note:: This function is a no-op if this device index is negative.
"""
device_index = _get_device_index(device)
device_index = _get_device_index(device, optional=False)
torch._C._accelerator_setDeviceIndex(device_index)


Expand All @@ -150,7 +150,7 @@ def current_stream(device: _device_t = None, /) -> torch.Stream:
Returns:
torch.Stream: the currently selected stream for a given device.
"""
device_index = _get_device_index(device, True)
device_index = _get_device_index(device, optional=True)
return torch._C._accelerator_getStream(device_index)


Expand Down Expand Up @@ -188,7 +188,7 @@ def synchronize(device: _device_t = None, /) -> None:
>>> torch.accelerator.synchronize()
>>> elapsed_time_ms = start_event.elapsed_time(end_event)
"""
device_index = _get_device_index(device, True)
device_index = _get_device_index(device, optional=True)
torch._C._accelerator_synchronizeDevice(device_index)


Expand Down Expand Up @@ -224,7 +224,6 @@ def __enter__(self) -> None:
if self.idx is not None:
self.prev_idx = torch._C._accelerator_exchangeDevice(self.idx)

def __exit__(self, *args: object) -> Literal[False]:
def __exit__(self, *exc_info: object) -> None:
if self.idx is not None:
torch._C._accelerator_maybeExchangeDevice(self.prev_idx)
return False
Loading
0