Update on "eliminate generated_cpu_cpp and generated_cuda_cpp from Bazel" · pytorch/pytorch@a091e25 · GitHub

Commit a091e25

Author: Michael Andreas Dagitses (committed)
Update on "eliminate generated_cpu_cpp and generated_cuda_cpp from Bazel"
These are now redundant with shared variables. Differential Revision: [D36492536](https://our.internmc.facebook.com/intern/diff/D36492536/) [ghstack-poisoned]
2 parents 9aae341 + 6770bcd commit a091e25

94 files changed (+2420, -1313 lines)


.circleci/scripts/binary_linux_test.sh

Lines changed: 8 additions & 9 deletions
@@ -53,9 +53,7 @@ if [[ "\$python_nodot" = *39* ]]; then
   NUMPY_PIN=">=1.20"
 fi

-if [[ "$DESIRED_CUDA" == "cu116" ]]; then
-  EXTRA_CONDA_FLAGS="-c=conda-forge"
-fi
+

 # Move debug wheels out of the the package dir so they don't get installed
 mkdir -p /tmp/debug_final_pkgs
@@ -88,13 +86,14 @@ if [[ "$PACKAGE_TYPE" == conda ]]; then
   if [[ "$DESIRED_CUDA" == 'cpu' ]]; then
     retry conda install -c pytorch -y cpuonly
   else
-    # DESIRED_CUDA is in format cu90 or cu102
-    if [[ "${#DESIRED_CUDA}" == 4 ]]; then
-      cu_ver="${DESIRED_CUDA:2:1}.${DESIRED_CUDA:3}"
-    else
-      cu_ver="${DESIRED_CUDA:2:2}.${DESIRED_CUDA:4}"
+
+    cu_ver="${DESIRED_CUDA:2:2}.${DESIRED_CUDA:4}"
+    CUDA_PACKAGE="cudatoolkit"
+    if [[ "$DESIRED_CUDA" == "cu116" ]]; then
+      CUDA_PACKAGE="cuda"
     fi
-    retry conda install \${EXTRA_CONDA_FLAGS} -yq -c nvidia -c pytorch "cudatoolkit=\${cu_ver}"
+
+    retry conda install \${EXTRA_CONDA_FLAGS} -yq -c nvidia -c pytorch "\${CUDA_PACKAGE}=\${cu_ver}"
   fi
   conda install \${EXTRA_CONDA_FLAGS} -y "\$pkg" --offline
 )
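The second hunk collapses the old two-branch version parsing into a single pattern and switches the conda package name for cu116. A rough, hypothetical Python restatement of that shell logic (the function name conda_cuda_spec is made up for illustration and is not part of the script):

    # Mirrors cu_ver="${DESIRED_CUDA:2:2}.${DESIRED_CUDA:4}" plus the
    # CUDA_PACKAGE switch; assumes the five-character "cuXYZ" form.
    def conda_cuda_spec(desired_cuda: str) -> str:
        cu_ver = f"{desired_cuda[2:4]}.{desired_cuda[4:]}"       # "cu116" -> "11.6"
        package = "cuda" if desired_cuda == "cu116" else "cudatoolkit"
        return f"{package}={cu_ver}"

    assert conda_cuda_spec("cu102") == "cudatoolkit=10.2"
    assert conda_cuda_spec("cu116") == "cuda=11.6"

Note that the new code no longer handles four-character tags like "cu90"; the removed branch that produced "9.0" is gone.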

.github/ci_commit_pins/xla.txt

Lines changed: 1 addition & 1 deletion
@@ -1 +1 @@
-35f759fdd7eb585679df7c1e6db4569b1aba5475
+de45c7c503f403be2c85066013b6a860f04f1152

.github/scripts/test_trymerge.py

Lines changed: 44 additions & 27 deletions
@@ -11,9 +11,16 @@
 import os
 from hashlib import sha256

-from trymerge import find_matching_merge_rule, gh_graphql, gh_get_team_members, GitHubPR, MergeRule, MandatoryChecksMissingError
+from trymerge import (find_matching_merge_rule,
+                      gh_graphql,
+                      gh_get_team_members,
+                      read_merge_rules,
+                      GitHubPR,
+                      MergeRule,
+                      MandatoryChecksMissingError,
+                      main as trymerge_main)
 from gitutils import get_git_remote_name, get_git_repo_dir, GitRepo
-from typing import cast, Any, List, Optional
+from typing import Any, List, Optional
 from unittest import TestCase, main, mock
 from urllib.error import HTTPError

@@ -91,35 +98,48 @@ def mock_gh_get_info() -> Any:
     return {"closed": False, "isCrossRepository": False}


-def mocked_read_merge_rules(repo: Optional[GitRepo], org: str, project: str) -> List[MergeRule]:
-    mock_merge_rules = """
-    [
-        {
-            "name": "mock with nonexistent check",
-            "patterns": ["*"],
-            "approved_by": [],
-            "mandatory_checks_name": [
-                "Facebook CLA Check",
-                "Lint",
-                "nonexistent"
-            ]
-        }
+def mocked_read_merge_rules_NE(repo: Any, org: str, project: str) -> List[MergeRule]:
+    return [
+        MergeRule(name="mock with nonexistent check",
+                  patterns=["*"],
+                  approved_by=[],
+                  mandatory_checks_name=["Lint",
+                                         "Facebook CLA Check",
+                                         "nonexistent"],
+                  ),
+    ]
+
+
+def mocked_read_merge_rules(repo: Any, org: str, project: str) -> List[MergeRule]:
+    return [
+        MergeRule(name="super",
+                  patterns=["*"],
+                  approved_by=["pytorch/metamates"],
+                  mandatory_checks_name=["Lint",
+                                         "Facebook CLA Check",
+                                         "linux-xenial-cuda11.3-py3.7-gcc7 / build",
+                                         ],
+                  ),
     ]
-    """
-    rc = json.loads(mock_merge_rules, object_hook=lambda x: MergeRule(**x))
-    return cast(List[MergeRule], rc)


 class TestGitHubPR(TestCase):
+    def test_merge_rules_valid(self) -> None:
+        "Test that merge_rules.json can be parsed"
+        repo = GitRepo(get_git_repo_dir(), get_git_repo_dir())
+        self.assertGreater(len(read_merge_rules(repo, "pytorch", "pytorch")), 1)
+
     @mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
-    def test_match_rules(self, mocked_gql: Any) -> None:
+    @mock.patch('trymerge.read_merge_rules', side_effect=mocked_read_merge_rules)
+    def test_match_rules(self, mocked_gql: Any, mocked_rmr: Any) -> None:
         "Tests that PR passes merge rules"
         pr = GitHubPR("pytorch", "pytorch", 77700)
         repo = GitRepo(get_git_repo_dir(), get_git_remote_name())
         self.assertTrue(find_matching_merge_rule(pr, repo) is not None)

     @mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
-    def test_lint_fails(self, mocked_gql: Any) -> None:
+    @mock.patch('trymerge.read_merge_rules', side_effect=mocked_read_merge_rules)
+    def test_lint_fails(self, mocked_gql: Any, mocked_rmr: Any) -> None:
         "Tests that PR fails mandatory lint check"
         pr = GitHubPR("pytorch", "pytorch", 74649)
         repo = GitRepo(get_git_repo_dir(), get_git_remote_name())
@@ -206,7 +226,7 @@ def test_get_author_many_commits(self, mocked_gql: Any) -> None:
         self.assertGreater(len(authors), 50)
         self.assertTrue("@" in pr.get_author())

-    @mock.patch('trymerge.read_merge_rules', side_effect=mocked_read_merge_rules)
+    @mock.patch('trymerge.read_merge_rules', side_effect=mocked_read_merge_rules_NE)
     @mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
     def test_pending_status_check(self, mocked_gql: Any, mocked_read_merge_rules: Any) -> None:
         """ Tests that PR with nonexistent/pending status checks fails with the right reason.
@@ -239,16 +259,14 @@ def test_get_checkruns_many_runs(self, mocked_gql: Any) -> None:
     @mock.patch('trymerge.parse_args', return_value=mock_parse_args(True, False))
     @mock.patch('trymerge.try_revert', side_effect=mock_revert)
     def test_main_revert(self, mock_revert: Any, mock_parse_args: Any, gh_get_pr_info: Any) -> None:
-        import trymerge
-        trymerge.main()
+        trymerge_main()
         mock_revert.assert_called_once()

     @mock.patch('trymerge.gh_get_pr_info', return_value=mock_gh_get_info())
     @mock.patch('trymerge.parse_args', return_value=mock_parse_args(False, True))
     @mock.patch('trymerge.merge', side_effect=mock_merge)
     def test_main_force(self, mock_merge: Any, mock_parse_args: Any, mock_gh_get_info: Any) -> None:
-        import trymerge
-        trymerge.main()
+        trymerge_main()
         mock_merge.assert_called_once_with(mock.ANY,
                                            mock.ANY,
                                            dry_run=mock.ANY,
@@ -261,8 +279,7 @@ def test_main_force(self, mock_merge: Any, mock_parse_args: Any, mock_gh_get_info: Any) -> None:
     @mock.patch('trymerge.parse_args', return_value=mock_parse_args(False, False))
     @mock.patch('trymerge.merge', side_effect=mock_merge)
     def test_main_merge(self, mock_merge: Any, mock_parse_args: Any, mock_gh_get_info: Any) -> None:
-        import trymerge
-        trymerge.main()
+        trymerge_main()
         mock_merge.assert_called_once_with(mock.ANY,
                                            mock.ANY,
                                            dry_run=mock.ANY,
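The rewritten mocks construct MergeRule objects directly instead of round-tripping through a JSON string, and the tests now patch trymerge.read_merge_rules so they no longer depend on the repository's real merge_rules.json. A minimal, self-contained sketch of that mock.patch(side_effect=...) pattern, using made-up names (read_rules, consume_rules) rather than the real trymerge API:

    from typing import Any, List
    from unittest import TestCase, main, mock

    def read_rules(org: str, project: str) -> List[str]:
        raise RuntimeError("would hit the network in a real run")

    def consume_rules(org: str, project: str) -> int:
        # The code under test resolves read_rules at call time, so patching the
        # module attribute below redirects this call to the mock.
        return len(read_rules(org, project))

    class TestMockPattern(TestCase):
        @mock.patch('__main__.read_rules', side_effect=lambda org, project: ["Lint", "CLA"])
        def test_consume_rules(self, mocked_read: Any) -> None:
            self.assertEqual(consume_rules("pytorch", "pytorch"), 2)
            mocked_read.assert_called_once()

    if __name__ == "__main__":
        main()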

.github/workflows/pull.yml

Lines changed: 1 addition & 0 deletions
@@ -209,6 +209,7 @@ jobs:
     docker-image-name: xla_base

   pytorch-xla-linux-bionic-py3_7-clang8-test:
+    if: ${{ false }}
     name: pytorch-xla-linux-bionic-py3.7-clang8
     uses: ./.github/workflows/_linux-test.yml
     needs: pytorch-xla-linux-bionic-py3_7-clang8-build

README.md

Lines changed: 4 additions & 4 deletions
@@ -48,10 +48,10 @@ At a granular level, PyTorch is a library that consists of the following components

 | Component | Description |
 | ---- | --- |
-| [**torch**](https://pytorch.org/docs/stable/torch.html) | a Tensor library like NumPy, with strong GPU support |
-| [**torch.autograd**](https://pytorch.org/docs/stable/autograd.html) | a tape-based automatic differentiation library that supports all differentiable Tensor operations in torch |
-| [**torch.jit**](https://pytorch.org/docs/stable/jit.html) | a compilation stack (TorchScript) to create serializable and optimizable models from PyTorch code |
-| [**torch.nn**](https://pytorch.org/docs/stable/nn.html) | a neural networks library deeply integrated with autograd designed for maximum flexibility |
+| [**torch**](https://pytorch.org/docs/stable/torch.html) | A Tensor library like NumPy, with strong GPU support |
+| [**torch.autograd**](https://pytorch.org/docs/stable/autograd.html) | A tape-based automatic differentiation library that supports all differentiable Tensor operations in torch |
+| [**torch.jit**](https://pytorch.org/docs/stable/jit.html) | A compilation stack (TorchScript) to create serializable and optimizable models from PyTorch code |
+| [**torch.nn**](https://pytorch.org/docs/stable/nn.html) | A neural networks library deeply integrated with autograd designed for maximum flexibility |
 | [**torch.multiprocessing**](https://pytorch.org/docs/stable/multiprocessing.html) | Python multiprocessing, but with magical memory sharing of torch Tensors across processes. Useful for data loading and Hogwild training |
 | [**torch.utils**](https://pytorch.org/docs/stable/data.html) | DataLoader and other utility functions for convenience |

aten/src/ATen/BatchingRegistrations.cpp

Lines changed: 10 additions & 13 deletions
@@ -56,21 +56,18 @@ static bool is_allowed_dim_on_scalar_tensor(int64_t dim) {
   return dim == 0 || dim == -1;
 }

-Tensor sum_batching_rule(const Tensor& self, OptionalIntArrayRef opt_dims, bool keepdim, optional<ScalarType> dtype) {
-  if (opt_dims.has_value()) {
-    auto dims = opt_dims.value();
-    // PyTorch has a special case where sum(scalar_tensor, dim=0) does not fail
-    // and instead returns a new scalar tensor (this also happens for dim=-1)
-    // If the following happens:
-    // >>> x = torch.randn(B0)  # the per-examples are all scalars
-    // >>> vmap(partial(torch.sum, dim=0), x)
-    // then we replicate the behavior of sum(scalar_tensor, dim=0).
-    if (/*logical*/self.dim() == 0 && (dims.size() == 0 || (dims.size() == 1 && is_allowed_dim_on_scalar_tensor(dims[0])))) {
-      return self.clone();
-    }
+Tensor sum_batching_rule(const Tensor& self, IntArrayRef dims, bool keepdim, optional<ScalarType> dtype) {
+  // PyTorch has a special case where sum(scalar_tensor, dim=0) does not fail
+  // and instead returns a new scalar tensor (this also happens for dim=-1)
+  // If the following happens:
+  // >>> x = torch.randn(B0)  # the per-examples are all scalars
+  // >>> vmap(partial(torch.sum, dim=0), x)
+  // then we replicate the behavior of sum(scalar_tensor, dim=0).
+  if (/*logical*/self.dim() == 0 && (dims.size() == 0 || (dims.size() == 1 && is_allowed_dim_on_scalar_tensor(dims[0])))) {
+    return self.clone();
   }
   auto self_physical = MultiBatchVmapTransform::logicalToPhysical(self);
-  auto dims_physical = self_physical.getPhysicalDims(opt_dims);
+  auto dims_physical = self_physical.getPhysicalDims(dims);
   auto result = at::sum(self_physical.tensor(), dims_physical, keepdim, dtype);
   return self_physical.getPhysicalToLogicalMap().apply(result);
 }
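The removal of the optional-dims branch leaves only the scalar-tensor special case described in the comment. A small Python illustration of that behavior (the vmap call is shown only as a comment, since the exact vmap entry point varies by release):

    import torch

    scalar = torch.tensor(3.0)        # 0-d "scalar" tensor
    print(torch.sum(scalar, dim=0))   # tensor(3.) -- dim=0 is tolerated on a 0-d tensor
    print(torch.sum(scalar, dim=-1))  # tensor(3.) -- same for dim=-1

    # Under vmap, each per-example slice of x = torch.randn(B0) is such a scalar,
    # so sum_batching_rule clones the input to replicate sum(scalar_tensor, dim=0).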

aten/src/ATen/MemoryOverlap.cpp

Lines changed: 12 additions & 12 deletions
@@ -13,26 +13,26 @@ MemOverlap has_internal_overlap(TensorImpl* t) {
   TORCH_INTERNAL_ASSERT_DEBUG_ONLY(t->layout() == kStrided);

   if (t->is_non_overlapping_and_dense()) {
-    return MemOverlap::NO;
+    return MemOverlap::No;
   }

   auto strides = t->strides();
   auto sizes = t->sizes();
   for (const auto i : c10::irange(strides.size())) {
     if (strides[i] == 0 && sizes[i] > 1) {
-      return MemOverlap::YES;
+      return MemOverlap::Yes;
     }
   }

-  return MemOverlap::TOO_HARD;
+  return MemOverlap::TooHard;
 }

 void assert_no_internal_overlap(const TensorBase& t) {
   assert_no_internal_overlap(t.unsafeGetTensorImpl());
 }

 void assert_no_internal_overlap(TensorImpl* t) {
-  TORCH_CHECK(has_internal_overlap(t) != MemOverlap::YES,
+  TORCH_CHECK(has_internal_overlap(t) != MemOverlap::Yes,
     "unsupported operation: more than one element of the written-to tensor "
     "refers to a single memory location. Please clone() the tensor before "
     "performing the operation.");
@@ -43,12 +43,12 @@ MemOverlapStatus get_overlap_status(const TensorBase& a, const TensorBase& b) {
 }

 MemOverlapStatus get_overlap_status(TensorImpl* a, TensorImpl* b) {
-  if (a == b) return MemOverlapStatus::FULL;
+  if (a == b) return MemOverlapStatus::Full;
   if (a->numel() == 0 || b->numel() == 0) {
-    return MemOverlapStatus::NO;
+    return MemOverlapStatus::No;
   }
   if (!a->is_non_overlapping_and_dense() || !b->is_non_overlapping_and_dense()) {
-    return MemOverlapStatus::TOO_HARD;
+    return MemOverlapStatus::TooHard;
   }
   // Test for storage equality, rather than pointer equality.
   // This reduces precision, but if people are aliasing the
@@ -64,21 +64,21 @@ MemOverlapStatus get_overlap_status(TensorImpl* a, TensorImpl* b) {

   if (a_begin == b_begin && a_end == b_end) {
     return (a->strides() == b->strides()) ?
-        MemOverlapStatus::FULL : MemOverlapStatus::PARTIAL;
+        MemOverlapStatus::Full : MemOverlapStatus::Partial;
   }
   if (a_begin < b_end && b_begin < a_end) {
-      return MemOverlapStatus::PARTIAL;
+      return MemOverlapStatus::Partial;
   }
 }
-  return MemOverlapStatus::NO;
+  return MemOverlapStatus::No;
 }

 void assert_no_partial_overlap(const TensorBase& a, const TensorBase& b) {
   assert_no_partial_overlap(a.unsafeGetTensorImpl(), b.unsafeGetTensorImpl());
 }

 void assert_no_partial_overlap(TensorImpl* a, TensorImpl* b) {
-  TORCH_CHECK(get_overlap_status(a, b) != MemOverlapStatus::PARTIAL,
+  TORCH_CHECK(get_overlap_status(a, b) != MemOverlapStatus::Partial,
     "unsupported operation: some elements of the input tensor and "
     "the written-to tensor refer to a single memory location. "
     "Please clone() the tensor before performing the operation.");
@@ -90,7 +90,7 @@ void assert_no_overlap(const TensorBase& a, const TensorBase& b) {

 void assert_no_overlap(TensorImpl* a, TensorImpl* b) {
   const auto lap = get_overlap_status(a, b);
-  TORCH_CHECK(lap != MemOverlapStatus::PARTIAL && lap != MemOverlapStatus::FULL,
+  TORCH_CHECK(lap != MemOverlapStatus::Partial && lap != MemOverlapStatus::Full,
     "unsupported operation: some elements of the input tensor and "
     "the written-to tensor refer to a single memory location. "
     "Please clone() the tensor before performing the operation.");

aten/src/ATen/MemoryOverlap.h

Lines changed: 5 additions & 5 deletions
@@ -11,14 +11,14 @@ class TensorBase;

 // MemOverlap: Whether or not there is memory overlap
 //
-// NO: Absolutely no memory overlap
-// YES: Absolutely yes memory overlap
-// TOO_HARD: There might be memory overlap, but it was too expensive to compute.
+// No: Absolutely no memory overlap
+// Yes: Absolutely yes memory overlap
+// TooHard: There might be memory overlap, but it was too expensive to compute.
 //
 // NB: Please update the python test for these if you renumber them.
-enum class MemOverlap { NO, YES, TOO_HARD };
+enum class MemOverlap { No, Yes, TooHard };

-enum class MemOverlapStatus { FULL, PARTIAL, NO, TOO_HARD };
+enum class MemOverlapStatus { Full, Partial, No, TooHard };

 TORCH_API MemOverlap has_internal_overlap(const TensorBase& t);
 TORCH_API MemOverlap has_internal_overlap(c10::TensorImpl* t);

aten/src/ATen/NestedTensorImpl.cpp

Lines changed: 0 additions & 3 deletions
@@ -54,9 +54,6 @@ NestedTensorImpl::NestedTensorImpl(
   TORCH_INTERNAL_ASSERT(nested_size_tensor_.is_contiguous());
   int64_t size_dim = nested_size_tensor_.dim();
   TORCH_INTERNAL_ASSERT(size_dim == 0 || size_dim == 2);
-  remove_autograd_key();
-  key_set_ =
-      key_set_ - c10::DispatchKeySet({c10::DispatchKey::ADInplaceOrView});
   refresh_dim();
   set_sizes_strides_policy(c10::TensorImpl::SizesStridesPolicy::CustomSizes);
 }

aten/src/ATen/VmapTransforms.cpp

Lines changed: 3 additions & 10 deletions
@@ -55,20 +55,13 @@ int64_t VmapPhysicalView::numLogicalDims() const {
   return /*physical*/tensor_.dim() - numBatchDims();
 }

-VmapDimVector VmapPhysicalView::getPhysicalDims(OptionalIntArrayRef opt_logical_dims) const {
+VmapDimVector VmapPhysicalView::getPhysicalDims(IntArrayRef logical_dims) const {
   auto logical_ndim = numLogicalDims();
   // NB: fmap doesn't have a SmallVector variant, so we don't use it here.
   VmapDimVector result;
   result.reserve(logical_ndim);
-  if (opt_logical_dims.has_value()) {
-    auto logical_dims = opt_logical_dims.value();
-    for (auto dim : logical_dims) {
-      result.push_back(maybe_wrap_dim(dim, logical_ndim) + numBatchDims());
-    }
-  } else {
-    for (int64_t dim = 0; dim < logical_ndim; dim++) {
-      result.push_back(dim + numBatchDims());
-    }
+  for (auto dim : logical_dims) {
+    result.push_back(maybe_wrap_dim(dim, logical_ndim) + numBatchDims());
   }
   return result;
 }
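getPhysicalDims now always receives the dims explicitly and maps each logical dim past the batch dims. A hypothetical Python restatement of that index arithmetic (names here are illustrative, not the real API):

    def get_physical_dims(logical_dims, logical_ndim, num_batch_dims):
        # mirrors maybe_wrap_dim(dim, logical_ndim) + numBatchDims()
        def wrap(d):
            return d + logical_ndim if d < 0 else d
        return [wrap(d) + num_batch_dims for d in logical_dims]

    # With one batch dim in front, logical dims (0, -1) of a 2-d per-example
    # tensor map to physical dims (1, 2) of the 3-d batched tensor.
    assert get_physical_dims([0, -1], logical_ndim=2, num_batch_dims=1) == [1, 2]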

0 commit comments

Comments
 (0)
0