Fix max_width computation in _tensor_str._Formatter (#126859) · pytorch/pytorch@1465757 · GitHub

Commit 1465757

raphaelreme authored and pytorchmergebot committed
Fix max_width computation in _tensor_str._Formatter (#126859)
The previous version of `torch._tensor_str._Formatter` did not use `PRINT_OPTS.sci_mode` when computing `max_width`, but did use it when formatting the values, which led to a discrepancy between the two. The code now first decides whether it should be in sci mode and only then computes `max_width`.

Here is an example to test the behavior:

```python
A = torch.tensor([10, 1e-1, 1e-2])
B = torch.tensor([10, 1e-1, 1e-1])

print("================= Default =================")
print(A, f"Formatter max_width: {torch._tensor_str._Formatter(A).max_width}")
print(B, f"Formatter max_width: {torch._tensor_str._Formatter(B).max_width}")

print("================= sci_mode=False =================")
with torch._tensor_str.printoptions(sci_mode=False):
    print(A, f"Formatter max_width: {torch._tensor_str._Formatter(A).max_width}")
    print(B, f"Formatter max_width: {torch._tensor_str._Formatter(B).max_width}")

print("================= sci_mode=True =================")
with torch._tensor_str.printoptions(sci_mode=True):
    print(A, f"Formatter max_width: {torch._tensor_str._Formatter(A).max_width}")
    print(B, f"Formatter max_width: {torch._tensor_str._Formatter(B).max_width}")
```

In the current version this prints:

```
================= Default =================
tensor([1.0000e+01, 1.0000e-01, 1.0000e-02]) Formatter max_width: 10
tensor([10.0000, 0.1000, 0.1000]) Formatter max_width: 7
================= sci_mode=False =================
tensor([ 10.0000, 0.1000, 0.0100]) Formatter max_width: 10
tensor([10.0000, 0.1000, 0.1000]) Formatter max_width: 7
================= sci_mode=True =================
tensor([1.0000e+01, 1.0000e-01, 1.0000e-02]) Formatter max_width: 10
tensor([1.0000e+01, 1.0000e-01, 1.0000e-01]) Formatter max_width: 7
```

One can see that with `sci_mode=False`, the values of A are padded to an unnecessarily wide column and A does not get the same `max_width` as B (it keeps the `max_width` computed for `sci_mode=None`). With `sci_mode=True`, the `max_width` for B is 7 even though each value takes 10 characters. (This is mostly harmless, since the code that uses `max_width` does not rely on it heavily, but it is still misleading.)

After this commit, the same example prints:

```
================= Default =================
tensor([1.0000e+01, 1.0000e-01, 1.0000e-02]) Formatter max_width: 10
tensor([10.0000, 0.1000, 0.1000]) Formatter max_width: 7
================= sci_mode=False =================
tensor([10.0000, 0.1000, 0.0100]) Formatter max_width: 7
tensor([10.0000, 0.1000, 0.1000]) Formatter max_width: 7
================= sci_mode=True =================
tensor([1.0000e+01, 1.0000e-01, 1.0000e-02]) Formatter max_width: 10
tensor([1.0000e+01, 1.0000e-01, 1.0000e-01]) Formatter max_width: 10
```

This also aligns A with B for `sci_mode=False`.

Pull Request resolved: #126859
Approved by: https://github.com/malfet
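The core of the change can be summarized as: decide `sci_mode` once, up front, and then compute `max_width` with the formatting that will actually be used. Below is a minimal standalone sketch of that ordering; the helper name `decide_sci_mode_and_width` and the simplified selection of nonzero finite values are illustrative, not the actual `_Formatter` implementation.

```python
import torch
from torch._tensor_str import PRINT_OPTS


def decide_sci_mode_and_width(tensor, precision=4):
    """Sketch of the fixed ordering: pick sci_mode first, then size max_width.

    Mirrors the logic added to torch._tensor_str._Formatter, but as a
    simplified, hypothetical standalone function for illustration only.
    """
    # Keep only nonzero finite values, as the real formatter does.
    finite = tensor[torch.isfinite(tensor) & tensor.ne(0)].abs()
    lo, hi = finite.min().item(), finite.max().item()

    # 1) Decide the mode up front, honoring an explicit user override.
    sci_mode = (
        hi / lo > 1000.0 or hi > 1.0e8 or lo < 1.0e-4
        if PRINT_OPTS.sci_mode is None
        else PRINT_OPTS.sci_mode
    )

    # 2) Only then compute max_width with the formatting that will be used.
    fmt = f"{{:.{precision}e}}" if sci_mode else f"{{:.{precision}f}}"
    max_width = max(len(fmt.format(v)) for v in finite.tolist())
    return sci_mode, max_width
```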
1 parent 17b9c61 commit 1465757

File tree

2 files changed: +11, -15 lines changed


test/test_torch.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -8337,7 +8337,7 @@ def test_print(self):
         self.assertExpectedInline(str(x), '''tensor([1.0000e+02, 1.0000e-02])''')
         torch.set_printoptions(sci_mode=False)
         self.assertEqual(x.__repr__(), str(x))
-        self.assertExpectedInline(str(x), '''tensor([ 100.0000, 0.0100])''')
+        self.assertExpectedInline(str(x), '''tensor([100.0000, 0.0100])''')
         torch.set_printoptions(sci_mode=None)  # reset to the default value

         # test no leading space if all elements positive
```
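For reference, the updated expectation can be reproduced with a snippet along these lines; the exact inter-value spacing depends on the PyTorch version, but the point is that `100.0000` no longer gets an extra leading space once `sci_mode=False` is honored in the width computation:

```python
import torch

x = torch.tensor([1e2, 1e-2])
torch.set_printoptions(sci_mode=False)
print(x)  # expected along the lines of: tensor([100.0000, 0.0100])
torch.set_printoptions(sci_mode=None)  # reset to the default behavior
```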

torch/_tensor_str.py

Lines changed: 10 additions & 14 deletions
```diff
@@ -178,14 +178,18 @@ def __init__(self, tensor):
                     self.int_mode = False
                     break

+            self.sci_mode = (
+                nonzero_finite_max / nonzero_finite_min > 1000.0
+                or nonzero_finite_max > 1.0e8
+                or nonzero_finite_min < 1.0e-4
+                if PRINT_OPTS.sci_mode is None
+                else PRINT_OPTS.sci_mode
+            )
+
             if self.int_mode:
                 # in int_mode for floats, all numbers are integers, and we append a decimal to nonfinites
                 # to indicate that the tensor is of floating type. add 1 to the len to account for this.
-                if (
-                    nonzero_finite_max / nonzero_finite_min > 1000.0
-                    or nonzero_finite_max > 1.0e8
-                ):
-                    self.sci_mode = True
+                if self.sci_mode:
                     for value in nonzero_finite_vals:
                         value_str = f"{{:.{PRINT_OPTS.precision}e}}".format(value)
                         self.max_width = max(self.max_width, len(value_str))
@@ -195,12 +199,7 @@ def __init__(self, tensor):
                         self.max_width = max(self.max_width, len(value_str) + 1)
             else:
                 # Check if scientific representation should be used.
-                if (
-                    nonzero_finite_max / nonzero_finite_min > 1000.0
-                    or nonzero_finite_max > 1.0e8
-                    or nonzero_finite_min < 1.0e-4
-                ):
-                    self.sci_mode = True
+                if self.sci_mode:
                     for value in nonzero_finite_vals:
                         value_str = f"{{:.{PRINT_OPTS.precision}e}}".format(value)
                         self.max_width = max(self.max_width, len(value_str))
@@ -209,9 +208,6 @@ def __init__(self, tensor):
                         value_str = f"{{:.{PRINT_OPTS.precision}f}}".format(value)
                         self.max_width = max(self.max_width, len(value_str))

-        if PRINT_OPTS.sci_mode is not None:
-            self.sci_mode = PRINT_OPTS.sci_mode
-
     def width(self):
         return self.max_width
```
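As a quick sanity check of the new behavior, a variation of the example from the commit message (using the same `printoptions` helper and `_Formatter` class referenced there) should now show `max_width` agreeing with the rendered width in both forced modes:

```python
import torch

A = torch.tensor([10.0, 1e-1, 1e-2])

# Forcing scientific notation: each value renders as e.g. 1.0000e+01 (10 chars),
# and max_width now agrees with that rendered width.
with torch._tensor_str.printoptions(sci_mode=True):
    print(A, torch._tensor_str._Formatter(A).max_width)

# Forcing fixed-point notation: max_width now reflects the widest fixed-point value
# instead of the width the default (scientific) choice would have produced.
with torch._tensor_str.printoptions(sci_mode=False):
    print(A, torch._tensor_str._Formatter(A).max_width)
```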

0 commit comments