bpo-36311: Fixes decoding multibyte characters around chunk boundaries · python/cpython@7ebdda0 · GitHub
[go: up one dir, main page]

Skip to content

Commit 7ebdda0

Browse files
authored
bpo-36311: Fixes decoding multibyte characters around chunk boundaries and improves decoding performance (GH-15083)
1 parent df0c21f commit 7ebdda0

File tree

3 files changed

+29
-9
lines changed

3 files changed

+29
-9
lines changed

Lib/test/test_codecs.py

Lines changed: 17 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -3075,13 +3075,13 @@ def test_mbcs_alias(self):
30753075
self.assertEqual(codec.name, 'mbcs')
30763076

30773077
@support.bigmemtest(size=2**31, memuse=7, dry_run=False)
3078-
def test_large_input(self):
3078+
def test_large_input(self, size):
30793079
# Test input longer than INT_MAX.
30803080
# Input should contain undecodable bytes before and after
30813081
# the INT_MAX limit.
3082-
encoded = (b'01234567' * (2**28-1) +
3082+
encoded = (b'01234567' * ((size//8)-1) +
30833083
b'\x85\x86\xea\xeb\xec\xef\xfc\xfd\xfe\xff')
3084-
self.assertEqual(len(encoded), 2**31+2)
3084+
self.assertEqual(len(encoded), size+2)
30853085
decoded = codecs.code_page_decode(932, encoded, 'surrogateescape', True)
30863086
self.assertEqual(decoded[1], len(encoded))
30873087
del encoded
@@ -3092,6 +3092,20 @@ def test_large_input(self):
30923092
'\udc85\udc86\udcea\udceb\udcec'
30933093
'\udcef\udcfc\udcfd\udcfe\udcff')
30943094

3095+
@support.bigmemtest(size=2**31, memuse=6, dry_run=False)
3096+
def test_large_utf8_input(self, size):
3097+
# Test input longer than INT_MAX.
3098+
# Input should contain a decodable multi-byte character
3099+
# surrounding INT_MAX
3100+
encoded = (b'0123456\xed\x84\x80' * (size//8))
3101+
self.assertEqual(len(encoded), size // 8 * 10)
3102+
decoded = codecs.code_page_decode(65001, encoded, 'ignore', True)
3103+
self.assertEqual(decoded[1], len(encoded))
3104+
del encoded
3105+
self.assertEqual(len(decoded[0]), size)
3106+
self.assertEqual(decoded[0][:10], '0123456\ud10001')
3107+
self.assertEqual(decoded[0][-11:], '56\ud1000123456\ud100')
3108+
30953109

30963110
class ASCIITest(unittest.TestCase):
30973111
def test_encode(self):
Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,2 @@
1+
Decoding bytes objects larger than 2GiB is faster and no longer fails when a
2+
multibyte character spans a chunk boundary.

Objects/unicodeobject.c

Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -7186,6 +7186,12 @@ PyUnicode_AsASCIIString(PyObject *unicode)
71867186
#define NEED_RETRY
71877187
#endif
71887188

7189+
/* INT_MAX is the theoretical largest chunk (or INT_MAX / 2 when
7190+
transcoding from UTF-16), but INT_MAX / 4 performs better in
7191+
both cases and avoids partial characters overrunning the
7192+
length limit in MultiByteToWideChar on Windows */
7193+
#define DECODING_CHUNK_SIZE (INT_MAX/4)
7194+
71897195
#ifndef WC_ERR_INVALID_CHARS
71907196
# define WC_ERR_INVALID_CHARS 0x0080
71917197
#endif
@@ -7422,8 +7428,8 @@ decode_code_page_stateful(int code_page,
74227428
do
74237429
{
74247430
#ifdef NEED_RETRY
7425-
if (size > INT_MAX) {
7426-
chunk_size = INT_MAX;
7431+
if (size > DECODING_CHUNK_SIZE) {
7432+
chunk_size = DECODING_CHUNK_SIZE;
74277433
final = 0;
74287434
done = 0;
74297435
}
@@ -7827,10 +7833,8 @@ encode_code_page(int code_page,
78277833
do
78287834
{
78297835
#ifdef NEED_RETRY
7830-
/* UTF-16 encoding may double the size, so use only INT_MAX/2
7831-
chunks. */
7832-
if (len > INT_MAX/2) {
7833-
chunk_len = INT_MAX/2;
7836+
if (len > DECODING_CHUNK_SIZE) {
7837+
chunk_len = DECODING_CHUNK_SIZE;
78347838
done = 0;
78357839
}
78367840
else

0 commit comments

Comments
 (0)
0