bpo-33608: Simplify DISPATCH by hoisting eval_breaker ahead of time. by ericsnowcurrently · Pull Request #12062 · python/cpython

Merged

Changes from 1 commit
Fix the Py_atomic_* macros.
ericsnowcurrently committed Mar 1, 2019
commit 7db14bc3b59d8e5eb33fe6b6d9fb8516a2b724bf
48 changes: 24 additions & 24 deletions Include/internal/pycore_atomic.h
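The change is uniform across backends: every _Py_atomic_* macro now treats ATOMIC_VAL as a pointer to the wrapper struct and reaches the payload as &((ATOMIC_VAL)->_value), with the argument fully parenthesized. Before this commit the MSVC branches mixed conventions, spelling the access ATOMIC_VAL._value in the sizeof dispatch and passing the bare ATOMIC_VAL straight to the Interlocked intrinsics. A minimal compilable sketch of the fixed macro shape, using the C11 backend and a hypothetical demo wrapper in place of _Py_atomic_int:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Hypothetical stand-in for _Py_atomic_int (names are illustrative). */
    typedef struct demo_atomic_int {
        atomic_int _value;
    } demo_atomic_int;

    /* Fixed shape: the macro takes a POINTER to the wrapper and fully
       parenthesizes the member access, as in the patched header. */
    #define DEMO_store(ATOMIC_VAL, NEW_VAL, ORDER) \
        atomic_store_explicit(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER)
    #define DEMO_load(ATOMIC_VAL, ORDER) \
        atomic_load_explicit(&((ATOMIC_VAL)->_value), ORDER)

    int main(void)
    {
        demo_atomic_int flag = { 0 };
        DEMO_store(&flag, 1, memory_order_relaxed);
        printf("%d\n", DEMO_load(&flag, memory_order_relaxed));  /* prints 1 */
        return 0;
    }

The same shape, pointer in and &((...)->_value) out, is what each backend below now expands to.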
@@ -58,10 +58,10 @@ typedef struct _Py_atomic_int {
     atomic_thread_fence(ORDER)

 #define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
-    atomic_store_explicit(&(ATOMIC_VAL)->_value, NEW_VAL, ORDER)
+    atomic_store_explicit(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER)

 #define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
-    atomic_load_explicit(&(ATOMIC_VAL)->_value, ORDER)
+    atomic_load_explicit(&((ATOMIC_VAL)->_value), ORDER)

 /* Use builtin atomic operations in GCC >= 4.7 */
 #elif defined(HAVE_BUILTIN_ATOMIC)
@@ -92,14 +92,14 @@ typedef struct _Py_atomic_int {
     (assert((ORDER) == __ATOMIC_RELAXED \
             || (ORDER) == __ATOMIC_SEQ_CST \
             || (ORDER) == __ATOMIC_RELEASE), \
-     __atomic_store_n(&(ATOMIC_VAL)->_value, NEW_VAL, ORDER))
+     __atomic_store_n(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER))

 #define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
     (assert((ORDER) == __ATOMIC_RELAXED \
             || (ORDER) == __ATOMIC_SEQ_CST \
             || (ORDER) == __ATOMIC_ACQUIRE \
             || (ORDER) == __ATOMIC_CONSUME), \
-     __atomic_load_n(&(ATOMIC_VAL)->_value, ORDER))
+     __atomic_load_n(&((ATOMIC_VAL)->_value), ORDER))

 /* Only support GCC (for expression statements) and x86 (for simple
  * atomic semantics) and MSVC x86/x64/ARM */
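The device kept intact here is the comma expression: the assert runs first, then the builtin's result is yielded, so the macro remains usable anywhere an expression is expected. A standalone sketch of the pattern (requires GCC >= 4.7 or Clang for __atomic_load_n; macro name hypothetical):

    #include <assert.h>
    #include <stdio.h>

    /* Comma expression: evaluate the assert, then yield the load result,
       so the whole macro can still be used as an expression. */
    #define CHECKED_LOAD(PTR, ORDER)              \
        (assert((ORDER) == __ATOMIC_RELAXED       \
                || (ORDER) == __ATOMIC_SEQ_CST),  \
         __atomic_load_n((PTR), (ORDER)))

    int main(void)
    {
        long x = 42;
        printf("%ld\n", CHECKED_LOAD(&x, __ATOMIC_RELAXED));  /* prints 42 */
        return 0;
    }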
@@ -324,7 +324,7 @@ inline intptr_t _Py_atomic_load_64bit(volatile uintptr_t* value, int order) {
 }

 #else
-#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *ATOMIC_VAL
+#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *(ATOMIC_VAL)
 #endif

 inline int _Py_atomic_load_32bit(volatile int* value, int order) {
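The one-character fix from *ATOMIC_VAL to *(ATOMIC_VAL) only matters when the macro argument is an expression rather than a plain name, but then it matters a lot: without the parentheses the dereference applies only to the first operand of the substituted expression. A tiny demonstration (hypothetical macros):

    #include <stdio.h>

    #define DEREF_BAD(P)  *P    /* unparenthesized parameter */
    #define DEREF_GOOD(P) *(P)  /* parenthesized, as in the fix */

    int main(void)
    {
        int a[2] = { 10, 20 };
        int *p = a;
        printf("%d\n", DEREF_BAD(p + 1));   /* expands to *p + 1 -> 11: silently wrong */
        printf("%d\n", DEREF_GOOD(p + 1));  /* expands to *(p + 1) -> 20: intended */
        return 0;
    }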
@@ -359,15 +359,15 @@ inline int _Py_atomic_load_32bit(volatile int* value, int order) {
 }

 #define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
-  if (sizeof(*ATOMIC_VAL._value) == 8) { \
-    _Py_atomic_store_64bit((volatile long long*)ATOMIC_VAL._value, NEW_VAL, ORDER) } else { \
-    _Py_atomic_store_32bit((volatile long*)ATOMIC_VAL._value, NEW_VAL, ORDER) }
+  if (sizeof((ATOMIC_VAL)->_value) == 8) { \
+    _Py_atomic_store_64bit((volatile long long*)&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) } else { \
+    _Py_atomic_store_32bit((volatile long*)&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) }

 #define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
   ( \
-    sizeof(*(ATOMIC_VAL._value)) == 8 ? \
-    _Py_atomic_load_64bit((volatile long long*)ATOMIC_VAL._value, ORDER) : \
-    _Py_atomic_load_32bit((volatile long*)ATOMIC_VAL._value, ORDER) \
+    sizeof((ATOMIC_VAL)->_value) == 8 ? \
+    _Py_atomic_load_64bit((volatile long long*)&((ATOMIC_VAL)->_value), ORDER) : \
+    _Py_atomic_load_32bit((volatile long*)&((ATOMIC_VAL)->_value), ORDER) \
   )
 #elif defined(_M_ARM) || defined(_M_ARM64)
 typedef enum _Py_memory_order {
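These MSVC x86/x64 macros pick a 32-bit or 64-bit helper by the size of the payload; sizeof((ATOMIC_VAL)->_value) is a compile-time constant, so the untaken branch folds away. The old spelling sizeof(*ATOMIC_VAL._value) mixed conventions, using . where every other backend treats ATOMIC_VAL as a pointer, and dereferencing the field besides. A portable sketch of the dispatch idea, with plain loads standing in for the Interlocked-based helpers (all names hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    typedef struct { volatile int32_t _value; } demo_atomic_int;
    typedef struct { volatile int64_t _value; } demo_atomic_address;

    static int64_t load_64bit(const volatile void *p) {
        return *(const volatile int64_t *)p;
    }
    static int32_t load_32bit(const volatile void *p) {
        return *(const volatile int32_t *)p;
    }

    /* Size-based dispatch in the shape of the fixed macro: the payload's
       size (a compile-time constant) picks the helper, and the helper
       receives &((ATOMIC_VAL)->_value). */
    #define DEMO_load(ATOMIC_VAL)                   \
        (sizeof((ATOMIC_VAL)->_value) == 8          \
            ? load_64bit(&((ATOMIC_VAL)->_value))   \
            : (int64_t)load_32bit(&((ATOMIC_VAL)->_value)))

    int main(void)
    {
        demo_atomic_int i = { 7 };
        demo_atomic_address a = { 1234567890123LL };
        printf("%lld %lld\n", (long long)DEMO_load(&i),
                              (long long)DEMO_load(&a));  /* 7 1234567890123 */
        return 0;
    }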
@@ -391,13 +391,13 @@ typedef struct _Py_atomic_int {
 #define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) \
     switch (ORDER) { \
     case _Py_memory_order_acquire: \
-      _InterlockedExchange64_acq((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \
+      _InterlockedExchange64_acq((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)NEW_VAL); \
       break; \
     case _Py_memory_order_release: \
-      _InterlockedExchange64_rel((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \
+      _InterlockedExchange64_rel((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)NEW_VAL); \
       break; \
     default: \
-      _InterlockedExchange64((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \
+      _InterlockedExchange64((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)NEW_VAL); \
       break; \
     }
 #else
@@ -407,13 +407,13 @@ typedef struct _Py_atomic_int {
 #define _Py_atomic_store_32bit(ATOMIC_VAL, NEW_VAL, ORDER) \
     switch (ORDER) { \
     case _Py_memory_order_acquire: \
-      _InterlockedExchange_acq((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \
+      _InterlockedExchange_acq((volatile long*)&((ATOMIC_VAL)->_value), (int)NEW_VAL); \
       break; \
     case _Py_memory_order_release: \
-      _InterlockedExchange_rel((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \
+      _InterlockedExchange_rel((volatile long*)&((ATOMIC_VAL)->_value), (int)NEW_VAL); \
       break; \
     default: \
-      _InterlockedExchange((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \
+      _InterlockedExchange((volatile long*)&((ATOMIC_VAL)->_value), (int)NEW_VAL); \
       break; \
     }
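On ARM, MSVC's _InterlockedExchange family offers _acq and _rel variants alongside the full-barrier default, and these switches pick among them by the requested order; note the fix also changes the argument from the bare ATOMIC_VAL to &((ATOMIC_VAL)->_value). A portable analog of the same run-time dispatch, written against C11 atomics rather than the MSVC intrinsics (enum and function names hypothetical):

    #include <stdatomic.h>
    #include <stdio.h>

    typedef enum {
        demo_order_relaxed,
        demo_order_acquire,
        demo_order_release,
        demo_order_seq_cst
    } demo_memory_order;

    /* Run-time order value -> the matching exchange, mirroring how the
       header's switch selects _InterlockedExchange_acq/_rel/plain. */
    static int demo_exchange(atomic_int *p, int new_val, demo_memory_order order)
    {
        switch (order) {
        case demo_order_acquire:
            return atomic_exchange_explicit(p, new_val, memory_order_acquire);
        case demo_order_release:
            return atomic_exchange_explicit(p, new_val, memory_order_release);
        default:  /* strongest ordering, like the plain intrinsic */
            return atomic_exchange_explicit(p, new_val, memory_order_seq_cst);
        }
    }

    int main(void)
    {
        atomic_int v = 1;
        int old = demo_exchange(&v, 2, demo_order_release);
        printf("old=%d new=%d\n", old, atomic_load(&v));  /* old=1 new=2 */
        return 0;
    }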

@@ -454,7 +454,7 @@ inline intptr_t _Py_atomic_load_64bit(volatile uintptr_t* value, int order) {
 }

 #else
-#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *ATOMIC_VAL
+#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *(ATOMIC_VAL)
 #endif

 inline int _Py_atomic_load_32bit(volatile int* value, int order) {
@@ -489,15 +489,15 @@ inline int _Py_atomic_load_32bit(volatile int* value, int order) {
 }

 #define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
-  if (sizeof(*ATOMIC_VAL._value) == 8) { \
-    _Py_atomic_store_64bit(ATOMIC_VAL._value, NEW_VAL, ORDER) } else { \
-    _Py_atomic_store_32bit(ATOMIC_VAL._value, NEW_VAL, ORDER) }
+  if (sizeof((ATOMIC_VAL)->_value) == 8) { \
+    _Py_atomic_store_64bit(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) } else { \
+    _Py_atomic_store_32bit(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) }

 #define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
   ( \
-    sizeof(*(ATOMIC_VAL._value)) == 8 ? \
-    _Py_atomic_load_64bit(ATOMIC_VAL._value, ORDER) : \
-    _Py_atomic_load_32bit(ATOMIC_VAL._value, ORDER) \
+    sizeof((ATOMIC_VAL)->_value) == 8 ? \
+    _Py_atomic_load_64bit(&((ATOMIC_VAL)->_value), ORDER) : \
+    _Py_atomic_load_32bit(&((ATOMIC_VAL)->_value), ORDER) \
   )
 #endif
 #else  /* !gcc x86  !_msc_ver */
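Tying back to the PR title: the point of these wrappers is hot-path polling like the ceval dispatch loop's eval_breaker check, where each pass through the loop loads the flag with relaxed ordering. A sketch of that polling shape against the C11 backend (loop body and names hypothetical, not the actual ceval code):

    #include <stdatomic.h>
    #include <stdio.h>

    typedef struct { atomic_int _value; } demo_atomic_int;

    #define DEMO_load_relaxed(ATOMIC_VAL) \
        atomic_load_explicit(&((ATOMIC_VAL)->_value), memory_order_relaxed)

    static demo_atomic_int eval_breaker;  /* set elsewhere, e.g. by signal handling */

    /* Dispatch-loop shape: check the flag before each "instruction". */
    static void run_demo(int n_instructions)
    {
        for (int i = 0; i < n_instructions; i++) {
            if (DEMO_load_relaxed(&eval_breaker)) {
                printf("eval_breaker set; handling pending work\n");
                atomic_store_explicit(&eval_breaker._value, 0,
                                      memory_order_relaxed);
            }
            /* ... execute one bytecode instruction ... */
        }
    }

    int main(void)
    {
        atomic_store_explicit(&eval_breaker._value, 1, memory_order_relaxed);
        run_demo(3);  /* prints the handling message once */
        return 0;
    }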