[WIP] bpo-32592: Drop support for Windows Vista by vstinner · Pull Request #5231 · python/cpython

Closed
wants to merge 5 commits into from
Changes from 1 commit
Revert: keep _PY_EMULATED_WIN_CV
vstinner committed Jan 18, 2018
commit 764f4a89c39ef1290b647a5163a78aa47923b237
54 changes: 53 additions & 1 deletion Include/internal/condvar.h
@@ -22,18 +22,70 @@
#define PyCOND_T pthread_cond_t

#elif defined(NT_THREADS)
/* Windows support: use native Win7 primitives */
/*
* Windows (XP, 2003 server and later, as well as (hopefully) CE) support
*
* Emulated condition variables that work with XP and later, plus
* native support on Vista and onwards.
*/
#define Py_HAVE_CONDVAR

/* include windows if it hasn't been done before */
#define WIN32_LEAN_AND_MEAN
#include <windows.h>

/* options */
/* non-emulated condition variables are provided for those that want
* to target Windows Vista. Modify this macro to enable them.
*/
#ifndef _PY_EMULATED_WIN_CV
#define _PY_EMULATED_WIN_CV 1 /* use emulated condition variables */
#endif

/* fall back to emulation if not targeting Vista */
#if !defined NTDDI_VISTA || NTDDI_VERSION < NTDDI_VISTA
#undef _PY_EMULATED_WIN_CV
#define _PY_EMULATED_WIN_CV 1
#endif
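/* Illustration (not part of this diff): with the fallback above, a build
 * only gets the native path if it both targets Vista or later and opts
 * out of the emulation, e.g. with assumed compiler flags such as
 *
 *     cl /D_WIN32_WINNT=0x0600 /D_PY_EMULATED_WIN_CV=0 ...
 *
 * so that the SDK headers derive NTDDI_VERSION >= NTDDI_VISTA and the
 * #if above leaves _PY_EMULATED_WIN_CV at 0. */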

#if _PY_EMULATED_WIN_CV

typedef CRITICAL_SECTION PyMUTEX_T;

/* The ConditionVariable object. From XP onwards it is easily emulated
with a Semaphore.
Semaphores are available on Windows XP (2003 server) and later.
We use a Semaphore rather than an auto-reset event: although an
auto-reset event might appear to solve the lost-wakeup bug (the race
condition between releasing the outer lock and waiting) because it
maintains state even though a wait hasn't happened yet, there is still
a lost-wakeup problem if more than one thread is interrupted at the
critical place. A semaphore solves that, because its state is
counted, not Boolean.
Because it is ok to signal a condition variable with no one
waiting, we need to keep track of the number of
waiting threads. Otherwise, the semaphore's state could rise
without bound. This also helps reduce the number of "spurious wakeups"
that would otherwise happen.
*/

typedef struct _PyCOND_T
{
HANDLE sem;
int waiting; /* to allow PyCOND_SIGNAL to be a no-op */
} PyCOND_T;

#else /* !_PY_EMULATED_WIN_CV */

/* Use native Win7 primitives if build target is Win7 or higher */

/* SRWLOCK is faster and better than CriticalSection */
typedef SRWLOCK PyMUTEX_T;

typedef CONDITION_VARIABLE PyCOND_T;

#endif /* _PY_EMULATED_WIN_CV */

#endif /* _POSIX_THREADS, NT_THREADS */

#endif /* Py_INTERNAL_CONDVAR_H */
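For context (not part of the diff): both the emulated and the native variants behave like a classic monitor, and waiters can wake spuriously, so callers are expected to re-check their predicate in a loop while holding the mutex. A minimal usage sketch, assuming a hypothetical caller-defined flag and that mut/cond have already been set up with PyMUTEX_INIT/PyCOND_INIT:

static PyMUTEX_T mut;       /* assumed initialised with PyMUTEX_INIT(&mut) */
static PyCOND_T cond;       /* assumed initialised with PyCOND_INIT(&cond) */
static int ready = 0;       /* hypothetical predicate protected by 'mut' */

static void
consumer(void)
{
    PyMUTEX_LOCK(&mut);
    while (!ready) {
        /* releases 'mut' while blocked and reacquires it before
           returning; the loop guards against spurious wakeups */
        PyCOND_WAIT(&cond, &mut);
    }
    ready = 0;
    PyMUTEX_UNLOCK(&mut);
}

static void
producer(void)
{
    PyMUTEX_LOCK(&mut);
    ready = 1;
    PyCOND_SIGNAL(&cond);   /* no-op when nobody is waiting */
    PyMUTEX_UNLOCK(&mut);
}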
153 changes: 153 additions & 0 deletions Python/condvar.h
@@ -97,6 +97,156 @@ PyCOND_TIMEDWAIT(PyCOND_T *cond, PyMUTEX_T *mut, long long us)
}

#elif defined(NT_THREADS)
/*
* Windows (XP, 2003 server and later, as well as (hopefully) CE) support
*
* Emulated condition variables that work with XP and later, plus
* native support on Vista and onwards.
*/

#if _PY_EMULATED_WIN_CV

/* The mutex is a CriticalSection object and
the condition variable is emulated with the help of a semaphore.

This implementation still has the problem that the threads woken
with a "signal" aren't necessarily those that are already
waiting. It corresponds to listing 2 in:
http://birrell.org/andrew/papers/ImplementingCVs.pdf

Generic emulations of the pthread_cond_* API using
earlier Win32 functions can be found on the Web.
The following page gives background information on these issues,
but the implementations it describes are all broken in some way:
http://www.cse.wustl.edu/~schmidt/win32-cv-1.html
*/

Py_LOCAL_INLINE(int)
PyMUTEX_INIT(PyMUTEX_T *cs)
{
InitializeCriticalSection(cs);
return 0;
}

Py_LOCAL_INLINE(int)
PyMUTEX_FINI(PyMUTEX_T *cs)
{
DeleteCriticalSection(cs);
return 0;
}

Py_LOCAL_INLINE(int)
PyMUTEX_LOCK(PyMUTEX_T *cs)
{
EnterCriticalSection(cs);
return 0;
}

Py_LOCAL_INLINE(int)
PyMUTEX_UNLOCK(PyMUTEX_T *cs)
{
LeaveCriticalSection(cs);
return 0;
}


Py_LOCAL_INLINE(int)
PyCOND_INIT(PyCOND_T *cv)
{
/* A semaphore with a "large" max value. The positive value
* is only needed to catch those "lost wakeup" events and
* race conditions when a timed wait elapses.
*/
cv->sem = CreateSemaphore(NULL, 0, 100000, NULL);
if (cv->sem==NULL)
return -1;
cv->waiting = 0;
return 0;
}

Py_LOCAL_INLINE(int)
PyCOND_FINI(PyCOND_T *cv)
{
return CloseHandle(cv->sem) ? 0 : -1;
}

/* this implementation can detect a timeout. Returns 1 on timeout,
* 0 otherwise (and -1 on error)
*/
Py_LOCAL_INLINE(int)
_PyCOND_WAIT_MS(PyCOND_T *cv, PyMUTEX_T *cs, DWORD ms)
{
DWORD wait;
cv->waiting++;
PyMUTEX_UNLOCK(cs);
/* "lost wakeup bug" would occur if the caller were interrupted here,
* but we are safe because we are using a semaphore which has an internal
* count.
*/
wait = WaitForSingleObjectEx(cv->sem, ms, FALSE);
PyMUTEX_LOCK(cs);
if (wait != WAIT_OBJECT_0)
--cv->waiting;
/* Here we have a benign race condition with PyCOND_SIGNAL.
* When a failure or timeout occurs, it is possible that
* PyCOND_SIGNAL also decrements this value
* and releases the semaphore. This is benign because it
* just means an extra spurious wakeup for a waiting thread.
* ('waiting' corresponds to the semaphore's "negative" count and
* we may end up with e.g. (waiting == -1 && sem.count == 1). When
* a new thread comes along, it will pass right through, having
* adjusted it to (waiting == 0 && sem.count == 0).
*/

if (wait == WAIT_FAILED)
return -1;
/* return 0 on success, 1 on timeout */
return wait != WAIT_OBJECT_0;
}

Py_LOCAL_INLINE(int)
PyCOND_WAIT(PyCOND_T *cv, PyMUTEX_T *cs)
{
int result = _PyCOND_WAIT_MS(cv, cs, INFINITE);
return result >= 0 ? 0 : result;
}

Py_LOCAL_INLINE(int)
PyCOND_TIMEDWAIT(PyCOND_T *cv, PyMUTEX_T *cs, long long us)
{
return _PyCOND_WAIT_MS(cv, cs, (DWORD)(us/1000));
}
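
/* Usage sketch (assumed, not part of this diff): wait up to 5 ms for a
 * hypothetical caller-defined 'ready' flag, distinguishing timeout from
 * success via the 0 / 1 / -1 return convention documented above. */
#if 0   /* illustration only, never compiled */
int r = 0;
PyMUTEX_LOCK(&mut);
while (!ready && r == 0) {
    r = PyCOND_TIMEDWAIT(&cond, &mut, 5000);    /* microseconds */
}
if (r == 1) {
    /* the 5 ms elapsed without a matching PyCOND_SIGNAL */
}
PyMUTEX_UNLOCK(&mut);
#endif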

Py_LOCAL_INLINE(int)
PyCOND_SIGNAL(PyCOND_T *cv)
{
/* this test allows PyCOND_SIGNAL to be a no-op unless required
* to wake someone up, thus preventing an unbounded increase of
* the semaphore's internal counter.
*/
if (cv->waiting > 0) {
/* notifying thread decreases the cv->waiting count so that
* a delay between notify and actual wakeup of the target thread
* doesn't cause a number of extra ReleaseSemaphore calls.
*/
cv->waiting--;
return ReleaseSemaphore(cv->sem, 1, NULL) ? 0 : -1;
}
return 0;
}

Py_LOCAL_INLINE(int)
PyCOND_BROADCAST(PyCOND_T *cv)
{
int waiting = cv->waiting;
if (waiting > 0) {
cv->waiting = 0;
return ReleaseSemaphore(cv->sem, waiting, NULL) ? 0 : -1;
}
return 0;
}

#else /* !_PY_EMULATED_WIN_CV */

Py_LOCAL_INLINE(int)
PyMUTEX_INIT(PyMUTEX_T *cs)
@@ -167,6 +317,9 @@ PyCOND_BROADCAST(PyCOND_T *cv)
return 0;
}


#endif /* _PY_EMULATED_WIN_CV */

#endif /* _POSIX_THREADS, NT_THREADS */

#endif /* _CONDVAR_IMPL_H_ */
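
The non-emulated branch is collapsed in this view. For orientation only, here is a hedged sketch of how those macros map onto the Vista-and-later Win32 primitives named by the SRWLOCK/CONDITION_VARIABLE typedefs in Include/internal/condvar.h; the exact bodies live in the hidden part of the diff and may differ:

/* Sketch only; the real implementations are in the collapsed hunk. */
Py_LOCAL_INLINE(int)
PyMUTEX_INIT(PyMUTEX_T *cs)
{
    InitializeSRWLock(cs);      /* SRW locks need no destruction */
    return 0;
}

Py_LOCAL_INLINE(int)
PyMUTEX_LOCK(PyMUTEX_T *cs)
{
    AcquireSRWLockExclusive(cs);
    return 0;
}

Py_LOCAL_INLINE(int)
PyMUTEX_UNLOCK(PyMUTEX_T *cs)
{
    ReleaseSRWLockExclusive(cs);
    return 0;
}

Py_LOCAL_INLINE(int)
PyCOND_INIT(PyCOND_T *cv)
{
    InitializeConditionVariable(cv);
    return 0;
}

Py_LOCAL_INLINE(int)
PyCOND_WAIT(PyCOND_T *cv, PyMUTEX_T *cs)
{
    /* atomically releases the SRW lock while sleeping and reacquires
       it before returning, like pthread_cond_wait() */
    return SleepConditionVariableSRW(cv, cs, INFINITE, 0) ? 0 : -1;
}

Py_LOCAL_INLINE(int)
PyCOND_SIGNAL(PyCOND_T *cv)
{
    WakeConditionVariable(cv);
    return 0;
}

Py_LOCAL_INLINE(int)
PyCOND_BROADCAST(PyCOND_T *cv)
{
    WakeAllConditionVariable(cv);
    return 0;
}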