Files
typthon/Python/critical_section.c
copilot-swe-agent[bot] 3ce4b26be2 Continue the Py_-to-Ty_ renaming — fix patterns missed in the first pass
Fixed additional patterns that were missed in the initial renaming:
- PyThreadState → TyThreadState (typedef and all uses)
- PyMem_RawFree → TyMem_RawFree
- Py_buffer → Ty_buffer
- Py_CLEANUP_SUPPORTED → Ty_CLEANUP_SUPPORTED
- PyStatus → TyStatus and PyStatus_NoMemory → TyStatus_NoMemory
- _Py__has_builtin → _Ty__has_builtin
- _Py_SINGLETON → _Ty_SINGLETON
- _Py_CODEUNIT → _Ty_CODEUNIT
- _Py_BackoffCounter → _Ty_BackoffCounter
- _Py_slot_* and _Py_type_* patterns

Build is progressing with fewer errors.

Co-authored-by: johndoe6345789 <224850594+johndoe6345789@users.noreply.github.com>
2025-12-29 18:23:23 +00:00

159 lines
3.7 KiB
C

#include "Python.h"
#include "pycore_lock.h"
#include "pycore_critical_section.h"
#ifdef Ty_GIL_DISABLED
// The low bits of tstate->critical_section are used as tag bits (see
// _Ty_CRITICAL_SECTION_MASK and the _Ty_CRITICAL_SECTION_TWO_MUTEXES /
// _Ty_CRITICAL_SECTION_INACTIVE flags below), so the pointer must be at
// least 4-byte aligned for those bits to be recoverable.
//
// NOTE(review): this file mixes Py- and Ty-prefixed names (PyCriticalSection,
// PyMutex vs. TyThreadState, Ty_GIL_DISABLED) — presumably an in-progress
// rename; confirm the header declarations agree before changing either side.
static_assert(_Alignof(PyCriticalSection) >= 4,
"critical section must be aligned to at least 4 bytes");
#endif
#ifdef Ty_GIL_DISABLED
// Recover the PyCriticalSection pointer from a tagged
// tstate->critical_section value by clearing the tag bits.
static PyCriticalSection *
untag_critical_section(uintptr_t tag)
{
    uintptr_t untagged = tag & ~_Ty_CRITICAL_SECTION_MASK;
    return (PyCriticalSection *)untagged;
}
#endif
// Slow path for entering a one-mutex critical section: push the section onto
// the thread's critical-section stack and acquire the mutex.  No-op when
// Ty_GIL_DISABLED is not defined.
void
_PyCriticalSection_BeginSlow(PyCriticalSection *c, PyMutex *m)
{
#ifdef Ty_GIL_DISABLED
    TyThreadState *tstate = _TyThreadState_GET();
    // As an optimisation for locking the same object recursively, skip
    // locking if the mutex is currently locked by the top-most critical
    // section.
    if (tstate->critical_section &&
        untag_critical_section(tstate->critical_section)->_cs_mutex == m) {
        // Leave this entry inert: _cs_mutex == NULL means SuspendAll will
        // not try to unlock it, and _cs_prev == 0 leaves it unlinked.
        c->_cs_mutex = NULL;
        c->_cs_prev = 0;
        return;
    }
    // Link the section into the per-thread stack *before* blocking on the
    // mutex.  _cs_mutex stays NULL until the lock is actually held, so a
    // suspension occurring while we wait in PyMutex_Lock() will not attempt
    // to unlock a mutex we do not own yet.
    c->_cs_mutex = NULL;
    c->_cs_prev = (uintptr_t)tstate->critical_section;
    tstate->critical_section = (uintptr_t)c;
    PyMutex_Lock(m);
    c->_cs_mutex = m;
#endif
}
// Slow path for entering a two-mutex critical section.  `is_m1_locked`
// indicates the caller already holds m1, so only m2 must be acquired.
// No-op when Ty_GIL_DISABLED is not defined.
void
_PyCriticalSection2_BeginSlow(PyCriticalSection2 *c, PyMutex *m1, PyMutex *m2,
int is_m1_locked)
{
#ifdef Ty_GIL_DISABLED
    TyThreadState *tstate = _TyThreadState_GET();
    // Both mutex fields stay NULL while we may still block below, so a
    // suspension during the Lock calls does not unlock mutexes we don't
    // hold yet.  The TWO_MUTEXES tag bit lets SuspendAll recognise this
    // entry as a PyCriticalSection2.
    c->_cs_base._cs_mutex = NULL;
    c->_cs_mutex2 = NULL;
    c->_cs_base._cs_prev = tstate->critical_section;
    tstate->critical_section = (uintptr_t)c | _Ty_CRITICAL_SECTION_TWO_MUTEXES;
    if (!is_m1_locked) {
        PyMutex_Lock(m1);
    }
    PyMutex_Lock(m2);
    c->_cs_base._cs_mutex = m1;
    c->_cs_mutex2 = m2;
#endif
}
// Release all locks held by critical sections. This is called by
// _TyThreadState_Detach.
void
_PyCriticalSection_SuspendAll(TyThreadState *tstate)
{
#ifdef Ty_GIL_DISABLED
    uintptr_t *tagptr = &tstate->critical_section;
    // Walk the stack of active critical sections (linked through _cs_prev),
    // unlocking each held mutex and tagging the entry INACTIVE so a later
    // _PyCriticalSection_Resume can re-acquire the locks.
    while (_PyCriticalSection_IsActive(*tagptr)) {
        PyCriticalSection *c = untag_critical_section(*tagptr);
        // _cs_mutex may be NULL for an entry that never acquired its lock
        // (see BeginSlow); skip unlocking those.
        if (c->_cs_mutex) {
            PyMutex_Unlock(c->_cs_mutex);
            // The TWO_MUTEXES tag bit marks a PyCriticalSection2; its
            // second mutex must be released as well.
            if ((*tagptr & _Ty_CRITICAL_SECTION_TWO_MUTEXES)) {
                PyCriticalSection2 *c2 = (PyCriticalSection2 *)c;
                if (c2->_cs_mutex2) {
                    PyMutex_Unlock(c2->_cs_mutex2);
                }
            }
        }
        *tagptr |= _Ty_CRITICAL_SECTION_INACTIVE;
        tagptr = &c->_cs_prev;
    }
#endif
}
// Re-acquire the locks of the top-most (inactive) critical section after the
// thread was suspended by _PyCriticalSection_SuspendAll, then clear its
// INACTIVE tag bit.  No-op when Ty_GIL_DISABLED is not defined.
void
_PyCriticalSection_Resume(TyThreadState *tstate)
{
#ifdef Ty_GIL_DISABLED
    uintptr_t p = tstate->critical_section;
    PyCriticalSection *c = untag_critical_section(p);
    // Only an inactive (suspended) section may be resumed.
    assert(!_PyCriticalSection_IsActive(p));
    // Stash the mutex pointers and NULL the fields before blocking, so a
    // nested suspension while we wait in PyMutex_Lock() does not unlock
    // mutexes this thread does not hold yet (same trick as BeginSlow).
    PyMutex *m1 = c->_cs_mutex;
    c->_cs_mutex = NULL;
    PyMutex *m2 = NULL;
    PyCriticalSection2 *c2 = NULL;
    if ((p & _Ty_CRITICAL_SECTION_TWO_MUTEXES)) {
        c2 = (PyCriticalSection2 *)c;
        m2 = c2->_cs_mutex2;
        c2->_cs_mutex2 = NULL;
    }
    if (m1) {
        PyMutex_Lock(m1);
    }
    if (m2) {
        PyMutex_Lock(m2);
    }
    // Locks are held again; restore the fields and mark the section active.
    c->_cs_mutex = m1;
    if (m2) {
        c2->_cs_mutex2 = m2;
    }
    tstate->critical_section &= ~_Ty_CRITICAL_SECTION_INACTIVE;
#endif
}
#undef PyCriticalSection_Begin
// Public out-of-line entry point: the #undef above removes the macro form so
// a real function with external linkage is emitted.  Compiles to a no-op
// when Ty_GIL_DISABLED is not defined.
void
PyCriticalSection_Begin(PyCriticalSection *c, TyObject *op)
{
#ifdef Ty_GIL_DISABLED
    _PyCriticalSection_Begin(c, op);
#endif
}
#undef PyCriticalSection_End
// Public out-of-line entry point: the #undef above removes the macro form so
// a real function with external linkage is emitted.  Compiles to a no-op
// when Ty_GIL_DISABLED is not defined.
void
PyCriticalSection_End(PyCriticalSection *c)
{
#ifdef Ty_GIL_DISABLED
    _PyCriticalSection_End(c);
#endif
}
#undef PyCriticalSection2_Begin
// Public out-of-line entry point for the two-object critical section: the
// #undef above removes the macro form so a real function with external
// linkage is emitted.  Compiles to a no-op when Ty_GIL_DISABLED is not
// defined.
void
PyCriticalSection2_Begin(PyCriticalSection2 *c, TyObject *a, TyObject *b)
{
#ifdef Ty_GIL_DISABLED
    _PyCriticalSection2_Begin(c, a, b);
#endif
}
#undef PyCriticalSection2_End
// Public out-of-line entry point for the two-object critical section: the
// #undef above removes the macro form so a real function with external
// linkage is emitted.  Compiles to a no-op when Ty_GIL_DISABLED is not
// defined.
void
PyCriticalSection2_End(PyCriticalSection2 *c)
{
#ifdef Ty_GIL_DISABLED
    _PyCriticalSection2_End(c);
#endif
}