@@ -205,6 +205,16 @@ static void recreate_gil(struct _gil_runtime_state *gil)
205205}
206206#endif
207207
/* Release the GIL: clear gil->locked under gil->mutex and signal the
   condition variable so a thread blocked in take_gil() can wake up.
   Shared helper for drop_gil() and _PyEval_DisableGIL(). */
static void
drop_gil_impl(struct _gil_runtime_state *gil)
{
    MUTEX_LOCK(gil->mutex);
    _Py_ANNOTATE_RWLOCK_RELEASED(&gil->locked, /*is_write=*/1);
    // The store must happen while gil->mutex is held so it cannot race
    // with a waiter re-checking gil->locked after COND_WAIT.
    _Py_atomic_store_int_relaxed(&gil->locked, 0);
    COND_SIGNAL(gil->cond);
    MUTEX_UNLOCK(gil->mutex);
}
217+
208218static void
209219drop_gil (PyInterpreterState * interp , PyThreadState * tstate )
210220{
@@ -220,7 +230,7 @@ drop_gil(PyInterpreterState *interp, PyThreadState *tstate)
220230
221231 struct _gil_runtime_state * gil = ceval -> gil ;
222232#ifdef Py_GIL_DISABLED
223- if (!gil -> enabled ) {
233+ if (!_Py_atomic_load_int_relaxed ( & gil -> enabled ) ) {
224234 return ;
225235 }
226236#endif
@@ -236,11 +246,7 @@ drop_gil(PyInterpreterState *interp, PyThreadState *tstate)
236246 _Py_atomic_store_ptr_relaxed (& gil -> last_holder , tstate );
237247 }
238248
239- MUTEX_LOCK (gil -> mutex );
240- _Py_ANNOTATE_RWLOCK_RELEASED (& gil -> locked , /*is_write=*/ 1 );
241- _Py_atomic_store_int_relaxed (& gil -> locked , 0 );
242- COND_SIGNAL (gil -> cond );
243- MUTEX_UNLOCK (gil -> mutex );
249+ drop_gil_impl (gil );
244250
245251#ifdef FORCE_SWITCHING
246252 /* We check tstate first in case we might be releasing the GIL for
@@ -275,8 +281,10 @@ drop_gil(PyInterpreterState *interp, PyThreadState *tstate)
275281
276282 The function saves errno at entry and restores its value at exit.
277283
278- tstate must be non-NULL. */
279- static void
284+ tstate must be non-NULL.
285+
286+ Returns 1 if the GIL was acquired, or 0 if not. */
287+ static int
280288take_gil (PyThreadState * tstate )
281289{
282290 int err = errno ;
@@ -300,8 +308,8 @@ take_gil(PyThreadState *tstate)
300308 PyInterpreterState * interp = tstate -> interp ;
301309 struct _gil_runtime_state * gil = interp -> ceval .gil ;
302310#ifdef Py_GIL_DISABLED
303- if (!gil -> enabled ) {
304- return ;
311+ if (!_Py_atomic_load_int_relaxed ( & gil -> enabled ) ) {
312+ return 0 ;
305313 }
306314#endif
307315
@@ -346,6 +354,17 @@ take_gil(PyThreadState *tstate)
346354 }
347355 }
348356
357+ #ifdef Py_GIL_DISABLED
358+ if (!_Py_atomic_load_int_relaxed (& gil -> enabled )) {
359+ // Another thread disabled the GIL between our check above and
360+ // now. Don't take the GIL, signal any other waiting threads, and
361+ // return 0.
362+ COND_SIGNAL (gil -> cond );
363+ MUTEX_UNLOCK (gil -> mutex );
364+ return 0 ;
365+ }
366+ #endif
367+
349368#ifdef FORCE_SWITCHING
350369 /* This mutex must be taken before modifying gil->last_holder:
351370 see drop_gil(). */
@@ -387,6 +406,7 @@ take_gil(PyThreadState *tstate)
387406 MUTEX_UNLOCK (gil -> mutex );
388407
389408 errno = err ;
409+ return 1 ;
390410}
391411
392412void _PyEval_SetSwitchInterval (unsigned long microseconds )
@@ -451,7 +471,8 @@ init_own_gil(PyInterpreterState *interp, struct _gil_runtime_state *gil)
451471{
452472 assert (!gil_created (gil ));
453473#ifdef Py_GIL_DISABLED
454- gil -> enabled = _PyInterpreterState_GetConfig (interp )-> enable_gil == _PyConfig_GIL_ENABLE ;
474+ const PyConfig * config = _PyInterpreterState_GetConfig (interp );
475+ gil -> enabled = config -> enable_gil == _PyConfig_GIL_ENABLE ? INT_MAX : 0 ;
455476#endif
456477 create_gil (gil );
457478 assert (gil_created (gil ));
@@ -545,11 +566,11 @@ PyEval_ReleaseLock(void)
545566 drop_gil (tstate -> interp , tstate );
546567}
547568
548- void
569+ int
549570_PyEval_AcquireLock (PyThreadState * tstate )
550571{
551572 _Py_EnsureTstateNotNULL (tstate );
552- take_gil (tstate );
573+ return take_gil (tstate );
553574}
554575
555576void
@@ -1011,6 +1032,117 @@ _PyEval_InitState(PyInterpreterState *interp)
10111032 _gil_initialize (& interp -> _gil );
10121033}
10131034
1035+ #ifdef Py_GIL_DISABLED
/* Enable the GIL for a transient request (to be undone later by a matching
   _PyEval_DisableGIL() call).

   gil->enabled acts as a counter: 0 means the GIL is disabled, INT_MAX is a
   sentinel meaning it is enabled permanently, and values in between count
   the outstanding transient enable requests.

   Returns 1 if this call is the one that actually enabled the GIL (i.e. it
   transitioned gil->enabled from 0), and 0 otherwise.  Does nothing and
   returns 0 when the interpreter was configured with an explicit GIL
   setting rather than _PyConfig_GIL_DEFAULT. */
int
_PyEval_EnableGILTransient(PyThreadState *tstate)
{
    const PyConfig *config = _PyInterpreterState_GetConfig(tstate->interp);
    if (config->enable_gil != _PyConfig_GIL_DEFAULT) {
        // An explicit enable/disable configuration always wins; never toggle.
        return 0;
    }
    struct _gil_runtime_state *gil = tstate->interp->ceval.gil;

    int enabled = _Py_atomic_load_int_relaxed(&gil->enabled);
    if (enabled == INT_MAX) {
        // The GIL is already enabled permanently.
        return 0;
    }
    if (enabled == INT_MAX - 1) {
        Py_FatalError("Too many transient requests to enable the GIL");
    }
    if (enabled > 0) {
        // If enabled is nonzero, we know we hold the GIL. This means that no
        // other threads are attached, and nobody else can be concurrently
        // mutating it.
        _Py_atomic_store_int_relaxed(&gil->enabled, enabled + 1);
        return 0;
    }

    // Enabling the GIL changes what it means to be an "attached" thread. To
    // safely make this transition, we:
    // 1. Detach the current thread.
    // 2. Stop the world to detach (and suspend) all other threads.
    // 3. Enable the GIL, if nobody else did between our check above and when
    //    our stop-the-world begins.
    // 4. Start the world.
    // 5. Attach the current thread. Other threads may attach and hold the GIL
    //    before this thread, which is harmless.
    _PyThreadState_Detach(tstate);

    // This could be an interpreter-local stop-the-world in situations where we
    // know that this interpreter's GIL is not shared, and that it won't become
    // shared before the stop-the-world begins. For now, we always stop all
    // interpreters for simplicity.
    _PyEval_StopTheWorldAll(&_PyRuntime);

    enabled = _Py_atomic_load_int_relaxed(&gil->enabled);
    int this_thread_enabled = enabled == 0;
    // NOTE(review): if another thread could make the GIL permanent (INT_MAX)
    // between our first check above and the stop-the-world, this increment
    // would overflow — confirm that transition cannot happen here.
    _Py_atomic_store_int_relaxed(&gil->enabled, enabled + 1);

    _PyEval_StartTheWorldAll(&_PyRuntime);
    _PyThreadState_Attach(tstate);

    return this_thread_enabled;
}
1087+
/* Make the GIL permanently enabled by storing the INT_MAX sentinel in
   gil->enabled, after which _PyEval_EnableGILTransient() and
   _PyEval_DisableGIL() become no-ops.

   The caller must hold the GIL (asserted below).  Returns 1 if this call
   made the GIL permanent, or 0 if it already was permanent or the
   interpreter was configured with an explicit GIL setting. */
int
_PyEval_EnableGILPermanent(PyThreadState *tstate)
{
    const PyConfig *config = _PyInterpreterState_GetConfig(tstate->interp);
    if (config->enable_gil != _PyConfig_GIL_DEFAULT) {
        return 0;
    }

    struct _gil_runtime_state *gil = tstate->interp->ceval.gil;
    assert(current_thread_holds_gil(gil, tstate));

    int enabled = _Py_atomic_load_int_relaxed(&gil->enabled);
    if (enabled == INT_MAX) {
        // Already permanently enabled.
        return 0;
    }

    // We hold the GIL (asserted above), so no other thread is attached and
    // nobody else can be concurrently mutating gil->enabled.
    _Py_atomic_store_int_relaxed(&gil->enabled, INT_MAX);
    return 1;
}
1107+
/* Undo one _PyEval_EnableGILTransient() request by decrementing the
   gil->enabled counter.  If the counter reaches zero, the GIL itself is
   released so that threads blocked in take_gil() resume without it.

   The caller must hold the GIL (asserted below).  Returns 1 if this call
   disabled the GIL (counter reached zero), 0 otherwise.  Does nothing and
   returns 0 when the GIL is permanently enabled (INT_MAX) or the
   interpreter was configured with an explicit GIL setting. */
int
_PyEval_DisableGIL(PyThreadState *tstate)
{
    const PyConfig *config = _PyInterpreterState_GetConfig(tstate->interp);
    if (config->enable_gil != _PyConfig_GIL_DEFAULT) {
        return 0;
    }

    struct _gil_runtime_state *gil = tstate->interp->ceval.gil;
    assert(current_thread_holds_gil(gil, tstate));

    int enabled = _Py_atomic_load_int_relaxed(&gil->enabled);
    if (enabled == INT_MAX) {
        // Permanently enabled; transient disable requests are ignored.
        return 0;
    }

    assert(enabled >= 1);
    enabled--;

    // Disabling the GIL is much simpler than enabling it, since we know we are
    // the only attached thread. Other threads may start free-threading as soon
    // as this store is complete, if it sets gil->enabled to 0.
    _Py_atomic_store_int_relaxed(&gil->enabled, enabled);

    if (enabled == 0) {
        // We're attached, so we know the GIL will remain disabled until at
        // least the next time we detach, which must be after this function
        // returns.
        //
        // Drop the GIL, which will wake up any threads waiting in take_gil()
        // and let them resume execution without the GIL.
        drop_gil_impl(gil);
        return 1;
    }
    return 0;
}
1144+ #endif
1145+
10141146
10151147/* Do periodic things, like check for signals and async I/0.
10161148* We need to do reasonably frequently, but not too frequently.
0 commit comments