@@ -104,7 +104,8 @@ static frameobject *current_frame;
104104#include <errno.h>
105105#include "thread.h"
106106
107- static type_lock interpreter_lock ;
107+ static type_lock interpreter_lock = 0 ;
108+ static long main_thread = 0 ;
108109
109110void
110111init_save_thread ()
@@ -113,6 +114,7 @@ init_save_thread()
113114 return ;
114115 interpreter_lock = allocate_lock ();
115116 acquire_lock (interpreter_lock , 1 );
117+ main_thread = get_thread_ident ();
116118}
117119
118120#endif
@@ -152,6 +154,87 @@ restore_thread(x)
152154}
153155
154156
157+ /* Mechanism whereby asynchronously executing callbacks (e.g. UNIX
158+ signal handlers or Mac I/O completion routines) can schedule calls
159+ to a function to be called synchronously.
160+ The synchronous function is called with one void* argument.
161+ It should return 0 for success or -1 for failure -- failure should
162+ be accompanied by an exception.
163+
If registration succeeds, Py_AddPendingCall() returns 0; if it fails
(e.g. because the queue of pending calls is full) it returns -1
(without setting an exception condition).
167+
Note that because registration may occur from within signal handlers,
or other asynchronous events, calling malloc() is unsafe!
170+
171+ #ifdef WITH_THREAD
172+ Any thread can schedule pending calls, but only the main thread
173+ will execute them.
174+ #endif
175+
176+ XXX WARNING! ASYNCHRONOUSLY EXECUTING CODE!
177+ There are two possible race conditions:
178+ (1) nested asynchronous registry calls;
179+ (2) registry calls made while pending calls are being processed.
180+ While (1) is very unlikely, (2) is a real possibility.
181+ The current code is safe against (2), but not against (1).
182+ The safety against (2) is derived from the fact that only one
183+ thread (the main thread) ever takes things out of the queue.
184+ */
185+
186+ #define NPENDINGCALLS 32
187+ static struct {
188+ int (* func ) PROTO ((ANY * ));
189+ ANY * arg ;
190+ } pendingcalls [NPENDINGCALLS ];
191+ static volatile int pendingfirst = 0 ;
192+ static volatile int pendinglast = 0 ;
193+
194+ int
195+ Py_AddPendingCall (func , arg )
196+ int (* func ) PROTO ((ANY * ));
197+ ANY * arg ;
198+ {
199+ int i , j ;
200+ /* XXX Begin critical section */
201+ /* XXX If you want this to be safe against nested
202+ XXX asynchronous calls, you'll have to work harder! */
203+ i = pendinglast ;
204+ j = (i + 1 ) % NPENDINGCALLS ;
205+ if (j == pendingfirst )
206+ return -1 ; /* Queue full */
207+ pendingcalls [i ].func = func ;
208+ pendingcalls [i ].arg = arg ;
209+ pendinglast = j ;
210+ /* XXX End critical section */
211+ return 0 ;
212+ }
213+
214+ static int
215+ MakePendingCalls ()
216+ {
217+ #ifdef WITH_THREAD
218+ if (get_thread_ident () != main_thread )
219+ return 0 ;
220+ #endif
221+ for (;;) {
222+ int i ;
223+ int (* func ) PROTO ((ANY * ) );
224+ ANY * arg ;
225+ i = pendingfirst ;
226+ if (i == pendinglast )
227+ break ; /* Queue empty */
228+ func = pendingcalls [i ].func ;
229+ arg = pendingcalls [i ].arg ;
230+ pendingfirst = (i + 1 ) % NPENDINGCALLS ;
231+ if (func (arg ) < 0 )
232+ return -1 ;
233+ }
234+ return 0 ;
235+ }
236+
237+
155238/* Status code for main loop (reason for stack unwind) */
156239
157240enum why_code {
@@ -314,6 +397,13 @@ eval_code(co, globals, locals, owner, arg)
314397 too much overhead (a function call per instruction).
315398 So we do it only every Nth instruction. */
316399
400+ if (pendingfirst != pendinglast ) {
401+ if (MakePendingCalls () < 0 ) {
402+ why = WHY_EXCEPTION ;
403+ goto on_error ;
404+ }
405+ }
406+
317407 if (-- ticker < 0 ) {
318408 ticker = ticker_count ;
319409 if (sigcheck ()) {
0 commit comments