-
-
Notifications
You must be signed in to change notification settings - Fork 34.2k
Expand file tree
/
Copy pathceval.c
More file actions
4148 lines (3743 loc) · 97.3 KB
/
ceval.c
File metadata and controls
4148 lines (3743 loc) · 97.3 KB
Edit and raw actions
OlderNewer
1
2
/* Execute compiled code */
3
4
/* XXX TO DO:
5
XXX speed up searching for keywords by using a dictionary
6
XXX document it!
7
*/
8
9
#include "Python.h"
10
11
#include "compile.h"
12
#include "frameobject.h"
13
#include "eval.h"
14
#include "opcode.h"
15
#include "structmember.h"
16
17
#ifdef macintosh
18
#include "macglue.h"
19
#endif
20
21
#include <ctype.h>
22
23
/* Turn this on if your compiler chokes on the big switch: */
24
/* #define CASE_TOO_BIG 1 */
25
26
#ifdef Py_DEBUG
27
/* For debugging the interpreter: */
28
#define LLTRACE 1 /* Low-level trace feature */
29
#define CHECKEXC 1 /* Double-check exception checking */
30
#endif
31
32
typedef PyObject *(*callproc)(PyObject *, PyObject *, PyObject *);
33
34
/* Forward declarations */
35
static PyObject *eval_frame(PyFrameObject *);
36
static PyObject *call_function(PyObject ***, int);
37
static PyObject *fast_function(PyObject *, PyObject ***, int, int, int);
38
static PyObject *do_call(PyObject *, PyObject ***, int, int);
39
static PyObject *ext_do_call(PyObject *, PyObject ***, int, int, int);
40
static PyObject *update_keyword_args(PyObject *, int, PyObject ***,PyObject *);
41
static PyObject *update_star_args(int, int, PyObject *, PyObject ***);
42
static PyObject *load_args(PyObject ***, int);
43
#define CALL_FLAG_VAR 1
44
#define CALL_FLAG_KW 2
45
46
#ifdef LLTRACE
47
static int prtrace(PyObject *, char *);
48
#endif
49
static int call_trace(Py_tracefunc, PyObject *, PyFrameObject *,
50
int, PyObject *);
51
static void call_trace_protected(Py_tracefunc, PyObject *,
52
PyFrameObject *, int);
53
static void call_exc_trace(Py_tracefunc, PyObject *, PyFrameObject *);
54
static int maybe_call_line_trace(Py_tracefunc, PyObject *,
55
PyFrameObject *, int *, int *);
56
57
static PyObject *apply_slice(PyObject *, PyObject *, PyObject *);
58
static int assign_slice(PyObject *, PyObject *,
59
PyObject *, PyObject *);
60
static PyObject *cmp_outcome(int, PyObject *, PyObject *);
61
static PyObject *import_from(PyObject *, PyObject *);
62
static int import_all_from(PyObject *, PyObject *);
63
static PyObject *build_class(PyObject *, PyObject *, PyObject *);
64
static int exec_statement(PyFrameObject *,
65
PyObject *, PyObject *, PyObject *);
66
static void set_exc_info(PyThreadState *, PyObject *, PyObject *, PyObject *);
67
static void reset_exc_info(PyThreadState *);
68
static void format_exc_check_arg(PyObject *, char *, PyObject *);
69
70
#define NAME_ERROR_MSG \
71
"name '%.200s' is not defined"
72
#define GLOBAL_NAME_ERROR_MSG \
73
"global name '%.200s' is not defined"
74
#define UNBOUNDLOCAL_ERROR_MSG \
75
"local variable '%.200s' referenced before assignment"
76
#define UNBOUNDFREE_ERROR_MSG \
77
"free variable '%.200s' referenced before assignment" \
78
" in enclosing scope"
79
80
/* Dynamic execution profile */
81
#ifdef DYNAMIC_EXECUTION_PROFILE
82
#ifdef DXPAIRS
83
static long dxpairs[257][256];
84
#define dxp dxpairs[256]
85
#else
86
static long dxp[256];
87
#endif
88
#endif
89
90
/* Function call profile */
91
#ifdef CALL_PROFILE
92
#define PCALL_NUM 11
93
static int pcall[PCALL_NUM];
94
95
#define PCALL_ALL 0
96
#define PCALL_FUNCTION 1
97
#define PCALL_FAST_FUNCTION 2
98
#define PCALL_FASTER_FUNCTION 3
99
#define PCALL_METHOD 4
100
#define PCALL_BOUND_METHOD 5
101
#define PCALL_CFUNCTION 6
102
#define PCALL_TYPE 7
103
#define PCALL_GENERATOR 8
104
#define PCALL_OTHER 9
105
#define PCALL_POP 10
106
107
/* Notes about the statistics
108
109
PCALL_FAST stats
110
111
FAST_FUNCTION means no argument tuple needs to be created.
112
FASTER_FUNCTION means that the fast-path frame setup code is used.
113
114
If there is a method call where the call can be optimized by changing
115
the argument tuple and calling the function directly, it gets recorded
116
twice.
117
118
As a result, the relationship among the statistics appears to be
119
PCALL_ALL == PCALL_FUNCTION + PCALL_METHOD - PCALL_BOUND_METHOD +
120
PCALL_CFUNCTION + PCALL_TYPE + PCALL_GENERATOR + PCALL_OTHER
121
PCALL_FUNCTION > PCALL_FAST_FUNCTION > PCALL_FASTER_FUNCTION
122
PCALL_METHOD > PCALL_BOUND_METHOD
123
*/
124
125
#define PCALL(POS) pcall[POS]++
126
127
/* Return the call-profile counters as a tuple of ints.

   Bug fix: PCALL_NUM is 11 and pcall[PCALL_POP] (index 10) is
   maintained by the PCALL() macro, but the original format string
   "iiiiiiiiii" emitted only the first 10 counters, silently dropping
   the PCALL_POP statistic.  Emit all 11. */
PyObject *
PyEval_GetCallStats(PyObject *self)
{
	return Py_BuildValue("iiiiiiiiiii",
			     pcall[0], pcall[1], pcall[2], pcall[3],
			     pcall[4], pcall[5], pcall[6], pcall[7],
			     pcall[8], pcall[9], pcall[10]);
}
135
#else
136
#define PCALL(O)
137
138
/* CALL_PROFILE is not enabled, so no statistics are collected;
   return None. */
PyObject *
PyEval_GetCallStats(PyObject *self)
{
	Py_INCREF(Py_None);
	return Py_None;
}
144
#endif
145
146
static PyTypeObject gentype;
147
148
typedef struct {
	PyObject_HEAD
	/* The gi_ prefix is intended to remind of generator-iterator. */

	/* The suspended frame; resumed by gen_iternext().  Its
	   f_stacktop is non-NULL while the generator is suspended and
	   NULL once the frame has run to completion (see eval_frame). */
	PyFrameObject *gi_frame;

	/* True if generator is being executed. */
	int gi_running;

	/* List of weak reference. */
	PyObject *gi_weakreflist;
} genobject;
160
161
static PyObject *
162
gen_new(PyFrameObject *f)
163
{
164
genobject *gen = PyObject_GC_New(genobject, &gentype);
165
if (gen == NULL) {
166
Py_DECREF(f);
167
return NULL;
168
}
169
gen->gi_frame = f;
170
gen->gi_running = 0;
171
gen->gi_weakreflist = NULL;
172
_PyObject_GC_TRACK(gen);
173
return (PyObject *)gen;
174
}
175
176
/* GC traversal: the frame is the only strong reference the
   generator traverses. */
static int
gen_traverse(genobject *gen, visitproc visit, void *arg)
{
	return visit((PyObject *)gen->gi_frame, arg);
}
181
182
/* Deallocate a generator: untrack from the GC first (so the collector
   cannot see a half-torn-down object), clear any weak references,
   then drop the owned frame and free the object. */
static void
gen_dealloc(genobject *gen)
{
	_PyObject_GC_UNTRACK(gen);
	if (gen->gi_weakreflist != NULL)
		PyObject_ClearWeakRefs((PyObject *) gen);
	Py_DECREF(gen->gi_frame);
	PyObject_GC_Del(gen);
}
191
192
/* Resume the generator's frame and return the next yielded value.
   Returns NULL with ValueError set if the generator is re-entered
   while running, and NULL without an exception set once the
   generator is exhausted (f_stacktop == NULL, the iternext protocol's
   end-of-iteration signal). */
static PyObject *
gen_iternext(genobject *gen)
{
	PyThreadState *tstate = PyThreadState_GET();
	PyFrameObject *f = gen->gi_frame;
	PyObject *result;

	/* Recursive resumption (e.g. from code the generator itself
	   invoked) is an error. */
	if (gen->gi_running) {
		PyErr_SetString(PyExc_ValueError,
				"generator already executing");
		return NULL;
	}
	/* f_stacktop stays NULL after the frame returns: exhausted. */
	if (f->f_stacktop == NULL)
		return NULL;

	/* Generators always return to their most recent caller, not
	 * necessarily their creator. */
	Py_XINCREF(tstate->frame);
	assert(f->f_back == NULL);
	f->f_back = tstate->frame;

	gen->gi_running = 1;
	result = eval_frame(f);
	gen->gi_running = 0;

	/* Don't keep the reference to f_back any longer than necessary.  It
	 * may keep a chain of frames alive or it could create a reference
	 * cycle. */
	Py_XDECREF(f->f_back);
	f->f_back = NULL;

	/* If the generator just returned (as opposed to yielding), signal
	 * that the generator is exhausted. */
	if (result == Py_None && f->f_stacktop == NULL) {
		Py_DECREF(result);
		result = NULL;
	}

	return result;
}
232
233
/* iter(gen) is the generator itself: return a new reference. */
static PyObject *
gen_getiter(PyObject *gen)
{
	Py_INCREF(gen);
	return gen;
}
239
240
/* Read-only attributes exposed on generator objects. */
static PyMemberDef gen_memberlist[] = {
	{"gi_frame",	T_OBJECT, offsetof(genobject, gi_frame),	RO},
	{"gi_running",	T_INT,    offsetof(genobject, gi_running),	RO},
	{NULL}	/* Sentinel */
};
245
246
/* The generator type.  Generators participate in cyclic GC (they own
   a frame), support weak references, and implement the iterator
   protocol via gen_getiter / gen_iternext. */
static PyTypeObject gentype = {
	PyObject_HEAD_INIT(&PyType_Type)
	0,					/* ob_size */
	"generator",				/* tp_name */
	sizeof(genobject),			/* tp_basicsize */
	0,					/* tp_itemsize */
	/* methods */
	(destructor)gen_dealloc,		/* tp_dealloc */
	0,					/* tp_print */
	0,					/* tp_getattr */
	0,					/* tp_setattr */
	0,					/* tp_compare */
	0,					/* tp_repr */
	0,					/* tp_as_number */
	0,					/* tp_as_sequence */
	0,					/* tp_as_mapping */
	0,					/* tp_hash */
	0,					/* tp_call */
	0,					/* tp_str */
	PyObject_GenericGetAttr,		/* tp_getattro */
	0,					/* tp_setattro */
	0,					/* tp_as_buffer */
	Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,/* tp_flags */
	0,					/* tp_doc */
	(traverseproc)gen_traverse,		/* tp_traverse */
	0,					/* tp_clear */
	0,					/* tp_richcompare */
	offsetof(genobject, gi_weakreflist),	/* tp_weaklistoffset */
	(getiterfunc)gen_getiter,		/* tp_iter */
	(iternextfunc)gen_iternext,		/* tp_iternext */
	0,					/* tp_methods */
	gen_memberlist,				/* tp_members */
	0,					/* tp_getset */
	0,					/* tp_base */
	0,					/* tp_dict */
};
282
283
284
#ifdef WITH_THREAD
285
286
#ifndef DONT_HAVE_ERRNO_H
287
#include <errno.h>
288
#endif
289
#include "pythread.h"
290
291
extern int _PyThread_Started; /* Flag for Py_Exit */
292
293
static PyThread_type_lock interpreter_lock = 0; /* This is the GIL */
294
static long main_thread = 0;
295
296
/* Create the global interpreter lock (GIL), acquire it in the calling
   thread, and record that thread as the main thread.  Idempotent: a
   second call returns immediately. */
void
PyEval_InitThreads(void)
{
	if (interpreter_lock)
		return;
	_PyThread_Started = 1;
	interpreter_lock = PyThread_allocate_lock();
	PyThread_acquire_lock(interpreter_lock, 1);
	main_thread = PyThread_get_thread_ident();
}
306
307
/* Block until the GIL is acquired.  NOTE(review): unlike
   PyEval_AcquireThread there is no assert that PyEval_InitThreads()
   has been called; calling this with interpreter_lock still 0 passes
   a null lock to PyThread_acquire_lock. */
void
PyEval_AcquireLock(void)
{
	PyThread_acquire_lock(interpreter_lock, 1);
}
312
313
/* Release the GIL.  The caller must currently hold it. */
void
PyEval_ReleaseLock(void)
{
	PyThread_release_lock(interpreter_lock);
}
318
319
/* Acquire the GIL and then make tstate the current thread state.
   Fatal error if tstate is NULL or if some thread state was already
   current (the caller must have released it first). */
void
PyEval_AcquireThread(PyThreadState *tstate)
{
	if (tstate == NULL)
		Py_FatalError("PyEval_AcquireThread: NULL new thread state");
	/* Check someone has called PyEval_InitThreads() to create the lock */
	assert(interpreter_lock);
	PyThread_acquire_lock(interpreter_lock, 1);
	if (PyThreadState_Swap(tstate) != NULL)
		Py_FatalError(
			"PyEval_AcquireThread: non-NULL old thread state");
}
331
332
/* Clear the current thread state (which must be tstate) and then
   release the GIL.  Fatal error if tstate is NULL or is not the
   currently-installed thread state. */
void
PyEval_ReleaseThread(PyThreadState *tstate)
{
	if (tstate == NULL)
		Py_FatalError("PyEval_ReleaseThread: NULL thread state");
	if (PyThreadState_Swap(NULL) != tstate)
		Py_FatalError("PyEval_ReleaseThread: wrong thread state");
	PyThread_release_lock(interpreter_lock);
}
341
342
/* This function is called from PyOS_AfterFork to ensure that newly
343
created child processes don't hold locks referring to threads which
344
are not running in the child process. (This could also be done using
345
pthread_atfork mechanism, at least for the pthreads implementation.) */
346
347
/* Called after fork() (from PyOS_AfterFork) so the child does not
   block on a GIL held by a thread that exists only in the parent:
   allocate a fresh lock (leaking the old one, see below), acquire it,
   and make the calling thread the main thread. */
void
PyEval_ReInitThreads(void)
{
	if (!interpreter_lock)
		return;
	/*XXX Can't use PyThread_free_lock here because it does too
	  much error-checking.  Doing this cleanly would require
	  adding a new function to each thread_*.h.  Instead, just
	  create a new lock and waste a little bit of memory */
	interpreter_lock = PyThread_allocate_lock();
	PyThread_acquire_lock(interpreter_lock, 1);
	main_thread = PyThread_get_thread_ident();
}
360
#endif
361
362
/* Functions save_thread and restore_thread are always defined so
363
dynamically loaded modules needn't be compiled separately for use
364
with and without threads: */
365
366
/* Clear the current thread state and release the GIL (when threads
   are initialized).  Returns the cleared state so that
   PyEval_RestoreThread() can later restore it.  Fatal error if no
   thread state is current. */
PyThreadState *
PyEval_SaveThread(void)
{
	PyThreadState *tstate = PyThreadState_Swap(NULL);
	if (tstate == NULL)
		Py_FatalError("PyEval_SaveThread: NULL tstate");
#ifdef WITH_THREAD
	if (interpreter_lock)
		PyThread_release_lock(interpreter_lock);
#endif
	return tstate;
}
378
379
/* Reacquire the GIL (when threads are initialized) and make tstate
   current again -- the inverse of PyEval_SaveThread().  errno is
   saved and restored around the lock acquisition; presumably the
   acquire can clobber it (e.g. via interrupted system calls). */
void
PyEval_RestoreThread(PyThreadState *tstate)
{
	if (tstate == NULL)
		Py_FatalError("PyEval_RestoreThread: NULL tstate");
#ifdef WITH_THREAD
	if (interpreter_lock) {
		int err = errno;
		PyThread_acquire_lock(interpreter_lock, 1);
		errno = err;
	}
#endif
	PyThreadState_Swap(tstate);
}
393
394
395
/* Mechanism whereby asynchronously executing callbacks (e.g. UNIX
396
signal handlers or Mac I/O completion routines) can schedule calls
397
to a function to be called synchronously.
398
The synchronous function is called with one void* argument.
399
It should return 0 for success or -1 for failure -- failure should
400
be accompanied by an exception.
401
402
If registry succeeds, the registry function returns 0; if it fails
403
(e.g. due to too many pending calls) it returns -1 (without setting
404
an exception condition).
405
406
Note that because registry may occur from within signal handlers,
407
or other asynchronous events, calling malloc() is unsafe!
408
409
#ifdef WITH_THREAD
410
Any thread can schedule pending calls, but only the main thread
411
will execute them.
412
#endif
413
414
XXX WARNING! ASYNCHRONOUSLY EXECUTING CODE!
415
There are two possible race conditions:
416
(1) nested asynchronous registry calls;
417
(2) registry calls made while pending calls are being processed.
418
While (1) is very unlikely, (2) is a real possibility.
419
The current code is safe against (2), but not against (1).
420
The safety against (2) is derived from the fact that only one
421
thread (the main thread) ever takes things out of the queue.
422
423
XXX Darn! With the advent of thread state, we should have an array
424
of pending calls per thread in the thread state! Later...
425
*/
426
427
#define NPENDINGCALLS 32
428
static struct {
429
int (*func)(void *);
430
void *arg;
431
} pendingcalls[NPENDINGCALLS];
432
static volatile int pendingfirst = 0;
433
static volatile int pendinglast = 0;
434
static volatile int things_to_do = 0;
435
436
/* Schedule func(arg) to be run synchronously by the main loop (see
   Py_MakePendingCalls).  Safe to call from a signal handler: no
   locking and no allocation are performed.  Returns 0 on success, -1
   if the queue is full or a call is already in progress (the "busy"
   flag -- not safe against nested asynchronous registry, as noted
   in the block comment above). */
int
Py_AddPendingCall(int (*func)(void *), void *arg)
{
	static int busy = 0;
	int i, j;
	/* XXX Begin critical section */
	/* XXX If you want this to be safe against nested
	   XXX asynchronous calls, you'll have to work harder! */
	if (busy)
		return -1;
	busy = 1;
	i = pendinglast;
	j = (i + 1) % NPENDINGCALLS;
	if (j == pendingfirst) {
		busy = 0;
		return -1; /* Queue full */
	}
	pendingcalls[i].func = func;
	pendingcalls[i].arg = arg;
	pendinglast = j;

	/* Zeroing the ticker forces the eval loop to run its periodic
	   code (and hence Py_MakePendingCalls) as soon as possible. */
	_Py_Ticker = 0;
	things_to_do = 1; /* Signal main loop */
	busy = 0;
	/* XXX End critical section */
	return 0;
}
463
464
/* Drain the pending-call queue, invoking each registered callback.
   Only the main thread executes calls; other threads, and reentrant
   invocations (busy flag), return 0 immediately.  If a callback
   fails, stop, re-arm things_to_do so the remaining calls are
   retried later, and return -1 with the callback's exception set. */
int
Py_MakePendingCalls(void)
{
	static int busy = 0;
#ifdef WITH_THREAD
	if (main_thread && PyThread_get_thread_ident() != main_thread)
		return 0;
#endif
	if (busy)
		return 0;
	busy = 1;
	things_to_do = 0;
	for (;;) {
		int i;
		int (*func)(void *);
		void *arg;
		i = pendingfirst;
		if (i == pendinglast)
			break; /* Queue empty */
		func = pendingcalls[i].func;
		arg = pendingcalls[i].arg;
		pendingfirst = (i + 1) % NPENDINGCALLS;
		if (func(arg) < 0) {
			busy = 0;
			things_to_do = 1; /* We're not done yet */
			return -1;
		}
	}
	busy = 0;
	return 0;
}
495
496
497
/* The interpreter's recursion limit */
498
499
static int recursion_limit = 1000;
500
501
/* Return the recursion limit enforced by the frame-push check in
   eval_frame(). */
int
Py_GetRecursionLimit(void)
{
	return recursion_limit;
}
506
507
/* Set the interpreter recursion limit.  No validation is performed;
   callers are presumably expected to pass a positive value -- TODO
   confirm against the sys-module wrapper. */
void
Py_SetRecursionLimit(int new_limit)
{
	recursion_limit = new_limit;
}
512
513
/* Status code for main loop (reason for stack unwind) */
514
515
enum why_code {
516
WHY_NOT, /* No error */
517
WHY_EXCEPTION, /* Exception occurred */
518
WHY_RERAISE, /* Exception re-raised by 'finally' */
519
WHY_RETURN, /* 'return' statement */
520
WHY_BREAK, /* 'break' statement */
521
WHY_CONTINUE, /* 'continue' statement */
522
WHY_YIELD /* 'yield' operator */
523
};
524
525
static enum why_code do_raise(PyObject *, PyObject *, PyObject *);
526
static int unpack_iterable(PyObject *, int, PyObject **);
527
528
/* for manipulating the thread switch and periodic "stuff" - used to be
529
per thread, now just a pair o' globals */
530
int _Py_CheckInterval = 100;
531
volatile int _Py_Ticker = 100;
532
533
/* Evaluate a code object with the given globals and locals and no
   arguments, keywords, defaults or closure -- a convenience wrapper
   around PyEval_EvalCodeEx. */
PyObject *
PyEval_EvalCode(PyCodeObject *co, PyObject *globals, PyObject *locals)
{
	/* XXX raise SystemError if globals is NULL */
	return PyEval_EvalCodeEx(co,
			  globals, locals,
			  (PyObject **)NULL, 0,
			  (PyObject **)NULL, 0,
			  (PyObject **)NULL, 0,
			  NULL);
}
544
545
546
/* Interpreter main loop */
547
548
static PyObject *
549
eval_frame(PyFrameObject *f)
550
{
551
#ifdef DXPAIRS
552
int lastopcode = 0;
553
#endif
554
PyObject **stack_pointer; /* Next free slot in value stack */
555
register unsigned char *next_instr;
556
register int opcode=0; /* Current opcode */
557
register int oparg=0; /* Current opcode argument, if any */
558
register enum why_code why; /* Reason for block stack unwind */
559
register int err; /* Error status -- nonzero if error */
560
register PyObject *x; /* Result object -- NULL if error */
561
register PyObject *v; /* Temporary objects popped off stack */
562
register PyObject *w;
563
register PyObject *u;
564
register PyObject *t;
565
register PyObject *stream = NULL; /* for PRINT opcodes */
566
register PyObject **fastlocals, **freevars;
567
PyObject *retval = NULL; /* Return value */
568
PyThreadState *tstate = PyThreadState_GET();
569
PyCodeObject *co;
570
571
/* when tracing we set things up so that
572
573
not (instr_lb <= current_bytecode_offset < instr_ub)
574
575
is true when the line being executed has changed. The
576
initial values are such as to make this false the first
577
time it is tested. */
578
int instr_ub = -1, instr_lb = 0;
579
580
unsigned char *first_instr;
581
PyObject *names;
582
PyObject *consts;
583
#ifdef LLTRACE
584
int lltrace;
585
#endif
586
#if defined(Py_DEBUG) || defined(LLTRACE)
587
/* Make it easier to find out where we are with a debugger */
588
char *filename;
589
#endif
590
591
/* Tuple access macros */
592
593
#ifndef Py_DEBUG
594
#define GETITEM(v, i) PyTuple_GET_ITEM((PyTupleObject *)(v), (i))
595
#else
596
#define GETITEM(v, i) PyTuple_GetItem((v), (i))
597
#endif
598
599
/* Code access macros */
600
601
#define INSTR_OFFSET() (next_instr - first_instr)
602
#define NEXTOP() (*next_instr++)
603
#define NEXTARG() (next_instr += 2, (next_instr[-1]<<8) + next_instr[-2])
604
#define JUMPTO(x) (next_instr = first_instr + (x))
605
#define JUMPBY(x) (next_instr += (x))
606
607
/* OpCode prediction macros
608
Some opcodes tend to come in pairs thus making it possible to predict
609
the second code when the first is run. For example, COMPARE_OP is often
610
followed by JUMP_IF_FALSE or JUMP_IF_TRUE. And, those opcodes are often
611
followed by a POP_TOP.
612
613
Verifying the prediction costs a single high-speed test of register
614
variable against a constant. If the pairing was good, then the
615
processor has a high likelihood of making its own successful branch
616
prediction which results in a nearly zero overhead transition to the
617
next opcode.
618
619
A successful prediction saves a trip through the eval-loop including
620
its two unpredictable branches, the HASARG test and the switch-case.
621
*/
622
623
#define PREDICT(op) if (*next_instr == op) goto PRED_##op
624
#define PREDICTED(op) PRED_##op: next_instr++
625
#define PREDICTED_WITH_ARG(op) PRED_##op: oparg = (next_instr[2]<<8) + \
626
next_instr[1]; next_instr += 3
627
628
/* Stack manipulation macros */
629
630
#define STACK_LEVEL() (stack_pointer - f->f_valuestack)
631
#define EMPTY() (STACK_LEVEL() == 0)
632
#define TOP() (stack_pointer[-1])
633
#define SECOND() (stack_pointer[-2])
634
#define THIRD() (stack_pointer[-3])
635
#define FOURTH() (stack_pointer[-4])
636
#define SET_TOP(v) (stack_pointer[-1] = (v))
637
#define SET_SECOND(v) (stack_pointer[-2] = (v))
638
#define SET_THIRD(v) (stack_pointer[-3] = (v))
639
#define SET_FOURTH(v) (stack_pointer[-4] = (v))
640
#define BASIC_STACKADJ(n) (stack_pointer += n)
641
#define BASIC_PUSH(v) (*stack_pointer++ = (v))
642
#define BASIC_POP() (*--stack_pointer)
643
644
#ifdef LLTRACE
645
#define PUSH(v) { (void)(BASIC_PUSH(v), \
646
lltrace && prtrace(TOP(), "push")); \
647
assert(STACK_LEVEL() <= f->f_stacksize); }
648
#define POP() ((void)(lltrace && prtrace(TOP(), "pop")), BASIC_POP())
649
#define STACKADJ(n) { (void)(BASIC_STACKADJ(n), \
650
lltrace && prtrace(TOP(), "stackadj")); \
651
assert(STACK_LEVEL() <= f->f_stacksize); }
652
#else
653
#define PUSH(v) BASIC_PUSH(v)
654
#define POP() BASIC_POP()
655
#define STACKADJ(n) BASIC_STACKADJ(n)
656
#endif
657
658
/* Local variable macros */
659
660
#define GETLOCAL(i) (fastlocals[i])
661
662
/* The SETLOCAL() macro must not DECREF the local variable in-place and
663
then store the new value; it must copy the old value to a temporary
664
value, then store the new value, and then DECREF the temporary value.
665
This is because it is possible that during the DECREF the frame is
666
accessed by other code (e.g. a __del__ method or gc.collect()) and the
667
variable would be pointing to already-freed memory. */
668
#define SETLOCAL(i, value) do { PyObject *tmp = GETLOCAL(i); \
669
GETLOCAL(i) = value; \
670
Py_XDECREF(tmp); } while (0)
671
672
/* Start of code */
673
674
if (f == NULL)
675
return NULL;
676
677
#ifdef USE_STACKCHECK
678
if (tstate->recursion_depth%10 == 0 && PyOS_CheckStack()) {
679
PyErr_SetString(PyExc_MemoryError, "Stack overflow");
680
return NULL;
681
}
682
#endif
683
684
/* push frame */
685
if (++tstate->recursion_depth > recursion_limit) {
686
--tstate->recursion_depth;
687
PyErr_SetString(PyExc_RuntimeError,
688
"maximum recursion depth exceeded");
689
tstate->frame = f->f_back;
690
return NULL;
691
}
692
693
tstate->frame = f;
694
695
if (tstate->use_tracing) {
696
if (tstate->c_tracefunc != NULL) {
697
/* tstate->c_tracefunc, if defined, is a
698
function that will be called on *every* entry
699
to a code block. Its return value, if not
700
None, is a function that will be called at
701
the start of each executed line of code.
702
(Actually, the function must return itself
703
in order to continue tracing.) The trace
704
functions are called with three arguments:
705
a pointer to the current frame, a string
706
indicating why the function is called, and
707
an argument which depends on the situation.
708
The global trace function is also called
709
whenever an exception is detected. */
710
if (call_trace(tstate->c_tracefunc, tstate->c_traceobj,
711
f, PyTrace_CALL, Py_None)) {
712
/* Trace function raised an error */
713
--tstate->recursion_depth;
714
tstate->frame = f->f_back;
715
return NULL;
716
}
717
}
718
if (tstate->c_profilefunc != NULL) {
719
/* Similar for c_profilefunc, except it needn't
720
return itself and isn't called for "line" events */
721
if (call_trace(tstate->c_profilefunc,
722
tstate->c_profileobj,
723
f, PyTrace_CALL, Py_None)) {
724
/* Profile function raised an error */
725
--tstate->recursion_depth;
726
tstate->frame = f->f_back;
727
return NULL;
728
}
729
}
730
}
731
732
co = f->f_code;
733
names = co->co_names;
734
consts = co->co_consts;
735
fastlocals = f->f_localsplus;
736
freevars = f->f_localsplus + f->f_nlocals;
737
_PyCode_GETCODEPTR(co, &first_instr);
738
/* An explanation is in order for the next line.
739
740
f->f_lasti now refers to the index of the last instruction
741
executed. You might think this was obvious from the name, but
742
this wasn't always true before 2.3! PyFrame_New now sets
743
f->f_lasti to -1 (i.e. the index *before* the first instruction)
744
and YIELD_VALUE doesn't fiddle with f_lasti any more. So this
745
does work. Promise. */
746
next_instr = first_instr + f->f_lasti + 1;
747
stack_pointer = f->f_stacktop;
748
assert(stack_pointer != NULL);
749
f->f_stacktop = NULL; /* remains NULL unless yield suspends frame */
750
751
#ifdef LLTRACE
752
lltrace = PyDict_GetItemString(f->f_globals,"__lltrace__") != NULL;
753
#endif
754
#if defined(Py_DEBUG) || defined(LLTRACE)
755
filename = PyString_AsString(co->co_filename);
756
#endif
757
758
why = WHY_NOT;
759
err = 0;
760
x = Py_None; /* Not a reference, just anything non-NULL */
761
w = NULL;
762
763
for (;;) {
764
assert(stack_pointer >= f->f_valuestack); /* else underflow */
765
assert(STACK_LEVEL() <= f->f_stacksize); /* else overflow */
766
767
/* Do periodic things. Doing this every time through
768
the loop would add too much overhead, so we do it
769
only every Nth instruction. We also do it if
770
``things_to_do'' is set, i.e. when an asynchronous
771
event needs attention (e.g. a signal handler or
772
async I/O handler); see Py_AddPendingCall() and
773
Py_MakePendingCalls() above. */
774
775
if (--_Py_Ticker < 0) {
776
if (*next_instr == SETUP_FINALLY) {
777
/* Make the last opcode before
778
a try: finally: block uninterruptable. */
779
goto fast_next_opcode;
780
}
781
_Py_Ticker = _Py_CheckInterval;
782
tstate->tick_counter++;
783
if (things_to_do) {
784
if (Py_MakePendingCalls() < 0) {
785
why = WHY_EXCEPTION;
786
goto on_error;
787
}
788
if (things_to_do)
789
/* MakePendingCalls() didn't succeed.
790
Force early re-execution of this
791
"periodic" code, possibly after
792
a thread switch */
793
_Py_Ticker = 0;
794
}
795
#if !defined(HAVE_SIGNAL_H) || defined(macintosh)
796
/* If we have true signals, the signal handler
797
will call Py_AddPendingCall() so we don't
798
have to call PyErr_CheckSignals(). On the
799
Mac and DOS, alas, we have to call it. */
800
if (PyErr_CheckSignals()) {
801
why = WHY_EXCEPTION;
802
goto on_error;
803
}
804
#endif
805
806
#ifdef WITH_THREAD
807
if (interpreter_lock) {
808
/* Give another thread a chance */
809
810
if (PyThreadState_Swap(NULL) != tstate)
811
Py_FatalError("ceval: tstate mix-up");
812
PyThread_release_lock(interpreter_lock);
813
814
/* Other threads may run now */
815
816
PyThread_acquire_lock(interpreter_lock, 1);
817
if (PyThreadState_Swap(tstate) != NULL)
818
Py_FatalError("ceval: orphan tstate");
819
820
/* Check for thread interrupts */
821
822
if (tstate->async_exc != NULL) {
823
x = tstate->async_exc;
824
tstate->async_exc = NULL;
825
PyErr_SetNone(x);
826
Py_DECREF(x);
827
why = WHY_EXCEPTION;
828
goto on_error;
829
}
830
}
831
#endif
832
}
833
834
fast_next_opcode:
835
f->f_lasti = INSTR_OFFSET();
836
837
/* line-by-line tracing support */
838
839
if (tstate->c_tracefunc != NULL && !tstate->tracing) {
840
/* see maybe_call_line_trace
841
for expository comments */
842
f->f_stacktop = stack_pointer;
843
844
err = maybe_call_line_trace(tstate->c_tracefunc,
845
tstate->c_traceobj,
846
f, &instr_lb, &instr_ub);
847
/* Reload possibly changed frame fields */
848
JUMPTO(f->f_lasti);
849
if (f->f_stacktop != NULL) {
850
stack_pointer = f->f_stacktop;
851
f->f_stacktop = NULL;
852
}
853
if (err) {
854
/* trace function raised an exception */
855
goto on_error;
856
}
857
}
858
859
/* Extract opcode and argument */
860
861
opcode = NEXTOP();
862
if (HAS_ARG(opcode))
863
oparg = NEXTARG();
864
dispatch_opcode:
865
#ifdef DYNAMIC_EXECUTION_PROFILE
866
#ifdef DXPAIRS
867
dxpairs[lastopcode][opcode]++;
868
lastopcode = opcode;
869
#endif
870
dxp[opcode]++;
871
#endif
872
873
#ifdef LLTRACE
874
/* Instruction tracing */
875
876
if (lltrace) {
877
if (HAS_ARG(opcode)) {
878
printf("%d: %d, %d\n",
879
f->f_lasti, opcode, oparg);
880
}
881
else {
882
printf("%d: %d\n",
883
f->f_lasti, opcode);
884
}
885
}
886
#endif
887
888
/* Main switch on opcode */
889
890
switch (opcode) {
891
892
/* BEWARE!
893
It is essential that any operation that fails sets either
894
x to NULL, err to nonzero, or why to anything but WHY_NOT,
895
and that no operation that succeeds does this! */
896
897
/* case STOP_CODE: this is an error! */
898
899
case LOAD_FAST:
900
x = GETLOCAL(oparg);
901
if (x != NULL) {
902
Py_INCREF(x);
903
PUSH(x);
904
goto fast_next_opcode;
905
}
906
format_exc_check_arg(PyExc_UnboundLocalError,
907
UNBOUNDLOCAL_ERROR_MSG,
908
PyTuple_GetItem(co->co_varnames, oparg));
909
break;
910
911
case LOAD_CONST:
912
x = GETITEM(consts, oparg);
913
Py_INCREF(x);
914
PUSH(x);
915
goto fast_next_opcode;
916
917
PREDICTED_WITH_ARG(STORE_FAST);
918
case STORE_FAST:
919
v = POP();
920
SETLOCAL(oparg, v);
921
goto fast_next_opcode;
922
923
PREDICTED(POP_TOP);
924
case POP_TOP:
925
v = POP();
926
Py_DECREF(v);
927
goto fast_next_opcode;
928
929
case ROT_TWO:
930
v = TOP();
931
w = SECOND();
932
SET_TOP(w);
933
SET_SECOND(v);
934
goto fast_next_opcode;
935
936
case ROT_THREE:
937
v = TOP();
938
w = SECOND();
939
x = THIRD();
940
SET_TOP(w);
941
SET_SECOND(x);
942
SET_THIRD(v);
943
goto fast_next_opcode;
944
945
case ROT_FOUR:
946
u = TOP();
947
v = SECOND();
948
w = THIRD();
949
x = FOURTH();
950
SET_TOP(v);
951
SET_SECOND(w);
952
SET_THIRD(x);
953
SET_FOURTH(u);
954
goto fast_next_opcode;
955
956
case DUP_TOP:
957
v = TOP();
958
Py_INCREF(v);
959
PUSH(v);
960
goto fast_next_opcode;
961
962
case DUP_TOPX:
963
if (oparg == 2) {
964
x = TOP();
965
Py_INCREF(x);
966
w = SECOND();
967
Py_INCREF(w);
968
STACKADJ(2);
969
SET_TOP(x);
970
SET_SECOND(w);
971
goto fast_next_opcode;
972
} else if (oparg == 3) {
973
x = TOP();
974
Py_INCREF(x);
975
w = SECOND();
976
Py_INCREF(w);
977
v = THIRD();
978
Py_INCREF(v);
979
STACKADJ(3);
980
SET_TOP(x);
981
SET_SECOND(w);
982
SET_THIRD(v);
983
goto fast_next_opcode;
984
}
985
Py_FatalError("invalid argument to DUP_TOPX"
986
" (bytecode corruption?)");
987
break;
988
989
case UNARY_POSITIVE:
990
v = TOP();
991
x = PyNumber_Positive(v);
992
Py_DECREF(v);
993
SET_TOP(x);
994
if (x != NULL) continue;
995
break;
996
997
case UNARY_NEGATIVE:
998
v = TOP();
999
x = PyNumber_Negative(v);
1000
Py_DECREF(v);