py: Put all global state together in state structures.
This patch consolidates all global variables in py/ core into one place,
in a global structure. Root pointers are all located together to make
GC tracing easier and more efficient.
diff --git a/py/builtin.h b/py/builtin.h
index 14dc273..70cc28a 100644
--- a/py/builtin.h
+++ b/py/builtin.h
@@ -90,7 +90,6 @@
extern const mp_obj_module_t mp_module_gc;
extern const mp_obj_dict_t mp_module_builtins_globals;
-extern mp_obj_dict_t *mp_module_builtins_override_dict;
struct _dummy_t;
extern struct _dummy_t mp_sys_stdin_obj;
diff --git a/py/emitbc.c b/py/emitbc.c
index 3914a9d..a93c03e 100644
--- a/py/emitbc.c
+++ b/py/emitbc.c
@@ -30,6 +30,7 @@
#include <string.h>
#include <assert.h>
+#include "py/mpstate.h"
#include "py/emit.h"
#include "py/bc0.h"
@@ -383,7 +384,7 @@
STATIC void emit_bc_set_source_line(emit_t *emit, mp_uint_t source_line) {
//printf("source: line %d -> %d offset %d -> %d\n", emit->last_source_line, source_line, emit->last_source_line_offset, emit->bytecode_offset);
#if MICROPY_ENABLE_SOURCE_LINE
- if (mp_optimise_value >= 3) {
+ if (MP_STATE_VM(mp_optimise_value) >= 3) {
// If we compile with -O3, don't store line numbers.
return;
}
diff --git a/py/gc.c b/py/gc.c
index 07f0b80..73a6ca8 100644
--- a/py/gc.c
+++ b/py/gc.c
@@ -28,6 +28,7 @@
#include <stdio.h>
#include <string.h>
+#include "py/mpstate.h"
#include "py/gc.h"
#include "py/obj.h"
#include "py/runtime.h"
@@ -48,25 +49,6 @@
#define WORDS_PER_BLOCK (4)
#define BYTES_PER_BLOCK (WORDS_PER_BLOCK * BYTES_PER_WORD)
-STATIC byte *gc_alloc_table_start;
-STATIC mp_uint_t gc_alloc_table_byte_len;
-#if MICROPY_ENABLE_FINALISER
-STATIC byte *gc_finaliser_table_start;
-#endif
-// We initialise gc_pool_start to a dummy value so it stays out of the bss
-// section. This makes sure we don't trace this pointer in a collect cycle.
-// If we did trace it, it would make the first block of the heap always
-// reachable, and hence we can never free that block.
-STATIC mp_uint_t *gc_pool_start = (void*)4;
-STATIC mp_uint_t *gc_pool_end;
-
-STATIC int gc_stack_overflow;
-STATIC mp_uint_t gc_stack[MICROPY_ALLOC_GC_STACK_SIZE];
-STATIC mp_uint_t *gc_sp;
-STATIC uint16_t gc_lock_depth;
-uint16_t gc_auto_collect_enabled;
-STATIC mp_uint_t gc_last_free_atb_index;
-
// ATB = allocation table byte
// 0b00 = FREE -- free block
// 0b01 = HEAD -- head of a chain of blocks
@@ -90,15 +72,15 @@
#define ATB_3_IS_FREE(a) (((a) & ATB_MASK_3) == 0)
#define BLOCK_SHIFT(block) (2 * ((block) & (BLOCKS_PER_ATB - 1)))
-#define ATB_GET_KIND(block) ((gc_alloc_table_start[(block) / BLOCKS_PER_ATB] >> BLOCK_SHIFT(block)) & 3)
-#define ATB_ANY_TO_FREE(block) do { gc_alloc_table_start[(block) / BLOCKS_PER_ATB] &= (~(AT_MARK << BLOCK_SHIFT(block))); } while (0)
-#define ATB_FREE_TO_HEAD(block) do { gc_alloc_table_start[(block) / BLOCKS_PER_ATB] |= (AT_HEAD << BLOCK_SHIFT(block)); } while (0)
-#define ATB_FREE_TO_TAIL(block) do { gc_alloc_table_start[(block) / BLOCKS_PER_ATB] |= (AT_TAIL << BLOCK_SHIFT(block)); } while (0)
-#define ATB_HEAD_TO_MARK(block) do { gc_alloc_table_start[(block) / BLOCKS_PER_ATB] |= (AT_MARK << BLOCK_SHIFT(block)); } while (0)
-#define ATB_MARK_TO_HEAD(block) do { gc_alloc_table_start[(block) / BLOCKS_PER_ATB] &= (~(AT_TAIL << BLOCK_SHIFT(block))); } while (0)
+#define ATB_GET_KIND(block) ((MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] >> BLOCK_SHIFT(block)) & 3)
+#define ATB_ANY_TO_FREE(block) do { MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] &= (~(AT_MARK << BLOCK_SHIFT(block))); } while (0)
+#define ATB_FREE_TO_HEAD(block) do { MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] |= (AT_HEAD << BLOCK_SHIFT(block)); } while (0)
+#define ATB_FREE_TO_TAIL(block) do { MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] |= (AT_TAIL << BLOCK_SHIFT(block)); } while (0)
+#define ATB_HEAD_TO_MARK(block) do { MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] |= (AT_MARK << BLOCK_SHIFT(block)); } while (0)
+#define ATB_MARK_TO_HEAD(block) do { MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] &= (~(AT_TAIL << BLOCK_SHIFT(block))); } while (0)
-#define BLOCK_FROM_PTR(ptr) (((ptr) - (mp_uint_t)gc_pool_start) / BYTES_PER_BLOCK)
-#define PTR_FROM_BLOCK(block) (((block) * BYTES_PER_BLOCK + (mp_uint_t)gc_pool_start))
+#define BLOCK_FROM_PTR(ptr) (((ptr) - (mp_uint_t)MP_STATE_MEM(gc_pool_start)) / BYTES_PER_BLOCK)
+#define PTR_FROM_BLOCK(block) (((block) * BYTES_PER_BLOCK + (mp_uint_t)MP_STATE_MEM(gc_pool_start)))
#define ATB_FROM_BLOCK(bl) ((bl) / BLOCKS_PER_ATB)
#if MICROPY_ENABLE_FINALISER
@@ -107,9 +89,9 @@
#define BLOCKS_PER_FTB (8)
-#define FTB_GET(block) ((gc_finaliser_table_start[(block) / BLOCKS_PER_FTB] >> ((block) & 7)) & 1)
-#define FTB_SET(block) do { gc_finaliser_table_start[(block) / BLOCKS_PER_FTB] |= (1 << ((block) & 7)); } while (0)
-#define FTB_CLEAR(block) do { gc_finaliser_table_start[(block) / BLOCKS_PER_FTB] &= (~(1 << ((block) & 7))); } while (0)
+#define FTB_GET(block) ((MP_STATE_MEM(gc_finaliser_table_start)[(block) / BLOCKS_PER_FTB] >> ((block) & 7)) & 1)
+#define FTB_SET(block) do { MP_STATE_MEM(gc_finaliser_table_start)[(block) / BLOCKS_PER_FTB] |= (1 << ((block) & 7)); } while (0)
+#define FTB_CLEAR(block) do { MP_STATE_MEM(gc_finaliser_table_start)[(block) / BLOCKS_PER_FTB] &= (~(1 << ((block) & 7))); } while (0)
#endif
// TODO waste less memory; currently requires that all entries in alloc_table have a corresponding block in pool
@@ -125,67 +107,67 @@
// => T = A * (1 + BLOCKS_PER_ATB / BLOCKS_PER_FTB + BLOCKS_PER_ATB * BYTES_PER_BLOCK)
mp_uint_t total_byte_len = (byte*)end - (byte*)start;
#if MICROPY_ENABLE_FINALISER
- gc_alloc_table_byte_len = total_byte_len * BITS_PER_BYTE / (BITS_PER_BYTE + BITS_PER_BYTE * BLOCKS_PER_ATB / BLOCKS_PER_FTB + BITS_PER_BYTE * BLOCKS_PER_ATB * BYTES_PER_BLOCK);
+ MP_STATE_MEM(gc_alloc_table_byte_len) = total_byte_len * BITS_PER_BYTE / (BITS_PER_BYTE + BITS_PER_BYTE * BLOCKS_PER_ATB / BLOCKS_PER_FTB + BITS_PER_BYTE * BLOCKS_PER_ATB * BYTES_PER_BLOCK);
#else
- gc_alloc_table_byte_len = total_byte_len / (1 + BITS_PER_BYTE / 2 * BYTES_PER_BLOCK);
+ MP_STATE_MEM(gc_alloc_table_byte_len) = total_byte_len / (1 + BITS_PER_BYTE / 2 * BYTES_PER_BLOCK);
#endif
- gc_alloc_table_start = (byte*)start;
+ MP_STATE_MEM(gc_alloc_table_start) = (byte*)start;
#if MICROPY_ENABLE_FINALISER
- mp_uint_t gc_finaliser_table_byte_len = (gc_alloc_table_byte_len * BLOCKS_PER_ATB + BLOCKS_PER_FTB - 1) / BLOCKS_PER_FTB;
- gc_finaliser_table_start = gc_alloc_table_start + gc_alloc_table_byte_len;
+ mp_uint_t gc_finaliser_table_byte_len = (MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB + BLOCKS_PER_FTB - 1) / BLOCKS_PER_FTB;
+ MP_STATE_MEM(gc_finaliser_table_start) = MP_STATE_MEM(gc_alloc_table_start) + MP_STATE_MEM(gc_alloc_table_byte_len);
#endif
- mp_uint_t gc_pool_block_len = gc_alloc_table_byte_len * BLOCKS_PER_ATB;
- gc_pool_start = (mp_uint_t*)((byte*)end - gc_pool_block_len * BYTES_PER_BLOCK);
- gc_pool_end = (mp_uint_t*)end;
+ mp_uint_t gc_pool_block_len = MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB;
+ MP_STATE_MEM(gc_pool_start) = (mp_uint_t*)((byte*)end - gc_pool_block_len * BYTES_PER_BLOCK);
+ MP_STATE_MEM(gc_pool_end) = (mp_uint_t*)end;
#if MICROPY_ENABLE_FINALISER
- assert((byte*)gc_pool_start >= gc_finaliser_table_start + gc_finaliser_table_byte_len);
+ assert((byte*)MP_STATE_MEM(gc_pool_start) >= MP_STATE_MEM(gc_finaliser_table_start) + gc_finaliser_table_byte_len);
#endif
// clear ATBs
- memset(gc_alloc_table_start, 0, gc_alloc_table_byte_len);
+ memset(MP_STATE_MEM(gc_alloc_table_start), 0, MP_STATE_MEM(gc_alloc_table_byte_len));
#if MICROPY_ENABLE_FINALISER
// clear FTBs
- memset(gc_finaliser_table_start, 0, gc_finaliser_table_byte_len);
+ memset(MP_STATE_MEM(gc_finaliser_table_start), 0, gc_finaliser_table_byte_len);
#endif
// set last free ATB index to start of heap
- gc_last_free_atb_index = 0;
+ MP_STATE_MEM(gc_last_free_atb_index) = 0;
// unlock the GC
- gc_lock_depth = 0;
+ MP_STATE_MEM(gc_lock_depth) = 0;
// allow auto collection
- gc_auto_collect_enabled = 1;
+ MP_STATE_MEM(gc_auto_collect_enabled) = 1;
DEBUG_printf("GC layout:\n");
- DEBUG_printf(" alloc table at %p, length " UINT_FMT " bytes, " UINT_FMT " blocks\n", gc_alloc_table_start, gc_alloc_table_byte_len, gc_alloc_table_byte_len * BLOCKS_PER_ATB);
+ DEBUG_printf(" alloc table at %p, length " UINT_FMT " bytes, " UINT_FMT " blocks\n", MP_STATE_MEM(gc_alloc_table_start), MP_STATE_MEM(gc_alloc_table_byte_len), MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB);
#if MICROPY_ENABLE_FINALISER
- DEBUG_printf(" finaliser table at %p, length " UINT_FMT " bytes, " UINT_FMT " blocks\n", gc_finaliser_table_start, gc_finaliser_table_byte_len, gc_finaliser_table_byte_len * BLOCKS_PER_FTB);
+ DEBUG_printf(" finaliser table at %p, length " UINT_FMT " bytes, " UINT_FMT " blocks\n", MP_STATE_MEM(gc_finaliser_table_start), gc_finaliser_table_byte_len, gc_finaliser_table_byte_len * BLOCKS_PER_FTB);
#endif
- DEBUG_printf(" pool at %p, length " UINT_FMT " bytes, " UINT_FMT " blocks\n", gc_pool_start, gc_pool_block_len * BYTES_PER_BLOCK, gc_pool_block_len);
+ DEBUG_printf(" pool at %p, length " UINT_FMT " bytes, " UINT_FMT " blocks\n", MP_STATE_MEM(gc_pool_start), gc_pool_block_len * BYTES_PER_BLOCK, gc_pool_block_len);
}
void gc_lock(void) {
- gc_lock_depth++;
+ MP_STATE_MEM(gc_lock_depth)++;
}
void gc_unlock(void) {
- gc_lock_depth--;
+ MP_STATE_MEM(gc_lock_depth)--;
}
bool gc_is_locked(void) {
- return gc_lock_depth != 0;
+ return MP_STATE_MEM(gc_lock_depth) != 0;
}
#define VERIFY_PTR(ptr) ( \
(ptr & (BYTES_PER_BLOCK - 1)) == 0 /* must be aligned on a block */ \
- && ptr >= (mp_uint_t)gc_pool_start /* must be above start of pool */ \
- && ptr < (mp_uint_t)gc_pool_end /* must be below end of pool */ \
+ && ptr >= (mp_uint_t)MP_STATE_MEM(gc_pool_start) /* must be above start of pool */ \
+ && ptr < (mp_uint_t)MP_STATE_MEM(gc_pool_end) /* must be below end of pool */ \
)
#define VERIFY_MARK_AND_PUSH(ptr) \
@@ -195,19 +177,19 @@
if (ATB_GET_KIND(_block) == AT_HEAD) { \
/* an unmarked head, mark it, and push it on gc stack */ \
ATB_HEAD_TO_MARK(_block); \
- if (gc_sp < &gc_stack[MICROPY_ALLOC_GC_STACK_SIZE]) { \
- *gc_sp++ = _block; \
+ if (MP_STATE_MEM(gc_sp) < &MP_STATE_MEM(gc_stack)[MICROPY_ALLOC_GC_STACK_SIZE]) { \
+ *MP_STATE_MEM(gc_sp)++ = _block; \
} else { \
- gc_stack_overflow = 1; \
+ MP_STATE_MEM(gc_stack_overflow) = 1; \
} \
} \
} \
} while (0)
STATIC void gc_drain_stack(void) {
- while (gc_sp > gc_stack) {
+ while (MP_STATE_MEM(gc_sp) > MP_STATE_MEM(gc_stack)) {
// pop the next block off the stack
- mp_uint_t block = *--gc_sp;
+ mp_uint_t block = *--MP_STATE_MEM(gc_sp);
// work out number of consecutive blocks in the chain starting with this one
mp_uint_t n_blocks = 0;
@@ -225,15 +207,15 @@
}
STATIC void gc_deal_with_stack_overflow(void) {
- while (gc_stack_overflow) {
- gc_stack_overflow = 0;
- gc_sp = gc_stack;
+ while (MP_STATE_MEM(gc_stack_overflow)) {
+ MP_STATE_MEM(gc_stack_overflow) = 0;
+ MP_STATE_MEM(gc_sp) = MP_STATE_MEM(gc_stack);
// scan entire memory looking for blocks which have been marked but not their children
- for (mp_uint_t block = 0; block < gc_alloc_table_byte_len * BLOCKS_PER_ATB; block++) {
+ for (mp_uint_t block = 0; block < MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB; block++) {
// trace (again) if mark bit set
if (ATB_GET_KIND(block) == AT_MARK) {
- *gc_sp++ = block;
+ *MP_STATE_MEM(gc_sp)++ = block;
gc_drain_stack();
}
}
@@ -250,7 +232,7 @@
#endif
// free unmarked heads and their tails
int free_tail = 0;
- for (mp_uint_t block = 0; block < gc_alloc_table_byte_len * BLOCKS_PER_ATB; block++) {
+ for (mp_uint_t block = 0; block < MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB; block++) {
switch (ATB_GET_KIND(block)) {
case AT_HEAD:
#if MICROPY_ENABLE_FINALISER
@@ -292,8 +274,13 @@
void gc_collect_start(void) {
gc_lock();
- gc_stack_overflow = 0;
- gc_sp = gc_stack;
+ MP_STATE_MEM(gc_stack_overflow) = 0;
+ MP_STATE_MEM(gc_sp) = MP_STATE_MEM(gc_stack);
+ // Trace root pointers. This relies on the root pointers being organised
+ // correctly in the mp_state_ctx structure. We scan nlr_top, dict_locals,
+ // dict_globals, then the root pointer section of mp_state_vm.
+ void **ptrs = (void**)(void*)&mp_state_ctx;
+ gc_collect_root(ptrs, offsetof(mp_state_ctx_t, vm.stack_top) / sizeof(mp_uint_t));
}
void gc_collect_root(void **ptrs, mp_uint_t len) {
@@ -307,18 +294,18 @@
void gc_collect_end(void) {
gc_deal_with_stack_overflow();
gc_sweep();
- gc_last_free_atb_index = 0;
+ MP_STATE_MEM(gc_last_free_atb_index) = 0;
gc_unlock();
}
void gc_info(gc_info_t *info) {
- info->total = (gc_pool_end - gc_pool_start) * sizeof(mp_uint_t);
+ info->total = (MP_STATE_MEM(gc_pool_end) - MP_STATE_MEM(gc_pool_start)) * sizeof(mp_uint_t);
info->used = 0;
info->free = 0;
info->num_1block = 0;
info->num_2block = 0;
info->max_block = 0;
- for (mp_uint_t block = 0, len = 0; block < gc_alloc_table_byte_len * BLOCKS_PER_ATB; block++) {
+ for (mp_uint_t block = 0, len = 0; block < MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB; block++) {
mp_uint_t kind = ATB_GET_KIND(block);
if (kind == AT_FREE || kind == AT_HEAD) {
if (len == 1) {
@@ -361,7 +348,7 @@
DEBUG_printf("gc_alloc(" UINT_FMT " bytes -> " UINT_FMT " blocks)\n", n_bytes, n_blocks);
// check if GC is locked
- if (gc_lock_depth > 0) {
+ if (MP_STATE_MEM(gc_lock_depth) > 0) {
return NULL;
}
@@ -374,12 +361,12 @@
mp_uint_t end_block;
mp_uint_t start_block;
mp_uint_t n_free = 0;
- int collected = !gc_auto_collect_enabled;
+ int collected = !MP_STATE_MEM(gc_auto_collect_enabled);
for (;;) {
// look for a run of n_blocks available blocks
- for (i = gc_last_free_atb_index; i < gc_alloc_table_byte_len; i++) {
- byte a = gc_alloc_table_start[i];
+ for (i = MP_STATE_MEM(gc_last_free_atb_index); i < MP_STATE_MEM(gc_alloc_table_byte_len); i++) {
+ byte a = MP_STATE_MEM(gc_alloc_table_start)[i];
if (ATB_0_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 0; goto found; } } else { n_free = 0; }
if (ATB_1_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 1; goto found; } } else { n_free = 0; }
if (ATB_2_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 2; goto found; } } else { n_free = 0; }
@@ -407,7 +394,7 @@
// before this one. Also, whenever we free or shink a block we must check
// if this index needs adjusting (see gc_realloc and gc_free).
if (n_free == 1) {
- gc_last_free_atb_index = (i + 1) / BLOCKS_PER_ATB;
+ MP_STATE_MEM(gc_last_free_atb_index) = (i + 1) / BLOCKS_PER_ATB;
}
// mark first block as used head
@@ -420,7 +407,7 @@
}
// get pointer to first block
- void *ret_ptr = (void*)(gc_pool_start + start_block * WORDS_PER_BLOCK);
+ void *ret_ptr = (void*)(MP_STATE_MEM(gc_pool_start) + start_block * WORDS_PER_BLOCK);
DEBUG_printf("gc_alloc(%p)\n", ret_ptr);
// zero out the additional bytes of the newly allocated blocks
@@ -458,7 +445,7 @@
// force the freeing of a piece of memory
void gc_free(void *ptr_in) {
- if (gc_lock_depth > 0) {
+ if (MP_STATE_MEM(gc_lock_depth) > 0) {
// TODO how to deal with this error?
return;
}
@@ -470,8 +457,8 @@
mp_uint_t block = BLOCK_FROM_PTR(ptr);
if (ATB_GET_KIND(block) == AT_HEAD) {
// set the last_free pointer to this block if it's earlier in the heap
- if (block / BLOCKS_PER_ATB < gc_last_free_atb_index) {
- gc_last_free_atb_index = block / BLOCKS_PER_ATB;
+ if (block / BLOCKS_PER_ATB < MP_STATE_MEM(gc_last_free_atb_index)) {
+ MP_STATE_MEM(gc_last_free_atb_index) = block / BLOCKS_PER_ATB;
}
// free head and all of its tail blocks
@@ -540,7 +527,7 @@
#else // Alternative gc_realloc impl
void *gc_realloc(void *ptr_in, mp_uint_t n_bytes) {
- if (gc_lock_depth > 0) {
+ if (MP_STATE_MEM(gc_lock_depth) > 0) {
return NULL;
}
@@ -581,7 +568,7 @@
// efficiently shrink it (see below for shrinking code).
mp_uint_t n_free = 0;
mp_uint_t n_blocks = 1; // counting HEAD block
- mp_uint_t max_block = gc_alloc_table_byte_len * BLOCKS_PER_ATB;
+ mp_uint_t max_block = MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB;
for (mp_uint_t bl = block + n_blocks; bl < max_block; bl++) {
byte block_type = ATB_GET_KIND(bl);
if (block_type == AT_TAIL) {
@@ -612,8 +599,8 @@
}
// set the last_free pointer to end of this block if it's earlier in the heap
- if ((block + new_blocks) / BLOCKS_PER_ATB < gc_last_free_atb_index) {
- gc_last_free_atb_index = (block + new_blocks) / BLOCKS_PER_ATB;
+ if ((block + new_blocks) / BLOCKS_PER_ATB < MP_STATE_MEM(gc_last_free_atb_index)) {
+ MP_STATE_MEM(gc_last_free_atb_index) = (block + new_blocks) / BLOCKS_PER_ATB;
}
#if EXTENSIVE_HEAP_PROFILING
@@ -675,22 +662,22 @@
#if !EXTENSIVE_HEAP_PROFILING
// When comparing heap output we don't want to print the starting
// pointer of the heap because it changes from run to run.
- printf("GC memory layout; from %p:", gc_pool_start);
+ printf("GC memory layout; from %p:", MP_STATE_MEM(gc_pool_start));
#endif
- for (mp_uint_t bl = 0; bl < gc_alloc_table_byte_len * BLOCKS_PER_ATB; bl++) {
+ for (mp_uint_t bl = 0; bl < MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB; bl++) {
if (bl % DUMP_BYTES_PER_LINE == 0) {
// a new line of blocks
{
// check if this line contains only free blocks
mp_uint_t bl2 = bl;
- while (bl2 < gc_alloc_table_byte_len * BLOCKS_PER_ATB && ATB_GET_KIND(bl2) == AT_FREE) {
+ while (bl2 < MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB && ATB_GET_KIND(bl2) == AT_FREE) {
bl2++;
}
if (bl2 - bl >= 2 * DUMP_BYTES_PER_LINE) {
// there are at least 2 lines containing only free blocks, so abbreviate their printing
printf("\n (" UINT_FMT " lines all free)", (bl2 - bl) / DUMP_BYTES_PER_LINE);
bl = bl2 & (~(DUMP_BYTES_PER_LINE - 1));
- if (bl >= gc_alloc_table_byte_len * BLOCKS_PER_ATB) {
+ if (bl >= MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB) {
// got to end of heap
break;
}
@@ -736,7 +723,7 @@
*/
/* this prints the uPy object type of the head block */
case AT_HEAD: {
- mp_uint_t *ptr = gc_pool_start + bl * WORDS_PER_BLOCK;
+ mp_uint_t *ptr = MP_STATE_MEM(gc_pool_start) + bl * WORDS_PER_BLOCK;
if (*ptr == (mp_uint_t)&mp_type_tuple) { c = 'T'; }
else if (*ptr == (mp_uint_t)&mp_type_list) { c = 'L'; }
else if (*ptr == (mp_uint_t)&mp_type_dict) { c = 'D'; }
diff --git a/py/gc.h b/py/gc.h
index c698c7d..bb7e2d4 100644
--- a/py/gc.h
+++ b/py/gc.h
@@ -39,11 +39,6 @@
void gc_unlock(void);
bool gc_is_locked(void);
-// This variable controls auto garbage collection. If set to 0 then the
-// GC won't automatically run when gc_alloc can't find enough blocks. But
-// you can still allocate/free memory and also explicitly call gc_collect.
-extern uint16_t gc_auto_collect_enabled;
-
// A given port must implement gc_collect by using the other collect functions.
void gc_collect(void);
void gc_collect_start(void);
diff --git a/py/lexer.c b/py/lexer.c
index ce6aa78..f4aedb4 100644
--- a/py/lexer.c
+++ b/py/lexer.c
@@ -27,6 +27,7 @@
#include <stdio.h>
#include <assert.h>
+#include "py/mpstate.h"
#include "py/lexer.h"
#define TAB_SIZE (8)
@@ -34,8 +35,6 @@
// TODO seems that CPython allows NULL byte in the input stream
// don't know if that's intentional or not, but we don't allow it
-mp_uint_t mp_optimise_value;
-
// TODO replace with a call to a standard function
STATIC bool str_strn_equal(const char *str, const char *strn, mp_uint_t len) {
mp_uint_t i = 0;
@@ -662,7 +661,7 @@
if (str_strn_equal(tok_kw[i], lex->vstr.buf, lex->vstr.len)) {
if (i == MP_ARRAY_SIZE(tok_kw) - 1) {
// tok_kw[MP_ARRAY_SIZE(tok_kw) - 1] == "__debug__"
- lex->tok_kind = (mp_optimise_value == 0 ? MP_TOKEN_KW_TRUE : MP_TOKEN_KW_FALSE);
+ lex->tok_kind = (MP_STATE_VM(mp_optimise_value) == 0 ? MP_TOKEN_KW_TRUE : MP_TOKEN_KW_FALSE);
} else {
lex->tok_kind = MP_TOKEN_KW_FALSE + i;
}
diff --git a/py/lexer.h b/py/lexer.h
index c2d2b67..3ec6b6d 100644
--- a/py/lexer.h
+++ b/py/lexer.h
@@ -192,6 +192,4 @@
mp_import_stat_t mp_import_stat(const char *path);
mp_lexer_t *mp_lexer_new_from_file(const char *filename);
-extern mp_uint_t mp_optimise_value;
-
#endif // __MICROPY_INCLUDED_PY_LEXER_H__
diff --git a/py/malloc.c b/py/malloc.c
index 10e3566..be2c0db 100644
--- a/py/malloc.c
+++ b/py/malloc.c
@@ -30,6 +30,7 @@
#include "py/mpconfig.h"
#include "py/misc.h"
+#include "py/mpstate.h"
#if 0 // print debugging info
#define DEBUG_printf DEBUG_printf
@@ -38,11 +39,7 @@
#endif
#if MICROPY_MEM_STATS
-STATIC size_t total_bytes_allocated = 0;
-STATIC size_t current_bytes_allocated = 0;
-STATIC size_t peak_bytes_allocated = 0;
-
-#define UPDATE_PEAK() { if (current_bytes_allocated > peak_bytes_allocated) peak_bytes_allocated = current_bytes_allocated; }
+#define UPDATE_PEAK() { if (MP_STATE_MEM(current_bytes_allocated) > MP_STATE_MEM(peak_bytes_allocated)) MP_STATE_MEM(peak_bytes_allocated) = MP_STATE_MEM(current_bytes_allocated); }
#endif
#if MICROPY_ENABLE_GC
@@ -68,8 +65,8 @@
return m_malloc_fail(num_bytes);
}
#if MICROPY_MEM_STATS
- total_bytes_allocated += num_bytes;
- current_bytes_allocated += num_bytes;
+ MP_STATE_MEM(total_bytes_allocated) += num_bytes;
+ MP_STATE_MEM(current_bytes_allocated) += num_bytes;
UPDATE_PEAK();
#endif
DEBUG_printf("malloc %d : %p\n", num_bytes, ptr);
@@ -79,8 +76,8 @@
void *m_malloc_maybe(size_t num_bytes) {
void *ptr = malloc(num_bytes);
#if MICROPY_MEM_STATS
- total_bytes_allocated += num_bytes;
- current_bytes_allocated += num_bytes;
+ MP_STATE_MEM(total_bytes_allocated) += num_bytes;
+ MP_STATE_MEM(current_bytes_allocated) += num_bytes;
UPDATE_PEAK();
#endif
DEBUG_printf("malloc %d : %p\n", num_bytes, ptr);
@@ -94,8 +91,8 @@
return m_malloc_fail(num_bytes);
}
#if MICROPY_MEM_STATS
- total_bytes_allocated += num_bytes;
- current_bytes_allocated += num_bytes;
+ MP_STATE_MEM(total_bytes_allocated) += num_bytes;
+ MP_STATE_MEM(current_bytes_allocated) += num_bytes;
UPDATE_PEAK();
#endif
DEBUG_printf("malloc %d : %p\n", num_bytes, ptr);
@@ -124,8 +121,8 @@
// allocated total. If we process only positive increments,
// we'll count 3K.
size_t diff = new_num_bytes - old_num_bytes;
- total_bytes_allocated += diff;
- current_bytes_allocated += diff;
+ MP_STATE_MEM(total_bytes_allocated) += diff;
+ MP_STATE_MEM(current_bytes_allocated) += diff;
UPDATE_PEAK();
#endif
DEBUG_printf("realloc %p, %d, %d : %p\n", ptr, old_num_bytes, new_num_bytes, new_ptr);
@@ -143,8 +140,8 @@
// Also, don't count failed reallocs.
if (!(new_ptr == NULL && new_num_bytes != 0)) {
size_t diff = new_num_bytes - old_num_bytes;
- total_bytes_allocated += diff;
- current_bytes_allocated += diff;
+ MP_STATE_MEM(total_bytes_allocated) += diff;
+ MP_STATE_MEM(current_bytes_allocated) += diff;
UPDATE_PEAK();
}
#endif
@@ -155,21 +152,21 @@
void m_free(void *ptr, size_t num_bytes) {
free(ptr);
#if MICROPY_MEM_STATS
- current_bytes_allocated -= num_bytes;
+ MP_STATE_MEM(current_bytes_allocated) -= num_bytes;
#endif
DEBUG_printf("free %p, %d\n", ptr, num_bytes);
}
#if MICROPY_MEM_STATS
size_t m_get_total_bytes_allocated(void) {
- return total_bytes_allocated;
+ return MP_STATE_MEM(total_bytes_allocated);
}
size_t m_get_current_bytes_allocated(void) {
- return current_bytes_allocated;
+ return MP_STATE_MEM(current_bytes_allocated);
}
size_t m_get_peak_bytes_allocated(void) {
- return peak_bytes_allocated;
+ return MP_STATE_MEM(peak_bytes_allocated);
}
#endif
diff --git a/py/modgc.c b/py/modgc.c
index e3cbe72..38f7c15 100644
--- a/py/modgc.c
+++ b/py/modgc.c
@@ -24,6 +24,7 @@
* THE SOFTWARE.
*/
+#include "py/mpstate.h"
#include "py/obj.h"
#include "py/gc.h"
@@ -48,7 +49,7 @@
/// \function disable()
/// Disable the garbage collector.
STATIC mp_obj_t gc_disable(void) {
- gc_auto_collect_enabled = 0;
+ MP_STATE_MEM(gc_auto_collect_enabled) = 0;
return mp_const_none;
}
MP_DEFINE_CONST_FUN_OBJ_0(gc_disable_obj, gc_disable);
@@ -56,13 +57,13 @@
/// \function enable()
/// Enable the garbage collector.
STATIC mp_obj_t gc_enable(void) {
- gc_auto_collect_enabled = 1;
+ MP_STATE_MEM(gc_auto_collect_enabled) = 1;
return mp_const_none;
}
MP_DEFINE_CONST_FUN_OBJ_0(gc_enable_obj, gc_enable);
STATIC mp_obj_t gc_isenabled(void) {
- return MP_BOOL(gc_auto_collect_enabled);
+ return MP_BOOL(MP_STATE_MEM(gc_auto_collect_enabled));
}
MP_DEFINE_CONST_FUN_OBJ_0(gc_isenabled_obj, gc_isenabled);
diff --git a/py/mpconfig.h b/py/mpconfig.h
index e3270bd..0faa6a7 100644
--- a/py/mpconfig.h
+++ b/py/mpconfig.h
@@ -488,6 +488,11 @@
#define MICROPY_PORT_CONSTANTS
#endif
+// Any root pointers for GC scanning - see mpstate.c
+#ifndef MICROPY_PORT_ROOT_POINTERS
+#define MICROPY_PORT_ROOT_POINTERS
+#endif
+
/*****************************************************************************/
/* Miscellaneous settings */
diff --git a/py/mpstate.c b/py/mpstate.c
new file mode 100644
index 0000000..2ba3402
--- /dev/null
+++ b/py/mpstate.c
@@ -0,0 +1,29 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/mpstate.h"
+
+mp_state_ctx_t mp_state_ctx;
diff --git a/py/mpstate.h b/py/mpstate.h
new file mode 100644
index 0000000..e966bc8
--- /dev/null
+++ b/py/mpstate.h
@@ -0,0 +1,155 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef __MICROPY_INCLUDED_PY_MPSTATE_H__
+#define __MICROPY_INCLUDED_PY_MPSTATE_H__
+
+#include <stdint.h>
+
+#include "py/mpconfig.h"
+#include "py/misc.h"
+#include "py/nlr.h"
+#include "py/obj.h"
+#include "py/objexcept.h"
+
+// This file contains structures defining the state of the Micro Python
+// memory system, runtime and virtual machine. The state is a global
+// variable, but in the future it is hoped that the state can become local.
+
+// This structure holds information about the memory allocation system.
+typedef struct _mp_state_mem_t {
+ #if MICROPY_MEM_STATS
+ size_t total_bytes_allocated;
+ size_t current_bytes_allocated;
+ size_t peak_bytes_allocated;
+ #endif
+
+ byte *gc_alloc_table_start;
+ mp_uint_t gc_alloc_table_byte_len;
+ #if MICROPY_ENABLE_FINALISER
+ byte *gc_finaliser_table_start;
+ #endif
+ mp_uint_t *gc_pool_start;
+ mp_uint_t *gc_pool_end;
+
+ int gc_stack_overflow;
+ mp_uint_t gc_stack[MICROPY_ALLOC_GC_STACK_SIZE];
+ mp_uint_t *gc_sp;
+ uint16_t gc_lock_depth;
+
+ // This variable controls auto garbage collection. If set to 0 then the
+ // GC won't automatically run when gc_alloc can't find enough blocks. But
+ // you can still allocate/free memory and also explicitly call gc_collect.
+ uint16_t gc_auto_collect_enabled;
+
+ mp_uint_t gc_last_free_atb_index;
+} mp_state_mem_t;
+
+// This structure holds runtime and VM information. It includes a section
+// which contains root pointers that must be scanned by the GC.
+typedef struct _mp_state_vm_t {
+ ////////////////////////////////////////////////////////////
+ // START ROOT POINTER SECTION
+ // everything that needs GC scanning must go here
+ // this must start at the start of this structure
+ //
+
+ // Note: nlr asm code has the offset of this hard-coded
+ nlr_buf_t *nlr_top;
+
+ qstr_pool_t *last_pool;
+
+ // non-heap memory for creating an exception if we can't allocate RAM
+ mp_obj_exception_t mp_emergency_exception_obj;
+
+ // memory for exception arguments if we can't allocate RAM
+ #if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF
+ #if MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE > 0
+ // statically allocated buf
+ byte mp_emergency_exception_buf[MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE];
+ #else
+ // dynamically allocated buf
+ byte *mp_emergency_exception_buf;
+ #endif
+ #endif
+
+ // map with loaded modules
+ // TODO: expose as sys.modules
+ mp_map_t mp_loaded_modules_map;
+
+ // pending exception object (MP_OBJ_NULL if not pending)
+ mp_obj_t mp_pending_exception;
+
+ // dictionary for the __main__ module
+ mp_obj_dict_t dict_main;
+
+ // dictionary for overridden builtins
+ #if MICROPY_CAN_OVERRIDE_BUILTINS
+ mp_obj_dict_t *mp_module_builtins_override_dict;
+ #endif
+
+ // include any root pointers defined by a port
+ MICROPY_PORT_ROOT_POINTERS
+
+ //
+ // END ROOT POINTER SECTION
+ ////////////////////////////////////////////////////////////
+
+ // Stack top at the start of program
+ // Note: this entry is used to locate the end of the root pointer section.
+ char *stack_top;
+
+ #if MICROPY_STACK_CHECK
+ mp_uint_t stack_limit;
+ #endif
+
+ mp_uint_t mp_optimise_value;
+
+ // size of the emergency exception buf, if it's dynamically allocated
+ #if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF && MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE == 0
+ mp_int_t mp_emergency_exception_buf_size;
+ #endif
+} mp_state_vm_t;
+
+// This structure combines the above 2 structures, and adds the local
+// and global dicts.
+// Note: if this structure changes then revisit all nlr asm code since they
+// have the offset of nlr_top hard-coded.
+typedef struct _mp_state_ctx_t {
+ // these must come first for root pointer scanning in GC to work
+ mp_obj_dict_t *dict_locals;
+ mp_obj_dict_t *dict_globals;
+ // this must come next for root pointer scanning in GC to work
+ mp_state_vm_t vm;
+ mp_state_mem_t mem;
+} mp_state_ctx_t;
+
+extern mp_state_ctx_t mp_state_ctx;
+
+#define MP_STATE_CTX(x) (mp_state_ctx.x)
+#define MP_STATE_VM(x) (mp_state_ctx.vm.x)
+#define MP_STATE_MEM(x) (mp_state_ctx.mem.x)
+
+#endif // __MICROPY_INCLUDED_PY_MPSTATE_H__
diff --git a/py/nlr.h b/py/nlr.h
index 824ed6d..f414588 100644
--- a/py/nlr.h
+++ b/py/nlr.h
@@ -64,14 +64,14 @@
#endif
};
-extern nlr_buf_t *nlr_top;
-
#if MICROPY_NLR_SETJMP
+#include "py/mpstate.h"
+
NORETURN void nlr_setjmp_jump(void *val);
// nlr_push() must be defined as a macro, because "The stack context will be
// invalidated if the function which called setjmp() returns."
-#define nlr_push(buf) ((buf)->prev = nlr_top, nlr_top = (buf), setjmp((buf)->jmpbuf))
-#define nlr_pop() { nlr_top = nlr_top->prev; }
+#define nlr_push(buf) ((buf)->prev = MP_STATE_VM(nlr_top), MP_STATE_VM(nlr_top) = (buf), setjmp((buf)->jmpbuf))
+#define nlr_pop() { MP_STATE_VM(nlr_top) = MP_STATE_VM(nlr_top)->prev; }
#define nlr_jump(val) nlr_setjmp_jump(val)
#else
unsigned int nlr_push(nlr_buf_t *);
diff --git a/py/nlrsetjmp.c b/py/nlrsetjmp.c
index 76d718b..661b650 100644
--- a/py/nlrsetjmp.c
+++ b/py/nlrsetjmp.c
@@ -26,14 +26,11 @@
#include "py/nlr.h"
-// this global variable is used for all nlr implementations
-nlr_buf_t *nlr_top;
-
#if MICROPY_NLR_SETJMP
void nlr_setjmp_jump(void *val) {
- nlr_buf_t *buf = nlr_top;
- nlr_top = buf->prev;
+ nlr_buf_t *buf = MP_STATE_VM(nlr_top);
+ MP_STATE_VM(nlr_top) = buf->prev;
buf->ret_val = val;
longjmp(buf->jmpbuf, 1);
}
diff --git a/py/nlrthumb.S b/py/nlrthumb.S
index 761f835..c6db844 100644
--- a/py/nlrthumb.S
+++ b/py/nlrthumb.S
@@ -32,6 +32,9 @@
// For reference, arm/thumb callee save regs are:
// r4-r11, r13=sp
+// the offset of nlr_top within mp_state_ctx_t
+#define NLR_TOP_OFFSET (2 * 4)
+
.syntax unified
/*.cpu cortex-m4*/
/*.thumb*/
@@ -68,7 +71,7 @@
bx lr @ return
.align 2
nlr_top_addr:
- .word nlr_top
+ .word mp_state_ctx + NLR_TOP_OFFSET
.size nlr_push, .-nlr_push
/**************************************/
diff --git a/py/nlrx64.S b/py/nlrx64.S
index b0c2e74..43298eb 100644
--- a/py/nlrx64.S
+++ b/py/nlrx64.S
@@ -32,6 +32,11 @@
// For reference, x86-64 callee save regs are:
// rbx, rbp, rsp, r12, r13, r14, r15
+// the offset of nlr_top within mp_state_ctx_t
+#define NLR_TOP_OFFSET (2 * 8)
+
+#define NLR_TOP (mp_state_ctx + NLR_TOP_OFFSET)
+
.file "nlr.s"
.text
@@ -64,9 +69,9 @@
movq %r13, 56(%rdi) # store %r13 into nlr_buf
movq %r14, 64(%rdi) # store %r14 into nlr_buf
movq %r15, 72(%rdi) # store %r15 into nlr_buf
- movq nlr_top(%rip), %rax # get last nlr_buf
+ movq NLR_TOP(%rip), %rax # get last nlr_buf
movq %rax, (%rdi) # store it
- movq %rdi, nlr_top(%rip) # stor new nlr_buf (to make linked list)
+ movq %rdi, NLR_TOP(%rip) # stor new nlr_buf (to make linked list)
xorq %rax, %rax # return 0, normal return
ret # return
#if !(defined(__APPLE__) && defined(__MACH__))
@@ -84,9 +89,9 @@
.globl _nlr_pop
_nlr_pop:
#endif
- movq nlr_top(%rip), %rax # get nlr_top into %rax
+ movq NLR_TOP(%rip), %rax # get nlr_top into %rax
movq (%rax), %rax # load prev nlr_buf
- movq %rax, nlr_top(%rip) # store prev nlr_buf (to unlink list)
+ movq %rax, NLR_TOP(%rip) # store prev nlr_buf (to unlink list)
ret # return
#if !(defined(__APPLE__) && defined(__MACH__))
.size nlr_pop, .-nlr_pop
@@ -104,12 +109,12 @@
_nlr_jump:
#endif
movq %rdi, %rax # put return value in %rax
- movq nlr_top(%rip), %rdi # get nlr_top into %rdi
+ movq NLR_TOP(%rip), %rdi # get nlr_top into %rdi
test %rdi, %rdi # check for nlr_top being NULL
je .fail # fail if nlr_top is NULL
movq %rax, 8(%rdi) # store return value
movq (%rdi), %rax # load prev nlr_buf
- movq %rax, nlr_top(%rip) # store prev nlr_buf (to unlink list)
+ movq %rax, NLR_TOP(%rip) # store prev nlr_buf (to unlink list)
movq 72(%rdi), %r15 # load saved %r15
movq 64(%rdi), %r14 # load saved %r14
movq 56(%rdi), %r13 # load saved %r13
@@ -155,9 +160,9 @@
movq %r15, 72(%rcx) # store %r15 into
movq %rdi, 80(%rcx) # store %rdr into
movq %rsi, 88(%rcx) # store %rsi into
- movq nlr_top(%rip), %rax # get last nlr_buf
+ movq NLR_TOP(%rip), %rax # get last nlr_buf
movq %rax, (%rcx) # store it
- movq %rcx, nlr_top(%rip) # stor new nlr_buf (to make linked list)
+ movq %rcx, NLR_TOP(%rip) # stor new nlr_buf (to make linked list)
xorq %rax, %rax # return 0, normal return
ret # return
@@ -166,9 +171,9 @@
.globl nlr_pop
nlr_pop:
- movq nlr_top(%rip), %rax # get nlr_top into %rax
+ movq NLR_TOP(%rip), %rax # get nlr_top into %rax
movq (%rax), %rax # load prev nlr_buf
- movq %rax, nlr_top(%rip) # store prev nlr_buf (to unlink list)
+ movq %rax, NLR_TOP(%rip) # store prev nlr_buf (to unlink list)
ret # return
/**************************************/
@@ -177,12 +182,12 @@
.globl nlr_jump
nlr_jump:
movq %rcx, %rax # put return value in %rax
- movq nlr_top(%rip), %rcx # get nlr_top into %rcx
+ movq NLR_TOP(%rip), %rcx # get nlr_top into %rcx
test %rcx, %rcx # check for nlr_top being NULL
je .fail # fail if nlr_top is NULL
movq %rax, 8(%rcx) # store return value
movq (%rcx), %rax # load prev nlr_buf
- movq %rax, nlr_top(%rip) # store prev nlr_buf (to unlink list)
+ movq %rax, NLR_TOP(%rip) # store prev nlr_buf (to unlink list)
movq 72(%rcx), %r15 # load saved %r15
movq 64(%rcx), %r14 # load saved %r14
movq 56(%rcx), %r13 # load saved %r13
diff --git a/py/nlrx86.S b/py/nlrx86.S
index 34d0bfc..275dc82 100644
--- a/py/nlrx86.S
+++ b/py/nlrx86.S
@@ -32,10 +32,13 @@
// For reference, x86 callee save regs are:
// ebx, esi, edi, ebp, esp, eip
+// the offset of nlr_top within mp_state_ctx_t
+#define NLR_TOP_OFFSET (2 * 4)
+
#ifdef _WIN32
-#define NLR_TOP _nlr_top
+#define NLR_TOP (_mp_state_ctx + NLR_TOP_OFFSET)
#else
-#define NLR_TOP nlr_top
+#define NLR_TOP (mp_state_ctx + NLR_TOP_OFFSET)
#endif
.file "nlr.s"
diff --git a/py/nlrxtensa.S b/py/nlrxtensa.S
index 293fb9f..289996c 100644
--- a/py/nlrxtensa.S
+++ b/py/nlrxtensa.S
@@ -34,11 +34,16 @@
a3-a7 = rest of args
*/
+// the offset of nlr_top within mp_state_ctx_t
+#define NLR_TOP_OFFSET (2 * 4)
+
+#define NLR_TOP (mp_state_ctx + NLR_TOP_OFFSET)
+
.file "nlr.s"
.text
.literal_position
- .literal .LC0, nlr_top
+ .literal .LC0, NLR_TOP
.align 4
.global nlr_push
.type nlr_push, @function
@@ -64,7 +69,7 @@
.size nlr_push, .-nlr_push
.literal_position
- .literal .LC1, nlr_top
+ .literal .LC1, NLR_TOP
.align 4
.global nlr_pop
.type nlr_pop, @function
@@ -77,7 +82,7 @@
.size nlr_pop, .-nlr_pop
.literal_position
- .literal .LC2, nlr_top
+ .literal .LC2, NLR_TOP
.align 4
.global nlr_jump
.type nlr_jump, @function
diff --git a/py/objexcept.c b/py/objexcept.c
index a60cbb8..837e2e8 100644
--- a/py/objexcept.c
+++ b/py/objexcept.c
@@ -29,6 +29,7 @@
#include <assert.h>
#include <stdio.h>
+#include "py/mpstate.h"
#include "py/nlr.h"
#include "py/objlist.h"
#include "py/objstr.h"
@@ -36,23 +37,13 @@
#include "py/objtype.h"
#include "py/gc.h"
-typedef struct _mp_obj_exception_t {
- mp_obj_base_t base;
- mp_obj_t traceback; // a list object, holding (file,line,block) as numbers (not Python objects); a hack for now
- mp_obj_tuple_t *args;
-} mp_obj_exception_t;
-
// Instance of MemoryError exception - needed by mp_malloc_fail
const mp_obj_exception_t mp_const_MemoryError_obj = {{&mp_type_MemoryError}, MP_OBJ_NULL, mp_const_empty_tuple};
-// Local non-heap memory for allocating an exception when we run out of RAM
-STATIC mp_obj_exception_t mp_emergency_exception_obj;
-
// Optionally allocated buffer for storing the first argument of an exception
// allocated when the heap is locked.
#if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF
# if MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE > 0
-STATIC byte mp_emergency_exception_buf[MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE];
#define mp_emergency_exception_buf_size MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE
void mp_init_emergency_exception_buf(void) {
@@ -62,12 +53,11 @@
}
#else
-STATIC mp_int_t mp_emergency_exception_buf_size = 0;
-STATIC byte *mp_emergency_exception_buf = NULL;
+#define mp_emergency_exception_buf_size MP_STATE_VM(mp_emergency_exception_buf_size)
void mp_init_emergency_exception_buf(void) {
mp_emergency_exception_buf_size = 0;
- mp_emergency_exception_buf = NULL;
+ MP_STATE_VM(mp_emergency_exception_buf) = NULL;
}
mp_obj_t mp_alloc_emergency_exception_buf(mp_obj_t size_in) {
@@ -78,13 +68,13 @@
}
int old_size = mp_emergency_exception_buf_size;
- void *old_buf = mp_emergency_exception_buf;
+ void *old_buf = MP_STATE_VM(mp_emergency_exception_buf);
// Update the 2 variables atomically so that an interrupt can't occur
// between the assignments.
mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
mp_emergency_exception_buf_size = size;
- mp_emergency_exception_buf = buf;
+ MP_STATE_VM(mp_emergency_exception_buf) = buf;
MICROPY_END_ATOMIC_SECTION(atomic_state);
if (old_buf != NULL) {
@@ -134,7 +124,7 @@
mp_obj_exception_t *o = m_new_obj_var_maybe(mp_obj_exception_t, mp_obj_t, 0);
if (o == NULL) {
// Couldn't allocate heap memory; use local data instead.
- o = &mp_emergency_exception_obj;
+ o = &MP_STATE_VM(mp_emergency_exception_obj);
// We can't store any args.
n_args = 0;
o->args = mp_const_empty_tuple;
@@ -308,7 +298,7 @@
if (o == NULL) {
// Couldn't allocate heap memory; use local data instead.
// Unfortunately, we won't be able to format the string...
- o = &mp_emergency_exception_obj;
+ o = &MP_STATE_VM(mp_emergency_exception_obj);
o->base.type = exc_type;
o->traceback = MP_OBJ_NULL;
o->args = mp_const_empty_tuple;
@@ -318,7 +308,7 @@
// of length 1, which has a string object and the string data.
if (mp_emergency_exception_buf_size > (sizeof(mp_obj_tuple_t) + sizeof(mp_obj_str_t) + sizeof(mp_obj_t))) {
- mp_obj_tuple_t *tuple = (mp_obj_tuple_t *)mp_emergency_exception_buf;
+ mp_obj_tuple_t *tuple = (mp_obj_tuple_t *)MP_STATE_VM(mp_emergency_exception_buf);
mp_obj_str_t *str = (mp_obj_str_t *)&tuple->items[1];
tuple->base.type = &mp_type_tuple;
@@ -326,7 +316,7 @@
tuple->items[0] = str;
byte *str_data = (byte *)&str[1];
- uint max_len = mp_emergency_exception_buf + mp_emergency_exception_buf_size
+ uint max_len = MP_STATE_VM(mp_emergency_exception_buf) + mp_emergency_exception_buf_size
- str_data;
va_list ap;
@@ -340,16 +330,16 @@
o->args = tuple;
- uint offset = &str_data[str->len] - mp_emergency_exception_buf;
+ uint offset = &str_data[str->len] - MP_STATE_VM(mp_emergency_exception_buf);
offset += sizeof(void *) - 1;
offset &= ~(sizeof(void *) - 1);
if ((mp_emergency_exception_buf_size - offset) > (sizeof(mp_obj_list_t) + sizeof(mp_obj_t) * 3)) {
// We have room to store some traceback.
- mp_obj_list_t *list = (mp_obj_list_t *)((byte *)mp_emergency_exception_buf + offset);
+ mp_obj_list_t *list = (mp_obj_list_t *)((byte *)MP_STATE_VM(mp_emergency_exception_buf) + offset);
list->base.type = &mp_type_list;
list->items = (mp_obj_t)&list[1];
- list->alloc = (mp_emergency_exception_buf + mp_emergency_exception_buf_size - (byte *)list->items) / sizeof(list->items[0]);
+ list->alloc = (MP_STATE_VM(mp_emergency_exception_buf) + mp_emergency_exception_buf_size - (byte *)list->items) / sizeof(list->items[0]);
list->len = 0;
o->traceback = list;
diff --git a/py/objexcept.h b/py/objexcept.h
new file mode 100644
index 0000000..d6cfacd
--- /dev/null
+++ b/py/objexcept.h
@@ -0,0 +1,38 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef __MICROPY_INCLUDED_PY_OBJEXCEPT_H__
+#define __MICROPY_INCLUDED_PY_OBJEXCEPT_H__
+
+#include "py/obj.h"
+#include "py/objtuple.h"
+
+typedef struct _mp_obj_exception_t {
+ mp_obj_base_t base;
+ mp_obj_t traceback; // a list object, holding (file,line,block) as numbers (not Python objects); a hack for now
+ mp_obj_tuple_t *args;
+} mp_obj_exception_t;
+
+#endif // __MICROPY_INCLUDED_PY_OBJEXCEPT_H__
diff --git a/py/objmodule.c b/py/objmodule.c
index 2e53bf0..dd935ae 100644
--- a/py/objmodule.c
+++ b/py/objmodule.c
@@ -27,13 +27,12 @@
#include <stdlib.h>
#include <assert.h>
+#include "py/mpstate.h"
#include "py/nlr.h"
#include "py/objmodule.h"
#include "py/runtime.h"
#include "py/builtin.h"
-STATIC mp_map_t mp_loaded_modules_map; // TODO: expose as sys.modules
-
STATIC void module_print(void (*print)(void *env, const char *fmt, ...), void *env, mp_obj_t self_in, mp_print_kind_t kind) {
mp_obj_module_t *self = self_in;
const char *name = qstr_str(self->name);
@@ -65,10 +64,10 @@
if (dict->map.table_is_fixed_array) {
#if MICROPY_CAN_OVERRIDE_BUILTINS
if (dict == &mp_module_builtins_globals) {
- if (mp_module_builtins_override_dict == NULL) {
- mp_module_builtins_override_dict = mp_obj_new_dict(1);
+ if (MP_STATE_VM(mp_module_builtins_override_dict) == NULL) {
+ MP_STATE_VM(mp_module_builtins_override_dict) = mp_obj_new_dict(1);
}
- dict = mp_module_builtins_override_dict;
+ dict = MP_STATE_VM(mp_module_builtins_override_dict);
} else
#endif
{
@@ -96,7 +95,7 @@
};
mp_obj_t mp_obj_new_module(qstr module_name) {
- mp_map_elem_t *el = mp_map_lookup(&mp_loaded_modules_map, MP_OBJ_NEW_QSTR(module_name), MP_MAP_LOOKUP_ADD_IF_NOT_FOUND);
+ mp_map_elem_t *el = mp_map_lookup(&MP_STATE_VM(mp_loaded_modules_map), MP_OBJ_NEW_QSTR(module_name), MP_MAP_LOOKUP_ADD_IF_NOT_FOUND);
// We could error out if module already exists, but let C extensions
// add new members to existing modules.
if (el->value != MP_OBJ_NULL) {
@@ -192,17 +191,17 @@
STATIC MP_DEFINE_CONST_MAP(mp_builtin_module_map, mp_builtin_module_table);
void mp_module_init(void) {
- mp_map_init(&mp_loaded_modules_map, 3);
+ mp_map_init(&MP_STATE_VM(mp_loaded_modules_map), 3);
}
void mp_module_deinit(void) {
- mp_map_deinit(&mp_loaded_modules_map);
+ mp_map_deinit(&MP_STATE_VM(mp_loaded_modules_map));
}
// returns MP_OBJ_NULL if not found
mp_obj_t mp_module_get(qstr module_name) {
// lookup module
- mp_map_elem_t *el = mp_map_lookup(&mp_loaded_modules_map, MP_OBJ_NEW_QSTR(module_name), MP_MAP_LOOKUP);
+ mp_map_elem_t *el = mp_map_lookup(&MP_STATE_VM(mp_loaded_modules_map), MP_OBJ_NEW_QSTR(module_name), MP_MAP_LOOKUP);
if (el == NULL) {
// module not found, look for builtin module names
@@ -217,5 +216,5 @@
}
void mp_module_register(qstr qstr, mp_obj_t module) {
- mp_map_lookup(&mp_loaded_modules_map, MP_OBJ_NEW_QSTR(qstr), MP_MAP_LOOKUP_ADD_IF_NOT_FOUND)->value = module;
+ mp_map_lookup(&MP_STATE_VM(mp_loaded_modules_map), MP_OBJ_NEW_QSTR(qstr), MP_MAP_LOOKUP_ADD_IF_NOT_FOUND)->value = module;
}
diff --git a/py/py.mk b/py/py.mk
index 4c4094e..a8b5af3 100644
--- a/py/py.mk
+++ b/py/py.mk
@@ -12,6 +12,7 @@
# py object files
PY_O_BASENAME = \
+ mpstate.o \
nlrx86.o \
nlrx64.o \
nlrthumb.o \
diff --git a/py/qstr.c b/py/qstr.c
index 1bca8a1..2ede730 100644
--- a/py/qstr.c
+++ b/py/qstr.c
@@ -27,6 +27,7 @@
#include <assert.h>
#include <string.h>
+#include "py/mpstate.h"
#include "py/qstr.h"
#include "py/gc.h"
@@ -68,14 +69,6 @@
return hash;
}
-typedef struct _qstr_pool_t {
- struct _qstr_pool_t *prev;
- mp_uint_t total_prev_len;
- mp_uint_t alloc;
- mp_uint_t len;
- const byte *qstrs[];
-} qstr_pool_t;
-
STATIC const qstr_pool_t const_pool = {
NULL, // no previous pool
0, // no previous pool
@@ -90,15 +83,13 @@
},
};
-STATIC qstr_pool_t *last_pool;
-
void qstr_init(void) {
- last_pool = (qstr_pool_t*)&const_pool; // we won't modify the const_pool since it has no allocated room left
+ MP_STATE_VM(last_pool) = (qstr_pool_t*)&const_pool; // we won't modify the const_pool since it has no allocated room left
}
STATIC const byte *find_qstr(qstr q) {
// search pool for this qstr
- for (qstr_pool_t *pool = last_pool; pool != NULL; pool = pool->prev) {
+ for (qstr_pool_t *pool = MP_STATE_VM(last_pool); pool != NULL; pool = pool->prev) {
if (q >= pool->total_prev_len) {
return pool->qstrs[q - pool->total_prev_len];
}
@@ -112,21 +103,21 @@
DEBUG_printf("QSTR: add hash=%d len=%d data=%.*s\n", Q_GET_HASH(q_ptr), Q_GET_LENGTH(q_ptr), Q_GET_LENGTH(q_ptr), Q_GET_DATA(q_ptr));
// make sure we have room in the pool for a new qstr
- if (last_pool->len >= last_pool->alloc) {
- qstr_pool_t *pool = m_new_obj_var(qstr_pool_t, const char*, last_pool->alloc * 2);
- pool->prev = last_pool;
- pool->total_prev_len = last_pool->total_prev_len + last_pool->len;
- pool->alloc = last_pool->alloc * 2;
+ if (MP_STATE_VM(last_pool)->len >= MP_STATE_VM(last_pool)->alloc) {
+ qstr_pool_t *pool = m_new_obj_var(qstr_pool_t, const char*, MP_STATE_VM(last_pool)->alloc * 2);
+ pool->prev = MP_STATE_VM(last_pool);
+ pool->total_prev_len = MP_STATE_VM(last_pool)->total_prev_len + MP_STATE_VM(last_pool)->len;
+ pool->alloc = MP_STATE_VM(last_pool)->alloc * 2;
pool->len = 0;
- last_pool = pool;
- DEBUG_printf("QSTR: allocate new pool of size %d\n", last_pool->alloc);
+ MP_STATE_VM(last_pool) = pool;
+ DEBUG_printf("QSTR: allocate new pool of size %d\n", MP_STATE_VM(last_pool)->alloc);
}
// add the new qstr
- last_pool->qstrs[last_pool->len++] = q_ptr;
+ MP_STATE_VM(last_pool)->qstrs[MP_STATE_VM(last_pool)->len++] = q_ptr;
// return id for the newly-added qstr
- return last_pool->total_prev_len + last_pool->len - 1;
+ return MP_STATE_VM(last_pool)->total_prev_len + MP_STATE_VM(last_pool)->len - 1;
}
qstr qstr_find_strn(const char *str, mp_uint_t str_len) {
@@ -134,7 +125,7 @@
mp_uint_t str_hash = qstr_compute_hash((const byte*)str, str_len);
// search pools for the data
- for (qstr_pool_t *pool = last_pool; pool != NULL; pool = pool->prev) {
+ for (qstr_pool_t *pool = MP_STATE_VM(last_pool); pool != NULL; pool = pool->prev) {
for (const byte **q = pool->qstrs, **q_top = pool->qstrs + pool->len; q < q_top; q++) {
if (Q_GET_HASH(*q) == str_hash && Q_GET_LENGTH(*q) == str_len && memcmp(Q_GET_DATA(*q), str, str_len) == 0) {
return pool->total_prev_len + (q - pool->qstrs);
@@ -215,7 +206,7 @@
*n_qstr = 0;
*n_str_data_bytes = 0;
*n_total_bytes = 0;
- for (qstr_pool_t *pool = last_pool; pool != NULL && pool != &const_pool; pool = pool->prev) {
+ for (qstr_pool_t *pool = MP_STATE_VM(last_pool); pool != NULL && pool != &const_pool; pool = pool->prev) {
*n_pool += 1;
*n_qstr += pool->len;
for (const byte **q = pool->qstrs, **q_top = pool->qstrs + pool->len; q < q_top; q++) {
diff --git a/py/qstr.h b/py/qstr.h
index a383452..d5ec1fe 100644
--- a/py/qstr.h
+++ b/py/qstr.h
@@ -46,6 +46,14 @@
typedef mp_uint_t qstr;
+typedef struct _qstr_pool_t {
+ struct _qstr_pool_t *prev;
+ mp_uint_t total_prev_len;
+ mp_uint_t alloc;
+ mp_uint_t len;
+ const byte *qstrs[];
+} qstr_pool_t;
+
#define QSTR_FROM_STR_STATIC(s) (qstr_from_strn((s), strlen(s)))
void qstr_init(void);
diff --git a/py/runtime.c b/py/runtime.c
index c4c73df..54d1207 100644
--- a/py/runtime.c
+++ b/py/runtime.c
@@ -28,6 +28,7 @@
#include <string.h>
#include <assert.h>
+#include "py/mpstate.h"
#include "py/nlr.h"
#include "py/parsehelper.h"
#include "py/parsenum.h"
@@ -52,32 +53,18 @@
#define DEBUG_OP_printf(...) (void)0
#endif
-// pending exception object (MP_OBJ_NULL if not pending)
-mp_obj_t mp_pending_exception;
-
-// locals and globals need to be pointers because they can be the same in outer module scope
-STATIC mp_obj_dict_t *dict_locals;
-STATIC mp_obj_dict_t *dict_globals;
-
-// dictionary for the __main__ module
-STATIC mp_obj_dict_t dict_main;
-
const mp_obj_module_t mp_module___main__ = {
.base = { &mp_type_module },
.name = MP_QSTR___main__,
- .globals = (mp_obj_dict_t*)&dict_main,
+ .globals = (mp_obj_dict_t*)&MP_STATE_VM(dict_main),
};
-#if MICROPY_CAN_OVERRIDE_BUILTINS
-mp_obj_dict_t *mp_module_builtins_override_dict;
-#endif
-
void mp_init(void) {
qstr_init();
mp_stack_ctrl_init();
// no pending exceptions to start with
- mp_pending_exception = MP_OBJ_NULL;
+ MP_STATE_VM(mp_pending_exception) = MP_OBJ_NULL;
#if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF
mp_init_emergency_exception_buf();
@@ -89,21 +76,21 @@
#endif
// optimization disabled by default
- mp_optimise_value = 0;
+ MP_STATE_VM(mp_optimise_value) = 0;
// init global module stuff
mp_module_init();
// initialise the __main__ module
- mp_obj_dict_init(&dict_main, 1);
- mp_obj_dict_store(&dict_main, MP_OBJ_NEW_QSTR(MP_QSTR___name__), MP_OBJ_NEW_QSTR(MP_QSTR___main__));
+ mp_obj_dict_init(&MP_STATE_VM(dict_main), 1);
+ mp_obj_dict_store(&MP_STATE_VM(dict_main), MP_OBJ_NEW_QSTR(MP_QSTR___name__), MP_OBJ_NEW_QSTR(MP_QSTR___main__));
// locals = globals for outer module (see Objects/frameobject.c/PyFrame_New())
- dict_locals = dict_globals = &dict_main;
+ MP_STATE_CTX(dict_locals) = MP_STATE_CTX(dict_globals) = &MP_STATE_VM(dict_main);
#if MICROPY_CAN_OVERRIDE_BUILTINS
// start with no extensions to builtins
- mp_module_builtins_override_dict = NULL;
+ MP_STATE_VM(mp_module_builtins_override_dict) = NULL;
#endif
}
@@ -147,8 +134,8 @@
// logic: search locals, globals, builtins
DEBUG_OP_printf("load name %s\n", qstr_str(qstr));
// If we're at the outer scope (locals == globals), dispatch to load_global right away
- if (dict_locals != dict_globals) {
- mp_map_elem_t *elem = mp_map_lookup(&dict_locals->map, MP_OBJ_NEW_QSTR(qstr), MP_MAP_LOOKUP);
+ if (MP_STATE_CTX(dict_locals) != MP_STATE_CTX(dict_globals)) {
+ mp_map_elem_t *elem = mp_map_lookup(&MP_STATE_CTX(dict_locals)->map, MP_OBJ_NEW_QSTR(qstr), MP_MAP_LOOKUP);
if (elem != NULL) {
return elem->value;
}
@@ -159,12 +146,12 @@
mp_obj_t mp_load_global(qstr qstr) {
// logic: search globals, builtins
DEBUG_OP_printf("load global %s\n", qstr_str(qstr));
- mp_map_elem_t *elem = mp_map_lookup(&dict_globals->map, MP_OBJ_NEW_QSTR(qstr), MP_MAP_LOOKUP);
+ mp_map_elem_t *elem = mp_map_lookup(&MP_STATE_CTX(dict_globals)->map, MP_OBJ_NEW_QSTR(qstr), MP_MAP_LOOKUP);
if (elem == NULL) {
#if MICROPY_CAN_OVERRIDE_BUILTINS
- if (mp_module_builtins_override_dict != NULL) {
+ if (MP_STATE_VM(mp_module_builtins_override_dict) != NULL) {
// lookup in additional dynamic table of builtins first
- elem = mp_map_lookup(&mp_module_builtins_override_dict->map, MP_OBJ_NEW_QSTR(qstr), MP_MAP_LOOKUP);
+ elem = mp_map_lookup(&MP_STATE_VM(mp_module_builtins_override_dict)->map, MP_OBJ_NEW_QSTR(qstr), MP_MAP_LOOKUP);
if (elem != NULL) {
return elem->value;
}
@@ -187,9 +174,9 @@
mp_obj_t mp_load_build_class(void) {
DEBUG_OP_printf("load_build_class\n");
#if MICROPY_CAN_OVERRIDE_BUILTINS
- if (mp_module_builtins_override_dict != NULL) {
+ if (MP_STATE_VM(mp_module_builtins_override_dict) != NULL) {
// lookup in additional dynamic table of builtins first
- mp_map_elem_t *elem = mp_map_lookup(&mp_module_builtins_override_dict->map, MP_OBJ_NEW_QSTR(MP_QSTR___build_class__), MP_MAP_LOOKUP);
+ mp_map_elem_t *elem = mp_map_lookup(&MP_STATE_VM(mp_module_builtins_override_dict)->map, MP_OBJ_NEW_QSTR(MP_QSTR___build_class__), MP_MAP_LOOKUP);
if (elem != NULL) {
return elem->value;
}
@@ -200,24 +187,24 @@
void mp_store_name(qstr qstr, mp_obj_t obj) {
DEBUG_OP_printf("store name %s <- %p\n", qstr_str(qstr), obj);
- mp_obj_dict_store(dict_locals, MP_OBJ_NEW_QSTR(qstr), obj);
+ mp_obj_dict_store(MP_STATE_CTX(dict_locals), MP_OBJ_NEW_QSTR(qstr), obj);
}
void mp_delete_name(qstr qstr) {
DEBUG_OP_printf("delete name %s\n", qstr_str(qstr));
// TODO convert KeyError to NameError if qstr not found
- mp_obj_dict_delete(dict_locals, MP_OBJ_NEW_QSTR(qstr));
+ mp_obj_dict_delete(MP_STATE_CTX(dict_locals), MP_OBJ_NEW_QSTR(qstr));
}
void mp_store_global(qstr qstr, mp_obj_t obj) {
DEBUG_OP_printf("store global %s <- %p\n", qstr_str(qstr), obj);
- mp_obj_dict_store(dict_globals, MP_OBJ_NEW_QSTR(qstr), obj);
+ mp_obj_dict_store(MP_STATE_CTX(dict_globals), MP_OBJ_NEW_QSTR(qstr), obj);
}
void mp_delete_global(qstr qstr) {
DEBUG_OP_printf("delete global %s\n", qstr_str(qstr));
// TODO convert KeyError to NameError if qstr not found
- mp_obj_dict_delete(dict_globals, MP_OBJ_NEW_QSTR(qstr));
+ mp_obj_dict_delete(MP_STATE_CTX(dict_globals), MP_OBJ_NEW_QSTR(qstr));
}
mp_obj_t mp_unary_op(mp_uint_t op, mp_obj_t arg) {
@@ -1241,24 +1228,6 @@
}
}
-mp_obj_dict_t *mp_locals_get(void) {
- return dict_locals;
-}
-
-void mp_locals_set(mp_obj_dict_t *d) {
- DEBUG_OP_printf("mp_locals_set(%p)\n", d);
- dict_locals = d;
-}
-
-mp_obj_dict_t *mp_globals_get(void) {
- return dict_globals;
-}
-
-void mp_globals_set(mp_obj_dict_t *d) {
- DEBUG_OP_printf("mp_globals_set(%p)\n", d);
- dict_globals = d;
-}
-
// this is implemented in this file so it can optimise access to locals/globals
mp_obj_t mp_parse_compile_execute(mp_lexer_t *lex, mp_parse_input_kind_t parse_input_kind, mp_obj_dict_t *globals, mp_obj_dict_t *locals) {
// parse the string
diff --git a/py/runtime.h b/py/runtime.h
index 711b15f..7a9c6ee 100644
--- a/py/runtime.h
+++ b/py/runtime.h
@@ -26,6 +26,7 @@
#ifndef __MICROPY_INCLUDED_PY_RUNTIME_H__
#define __MICROPY_INCLUDED_PY_RUNTIME_H__
+#include "py/mpstate.h"
#include "py/obj.h"
typedef enum {
@@ -64,10 +65,10 @@
NORETURN void mp_arg_error_terse_mismatch(void);
NORETURN void mp_arg_error_unimpl_kw(void);
-mp_obj_dict_t *mp_locals_get(void);
-void mp_locals_set(mp_obj_dict_t *d);
-mp_obj_dict_t *mp_globals_get(void);
-void mp_globals_set(mp_obj_dict_t *d);
+static inline mp_obj_dict_t *mp_locals_get(void) { return MP_STATE_CTX(dict_locals); }
+static inline void mp_locals_set(mp_obj_dict_t *d) { MP_STATE_CTX(dict_locals) = d; }
+static inline mp_obj_dict_t *mp_globals_get(void) { return MP_STATE_CTX(dict_globals); }
+static inline void mp_globals_set(mp_obj_dict_t *d) { MP_STATE_CTX(dict_globals) = d; }
mp_obj_t mp_load_name(qstr qstr);
mp_obj_t mp_load_global(qstr qstr);
@@ -120,7 +121,6 @@
mp_obj_t mp_native_call_function_n_kw(mp_obj_t fun_in, mp_uint_t n_args_kw, const mp_obj_t *args);
NORETURN void mp_native_raise(mp_obj_t o);
-extern mp_obj_t mp_pending_exception;
extern struct _mp_obj_list_t mp_sys_path_obj;
extern struct _mp_obj_list_t mp_sys_argv_obj;
#define mp_sys_path ((mp_obj_t)&mp_sys_path_obj)
diff --git a/py/stackctrl.c b/py/stackctrl.c
index 31336ec..bf6a815 100644
--- a/py/stackctrl.c
+++ b/py/stackctrl.c
@@ -24,34 +24,30 @@
* THE SOFTWARE.
*/
+#include "py/mpstate.h"
#include "py/nlr.h"
#include "py/obj.h"
#include "py/stackctrl.h"
-// Stack top at the start of program
-char *stack_top;
-
void mp_stack_ctrl_init(void) {
volatile int stack_dummy;
- stack_top = (char*)&stack_dummy;
+ MP_STATE_VM(stack_top) = (char*)&stack_dummy;
}
mp_uint_t mp_stack_usage(void) {
// Assumes descending stack
volatile int stack_dummy;
- return stack_top - (char*)&stack_dummy;
+ return MP_STATE_VM(stack_top) - (char*)&stack_dummy;
}
#if MICROPY_STACK_CHECK
-static mp_uint_t stack_limit = 10240;
-
void mp_stack_set_limit(mp_uint_t limit) {
- stack_limit = limit;
+ MP_STATE_VM(stack_limit) = limit;
}
void mp_stack_check(void) {
- if (mp_stack_usage() >= stack_limit) {
+ if (mp_stack_usage() >= MP_STATE_VM(stack_limit)) {
nlr_raise(mp_obj_new_exception_msg(&mp_type_RuntimeError, "maximum recursion depth exceeded"));
}
}
diff --git a/py/vm.c b/py/vm.c
index b0f7783..72dfb8a 100644
--- a/py/vm.c
+++ b/py/vm.c
@@ -29,6 +29,7 @@
#include <string.h>
#include <assert.h>
+#include "py/mpstate.h"
#include "py/nlr.h"
#include "py/emitglue.h"
#include "py/runtime.h"
@@ -991,10 +992,10 @@
#endif
pending_exception_check:
- if (mp_pending_exception != MP_OBJ_NULL) {
+ if (MP_STATE_VM(mp_pending_exception) != MP_OBJ_NULL) {
MARK_EXC_IP_SELECTIVE();
- mp_obj_t obj = mp_pending_exception;
- mp_pending_exception = MP_OBJ_NULL;
+ mp_obj_t obj = MP_STATE_VM(mp_pending_exception);
+ MP_STATE_VM(mp_pending_exception) = MP_OBJ_NULL;
RAISE(obj);
}