/*
 * This file is part of the MicroPython project, http://micropython.org/
 *
 * The MIT License (MIT)
 *
 * Copyright (c) 2013, 2014 Damien P. George
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <assert.h>
#include <stdio.h>
#include <string.h>

#include "py/gc.h"
#include "py/runtime.h"

#if MICROPY_ENABLE_GC

#if MICROPY_DEBUG_VERBOSE // print debugging info
#define DEBUG_PRINT (1)
#define DEBUG_printf DEBUG_printf
#else // don't print debugging info
#define DEBUG_PRINT (0)
#define DEBUG_printf(...) (void)0
#endif

// make this 1 to dump the heap each time it changes
#define EXTENSIVE_HEAP_PROFILING (0)

// make this 1 to zero out swept memory to more eagerly
// detect untraced objects still in use
#define CLEAR_ON_SWEEP (0)

#define WORDS_PER_BLOCK ((MICROPY_BYTES_PER_GC_BLOCK) / BYTES_PER_WORD)
#define BYTES_PER_BLOCK (MICROPY_BYTES_PER_GC_BLOCK)

// ATB = allocation table byte
// 0b00 = FREE -- free block
// 0b01 = HEAD -- head of a chain of blocks
// 0b10 = TAIL -- in the tail of a chain of blocks
// 0b11 = MARK -- marked head block
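//
// Illustrative example of this packing (it follows from the definitions
// below, with BLOCKS_PER_ATB = 4): a 1-block object followed by a 3-block
// object at the start of the pool is encoded as HEAD, HEAD, TAIL, TAIL, so
// the first ATB reads 0xa5 (binary 10 10 01 01, block 0 occupying the two
// least-significant bits).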

#define AT_FREE (0)
#define AT_HEAD (1)
#define AT_TAIL (2)
#define AT_MARK (3)

#define BLOCKS_PER_ATB (4)
#define ATB_MASK_0 (0x03)
#define ATB_MASK_1 (0x0c)
#define ATB_MASK_2 (0x30)
#define ATB_MASK_3 (0xc0)

#define ATB_0_IS_FREE(a) (((a) & ATB_MASK_0) == 0)
#define ATB_1_IS_FREE(a) (((a) & ATB_MASK_1) == 0)
#define ATB_2_IS_FREE(a) (((a) & ATB_MASK_2) == 0)
#define ATB_3_IS_FREE(a) (((a) & ATB_MASK_3) == 0)

#define BLOCK_SHIFT(block) (2 * ((block) & (BLOCKS_PER_ATB - 1)))
#define ATB_GET_KIND(block) ((MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] >> BLOCK_SHIFT(block)) & 3)
#define ATB_ANY_TO_FREE(block) do { MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] &= (~(AT_MARK << BLOCK_SHIFT(block))); } while (0)
#define ATB_FREE_TO_HEAD(block) do { MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] |= (AT_HEAD << BLOCK_SHIFT(block)); } while (0)
#define ATB_FREE_TO_TAIL(block) do { MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] |= (AT_TAIL << BLOCK_SHIFT(block)); } while (0)
#define ATB_HEAD_TO_MARK(block) do { MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] |= (AT_MARK << BLOCK_SHIFT(block)); } while (0)
#define ATB_MARK_TO_HEAD(block) do { MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] &= (~(AT_TAIL << BLOCK_SHIFT(block))); } while (0)

#define BLOCK_FROM_PTR(ptr) (((byte*)(ptr) - MP_STATE_MEM(gc_pool_start)) / BYTES_PER_BLOCK)
#define PTR_FROM_BLOCK(block) (((block) * BYTES_PER_BLOCK + (uintptr_t)MP_STATE_MEM(gc_pool_start)))
#define ATB_FROM_BLOCK(bl) ((bl) / BLOCKS_PER_ATB)

#if MICROPY_ENABLE_FINALISER
// FTB = finaliser table byte
// if set, then the corresponding block may have a finaliser

#define BLOCKS_PER_FTB (8)

#define FTB_GET(block) ((MP_STATE_MEM(gc_finaliser_table_start)[(block) / BLOCKS_PER_FTB] >> ((block) & 7)) & 1)
#define FTB_SET(block) do { MP_STATE_MEM(gc_finaliser_table_start)[(block) / BLOCKS_PER_FTB] |= (1 << ((block) & 7)); } while (0)
#define FTB_CLEAR(block) do { MP_STATE_MEM(gc_finaliser_table_start)[(block) / BLOCKS_PER_FTB] &= (~(1 << ((block) & 7))); } while (0)
#endif

#if MICROPY_PY_THREAD && !MICROPY_PY_THREAD_GIL
#define GC_ENTER() mp_thread_mutex_lock(&MP_STATE_MEM(gc_mutex), 1)
#define GC_EXIT() mp_thread_mutex_unlock(&MP_STATE_MEM(gc_mutex))
#else
#define GC_ENTER()
#define GC_EXIT()
#endif

// TODO waste less memory; currently requires that all entries in alloc_table have a corresponding block in pool
void gc_init(void *start, void *end) {
    // align end pointer on block boundary
    end = (void*)((uintptr_t)end & (~(BYTES_PER_BLOCK - 1)));
    DEBUG_printf("Initializing GC heap: %p..%p = " UINT_FMT " bytes\n", start, end, (byte*)end - (byte*)start);

    // calculate parameters for GC (T=total, A=alloc table, F=finaliser table, P=pool; all in bytes):
    // T = A + F + P
    //     F = A * BLOCKS_PER_ATB / BLOCKS_PER_FTB
    //     P = A * BLOCKS_PER_ATB * BYTES_PER_BLOCK
    // => T = A * (1 + BLOCKS_PER_ATB / BLOCKS_PER_FTB + BLOCKS_PER_ATB * BYTES_PER_BLOCK)
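    //
    // A rough worked example (assuming the common 32-bit defaults of
    // BYTES_PER_BLOCK = 16, BLOCKS_PER_ATB = 4 and BLOCKS_PER_FTB = 8): a
    // 16384-byte region gives A of about 250 bytes of alloc table, F of about
    // 125 bytes of finaliser table and P = 1000 blocks (16000 bytes) of pool,
    // with the few leftover bytes unused.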
    size_t total_byte_len = (byte*)end - (byte*)start;
    #if MICROPY_ENABLE_FINALISER
    MP_STATE_MEM(gc_alloc_table_byte_len) = total_byte_len * BITS_PER_BYTE / (BITS_PER_BYTE + BITS_PER_BYTE * BLOCKS_PER_ATB / BLOCKS_PER_FTB + BITS_PER_BYTE * BLOCKS_PER_ATB * BYTES_PER_BLOCK);
    #else
    MP_STATE_MEM(gc_alloc_table_byte_len) = total_byte_len / (1 + BITS_PER_BYTE / 2 * BYTES_PER_BLOCK);
    #endif

    MP_STATE_MEM(gc_alloc_table_start) = (byte*)start;

    #if MICROPY_ENABLE_FINALISER
    size_t gc_finaliser_table_byte_len = (MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB + BLOCKS_PER_FTB - 1) / BLOCKS_PER_FTB;
    MP_STATE_MEM(gc_finaliser_table_start) = MP_STATE_MEM(gc_alloc_table_start) + MP_STATE_MEM(gc_alloc_table_byte_len);
    #endif

    size_t gc_pool_block_len = MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB;
    MP_STATE_MEM(gc_pool_start) = (byte*)end - gc_pool_block_len * BYTES_PER_BLOCK;
    MP_STATE_MEM(gc_pool_end) = end;

    #if MICROPY_ENABLE_FINALISER
    assert(MP_STATE_MEM(gc_pool_start) >= MP_STATE_MEM(gc_finaliser_table_start) + gc_finaliser_table_byte_len);
    #endif

    // clear ATBs
    memset(MP_STATE_MEM(gc_alloc_table_start), 0, MP_STATE_MEM(gc_alloc_table_byte_len));

    #if MICROPY_ENABLE_FINALISER
    // clear FTBs
    memset(MP_STATE_MEM(gc_finaliser_table_start), 0, gc_finaliser_table_byte_len);
    #endif

    // set last free ATB index to start of heap
    MP_STATE_MEM(gc_last_free_atb_index) = 0;

    // unlock the GC
    MP_STATE_MEM(gc_lock_depth) = 0;

    // allow auto collection
    MP_STATE_MEM(gc_auto_collect_enabled) = 1;

    #if MICROPY_GC_ALLOC_THRESHOLD
    // by default, maxuint for gc threshold, effectively turning gc-by-threshold off
    MP_STATE_MEM(gc_alloc_threshold) = (size_t)-1;
    MP_STATE_MEM(gc_alloc_amount) = 0;
    #endif

    #if MICROPY_PY_THREAD
    mp_thread_mutex_init(&MP_STATE_MEM(gc_mutex));
    #endif

    DEBUG_printf("GC layout:\n");
    DEBUG_printf(" alloc table at %p, length " UINT_FMT " bytes, " UINT_FMT " blocks\n", MP_STATE_MEM(gc_alloc_table_start), MP_STATE_MEM(gc_alloc_table_byte_len), MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB);
    #if MICROPY_ENABLE_FINALISER
    DEBUG_printf(" finaliser table at %p, length " UINT_FMT " bytes, " UINT_FMT " blocks\n", MP_STATE_MEM(gc_finaliser_table_start), gc_finaliser_table_byte_len, gc_finaliser_table_byte_len * BLOCKS_PER_FTB);
    #endif
    DEBUG_printf(" pool at %p, length " UINT_FMT " bytes, " UINT_FMT " blocks\n", MP_STATE_MEM(gc_pool_start), gc_pool_block_len * BYTES_PER_BLOCK, gc_pool_block_len);
}

void gc_lock(void) {
    GC_ENTER();
    MP_STATE_MEM(gc_lock_depth)++;
    GC_EXIT();
}

void gc_unlock(void) {
    GC_ENTER();
    MP_STATE_MEM(gc_lock_depth)--;
    GC_EXIT();
}

bool gc_is_locked(void) {
    return MP_STATE_MEM(gc_lock_depth) != 0;
}

// ptr should be of type void*
#define VERIFY_PTR(ptr) ( \
        ((uintptr_t)(ptr) & (BYTES_PER_BLOCK - 1)) == 0 /* must be aligned on a block */ \
        && ptr >= (void*)MP_STATE_MEM(gc_pool_start) /* must be above start of pool */ \
        && ptr < (void*)MP_STATE_MEM(gc_pool_end) /* must be below end of pool */ \
    )
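
// Note that VERIFY_PTR is a purely conservative test: any word that happens
// to be block-aligned and to fall within the pool is treated as a possible
// pointer to a heap object, whether or not it really is one.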

#ifndef TRACE_MARK
#if DEBUG_PRINT
#define TRACE_MARK(block, ptr) DEBUG_printf("gc_mark(%p)\n", ptr)
#else
#define TRACE_MARK(block, ptr)
#endif
#endif

// ptr should be of type void*
#define VERIFY_MARK_AND_PUSH(ptr) \
    do { \
        if (VERIFY_PTR(ptr)) { \
            size_t _block = BLOCK_FROM_PTR(ptr); \
            if (ATB_GET_KIND(_block) == AT_HEAD) { \
                /* an unmarked head, mark it, and push it on gc stack */ \
                TRACE_MARK(_block, ptr); \
                ATB_HEAD_TO_MARK(_block); \
                if (MP_STATE_MEM(gc_sp) < &MP_STATE_MEM(gc_stack)[MICROPY_ALLOC_GC_STACK_SIZE]) { \
                    *MP_STATE_MEM(gc_sp)++ = _block; \
                } else { \
                    MP_STATE_MEM(gc_stack_overflow) = 1; \
                } \
            } \
        } \
    } while (0)

STATIC void gc_drain_stack(void) {
    while (MP_STATE_MEM(gc_sp) > MP_STATE_MEM(gc_stack)) {
        // pop the next block off the stack
        size_t block = *--MP_STATE_MEM(gc_sp);

        // work out number of consecutive blocks in the chain starting with this one
        size_t n_blocks = 0;
        do {
            n_blocks += 1;
        } while (ATB_GET_KIND(block + n_blocks) == AT_TAIL);

        // check this block's children
        void **ptrs = (void**)PTR_FROM_BLOCK(block);
        for (size_t i = n_blocks * BYTES_PER_BLOCK / sizeof(void*); i > 0; i--, ptrs++) {
            void *ptr = *ptrs;
            VERIFY_MARK_AND_PUSH(ptr);
        }
    }
}

STATIC void gc_deal_with_stack_overflow(void) {
    while (MP_STATE_MEM(gc_stack_overflow)) {
        MP_STATE_MEM(gc_stack_overflow) = 0;
        MP_STATE_MEM(gc_sp) = MP_STATE_MEM(gc_stack);

        // scan entire memory looking for blocks which have been marked but not their children
        for (size_t block = 0; block < MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB; block++) {
            // trace (again) if mark bit set
            if (ATB_GET_KIND(block) == AT_MARK) {
                *MP_STATE_MEM(gc_sp)++ = block;
                gc_drain_stack();
            }
        }
    }
}

STATIC void gc_sweep(void) {
    #if MICROPY_PY_GC_COLLECT_RETVAL
    MP_STATE_MEM(gc_collected) = 0;
    #endif
    // free unmarked heads and their tails
    int free_tail = 0;
    for (size_t block = 0; block < MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB; block++) {
        switch (ATB_GET_KIND(block)) {
            case AT_HEAD:
                #if MICROPY_ENABLE_FINALISER
                if (FTB_GET(block)) {
                    mp_obj_base_t *obj = (mp_obj_base_t*)PTR_FROM_BLOCK(block);
                    if (obj->type != NULL) {
                        // if the object has a type then see if it has a __del__ method
                        mp_obj_t dest[2];
                        mp_load_method_maybe(MP_OBJ_FROM_PTR(obj), MP_QSTR___del__, dest);
                        if (dest[0] != MP_OBJ_NULL) {
                            // load_method returned a method, execute it in a protected environment
                            #if MICROPY_ENABLE_SCHEDULER
                            mp_sched_lock();
                            #endif
                            mp_call_function_1_protected(dest[0], dest[1]);
                            #if MICROPY_ENABLE_SCHEDULER
                            mp_sched_unlock();
                            #endif
                        }
                    }
                    // clear finaliser flag
                    FTB_CLEAR(block);
                }
                #endif
                free_tail = 1;
                DEBUG_printf("gc_sweep(%p)\n", PTR_FROM_BLOCK(block));
                #if MICROPY_PY_GC_COLLECT_RETVAL
                MP_STATE_MEM(gc_collected)++;
                #endif
                // fall through to free the head

            case AT_TAIL:
                if (free_tail) {
                    ATB_ANY_TO_FREE(block);
                    #if CLEAR_ON_SWEEP
                    memset((void*)PTR_FROM_BLOCK(block), 0, BYTES_PER_BLOCK);
                    #endif
                }
                break;

            case AT_MARK:
                ATB_MARK_TO_HEAD(block);
                free_tail = 0;
                break;
        }
    }
}

void gc_collect_start(void) {
    GC_ENTER();
    MP_STATE_MEM(gc_lock_depth)++;
    #if MICROPY_GC_ALLOC_THRESHOLD
    MP_STATE_MEM(gc_alloc_amount) = 0;
    #endif
    MP_STATE_MEM(gc_stack_overflow) = 0;
    MP_STATE_MEM(gc_sp) = MP_STATE_MEM(gc_stack);

    // Trace root pointers. This relies on the root pointers being organised
    // correctly in the mp_state_ctx structure. We scan nlr_top, dict_locals,
    // dict_globals, then the root pointer section of mp_state_vm.
    void **ptrs = (void**)(void*)&mp_state_ctx;
    gc_collect_root(ptrs, offsetof(mp_state_ctx_t, vm.qstr_last_chunk) / sizeof(void*));

    #if MICROPY_ENABLE_PYSTACK
    // Trace root pointers from the Python stack.
    ptrs = (void**)(void*)MP_STATE_THREAD(pystack_start);
    gc_collect_root(ptrs, (MP_STATE_THREAD(pystack_cur) - MP_STATE_THREAD(pystack_start)) / sizeof(void*));
    #endif
}

void gc_collect_root(void **ptrs, size_t len) {
    for (size_t i = 0; i < len; i++) {
        void *ptr = ptrs[i];
        VERIFY_MARK_AND_PUSH(ptr);
        gc_drain_stack();
    }
}

void gc_collect_end(void) {
    gc_deal_with_stack_overflow();
    gc_sweep();
    MP_STATE_MEM(gc_last_free_atb_index) = 0;
    MP_STATE_MEM(gc_lock_depth)--;
    GC_EXIT();
}

void gc_info(gc_info_t *info) {
    GC_ENTER();
    info->total = MP_STATE_MEM(gc_pool_end) - MP_STATE_MEM(gc_pool_start);
    info->used = 0;
    info->free = 0;
    info->max_free = 0;
    info->num_1block = 0;
    info->num_2block = 0;
    info->max_block = 0;
    bool finish = false;
    for (size_t block = 0, len = 0, len_free = 0; !finish;) {
        size_t kind = ATB_GET_KIND(block);
        switch (kind) {
            case AT_FREE:
                info->free += 1;
                len_free += 1;
                len = 0;
                break;

            case AT_HEAD:
                info->used += 1;
                len = 1;
                break;

            case AT_TAIL:
                info->used += 1;
                len += 1;
                break;

            case AT_MARK:
                // shouldn't happen
                break;
        }

        block++;
        finish = (block == MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB);
        // Get next block type if possible
        if (!finish) {
            kind = ATB_GET_KIND(block);
        }

        if (finish || kind == AT_FREE || kind == AT_HEAD) {
            if (len == 1) {
                info->num_1block += 1;
            } else if (len == 2) {
                info->num_2block += 1;
            }
            if (len > info->max_block) {
                info->max_block = len;
            }
            if (finish || kind == AT_HEAD) {
                if (len_free > info->max_free) {
                    info->max_free = len_free;
                }
                len_free = 0;
            }
        }
    }

    info->used *= BYTES_PER_BLOCK;
    info->free *= BYTES_PER_BLOCK;
    GC_EXIT();
}

void *gc_alloc(size_t n_bytes, bool has_finaliser) {
    size_t n_blocks = ((n_bytes + BYTES_PER_BLOCK - 1) & (~(BYTES_PER_BLOCK - 1))) / BYTES_PER_BLOCK;
    DEBUG_printf("gc_alloc(" UINT_FMT " bytes -> " UINT_FMT " blocks)\n", n_bytes, n_blocks);

    // check for 0 allocation
    if (n_blocks == 0) {
        return NULL;
    }

    GC_ENTER();

    // check if GC is locked
    if (MP_STATE_MEM(gc_lock_depth) > 0) {
        GC_EXIT();
        return NULL;
    }

    size_t i;
    size_t end_block;
    size_t start_block;
    size_t n_free = 0;
    int collected = !MP_STATE_MEM(gc_auto_collect_enabled);

    #if MICROPY_GC_ALLOC_THRESHOLD
    if (!collected && MP_STATE_MEM(gc_alloc_amount) >= MP_STATE_MEM(gc_alloc_threshold)) {
        GC_EXIT();
        gc_collect();
        GC_ENTER();
    }
    #endif

    for (;;) {

        // look for a run of n_blocks available blocks
        for (i = MP_STATE_MEM(gc_last_free_atb_index); i < MP_STATE_MEM(gc_alloc_table_byte_len); i++) {
            byte a = MP_STATE_MEM(gc_alloc_table_start)[i];
            if (ATB_0_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 0; goto found; } } else { n_free = 0; }
            if (ATB_1_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 1; goto found; } } else { n_free = 0; }
            if (ATB_2_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 2; goto found; } } else { n_free = 0; }
            if (ATB_3_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 3; goto found; } } else { n_free = 0; }
        }

        GC_EXIT();
        // nothing found!
        if (collected) {
            return NULL;
        }
        DEBUG_printf("gc_alloc(" UINT_FMT "): no free mem, triggering GC\n", n_bytes);
        gc_collect();
        collected = 1;
        GC_ENTER();
    }

    // found, ending at block i inclusive
found:
    // get starting and end blocks, both inclusive
    end_block = i;
    start_block = i - n_free + 1;

    // Set last free ATB index to block after last block we found, for start of
    // next scan. To reduce fragmentation, we only do this if we were looking
    // for a single free block, which guarantees that there are no free blocks
    // before this one. Also, whenever we free or shrink a block we must check
    // if this index needs adjusting (see gc_realloc and gc_free).
    if (n_free == 1) {
        MP_STATE_MEM(gc_last_free_atb_index) = (i + 1) / BLOCKS_PER_ATB;
    }

    // mark first block as used head
    ATB_FREE_TO_HEAD(start_block);

    // mark rest of blocks as used tail
    // TODO for a run of many blocks can make this more efficient
    for (size_t bl = start_block + 1; bl <= end_block; bl++) {
        ATB_FREE_TO_TAIL(bl);
    }

    // get pointer to first block
    // we must create this pointer before unlocking the GC so a collection can find it
    void *ret_ptr = (void*)(MP_STATE_MEM(gc_pool_start) + start_block * BYTES_PER_BLOCK);
    DEBUG_printf("gc_alloc(%p)\n", ret_ptr);

    #if MICROPY_GC_ALLOC_THRESHOLD
    MP_STATE_MEM(gc_alloc_amount) += n_blocks;
    #endif

    GC_EXIT();

    #if MICROPY_GC_CONSERVATIVE_CLEAR
    // be conservative and zero out all the newly allocated blocks
    memset((byte*)ret_ptr, 0, (end_block - start_block + 1) * BYTES_PER_BLOCK);
    #else
    // zero out the additional bytes of the newly allocated blocks
    // This is needed because the blocks may have previously held pointers
    // to the heap and will not be set to something else if the caller
    // doesn't actually use the entire block. As such they will continue
    // to point to the heap and may prevent other blocks from being reclaimed.
    memset((byte*)ret_ptr + n_bytes, 0, (end_block - start_block + 1) * BYTES_PER_BLOCK - n_bytes);
    #endif

    #if MICROPY_ENABLE_FINALISER
    if (has_finaliser) {
        // clear type pointer in case it is never set
        ((mp_obj_base_t*)ret_ptr)->type = NULL;
        // set mp_obj flag only if it has a finaliser
        GC_ENTER();
        FTB_SET(start_block);
        GC_EXIT();
    }
    #else
    (void)has_finaliser;
    #endif

    #if EXTENSIVE_HEAP_PROFILING
    gc_dump_alloc_table();
    #endif

    return ret_ptr;
}

/*
void *gc_alloc(mp_uint_t n_bytes) {
    return _gc_alloc(n_bytes, false);
}

void *gc_alloc_with_finaliser(mp_uint_t n_bytes) {
    return _gc_alloc(n_bytes, true);
}
*/

// force the freeing of a piece of memory
// TODO: freeing here does not call finaliser
void gc_free(void *ptr) {
    GC_ENTER();
    if (MP_STATE_MEM(gc_lock_depth) > 0) {
        // TODO how to deal with this error?
        GC_EXIT();
        return;
    }

    DEBUG_printf("gc_free(%p)\n", ptr);

    if (ptr == NULL) {
        GC_EXIT();
    } else {
        // get the GC block number corresponding to this pointer
        assert(VERIFY_PTR(ptr));
        size_t block = BLOCK_FROM_PTR(ptr);
        assert(ATB_GET_KIND(block) == AT_HEAD);

        #if MICROPY_ENABLE_FINALISER
        FTB_CLEAR(block);
        #endif

        // set the last_free pointer to this block if it's earlier in the heap
        if (block / BLOCKS_PER_ATB < MP_STATE_MEM(gc_last_free_atb_index)) {
            MP_STATE_MEM(gc_last_free_atb_index) = block / BLOCKS_PER_ATB;
        }

        // free head and all of its tail blocks
        do {
            ATB_ANY_TO_FREE(block);
            block += 1;
        } while (ATB_GET_KIND(block) == AT_TAIL);

        GC_EXIT();

        #if EXTENSIVE_HEAP_PROFILING
        gc_dump_alloc_table();
        #endif
    }
}

size_t gc_nbytes(const void *ptr) {
    GC_ENTER();
    if (VERIFY_PTR(ptr)) {
        size_t block = BLOCK_FROM_PTR(ptr);
        if (ATB_GET_KIND(block) == AT_HEAD) {
            // work out number of consecutive blocks in the chain starting with this one
            size_t n_blocks = 0;
            do {
                n_blocks += 1;
            } while (ATB_GET_KIND(block + n_blocks) == AT_TAIL);
            GC_EXIT();
            return n_blocks * BYTES_PER_BLOCK;
        }
    }

    // invalid pointer
    GC_EXIT();
    return 0;
}

#if 0
// old, simple realloc that didn't expand memory in place
void *gc_realloc(void *ptr, mp_uint_t n_bytes) {
    mp_uint_t n_existing = gc_nbytes(ptr);
    if (n_bytes <= n_existing) {
        return ptr;
    } else {
        bool has_finaliser;
        if (ptr == NULL) {
            has_finaliser = false;
        } else {
            #if MICROPY_ENABLE_FINALISER
            has_finaliser = FTB_GET(BLOCK_FROM_PTR((mp_uint_t)ptr));
            #else
            has_finaliser = false;
            #endif
        }
        void *ptr2 = gc_alloc(n_bytes, has_finaliser);
        if (ptr2 == NULL) {
            return ptr2;
        }
        memcpy(ptr2, ptr, n_existing);
        gc_free(ptr);
        return ptr2;
    }
}

#else // Alternative gc_realloc impl

void *gc_realloc(void *ptr_in, size_t n_bytes, bool allow_move) {
    // check for pure allocation
    if (ptr_in == NULL) {
        return gc_alloc(n_bytes, false);
    }

    // check for pure free
    if (n_bytes == 0) {
        gc_free(ptr_in);
        return NULL;
    }

    void *ptr = ptr_in;

    GC_ENTER();

    if (MP_STATE_MEM(gc_lock_depth) > 0) {
        GC_EXIT();
        return NULL;
    }

    // get the GC block number corresponding to this pointer
    assert(VERIFY_PTR(ptr));
    size_t block = BLOCK_FROM_PTR(ptr);
    assert(ATB_GET_KIND(block) == AT_HEAD);

    // compute number of new blocks that are requested
    size_t new_blocks = (n_bytes + BYTES_PER_BLOCK - 1) / BYTES_PER_BLOCK;

    // Get the total number of consecutive blocks that are already allocated to
    // this chunk of memory, and then count the number of free blocks following
    // it. Stop if we reach the end of the heap, or if we find enough extra
    // free blocks to satisfy the realloc. Note that we need to compute the
    // total size of the existing memory chunk so we can correctly and
    // efficiently shrink it (see below for shrinking code).
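    //
    // For example, growing a 2-block chunk (HEAD, TAIL) to 4 blocks can only
    // succeed in place if the 2 table entries after its TAIL are FREE; the
    // scan below stops either at the first block that is neither TAIL nor
    // FREE, or as soon as n_blocks + n_free reaches new_blocks.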
Damien George | d977d26 | 2015-12-16 20:09:11 -0500 | [diff] [blame] | 674 | size_t n_free = 0; |
| 675 | size_t n_blocks = 1; // counting HEAD block |
| 676 | size_t max_block = MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB; |
| 677 | for (size_t bl = block + n_blocks; bl < max_block; bl++) { |
Damien George | 9b0b373 | 2014-10-15 18:24:47 +0000 | [diff] [blame] | 678 | byte block_type = ATB_GET_KIND(bl); |
| 679 | if (block_type == AT_TAIL) { |
| 680 | n_blocks++; |
| 681 | continue; |
Damien George | dde739d | 2014-04-20 18:16:25 +0100 | [diff] [blame] | 682 | } |
Damien George | 9b0b373 | 2014-10-15 18:24:47 +0000 | [diff] [blame] | 683 | if (block_type == AT_FREE) { |
| 684 | n_free++; |
| 685 | if (n_blocks + n_free >= new_blocks) { |
| 686 | // stop as soon as we find enough blocks for n_bytes |
| 687 | break; |
| 688 | } |
| 689 | continue; |
Damien George | dde739d | 2014-04-20 18:16:25 +0100 | [diff] [blame] | 690 | } |
| 691 | break; |
| 692 | } |
| 693 | |
| 694 | // return original ptr if it already has the requested number of blocks |
| 695 | if (new_blocks == n_blocks) { |
Damien George | c93d9ca | 2016-04-25 15:28:57 +0000 | [diff] [blame] | 696 | GC_EXIT(); |
Damien George | dde739d | 2014-04-20 18:16:25 +0100 | [diff] [blame] | 697 | return ptr_in; |
| 698 | } |
| 699 | |
| 700 | // check if we can shrink the allocated area |
| 701 | if (new_blocks < n_blocks) { |
| 702 | // free unneeded tail blocks |
Damien George | d977d26 | 2015-12-16 20:09:11 -0500 | [diff] [blame] | 703 | for (size_t bl = block + new_blocks, count = n_blocks - new_blocks; count > 0; bl++, count--) { |
Damien George | dde739d | 2014-04-20 18:16:25 +0100 | [diff] [blame] | 704 | ATB_ANY_TO_FREE(bl); |
| 705 | } |
Damien George | 516b09e | 2014-08-28 23:06:38 +0100 | [diff] [blame] | 706 | |
| 707 | // set the last_free pointer to end of this block if it's earlier in the heap |
Damien George | b4b10fd | 2015-01-01 23:30:53 +0000 | [diff] [blame] | 708 | if ((block + new_blocks) / BLOCKS_PER_ATB < MP_STATE_MEM(gc_last_free_atb_index)) { |
| 709 | MP_STATE_MEM(gc_last_free_atb_index) = (block + new_blocks) / BLOCKS_PER_ATB; |
Damien George | 516b09e | 2014-08-28 23:06:38 +0100 | [diff] [blame] | 710 | } |
| 711 | |
Damien George | c93d9ca | 2016-04-25 15:28:57 +0000 | [diff] [blame] | 712 | GC_EXIT(); |
| 713 | |
Damien George | 516b09e | 2014-08-28 23:06:38 +0100 | [diff] [blame] | 714 | #if EXTENSIVE_HEAP_PROFILING |
| 715 | gc_dump_alloc_table(); |
| 716 | #endif |
| 717 | |
Damien George | dde739d | 2014-04-20 18:16:25 +0100 | [diff] [blame] | 718 | return ptr_in; |
| 719 | } |
| 720 | |
| 721 | // check if we can expand in place |
| 722 | if (new_blocks <= n_blocks + n_free) { |
| 723 | // mark few more blocks as used tail |
Damien George | d977d26 | 2015-12-16 20:09:11 -0500 | [diff] [blame] | 724 | for (size_t bl = block + n_blocks; bl < block + new_blocks; bl++) { |
Damien George | dde739d | 2014-04-20 18:16:25 +0100 | [diff] [blame] | 725 | assert(ATB_GET_KIND(bl) == AT_FREE); |
| 726 | ATB_FREE_TO_TAIL(bl); |
| 727 | } |
Damien George | daab651 | 2014-04-25 23:37:55 +0100 | [diff] [blame] | 728 | |
Damien George | c93d9ca | 2016-04-25 15:28:57 +0000 | [diff] [blame] | 729 | GC_EXIT(); |
| 730 | |
Damien George | 5ffe1d8 | 2016-08-26 15:35:26 +1000 | [diff] [blame] | 731 | #if MICROPY_GC_CONSERVATIVE_CLEAR |
| 732 | // be conservative and zero out all the newly allocated blocks |
| 733 | memset((byte*)ptr_in + n_blocks * BYTES_PER_BLOCK, 0, (new_blocks - n_blocks) * BYTES_PER_BLOCK); |
| 734 | #else |
Damien George | 32bef31 | 2014-04-26 22:23:42 +0100 | [diff] [blame] | 735 | // zero out the additional bytes of the newly allocated blocks (see comment above in gc_alloc) |
stijn | f33385f | 2014-06-12 17:42:20 +0200 | [diff] [blame] | 736 | memset((byte*)ptr_in + n_bytes, 0, new_blocks * BYTES_PER_BLOCK - n_bytes); |
Damien George | 5ffe1d8 | 2016-08-26 15:35:26 +1000 | [diff] [blame] | 737 | #endif |
Damien George | daab651 | 2014-04-25 23:37:55 +0100 | [diff] [blame] | 738 | |
Damien George | 516b09e | 2014-08-28 23:06:38 +0100 | [diff] [blame] | 739 | #if EXTENSIVE_HEAP_PROFILING |
| 740 | gc_dump_alloc_table(); |
| 741 | #endif |
| 742 | |
Damien George | dde739d | 2014-04-20 18:16:25 +0100 | [diff] [blame] | 743 | return ptr_in; |
| 744 | } |
| 745 | |
Damien George | e33806a | 2016-05-04 09:14:43 +0000 | [diff] [blame] | 746 | #if MICROPY_ENABLE_FINALISER |
| 747 | bool ftb_state = FTB_GET(block); |
| 748 | #else |
| 749 | bool ftb_state = false; |
| 750 | #endif |
| 751 | |
Damien George | c93d9ca | 2016-04-25 15:28:57 +0000 | [diff] [blame] | 752 | GC_EXIT(); |
| 753 | |
Damien George | ade9a05 | 2015-06-13 21:53:22 +0100 | [diff] [blame] | 754 | if (!allow_move) { |
| 755 | // not allowed to move memory block so return failure |
| 756 | return NULL; |
| 757 | } |
| 758 | |
Damien George | dde739d | 2014-04-20 18:16:25 +0100 | [diff] [blame] | 759 | // can't resize inplace; try to find a new contiguous chain |
Damien George | e33806a | 2016-05-04 09:14:43 +0000 | [diff] [blame] | 760 | void *ptr_out = gc_alloc(n_bytes, ftb_state); |
Damien George | dde739d | 2014-04-20 18:16:25 +0100 | [diff] [blame] | 761 | |
| 762 | // check that the alloc succeeded |
| 763 | if (ptr_out == NULL) { |
| 764 | return NULL; |
| 765 | } |
| 766 | |
stijn | bbcea3f | 2014-06-16 10:44:29 +0200 | [diff] [blame] | 767 | DEBUG_printf("gc_realloc(%p -> %p)\n", ptr_in, ptr_out); |
Damien George | dde739d | 2014-04-20 18:16:25 +0100 | [diff] [blame] | 768 | memcpy(ptr_out, ptr_in, n_blocks * BYTES_PER_BLOCK); |
| 769 | gc_free(ptr_in); |
| 770 | return ptr_out; |
Damien | dcced92 | 2013-10-21 23:45:08 +0100 | [diff] [blame] | 771 | } |
Paul Sokolovsky | ed162b5 | 2014-04-20 11:43:38 +0300 | [diff] [blame] | 772 | #endif // Alternative gc_realloc impl |
mux | 8782676 | 2014-03-12 21:00:23 +0200 | [diff] [blame] | 773 | |
Damien George | abc1959 | 2015-01-12 22:34:38 +0000 | [diff] [blame] | 774 | void gc_dump_info(void) { |
Paul Sokolovsky | 723a6ed | 2014-02-11 18:01:38 +0200 | [diff] [blame] | 775 | gc_info_t info; |
| 776 | gc_info(&info); |
Damien George | acaccb3 | 2015-12-18 12:52:45 +0000 | [diff] [blame] | 777 | mp_printf(&mp_plat_print, "GC: total: %u, used: %u, free: %u\n", |
| 778 | (uint)info.total, (uint)info.used, (uint)info.free); |
Paul Sokolovsky | 749cbac | 2016-07-01 00:09:55 +0300 | [diff] [blame] | 779 | mp_printf(&mp_plat_print, " No. of 1-blocks: %u, 2-blocks: %u, max blk sz: %u, max free sz: %u\n", |
| 780 | (uint)info.num_1block, (uint)info.num_2block, (uint)info.max_block, (uint)info.max_free); |
Paul Sokolovsky | 723a6ed | 2014-02-11 18:01:38 +0200 | [diff] [blame] | 781 | } |
| 782 | |
Damien George | ce1162a | 2014-02-26 22:55:59 +0000 | [diff] [blame] | 783 | void gc_dump_alloc_table(void) { |
Damien George | c93d9ca | 2016-04-25 15:28:57 +0000 | [diff] [blame] | 784 | GC_ENTER(); |
Damien George | d977d26 | 2015-12-16 20:09:11 -0500 | [diff] [blame] | 785 | static const size_t DUMP_BYTES_PER_LINE = 64; |
Damien George | 516b09e | 2014-08-28 23:06:38 +0100 | [diff] [blame] | 786 | #if !EXTENSIVE_HEAP_PROFILING |
| 787 | // When comparing heap output we don't want to print the starting |
| 788 | // pointer of the heap because it changes from run to run. |
Damien George | e72cda9 | 2015-04-11 12:15:47 +0100 | [diff] [blame] | 789 | mp_printf(&mp_plat_print, "GC memory layout; from %p:", MP_STATE_MEM(gc_pool_start)); |
Damien George | 516b09e | 2014-08-28 23:06:38 +0100 | [diff] [blame] | 790 | #endif |
Damien George | d977d26 | 2015-12-16 20:09:11 -0500 | [diff] [blame] | 791 | for (size_t bl = 0; bl < MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB; bl++) { |
Damien George | 516b09e | 2014-08-28 23:06:38 +0100 | [diff] [blame] | 792 | if (bl % DUMP_BYTES_PER_LINE == 0) { |
| 793 | // a new line of blocks |
Damien George | 516b09e | 2014-08-28 23:06:38 +0100 | [diff] [blame] | 794 | { |
| 795 | // check if this line contains only free blocks |
Damien George | d977d26 | 2015-12-16 20:09:11 -0500 | [diff] [blame] | 796 | size_t bl2 = bl; |
Damien George | b4b10fd | 2015-01-01 23:30:53 +0000 | [diff] [blame] | 797 | while (bl2 < MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB && ATB_GET_KIND(bl2) == AT_FREE) { |
Damien George | 0b13f3e | 2014-10-24 23:12:25 +0100 | [diff] [blame] | 798 | bl2++; |
| 799 | } |
| 800 | if (bl2 - bl >= 2 * DUMP_BYTES_PER_LINE) { |
| 801 | // there are at least 2 lines containing only free blocks, so abbreviate their printing |
Damien George | acaccb3 | 2015-12-18 12:52:45 +0000 | [diff] [blame] | 802 | mp_printf(&mp_plat_print, "\n (%u lines all free)", (uint)(bl2 - bl) / DUMP_BYTES_PER_LINE); |
Damien George | 0b13f3e | 2014-10-24 23:12:25 +0100 | [diff] [blame] | 803 | bl = bl2 & (~(DUMP_BYTES_PER_LINE - 1)); |
Damien George | b4b10fd | 2015-01-01 23:30:53 +0000 | [diff] [blame] | 804 | if (bl >= MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB) { |
Damien George | 0b13f3e | 2014-10-24 23:12:25 +0100 | [diff] [blame] | 805 | // got to end of heap |
Damien George | 516b09e | 2014-08-28 23:06:38 +0100 | [diff] [blame] | 806 | break; |
| 807 | } |
| 808 | } |
Damien George | 516b09e | 2014-08-28 23:06:38 +0100 | [diff] [blame] | 809 | } |
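// Worked example: with DUMP_BYTES_PER_LINE == 64 and a run of 200 free
// blocks starting at this line boundary, "(3 lines all free)" is printed
// (200 / 64 == 3), bl is then rounded down to the last line boundary inside
// the run (bl += 192), and the remaining 8 free blocks appear as '.' at the
// start of the next printed line.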
Damien George | 516b09e | 2014-08-28 23:06:38 +0100 | [diff] [blame] | 810 | // print header for new line of blocks |
Damien George | 12ab9ed | 2015-03-31 23:07:02 +0100 | [diff] [blame] | 811 | // (the cast to uint32_t is for 16-bit ports) |
Paul Sokolovsky | 68a7a92 | 2016-05-13 00:16:38 +0300 | [diff] [blame] | 812 | //mp_printf(&mp_plat_print, "\n%05x: ", (uint)(PTR_FROM_BLOCK(bl) & (uint32_t)0xfffff)); |
Damien George | e72cda9 | 2015-04-11 12:15:47 +0100 | [diff] [blame] | 813 | mp_printf(&mp_plat_print, "\n%05x: ", (uint)((bl * BYTES_PER_BLOCK) & (uint32_t)0xfffff)); |
Damien | eefcc79 | 2013-10-22 15:25:25 +0100 | [diff] [blame] | 814 | } |
Damien George | ce1162a | 2014-02-26 22:55:59 +0000 | [diff] [blame] | 815 | int c = ' '; |
| 816 | switch (ATB_GET_KIND(bl)) { |
| 817 | case AT_FREE: c = '.'; break; |
Damien George | c30595e | 2014-10-17 14:12:57 +0000 | [diff] [blame] | 818 | /* this prints whether the object is reachable from BSS or STACK (unix port only) |
| 819 | case AT_HEAD: { |
Damien George | c30595e | 2014-10-17 14:12:57 +0000 | [diff] [blame] | 820 | c = 'h'; |
stijn | afd6c8e | 2015-01-08 10:32:45 +0100 | [diff] [blame] | 821 | void **ptrs = (void**)(void*)&mp_state_ctx; |
| 822 | mp_uint_t len = offsetof(mp_state_ctx_t, vm.stack_top) / sizeof(mp_uint_t); |
Damien George | c30595e | 2014-10-17 14:12:57 +0000 | [diff] [blame] | 823 | for (mp_uint_t i = 0; i < len; i++) { |
| 824 | mp_uint_t ptr = (mp_uint_t)ptrs[i]; |
| 825 | if (VERIFY_PTR(ptr) && BLOCK_FROM_PTR(ptr) == bl) { |
| 826 | c = 'B'; |
| 827 | break; |
| 828 | } |
| 829 | } |
| 830 | if (c == 'h') { |
| 831 | ptrs = (void**)&c; |
Damien George | 330165a | 2016-04-22 22:44:56 +0000 | [diff] [blame] | 832 | len = ((mp_uint_t)MP_STATE_THREAD(stack_top) - (mp_uint_t)&c) / sizeof(mp_uint_t); |
Damien George | c30595e | 2014-10-17 14:12:57 +0000 | [diff] [blame] | 833 | for (mp_uint_t i = 0; i < len; i++) { |
| 834 | mp_uint_t ptr = (mp_uint_t)ptrs[i]; |
| 835 | if (VERIFY_PTR(ptr) && BLOCK_FROM_PTR(ptr) == bl) { |
| 836 | c = 'S'; |
| 837 | break; |
| 838 | } |
| 839 | } |
| 840 | } |
| 841 | break; |
| 842 | } |
| 843 | */ |
Damien George | 0b13f3e | 2014-10-24 23:12:25 +0100 | [diff] [blame] | 844 | /* this prints the uPy object type of the head block */ |
Damien George | daab651 | 2014-04-25 23:37:55 +0100 | [diff] [blame] | 845 | case AT_HEAD: { |
Damien George | 94fe6e5 | 2015-11-27 13:07:48 +0000 | [diff] [blame] | 846 | void **ptr = (void**)(MP_STATE_MEM(gc_pool_start) + bl * BYTES_PER_BLOCK); |
| 847 | if (*ptr == &mp_type_tuple) { c = 'T'; } |
| 848 | else if (*ptr == &mp_type_list) { c = 'L'; } |
| 849 | else if (*ptr == &mp_type_dict) { c = 'D'; } |
Paul Sokolovsky | 3d7f3f0 | 2016-05-11 18:52:46 +0300 | [diff] [blame] | 850 | else if (*ptr == &mp_type_str || *ptr == &mp_type_bytes) { c = 'S'; } |
Paul Sokolovsky | bc04dc2 | 2016-05-11 19:21:53 +0300 | [diff] [blame] | 851 | #if MICROPY_PY_BUILTINS_BYTEARRAY |
| 852 | else if (*ptr == &mp_type_bytearray) { c = 'A'; } |
| 853 | #endif |
| 854 | #if MICROPY_PY_ARRAY |
| 855 | else if (*ptr == &mp_type_array) { c = 'A'; } |
| 856 | #endif |
Damien George | 7860c2a | 2014-11-05 21:16:41 +0000 | [diff] [blame] | 857 | #if MICROPY_PY_BUILTINS_FLOAT |
Damien George | 94fe6e5 | 2015-11-27 13:07:48 +0000 | [diff] [blame] | 858 | else if (*ptr == &mp_type_float) { c = 'F'; } |
Damien George | 7860c2a | 2014-11-05 21:16:41 +0000 | [diff] [blame] | 859 | #endif |
Damien George | 94fe6e5 | 2015-11-27 13:07:48 +0000 | [diff] [blame] | 860 | else if (*ptr == &mp_type_fun_bc) { c = 'B'; } |
| 861 | else if (*ptr == &mp_type_module) { c = 'M'; } |
Damien George | ec21405 | 2015-01-11 14:37:06 +0000 | [diff] [blame] | 862 | else { |
| 863 | c = 'h'; |
| 864 | #if 0 |
| 865 | // This code prints "Q" for qstr-pool data, and "q" for qstr-str |
| 866 | // data. It can be useful to see how qstrs are being allocated, |
| 867 | // but is disabled by default because it is very slow. |
| 868 | for (qstr_pool_t *pool = MP_STATE_VM(last_pool); c == 'h' && pool != NULL; pool = pool->prev) { |
| 869 | if ((qstr_pool_t*)ptr == pool) { |
| 870 | c = 'Q'; |
| 871 | break; |
| 872 | } |
| 873 | for (const byte **q = pool->qstrs, **q_top = pool->qstrs + pool->len; q < q_top; q++) { |
| 874 | if ((const byte*)ptr == *q) { |
| 875 | c = 'q'; |
| 876 | break; |
| 877 | } |
| 878 | } |
| 879 | } |
| 880 | #endif |
| 881 | } |
Damien George | daab651 | 2014-04-25 23:37:55 +0100 | [diff] [blame] | 882 | break; |
| 883 | } |
Paul Sokolovsky | 9a8751b | 2016-05-13 00:16:38 +0300 | [diff] [blame] | 884 | case AT_TAIL: c = '='; break; |
Damien George | ce1162a | 2014-02-26 22:55:59 +0000 | [diff] [blame] | 885 | case AT_MARK: c = 'm'; break; |
| 886 | } |
Damien George | e72cda9 | 2015-04-11 12:15:47 +0100 | [diff] [blame] | 887 | mp_printf(&mp_plat_print, "%c", c); |
Damien | eefcc79 | 2013-10-22 15:25:25 +0100 | [diff] [blame] | 888 | } |
Damien George | e72cda9 | 2015-04-11 12:15:47 +0100 | [diff] [blame] | 889 | mp_print_str(&mp_plat_print, "\n"); |
Damien George | c93d9ca | 2016-04-25 15:28:57 +0000 | [diff] [blame] | 890 | GC_EXIT(); |
Damien | eefcc79 | 2013-10-22 15:25:25 +0100 | [diff] [blame] | 891 | } |
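// Key to the characters printed by gc_dump_alloc_table() (see the switch
// above): '.' free block, '=' tail block, 'm' marked head (normally only seen
// if dumping while a collection is in progress), 'h' head of an allocation
// whose type is not recognised, and T/L/D/S/A/F/B/M for heads holding a
// tuple, list, dict, str/bytes, (byte)array, float, bytecode function or
// module respectively.  An illustrative fragment of output (addresses and
// layout will differ):
//   00400: .....hT===h=========hDh=hLh===h=........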
| 892 | |
Damien George | ce1162a | 2014-02-26 22:55:59 +0000 | [diff] [blame] | 893 | #if DEBUG_PRINT |
#include <stdlib.h> // for malloc() used by this self-test
Paul Sokolovsky | af19cbd | 2014-02-10 21:45:54 +0200 | [diff] [blame] | 894 | void gc_test(void) { |
Damien George | 40f3c02 | 2014-07-03 13:25:24 +0100 | [diff] [blame] | 895 | mp_uint_t len = 500; |
| 896 | mp_uint_t *heap = malloc(len); |
| 897 | gc_init(heap, heap + len / sizeof(mp_uint_t)); |
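    // gc_init() takes the start and (exclusive) end address of the heap area;
    // since heap is an mp_uint_t*, heap + len / sizeof(mp_uint_t) is the
    // one-past-the-end address of the len-byte buffer allocated above.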
Damien | dcced92 | 2013-10-21 23:45:08 +0100 | [diff] [blame] | 898 | void *ptrs[100]; |
| 899 | { |
Damien George | 40f3c02 | 2014-07-03 13:25:24 +0100 | [diff] [blame] | 900 | mp_uint_t **p = gc_alloc(16, false); |
Damien George | 12bab72 | 2014-04-05 20:35:48 +0100 | [diff] [blame] | 901 | p[0] = gc_alloc(64, false); |
| 902 | p[1] = gc_alloc(1, false); |
| 903 | p[2] = gc_alloc(1, false); |
| 904 | p[3] = gc_alloc(1, false); |
Damien George | 40f3c02 | 2014-07-03 13:25:24 +0100 | [diff] [blame] | 905 | mp_uint_t ***p2 = gc_alloc(16, false); |
Damien | dcced92 | 2013-10-21 23:45:08 +0100 | [diff] [blame] | 906 | p2[0] = p; |
| 907 | p2[1] = p; |
| 908 | ptrs[0] = p2; |
| 909 | } |
Paul Sokolovsky | af19cbd | 2014-02-10 21:45:54 +0200 | [diff] [blame] | 910 | for (int i = 0; i < 25; i += 2) { |
Damien George | 40f3c02 | 2014-07-03 13:25:24 +0100 | [diff] [blame] | 911 | mp_uint_t *p = gc_alloc(i, false); |
Damien | dcced92 | 2013-10-21 23:45:08 +0100 | [diff] [blame] | 912 | printf("p=%p\n", p); |
| 913 | if (i & 3) { |
| 914 | //ptrs[i] = p; |
| 915 | } |
| 916 | } |
| 917 | |
Paul Sokolovsky | af19cbd | 2014-02-10 21:45:54 +0200 | [diff] [blame] | 918 | printf("Before GC:\n"); |
Damien George | ce1162a | 2014-02-26 22:55:59 +0000 | [diff] [blame] | 919 | gc_dump_alloc_table(); |
Paul Sokolovsky | af19cbd | 2014-02-10 21:45:54 +0200 | [diff] [blame] | 920 | printf("Starting GC...\n"); |
| 921 | gc_collect_start(); |
| 922 | gc_collect_root(ptrs, sizeof(ptrs) / sizeof(void*)); |
| 923 | gc_collect_end(); |
| 924 | printf("After GC:\n"); |
Damien George | ce1162a | 2014-02-26 22:55:59 +0000 | [diff] [blame] | 925 | gc_dump_alloc_table(); |
Damien | dcced92 | 2013-10-21 23:45:08 +0100 | [diff] [blame] | 926 | } |
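// gc_test() is only compiled when DEBUG_PRINT is enabled and is not called
// from this file; a port or test harness would need to invoke it explicitly
// while bringing up the collector.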
Paul Sokolovsky | af19cbd | 2014-02-10 21:45:54 +0200 | [diff] [blame] | 927 | #endif |
Damien George | d3ebe48 | 2014-01-07 15:20:33 +0000 | [diff] [blame] | 928 | |
| 929 | #endif // MICROPY_ENABLE_GC |