author    Jim Mussared <jim.mussared@gmail.com>   2021-09-06 12:28:06 +1000
committer Damien George <damien@micropython.org>  2021-09-16 16:04:03 +1000
commit    b326edf68c5edb648fac4dc2a3403ee33510e179 (patch)
tree      df3a4e0666eb4a7ff85329befe306ec452b8cd64 /py
parent    60c6d5594f165cf3af6e66076f8dceb24e0d859f (diff)
all: Remove MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE.
This commit removes all parts of code associated with the existing
MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE optimisation option, including the
-mcache-lookup-bc option to mpy-cross.

This feature originally provided a significant performance boost for Unix,
but wasn't able to be enabled for MCU targets (due to frozen bytecode), and
added significant extra complexity to generating and distributing .mpy files.

The equivalent performance gain is now provided by the combination of
MICROPY_OPT_LOAD_ATTR_FAST_PATH and MICROPY_OPT_MAP_LOOKUP_CACHE (which was
enabled on the unix port in the previous commit).

It's hard to provide precise performance numbers, but tests have been run on
a wide variety of architectures (x86-64, ARM Cortex, AArch64, RISC-V, Xtensa)
and they all generally agree on the qualitative improvements seen with the
combination of MICROPY_OPT_LOAD_ATTR_FAST_PATH and
MICROPY_OPT_MAP_LOOKUP_CACHE.

For example, on a "quiet" Linux x64 environment (i3-5010U @ 2.10GHz) the
change from CACHE_MAP_LOOKUP_IN_BYTECODE to LOAD_ATTR_FAST_PATH combined with
MAP_LOOKUP_CACHE is:

diff of scores (higher is better)
N=2000 M=2000           bccache ->  attrmapcache      diff      diff% (error%)
bm_chaos.py            13742.56 ->    13905.67 :    +163.11 =  +1.187% (+/-3.75%)
bm_fannkuch.py            60.13 ->       61.34 :      +1.21 =  +2.012% (+/-2.11%)
bm_fft.py             113083.20 ->   114793.68 :   +1710.48 =  +1.513% (+/-1.57%)
bm_float.py           256552.80 ->   243908.29 :  -12644.51 =  -4.929% (+/-1.90%)
bm_hexiom.py             521.93 ->      625.41 :    +103.48 = +19.826% (+/-0.40%)
bm_nqueens.py         197544.25 ->   217713.12 :  +20168.87 = +10.210% (+/-3.01%)
bm_pidigits.py          8072.98 ->     8198.75 :    +125.77 =  +1.558% (+/-3.22%)
misc_aes.py            17283.45 ->    16480.52 :    -802.93 =  -4.646% (+/-0.82%)
misc_mandel.py         99083.99 ->   128939.84 :  +29855.85 = +30.132% (+/-5.88%)
misc_pystone.py        83860.10 ->    82592.56 :   -1267.54 =  -1.511% (+/-2.27%)
misc_raytrace.py       21490.40 ->    22227.23 :    +736.83 =  +3.429% (+/-1.88%)

This shows that the new optimisations are at least as good as the existing
inline-bytecode-caching, and are sometimes much better (because the new ones
apply caching to a wider variety of map lookups). The new optimisations can
also benefit code generated by the native emitter, because they apply to the
runtime rather than to the generated code.
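To make the contrast concrete: the removed option stored a one-byte cache
slot inline in the bytecode after each LOAD_NAME/LOAD_GLOBAL/LOAD_ATTR/
STORE_ATTR opcode (see the vm.c hunk below), whereas
MICROPY_OPT_MAP_LOOKUP_CACHE keeps one small, fixed-size cache in the
runtime. The following standalone C program is a minimal sketch of that
runtime-cache idea only; the names, hash function, and cache size are
illustrative assumptions, not the actual py/map.c implementation:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    // Toy map: an array of (key, value) entries; keys are interned strings,
    // so pointer equality stands in for MicroPython's qstr comparison.
    typedef struct { const char *key; int value; } entry_t;
    typedef struct { entry_t *table; size_t len; } map_t;

    // One small global cache shared by all maps: constant RAM overhead,
    // regardless of how much bytecode is loaded or frozen.
    #define CACHE_SIZE 256
    static uint8_t cache[CACHE_SIZE]; // last-seen table index per (map, key) hash

    static size_t cache_slot(const map_t *m, const char *key) {
        // Cheap hash of the map and key identities; a collision merely
        // causes a miss, never a wrong result, because hits are verified.
        uintptr_t h = ((uintptr_t)m >> 3) ^ (uintptr_t)key;
        return h % CACHE_SIZE;
    }

    static entry_t *map_lookup_cached(map_t *m, const char *key) {
        size_t slot = cache_slot(m, key);
        size_t hint = cache[slot];
        if (hint < m->len && m->table[hint].key == key) {
            return &m->table[hint]; // fast path: cached index still valid
        }
        // Slow path: linear scan stands in for the real hash-probe lookup.
        for (size_t i = 0; i < m->len; ++i) {
            if (m->table[i].key == key) {
                cache[slot] = (uint8_t)i; // remember where the key was found
                return &m->table[i];
            }
        }
        return NULL;
    }

    int main(void) {
        const char *k_bar = "bar", *k_foo = "foo";
        entry_t table[] = {{k_bar, 1}, {k_foo, 2}};
        map_t m = {table, 2};
        map_lookup_cached(&m, k_foo);                          // miss: fills cache
        printf("%d\n", map_lookup_cached(&m, k_foo)->value);   // hit: prints 2
        return 0;
    }

Because a hit is verified against the stored key before use, a stale or
colliding cache entry can only cost a miss, which is what makes a small
constant-size cache safe on embedded targets.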
The improvement for the native emitter when LOAD_ATTR_FAST_PATH and
MAP_LOOKUP_CACHE are enabled is (same Linux environment as above):

diff of scores (higher is better)
N=2000 M=2000            native -> nat-attrmapcache    diff      diff% (error%)
bm_chaos.py            14130.62 ->    15464.68 :   +1334.06 =  +9.441% (+/-7.11%)
bm_fannkuch.py            74.96 ->       76.16 :      +1.20 =  +1.601% (+/-1.80%)
bm_fft.py             166682.99 ->   168221.86 :   +1538.87 =  +0.923% (+/-4.20%)
bm_float.py           233415.23 ->   265524.90 :  +32109.67 = +13.756% (+/-2.57%)
bm_hexiom.py             628.59 ->      734.17 :    +105.58 = +16.796% (+/-1.39%)
bm_nqueens.py         225418.44 ->   232926.45 :   +7508.01 =  +3.331% (+/-3.10%)
bm_pidigits.py          6322.00 ->     6379.52 :     +57.52 =  +0.910% (+/-5.62%)
misc_aes.py            20670.10 ->    27223.18 :   +6553.08 = +31.703% (+/-1.56%)
misc_mandel.py        138221.11 ->   152014.01 :  +13792.90 =  +9.979% (+/-2.46%)
misc_pystone.py        85032.14 ->   105681.44 :  +20649.30 = +24.284% (+/-2.25%)
misc_raytrace.py       19800.01 ->    23350.73 :   +3550.72 = +17.933% (+/-2.79%)

In summary, compared to MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE, the new
MICROPY_OPT_LOAD_ATTR_FAST_PATH and MICROPY_OPT_MAP_LOOKUP_CACHE options:
- are simpler;
- take less code size;
- are faster (generally);
- work with code generated by the native emitter;
- can be used on embedded targets with a small and constant RAM overhead;
- allow the same .mpy bytecode to run on all targets.

See #7680 for further discussion, and #7653 for a discussion about
simplifying mpy-cross options.

Signed-off-by: Jim Mussared <jim.mussared@gmail.com>
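As an aside on MICROPY_OPT_LOAD_ATTR_FAST_PATH: the comment removed from the
old STORE_ATTR path below states the key invariant — if an attribute exists
in self->members then it can't be a property or have descriptors. A hedged,
self-contained sketch of a lookup shaped that way (invented types and names,
not the actual vm.c code):

    #include <stddef.h>
    #include <stdio.h>

    typedef struct { const char *key; int value; } entry_t;

    // Toy object: only "instance" objects carry a members map; all other
    // kinds (properties, descriptors, methods, ...) take the general path.
    typedef struct {
        int is_instance;
        entry_t *members;
        size_t n_members;
    } obj_t;

    // Stand-in for the general routine (mp_load_attr in MicroPython) that
    // handles type lookup, properties, descriptors, and attribute errors.
    static int load_attr_general(const obj_t *o, const char *key) {
        (void)o;
        printf("general lookup for %s\n", key);
        return 0;
    }

    static int load_attr(const obj_t *o, const char *key) {
        if (o->is_instance) {
            // Fast path: an attribute found directly in the instance's
            // members map can't be a property or descriptor, so it is safe
            // to return it immediately.
            for (size_t i = 0; i < o->n_members; ++i) {
                if (o->members[i].key == key) {
                    return o->members[i].value;
                }
            }
        }
        return load_attr_general(o, key); // everything else: full lookup
    }

    int main(void) {
        const char *k_x = "x";
        entry_t members[] = {{k_x, 42}};
        obj_t self = {1, members, 1};
        printf("self.x = %d\n", load_attr(&self, k_x)); // fast path: 42
        return 0;
    }

Because the fast path lives in the runtime rather than in emitted bytecode,
it benefits native-emitter code just as much as the bytecode VM.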
Diffstat (limited to 'py')
-rw-r--r--   py/bc.c              14
-rw-r--r--   py/dynruntime.mk      2
-rw-r--r--   py/emitbc.c           6
-rw-r--r--   py/mpconfig.h         9
-rw-r--r--   py/mpstate.h          1
-rw-r--r--   py/persistentcode.h  11
-rw-r--r--   py/profile.c         12
-rw-r--r--   py/showbc.c          12
-rw-r--r--   py/vm.c             101
9 files changed, 5 insertions, 163 deletions
diff --git a/py/bc.c b/py/bc.c
index 58694b97d..69b6739e8 100644
--- a/py/bc.c
+++ b/py/bc.c
@@ -304,24 +304,10 @@ void mp_setup_code_state(mp_code_state_t *code_state, size_t n_args, size_t n_kw
// The following table encodes the number of bytes that a specific opcode
// takes up. Some opcodes have an extra byte, defined by MP_BC_MASK_EXTRA_BYTE.
-// There are 4 special opcodes that have an extra byte only when
-// MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE is enabled (and they take a qstr):
-// MP_BC_LOAD_NAME
-// MP_BC_LOAD_GLOBAL
-// MP_BC_LOAD_ATTR
-// MP_BC_STORE_ATTR
uint mp_opcode_format(const byte *ip, size_t *opcode_size, bool count_var_uint) {
uint f = MP_BC_FORMAT(*ip);
const byte *ip_start = ip;
if (f == MP_BC_FORMAT_QSTR) {
- if (MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE_DYNAMIC) {
- if (*ip == MP_BC_LOAD_NAME
- || *ip == MP_BC_LOAD_GLOBAL
- || *ip == MP_BC_LOAD_ATTR
- || *ip == MP_BC_STORE_ATTR) {
- ip += 1;
- }
- }
ip += 3;
} else {
int extra_byte = (*ip & MP_BC_MASK_EXTRA_BYTE) == 0;
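With the deleted branch gone, a qstr-format opcode is always a fixed three
bytes (opcode plus 2-byte qstr index), which is what lets the same .mpy
bytecode run on all targets. A toy, self-contained illustration of
format-driven opcode sizing — the format field and encodings here are
invented, not MicroPython's actual MP_BC_FORMAT scheme:

    #include <stddef.h>
    #include <stdio.h>

    // Hypothetical formats selected by the opcode's top two bits.
    enum { FMT_BYTE = 0, FMT_QSTR = 1, FMT_VAR_UINT = 2 };
    #define FMT(op) ((op) >> 6)

    static size_t opcode_size(const unsigned char *ip) {
        switch (FMT(*ip)) {
            case FMT_QSTR:
                return 3; // opcode + 2-byte qstr index, now always fixed
            case FMT_VAR_UINT: {
                const unsigned char *p = ip + 1;
                while (*p & 0x80) { // high bit marks a continuation byte
                    ++p;
                }
                return (size_t)(p - ip) + 1;
            }
            default:
                return 1; // opcode only
        }
    }

    int main(void) {
        unsigned char load_name[] = {0x40, 0x12, 0x34}; // FMT_QSTR opcode
        printf("size = %zu\n", opcode_size(load_name)); // prints "size = 3"
        return 0;
    }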
diff --git a/py/dynruntime.mk b/py/dynruntime.mk
index cb5ab845e..db06d41e7 100644
--- a/py/dynruntime.mk
+++ b/py/dynruntime.mk
@@ -46,7 +46,6 @@ ifeq ($(ARCH),x86)
# x86
CROSS =
CFLAGS += -m32 -fno-stack-protector
-MPY_CROSS_FLAGS += -mcache-lookup-bc
MICROPY_FLOAT_IMPL ?= double
else ifeq ($(ARCH),x64)
@@ -54,7 +53,6 @@ else ifeq ($(ARCH),x64)
# x64
CROSS =
CFLAGS += -fno-stack-protector
-MPY_CROSS_FLAGS += -mcache-lookup-bc
MICROPY_FLOAT_IMPL ?= double
else ifeq ($(ARCH),armv7m)
diff --git a/py/emitbc.c b/py/emitbc.c
index d7e8e05f0..ca7404603 100644
--- a/py/emitbc.c
+++ b/py/emitbc.c
@@ -560,9 +560,6 @@ void mp_emit_bc_load_global(emit_t *emit, qstr qst, int kind) {
MP_STATIC_ASSERT(MP_BC_LOAD_NAME + MP_EMIT_IDOP_GLOBAL_GLOBAL == MP_BC_LOAD_GLOBAL);
(void)qst;
emit_write_bytecode_byte_qstr(emit, 1, MP_BC_LOAD_NAME + kind, qst);
- if (MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE_DYNAMIC) {
- emit_write_bytecode_raw_byte(emit, 0);
- }
}
void mp_emit_bc_load_method(emit_t *emit, qstr qst, bool is_super) {
@@ -596,9 +593,6 @@ void mp_emit_bc_attr(emit_t *emit, qstr qst, int kind) {
}
emit_write_bytecode_byte_qstr(emit, -2, MP_BC_STORE_ATTR, qst);
}
- if (MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE_DYNAMIC) {
- emit_write_bytecode_raw_byte(emit, 0);
- }
}
void mp_emit_bc_store_local(emit_t *emit, qstr qst, mp_uint_t local_num, int kind) {
diff --git a/py/mpconfig.h b/py/mpconfig.h
index 97fc9bb58..6a2edc7dc 100644
--- a/py/mpconfig.h
+++ b/py/mpconfig.h
@@ -423,10 +423,8 @@
// Configure dynamic compiler macros
#if MICROPY_DYNAMIC_COMPILER
-#define MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE_DYNAMIC (mp_dynamic_compiler.opt_cache_map_lookup_in_bytecode)
#define MICROPY_PY_BUILTINS_STR_UNICODE_DYNAMIC (mp_dynamic_compiler.py_builtins_str_unicode)
#else
-#define MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE_DYNAMIC MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE
#define MICROPY_PY_BUILTINS_STR_UNICODE_DYNAMIC MICROPY_PY_BUILTINS_STR_UNICODE
#endif
@@ -520,13 +518,6 @@
#define MICROPY_OPT_COMPUTED_GOTO (0)
#endif
-// Whether to cache result of map lookups in LOAD_NAME, LOAD_GLOBAL, LOAD_ATTR,
-// STORE_ATTR bytecodes. Uses 1 byte extra RAM for each of these opcodes and
-// uses a bit of extra code ROM, but greatly improves lookup speed.
-#ifndef MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE
-#define MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE (0)
-#endif
-
// Optimise the fast path for loading attributes from instance types. Increases
// Thumb2 code size by about 48 bytes.
#ifndef MICROPY_OPT_LOAD_ATTR_FAST_PATH
diff --git a/py/mpstate.h b/py/mpstate.h
index 53b290687..69360738c 100644
--- a/py/mpstate.h
+++ b/py/mpstate.h
@@ -44,7 +44,6 @@
#if MICROPY_DYNAMIC_COMPILER
typedef struct mp_dynamic_compiler_t {
uint8_t small_int_bits; // must be <= host small_int_bits
- bool opt_cache_map_lookup_in_bytecode;
bool py_builtins_str_unicode;
uint8_t native_arch;
uint8_t nlr_buf_num_regs;
diff --git a/py/persistentcode.h b/py/persistentcode.h
index 8769ef584..94fc3bf2d 100644
--- a/py/persistentcode.h
+++ b/py/persistentcode.h
@@ -41,16 +41,15 @@
#define MPY_FEATURE_ENCODE_ARCH(arch) ((arch) << 2)
#define MPY_FEATURE_DECODE_ARCH(feat) ((feat) >> 2)
-// The feature flag bits encode the compile-time config options that
-// affect the generate bytecode.
+// The feature flag bits encode the compile-time config options that affect
+// the generate bytecode. Note: position 0 is now unused
+// (formerly MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE).
#define MPY_FEATURE_FLAGS ( \
- ((MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE) << 0) \
- | ((MICROPY_PY_BUILTINS_STR_UNICODE) << 1) \
+ ((MICROPY_PY_BUILTINS_STR_UNICODE) << 1) \
)
// This is a version of the flags that can be configured at runtime.
#define MPY_FEATURE_FLAGS_DYNAMIC ( \
- ((MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE_DYNAMIC) << 0) \
- | ((MICROPY_PY_BUILTINS_STR_UNICODE_DYNAMIC) << 1) \
+ ((MICROPY_PY_BUILTINS_STR_UNICODE_DYNAMIC) << 1) \
)
// Define the host architecture
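With bit 0 of the .mpy feature byte retired, only the str-unicode flag
remains in the low two flag bits, and MPY_FEATURE_ENCODE_ARCH() places the
native arch at bits 2 and up. A hedged sketch of a loader-side compatibility
check over those flag bits — it mirrors the macros above but is not the
actual py/persistentcode.c code:

    #include <stdbool.h>
    #include <stdio.h>

    #define MICROPY_PY_BUILTINS_STR_UNICODE (1)

    // Bit 0 is retired by this commit; str-unicode stays at bit 1.
    #define MPY_FEATURE_FLAGS ((MICROPY_PY_BUILTINS_STR_UNICODE) << 1)

    static bool mpy_flags_compatible(unsigned char feat_byte) {
        // Compare only the two flag bits; arch bits are checked separately.
        return (feat_byte & 0x03) == MPY_FEATURE_FLAGS;
    }

    int main(void) {
        printf("0x02 -> %d\n", mpy_flags_compatible(0x02)); // 1: flags match
        printf("0x03 -> %d\n", mpy_flags_compatible(0x03)); // 0: stale bit 0
        return 0;
    }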
diff --git a/py/profile.c b/py/profile.c
index 054a0f9e6..d0ac99e07 100644
--- a/py/profile.c
+++ b/py/profile.c
@@ -540,9 +540,6 @@ STATIC const byte *mp_prof_opcode_decode(const byte *ip, const mp_uint_t *const_
instruction->qstr_opname = MP_QSTR_LOAD_NAME;
instruction->arg = qst;
instruction->argobj = MP_OBJ_NEW_QSTR(qst);
- if (MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE) {
- instruction->argobjex_cache = MP_OBJ_NEW_SMALL_INT(*ip++);
- }
break;
case MP_BC_LOAD_GLOBAL:
@@ -550,9 +547,6 @@ STATIC const byte *mp_prof_opcode_decode(const byte *ip, const mp_uint_t *const_
instruction->qstr_opname = MP_QSTR_LOAD_GLOBAL;
instruction->arg = qst;
instruction->argobj = MP_OBJ_NEW_QSTR(qst);
- if (MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE) {
- instruction->argobjex_cache = MP_OBJ_NEW_SMALL_INT(*ip++);
- }
break;
case MP_BC_LOAD_ATTR:
@@ -560,9 +554,6 @@ STATIC const byte *mp_prof_opcode_decode(const byte *ip, const mp_uint_t *const_
instruction->qstr_opname = MP_QSTR_LOAD_ATTR;
instruction->arg = qst;
instruction->argobj = MP_OBJ_NEW_QSTR(qst);
- if (MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE) {
- instruction->argobjex_cache = MP_OBJ_NEW_SMALL_INT(*ip++);
- }
break;
case MP_BC_LOAD_METHOD:
@@ -618,9 +609,6 @@ STATIC const byte *mp_prof_opcode_decode(const byte *ip, const mp_uint_t *const_
instruction->qstr_opname = MP_QSTR_STORE_ATTR;
instruction->arg = qst;
instruction->argobj = MP_OBJ_NEW_QSTR(qst);
- if (MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE) {
- instruction->argobjex_cache = MP_OBJ_NEW_SMALL_INT(*ip++);
- }
break;
case MP_BC_STORE_SUBSCR:
diff --git a/py/showbc.c b/py/showbc.c
index cb81b8835..c321dfd75 100644
--- a/py/showbc.c
+++ b/py/showbc.c
@@ -208,25 +208,16 @@ const byte *mp_bytecode_print_str(const mp_print_t *print, const byte *ip) {
case MP_BC_LOAD_NAME:
DECODE_QSTR;
mp_printf(print, "LOAD_NAME %s", qstr_str(qst));
- if (MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE) {
- mp_printf(print, " (cache=%u)", *ip++);
- }
break;
case MP_BC_LOAD_GLOBAL:
DECODE_QSTR;
mp_printf(print, "LOAD_GLOBAL %s", qstr_str(qst));
- if (MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE) {
- mp_printf(print, " (cache=%u)", *ip++);
- }
break;
case MP_BC_LOAD_ATTR:
DECODE_QSTR;
mp_printf(print, "LOAD_ATTR %s", qstr_str(qst));
- if (MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE) {
- mp_printf(print, " (cache=%u)", *ip++);
- }
break;
case MP_BC_LOAD_METHOD:
@@ -270,9 +261,6 @@ const byte *mp_bytecode_print_str(const mp_print_t *print, const byte *ip) {
case MP_BC_STORE_ATTR:
DECODE_QSTR;
mp_printf(print, "STORE_ATTR %s", qstr_str(qst));
- if (MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE) {
- mp_printf(print, " (cache=%u)", *ip++);
- }
break;
case MP_BC_STORE_SUBSCR:
diff --git a/py/vm.c b/py/vm.c
index e5a62e441..f55d293dc 100644
--- a/py/vm.c
+++ b/py/vm.c
@@ -180,23 +180,6 @@
#define TRACE_TICK(current_ip, current_sp, is_exception)
#endif // MICROPY_PY_SYS_SETTRACE
-#if MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE
-static inline mp_map_elem_t *mp_map_cached_lookup(mp_map_t *map, qstr qst, uint8_t *idx_cache) {
- size_t idx = *idx_cache;
- mp_obj_t key = MP_OBJ_NEW_QSTR(qst);
- mp_map_elem_t *elem = NULL;
- if (idx < map->alloc && map->table[idx].key == key) {
- elem = &map->table[idx];
- } else {
- elem = mp_map_lookup(map, key, MP_MAP_LOOKUP);
- if (elem != NULL) {
- *idx_cache = (elem - &map->table[0]) & 0xff;
- }
- }
- return elem;
-}
-#endif
-
// fastn has items in reverse order (fastn[0] is local[0], fastn[-1] is local[1], etc)
// sp points to bottom of stack which grows up
// returns:
@@ -361,55 +344,20 @@ dispatch_loop:
goto load_check;
}
- #if !MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE
ENTRY(MP_BC_LOAD_NAME): {
MARK_EXC_IP_SELECTIVE();
DECODE_QSTR;
PUSH(mp_load_name(qst));
DISPATCH();
}
- #else
- ENTRY(MP_BC_LOAD_NAME): {
- MARK_EXC_IP_SELECTIVE();
- DECODE_QSTR;
- mp_map_elem_t *elem = mp_map_cached_lookup(&mp_locals_get()->map, qst, (uint8_t*)ip);
- mp_obj_t obj;
- if (elem != NULL) {
- obj = elem->value;
- } else {
- obj = mp_load_name(qst);
- }
- PUSH(obj);
- ip++;
- DISPATCH();
- }
- #endif
- #if !MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE
ENTRY(MP_BC_LOAD_GLOBAL): {
MARK_EXC_IP_SELECTIVE();
DECODE_QSTR;
PUSH(mp_load_global(qst));
DISPATCH();
}
- #else
- ENTRY(MP_BC_LOAD_GLOBAL): {
- MARK_EXC_IP_SELECTIVE();
- DECODE_QSTR;
- mp_map_elem_t *elem = mp_map_cached_lookup(&mp_globals_get()->map, qst, (uint8_t*)ip);
- mp_obj_t obj;
- if (elem != NULL) {
- obj = elem->value;
- } else {
- obj = mp_load_global(qst);
- }
- PUSH(obj);
- ip++;
- DISPATCH();
- }
- #endif
- #if !MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE
ENTRY(MP_BC_LOAD_ATTR): {
FRAME_UPDATE();
MARK_EXC_IP_SELECTIVE();
@@ -436,28 +384,6 @@ dispatch_loop:
SET_TOP(obj);
DISPATCH();
}
- #else
- ENTRY(MP_BC_LOAD_ATTR): {
- FRAME_UPDATE();
- MARK_EXC_IP_SELECTIVE();
- DECODE_QSTR;
- mp_obj_t top = TOP();
- mp_map_elem_t *elem = NULL;
- if (mp_obj_is_instance_type(mp_obj_get_type(top))) {
- mp_obj_instance_t *self = MP_OBJ_TO_PTR(top);
- elem = mp_map_cached_lookup(&self->members, qst, (uint8_t*)ip);
- }
- mp_obj_t obj;
- if (elem != NULL) {
- obj = elem->value;
- } else {
- obj = mp_load_attr(top, qst);
- }
- SET_TOP(obj);
- ip++;
- DISPATCH();
- }
- #endif
ENTRY(MP_BC_LOAD_METHOD): {
MARK_EXC_IP_SELECTIVE();
@@ -513,7 +439,6 @@ dispatch_loop:
DISPATCH();
}
- #if !MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE
ENTRY(MP_BC_STORE_ATTR): {
FRAME_UPDATE();
MARK_EXC_IP_SELECTIVE();
@@ -522,32 +447,6 @@ dispatch_loop:
sp -= 2;
DISPATCH();
}
- #else
- // This caching code works with MICROPY_PY_BUILTINS_PROPERTY and/or
- // MICROPY_PY_DESCRIPTORS enabled because if the attr exists in
- // self->members then it can't be a property or have descriptors. A
- // consequence of this is that we can't use MP_MAP_LOOKUP_ADD_IF_NOT_FOUND
- // in the fast-path below, because that store could override a property.
- ENTRY(MP_BC_STORE_ATTR): {
- FRAME_UPDATE();
- MARK_EXC_IP_SELECTIVE();
- DECODE_QSTR;
- mp_map_elem_t *elem = NULL;
- mp_obj_t top = TOP();
- if (mp_obj_is_instance_type(mp_obj_get_type(top)) && sp[-1] != MP_OBJ_NULL) {
- mp_obj_instance_t *self = MP_OBJ_TO_PTR(top);
- elem = mp_map_cached_lookup(&self->members, qst, (uint8_t*)ip);
- }
- if (elem != NULL) {
- elem->value = sp[-1];
- } else {
- mp_store_attr(sp[0], qst, sp[-1]);
- }
- sp -= 2;
- ip++;
- DISPATCH();
- }
- #endif
ENTRY(MP_BC_STORE_SUBSCR):
MARK_EXC_IP_SELECTIVE();