author     Alessandro Gatti <a.gatti@frob.it>      2025-05-22 12:20:43 +0200
committer  Damien George <damien@micropython.org>  2025-06-10 11:29:02 +1000
commit     78ee1bac60a89f13d62395c1f7b6b122e26af98a
tree       f574e6848b2f29517c8dde78f6f4dcf4a316a98e /py/asmx86.h
parent     e43a3849d9b725f7a59dd17623193738b7ea04a5
py/emitnative: Let Viper int-indexed code use appropriate operands.
This commit extends the generic ASM API by adding the remaining
ASM_{LOAD,STORE}[size]_REG_REG_OFFSET macros where applicable.
The Viper int-indexed load/store code generator now uses those API
macros when they are available, falling back first to backend-specific
implementations where possible and ultimately to a generic implementation.
Right now all backends except x64 implement the load16, load32, and
store32 operations (x64 implements only load16).
Signed-off-by: Alessandro Gatti <a.gatti@frob.it>
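
As an editorial illustration of the three-tier fallback described in the
commit message, the sketch below shows how a 16-bit int-indexed load could
be dispatched. It is not the actual py/emitnative.c code:
ASM_LOAD16_REG_REG_OFFSET and ASM_LOAD16_REG_REG are real generic ASM API
macros, while asm_t, ASM_BACKEND_LOAD16, asm_backend_load16(), and
emit_generic_load16() are hypothetical stand-ins for the other two tiers.

    // Sketch only: dispatch order for a Viper int-indexed 16-bit load.
    // Tier 1 uses the generic ASM API macro if the backend provides it,
    // tier 2 a backend-specific hook, tier 3 a generic fallback sequence.
    static void emit_load16_indexed(asm_t *as, int reg_dest, int reg_base, int uint16_offset) {
        #if defined(ASM_LOAD16_REG_REG_OFFSET)
        // Tier 1: the backend provides the int-indexed load macro directly.
        ASM_LOAD16_REG_REG_OFFSET(as, reg_dest, reg_base, uint16_offset);
        #elif defined(ASM_BACKEND_LOAD16)
        // Tier 2: hypothetical backend-specific implementation.
        asm_backend_load16(as, reg_dest, reg_base, uint16_offset);
        #else
        // Tier 3: generic fallback, e.g. add the scaled offset to the base
        // register and then issue a plain ASM_LOAD16_REG_REG.
        emit_generic_load16(as, reg_dest, reg_base, uint16_offset);
        #endif
    }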
Diffstat (limited to 'py/asmx86.h')
-rw-r--r--  py/asmx86.h | 6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/py/asmx86.h b/py/asmx86.h
index af73c163b..5011e56d0 100644
--- a/py/asmx86.h
+++ b/py/asmx86.h
@@ -200,16 +200,18 @@ void asm_x86_call_ind(asm_x86_t *as, size_t fun_id, mp_uint_t n_args, int temp_r
 #define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_x86_sub_r32_r32((as), (reg_dest), (reg_src))
 #define ASM_MUL_REG_REG(as, reg_dest, reg_src) asm_x86_mul_r32_r32((as), (reg_dest), (reg_src))
 
-#define ASM_LOAD_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_x86_mov_mem32_to_r32((as), (reg_base), 4 * (word_offset), (reg_dest))
+#define ASM_LOAD_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) ASM_LOAD32_REG_REG_OFFSET((as), (reg_dest), (reg_base), (word_offset))
 #define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) asm_x86_mov_mem8_to_r32zx((as), (reg_base), 0, (reg_dest))
 #define ASM_LOAD16_REG_REG(as, reg_dest, reg_base) asm_x86_mov_mem16_to_r32zx((as), (reg_base), 0, (reg_dest))
 #define ASM_LOAD16_REG_REG_OFFSET(as, reg_dest, reg_base, uint16_offset) asm_x86_mov_mem16_to_r32zx((as), (reg_base), 2 * (uint16_offset), (reg_dest))
 #define ASM_LOAD32_REG_REG(as, reg_dest, reg_base) asm_x86_mov_mem32_to_r32((as), (reg_base), 0, (reg_dest))
+#define ASM_LOAD32_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_x86_mov_mem32_to_r32((as), (reg_base), 4 * (word_offset), (reg_dest))
 
-#define ASM_STORE_REG_REG_OFFSET(as, reg_src, reg_base, word_offset) asm_x86_mov_r32_to_mem32((as), (reg_src), (reg_base), 4 * (word_offset))
+#define ASM_STORE_REG_REG_OFFSET(as, reg_src, reg_base, word_offset) ASM_STORE32_REG_REG_OFFSET((as), (reg_src), (reg_base), (word_offset))
 #define ASM_STORE8_REG_REG(as, reg_src, reg_base) asm_x86_mov_r8_to_mem8((as), (reg_src), (reg_base), 0)
 #define ASM_STORE16_REG_REG(as, reg_src, reg_base) asm_x86_mov_r16_to_mem16((as), (reg_src), (reg_base), 0)
 #define ASM_STORE32_REG_REG(as, reg_src, reg_base) asm_x86_mov_r32_to_mem32((as), (reg_src), (reg_base), 0)
+#define ASM_STORE32_REG_REG_OFFSET(as, reg_src, reg_base, word_offset) asm_x86_mov_r32_to_mem32((as), (reg_src), (reg_base), 4 * (word_offset))
 
 #endif // GENERIC_ASM_API
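
For reference, the new offset macros scale the index by the operand size
(4 bytes per 32-bit word, matching the existing 2 * (uint16_offset) scaling
of the 16-bit variant), so the emitted x86 access is equivalent to the
following C semantics. This is an editorial sketch, not code from the
repository:

    #include <stdint.h>

    // Equivalent C semantics of the memory accesses emitted by the new
    // macros; base[word_offset] is an access at base + 4 * word_offset.
    static inline uint32_t load32_indexed(const uint32_t *base, uintptr_t word_offset) {
        return base[word_offset];  // what ASM_LOAD32_REG_REG_OFFSET emits
    }

    static inline void store32_indexed(uint32_t *base, uintptr_t word_offset, uint32_t value) {
        base[word_offset] = value;  // what ASM_STORE32_REG_REG_OFFSET emits
    }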