summary refs log tree commit diff
diff options
context:
space:
mode:
author Alessandro Gatti <a.gatti@frob.it> 2024-08-25 16:28:35 +0200
committer Damien George <damien@micropython.org> 2025-01-02 11:49:10 +1100
commit 268acb714dd79fa5eeeb82c1fca022bc4ea126b7 (patch)
tree 428ed75070ee89847fd5087095e3d7331d5f9b26
parent 3044233ea3726e9d8727d8f6a76f32c48e6fae5e (diff)
py/emitinlinerv32: Add inline assembler support for RV32.
This commit adds support for writing inline assembler functions when targeting an RV32IMC processor.

Given that this takes up a bit of rodata space due to its large instruction decoding table and its extensive error messages, it is enabled by default only on offline targets such as mpy-cross and the qemu port.

Signed-off-by: Alessandro Gatti <a.gatti@frob.it>
-rw-r--r--mpy-cross/mpconfigport.h1
-rw-r--r--ports/qemu/Makefile4
-rw-r--r--ports/qemu/boards/MICROBIT.mk3
-rw-r--r--ports/qemu/boards/MPS2_AN385.mk3
-rw-r--r--ports/qemu/boards/NETDUINO2.mk3
-rw-r--r--ports/qemu/boards/SABRELITE.mk2
-rw-r--r--ports/qemu/boards/VIRT_RV32.mk2
-rw-r--r--ports/qemu/mpconfigport.h1
-rw-r--r--ports/qemu/test-frzmpy/frozen_asm_rv32.py31
-rw-r--r--py/asmrv32.c34
-rw-r--r--py/asmrv32.h302
-rw-r--r--py/compile.c6
-rw-r--r--py/emit.h3
-rw-r--r--py/emitinlinerv32.c818
-rw-r--r--py/mpconfig.h7
-rw-r--r--py/py.cmake1
-rw-r--r--py/py.mk1
-rw-r--r--tests/inlineasm/rv32/asmargs.py44
-rw-r--r--tests/inlineasm/rv32/asmargs.py.exp5
-rw-r--r--tests/inlineasm/rv32/asmarith.py79
-rw-r--r--tests/inlineasm/rv32/asmarith.py.exp14
-rw-r--r--tests/inlineasm/rv32/asmbranch.py161
-rw-r--r--tests/inlineasm/rv32/asmbranch.py.exp32
-rw-r--r--tests/inlineasm/rv32/asmconst.py49
-rw-r--r--tests/inlineasm/rv32/asmconst.py.exp5
-rw-r--r--tests/inlineasm/rv32/asmcsr.py65
-rw-r--r--tests/inlineasm/rv32/asmcsr.py.exp2
-rw-r--r--tests/inlineasm/rv32/asmdata.py33
-rw-r--r--tests/inlineasm/rv32/asmdata.py.exp10
-rw-r--r--tests/inlineasm/rv32/asmdivmul.py63
-rw-r--r--tests/inlineasm/rv32/asmdivmul.py.exp18
-rw-r--r--tests/inlineasm/rv32/asmjump.py115
-rw-r--r--tests/inlineasm/rv32/asmjump.py.exp6
-rw-r--r--tests/inlineasm/rv32/asmloadstore.py86
-rw-r--r--tests/inlineasm/rv32/asmloadstore.py.exp3
-rw-r--r--tests/inlineasm/rv32/asmrettype.py33
-rw-r--r--tests/inlineasm/rv32/asmrettype.py.exp4
-rw-r--r--tests/inlineasm/rv32/asmsanity.py204
-rw-r--r--tests/inlineasm/rv32/asmsanity.py.exp162
-rw-r--r--tests/inlineasm/rv32/asmshift.py121
-rw-r--r--tests/inlineasm/rv32/asmshift.py.exp15
-rw-r--r--tests/inlineasm/rv32/asmstack.py65
-rw-r--r--tests/inlineasm/rv32/asmstack.py.exp3
-rw-r--r--tests/inlineasm/rv32/asmsum.py59
-rw-r--r--tests/inlineasm/rv32/asmsum.py.exp3
-rw-r--r--tests/ports/qemu/asm_test.py8
-rwxr-xr-xtests/run-tests.py5
47 files changed, 2649 insertions, 45 deletions
diff --git a/mpy-cross/mpconfigport.h b/mpy-cross/mpconfigport.h
index 8723472eb..d3805f030 100644
--- a/mpy-cross/mpconfigport.h
+++ b/mpy-cross/mpconfigport.h
@@ -47,6 +47,7 @@
#define MICROPY_EMIT_INLINE_XTENSA (1)
#define MICROPY_EMIT_XTENSAWIN (1)
#define MICROPY_EMIT_RV32 (1)
+#define MICROPY_EMIT_INLINE_RV32 (1)
#define MICROPY_EMIT_NATIVE_DEBUG (1)
#define MICROPY_EMIT_NATIVE_DEBUG_PRINTER (&mp_stdout_print)
diff --git a/ports/qemu/Makefile b/ports/qemu/Makefile
index b85ff2896..ea0bef814 100644
--- a/ports/qemu/Makefile
+++ b/ports/qemu/Makefile
@@ -19,10 +19,10 @@ QSTR_DEFS = qstrdefsport.h
MICROPY_ROM_TEXT_COMPRESSION ?= 1
ifeq ($(QEMU_ARCH),arm)
-FROZEN_MANIFEST ?= "require('unittest'); freeze('test-frzmpy')"
+FROZEN_MANIFEST ?= "require('unittest'); freeze('test-frzmpy', ('frozen_asm_thumb.py', 'frozen_const.py', 'frozen_viper.py', 'native_frozen_align.py'))"
endif
ifeq ($(QEMU_ARCH),riscv32)
-FROZEN_MANIFEST ?= "require('unittest'); freeze('test-frzmpy', ('frozen_const.py', 'frozen_viper.py', 'native_frozen_align.py'))"
+FROZEN_MANIFEST ?= "require('unittest'); freeze('test-frzmpy', ('frozen_asm_rv32.py', 'frozen_const.py', 'frozen_viper.py', 'native_frozen_align.py'))"
endif
# include py core make definitions
diff --git a/ports/qemu/boards/MICROBIT.mk b/ports/qemu/boards/MICROBIT.mk
index 02eb0576c..d6cfd7e22 100644
--- a/ports/qemu/boards/MICROBIT.mk
+++ b/ports/qemu/boards/MICROBIT.mk
@@ -11,3 +11,6 @@ LDSCRIPT = mcu/arm/nrf51.ld
SRC_BOARD_O = shared/runtime/gchelper_native.o shared/runtime/gchelper_thumb1.o
MPY_CROSS_FLAGS += -march=armv7m
+
+# These RV32 tests don't run on Thumb, so exclude them.
+RUN_TESTS_ARGS = --exclude 'inlineasm/rv32'
diff --git a/ports/qemu/boards/MPS2_AN385.mk b/ports/qemu/boards/MPS2_AN385.mk
index 182d076eb..8d5c27df5 100644
--- a/ports/qemu/boards/MPS2_AN385.mk
+++ b/ports/qemu/boards/MPS2_AN385.mk
@@ -10,3 +10,6 @@ LDSCRIPT = mcu/arm/mps2.ld
SRC_BOARD_O = shared/runtime/gchelper_native.o shared/runtime/gchelper_thumb2.o
MPY_CROSS_FLAGS += -march=armv7m
+
+# These RV32 tests don't run on Thumb, so exclude them.
+RUN_TESTS_ARGS = --exclude 'inlineasm/rv32'
diff --git a/ports/qemu/boards/NETDUINO2.mk b/ports/qemu/boards/NETDUINO2.mk
index ffa781f33..f88ea32e7 100644
--- a/ports/qemu/boards/NETDUINO2.mk
+++ b/ports/qemu/boards/NETDUINO2.mk
@@ -10,3 +10,6 @@ LDSCRIPT = mcu/arm/stm32.ld
SRC_BOARD_O = shared/runtime/gchelper_native.o shared/runtime/gchelper_thumb2.o
MPY_CROSS_FLAGS += -march=armv7m
+
+# These RV32 tests don't run on Thumb, so exclude them.
+RUN_TESTS_ARGS = --exclude 'inlineasm/rv32'
diff --git a/ports/qemu/boards/SABRELITE.mk b/ports/qemu/boards/SABRELITE.mk
index 839b3d6ac..d3f4e14d4 100644
--- a/ports/qemu/boards/SABRELITE.mk
+++ b/ports/qemu/boards/SABRELITE.mk
@@ -16,4 +16,4 @@ SRC_BOARD_O = shared/runtime/gchelper_generic.o
MPY_CROSS_FLAGS += -march=armv6
# These tests don't work on Cortex-A9, so exclude them.
-RUN_TESTS_ARGS = --exclude 'inlineasm/thumb/(asmdiv|asmspecialregs).py'
+RUN_TESTS_ARGS = --exclude 'inlineasm/rv32|inlineasm/thumb/(asmdiv|asmspecialregs).py'
diff --git a/ports/qemu/boards/VIRT_RV32.mk b/ports/qemu/boards/VIRT_RV32.mk
index e166165a1..80dd5d56f 100644
--- a/ports/qemu/boards/VIRT_RV32.mk
+++ b/ports/qemu/boards/VIRT_RV32.mk
@@ -11,6 +11,6 @@ SRC_BOARD_O += shared/runtime/gchelper_native.o shared/runtime/gchelper_rv32i.o
MPY_CROSS_FLAGS += -march=rv32imc
# These Thumb tests don't run on RV32, so exclude them.
-RUN_TESTS_ARGS = --exclude 'inlineasm/thumb|qemu/asm_test'
+RUN_TESTS_ARGS = --exclude 'inlineasm/thumb'
RUN_NATMODTESTS_ARGS = --arch rv32imc
diff --git a/ports/qemu/mpconfigport.h b/ports/qemu/mpconfigport.h
index d3e3c0a3f..8f55d284f 100644
--- a/ports/qemu/mpconfigport.h
+++ b/ports/qemu/mpconfigport.h
@@ -39,6 +39,7 @@
#define MICROPY_MAKE_POINTER_CALLABLE(p) ((void *)((mp_uint_t)(p) | 1))
#elif defined(__riscv)
#define MICROPY_EMIT_RV32 (1)
+#define MICROPY_EMIT_INLINE_RV32 (1)
#endif
#define MICROPY_MALLOC_USES_ALLOCATED_SIZE (1)
diff --git a/ports/qemu/test-frzmpy/frozen_asm_rv32.py b/ports/qemu/test-frzmpy/frozen_asm_rv32.py
new file mode 100644
index 000000000..ce1bd428c
--- /dev/null
+++ b/ports/qemu/test-frzmpy/frozen_asm_rv32.py
@@ -0,0 +1,31 @@
+# Test freezing inline-asm code.
+
+# ruff: noqa: F821 - @asm_rv32 decorator adds names to function scope
+
+import micropython
+
+
+@micropython.asm_rv32
+def asm_add(a0, a1):
+ add(a0, a0, a1)
+
+
+@micropython.asm_rv32
+def asm_add1(a0) -> object:
+ slli(a0, a0, 1)
+ addi(a0, a0, 3)
+
+
+@micropython.asm_rv32
+def asm_cast_bool(a0) -> bool:
+ pass
+
+
+@micropython.asm_rv32
+def asm_shift_int(a0) -> int:
+ slli(a0, a0, 29)
+
+
+@micropython.asm_rv32
+def asm_shift_uint(a0) -> uint:
+ slli(a0, a0, 29)
diff --git a/py/asmrv32.c b/py/asmrv32.c
index 6aa3ec165..c24d05a13 100644
--- a/py/asmrv32.c
+++ b/py/asmrv32.c
@@ -45,12 +45,6 @@
#endif
#define INTERNAL_TEMPORARY ASM_RV32_REG_S0
-#define AVAILABLE_REGISTERS_COUNT 32
-
-#define IS_IN_C_REGISTER_WINDOW(register_number) \
- (((register_number) >= ASM_RV32_REG_X8) && ((register_number) <= ASM_RV32_REG_X15))
-#define MAP_IN_C_REGISTER_WINDOW(register_number) \
- ((register_number) - ASM_RV32_REG_X8)
#define FIT_UNSIGNED(value, bits) (((value) & ~((1U << (bits)) - 1)) == 0)
#define FIT_SIGNED(value, bits) \
@@ -106,7 +100,6 @@ static void split_immediate(mp_int_t immediate, mp_uint_t *upper, mp_uint_t *low
// Turn the lower half from unsigned to signed.
if ((*lower & 0x800) != 0) {
*upper += 0x1000;
- *lower -= 0x1000;
}
}
@@ -180,7 +173,7 @@ void asm_rv32_emit_optimised_load_immediate(asm_rv32_t *state, mp_uint_t rd, mp_
static void emit_registers_store(asm_rv32_t *state, mp_uint_t registers_mask) {
mp_uint_t offset = 0;
- for (mp_uint_t register_index = 0; register_index < AVAILABLE_REGISTERS_COUNT; register_index++) {
+ for (mp_uint_t register_index = 0; register_index < RV32_AVAILABLE_REGISTERS_COUNT; register_index++) {
if (registers_mask & (1U << register_index)) {
assert(FIT_UNSIGNED(offset >> 2, 6) && "Registers save stack offset out of range.");
// c.swsp register, offset
@@ -192,7 +185,7 @@ static void emit_registers_store(asm_rv32_t *state, mp_uint_t registers_mask) {
static void emit_registers_load(asm_rv32_t *state, mp_uint_t registers_mask) {
mp_uint_t offset = 0;
- for (mp_uint_t register_index = 0; register_index < AVAILABLE_REGISTERS_COUNT; register_index++) {
+ for (mp_uint_t register_index = 0; register_index < RV32_AVAILABLE_REGISTERS_COUNT; register_index++) {
if (registers_mask & (1U << register_index)) {
assert(FIT_UNSIGNED(offset >> 2, 6) && "Registers load stack offset out of range.");
// c.lwsp register, offset
@@ -262,7 +255,7 @@ static bool calculate_displacement_for_label(asm_rv32_t *state, mp_uint_t label,
void asm_rv32_entry(asm_rv32_t *state, mp_uint_t locals) {
state->saved_registers_mask |= (1U << REG_FUN_TABLE) | (1U << REG_LOCAL_1) | \
- (1U << REG_LOCAL_2) | (1U << REG_LOCAL_3) | (1U << INTERNAL_TEMPORARY);
+ (1U << REG_LOCAL_2) | (1U << REG_LOCAL_3);
state->locals_count = locals;
emit_function_prologue(state, state->saved_registers_mask);
}
@@ -281,10 +274,11 @@ void asm_rv32_emit_call_ind(asm_rv32_t *state, mp_uint_t index) {
mp_uint_t offset = index * ASM_WORD_SIZE;
state->saved_registers_mask |= (1U << ASM_RV32_REG_RA);
- if (IS_IN_C_REGISTER_WINDOW(REG_FUN_TABLE) && IS_IN_C_REGISTER_WINDOW(INTERNAL_TEMPORARY) && FIT_UNSIGNED(offset, 6)) {
+ if (RV32_IS_IN_C_REGISTER_WINDOW(REG_FUN_TABLE) && RV32_IS_IN_C_REGISTER_WINDOW(INTERNAL_TEMPORARY) && FIT_UNSIGNED(offset, 6)) {
+ state->saved_registers_mask |= (1U << INTERNAL_TEMPORARY);
// c.lw temporary, offset(fun_table)
// c.jalr temporary
- asm_rv32_opcode_clw(state, MAP_IN_C_REGISTER_WINDOW(INTERNAL_TEMPORARY), MAP_IN_C_REGISTER_WINDOW(REG_FUN_TABLE), offset);
+ asm_rv32_opcode_clw(state, RV32_MAP_IN_C_REGISTER_WINDOW(INTERNAL_TEMPORARY), RV32_MAP_IN_C_REGISTER_WINDOW(REG_FUN_TABLE), offset);
asm_rv32_opcode_cjalr(state, INTERNAL_TEMPORARY);
return;
}
@@ -341,9 +335,9 @@ void asm_rv32_emit_jump_if_reg_nonzero(asm_rv32_t *state, mp_uint_t rs, mp_uint_
ptrdiff_t displacement = 0;
bool can_emit_short_jump = calculate_displacement_for_label(state, label, &displacement);
- if (can_emit_short_jump && FIT_SIGNED(displacement, 8) && IS_IN_C_REGISTER_WINDOW(rs)) {
+ if (can_emit_short_jump && FIT_SIGNED(displacement, 8) && RV32_IS_IN_C_REGISTER_WINDOW(rs)) {
// c.bnez rs', displacement
- asm_rv32_opcode_cbnez(state, MAP_IN_C_REGISTER_WINDOW(rs), displacement);
+ asm_rv32_opcode_cbnez(state, RV32_MAP_IN_C_REGISTER_WINDOW(rs), displacement);
return;
}
@@ -364,8 +358,8 @@ void asm_rv32_emit_jump_if_reg_nonzero(asm_rv32_t *state, mp_uint_t rs, mp_uint_
// jalr zero, temporary, LO(displacement) ; PC + 8
// ... ; PC + 12
- if (can_emit_short_jump && IS_IN_C_REGISTER_WINDOW(rs)) {
- asm_rv32_opcode_cbeqz(state, MAP_IN_C_REGISTER_WINDOW(rs), 10);
+ if (can_emit_short_jump && RV32_IS_IN_C_REGISTER_WINDOW(rs)) {
+ asm_rv32_opcode_cbeqz(state, RV32_MAP_IN_C_REGISTER_WINDOW(rs), 10);
// Compensate for the C.BEQZ opcode.
displacement -= ASM_HALFWORD_SIZE;
} else {
@@ -438,9 +432,9 @@ void asm_rv32_emit_mov_reg_local(asm_rv32_t *state, mp_uint_t rd, mp_uint_t loca
void asm_rv32_emit_mov_reg_local_addr(asm_rv32_t *state, mp_uint_t rd, mp_uint_t local) {
mp_uint_t offset = state->locals_stack_offset + (local * ASM_WORD_SIZE);
- if (FIT_UNSIGNED(offset, 10) && offset != 0 && IS_IN_C_REGISTER_WINDOW(rd)) {
+ if (FIT_UNSIGNED(offset, 10) && offset != 0 && RV32_IS_IN_C_REGISTER_WINDOW(rd)) {
// c.addi4spn rd', offset
- asm_rv32_opcode_caddi4spn(state, MAP_IN_C_REGISTER_WINDOW(rd), offset);
+ asm_rv32_opcode_caddi4spn(state, RV32_MAP_IN_C_REGISTER_WINDOW(rd), offset);
return;
}
@@ -459,9 +453,9 @@ void asm_rv32_emit_mov_reg_local_addr(asm_rv32_t *state, mp_uint_t rd, mp_uint_t
void asm_rv32_emit_load_reg_reg_offset(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs, mp_int_t offset) {
mp_int_t scaled_offset = offset * sizeof(ASM_WORD_SIZE);
- if (scaled_offset >= 0 && IS_IN_C_REGISTER_WINDOW(rd) && IS_IN_C_REGISTER_WINDOW(rs) && FIT_UNSIGNED(scaled_offset, 6)) {
+ if (scaled_offset >= 0 && RV32_IS_IN_C_REGISTER_WINDOW(rd) && RV32_IS_IN_C_REGISTER_WINDOW(rs) && FIT_UNSIGNED(scaled_offset, 6)) {
// c.lw rd', offset(rs')
- asm_rv32_opcode_clw(state, MAP_IN_C_REGISTER_WINDOW(rd), MAP_IN_C_REGISTER_WINDOW(rs), scaled_offset);
+ asm_rv32_opcode_clw(state, RV32_MAP_IN_C_REGISTER_WINDOW(rd), RV32_MAP_IN_C_REGISTER_WINDOW(rs), scaled_offset);
return;
}
diff --git a/py/asmrv32.h b/py/asmrv32.h
index 775cf1ffc..584e3ffd2 100644
--- a/py/asmrv32.h
+++ b/py/asmrv32.h
@@ -74,9 +74,6 @@
#define ASM_RV32_REG_SP (ASM_RV32_REG_X2)
#define ASM_RV32_REG_GP (ASM_RV32_REG_X3)
#define ASM_RV32_REG_TP (ASM_RV32_REG_X4)
-#define ASM_RV32_REG_T0 (ASM_RV32_REG_X5)
-#define ASM_RV32_REG_T1 (ASM_RV32_REG_X6)
-#define ASM_RV32_REG_T2 (ASM_RV32_REG_X7)
#define ASM_RV32_REG_A0 (ASM_RV32_REG_X10)
#define ASM_RV32_REG_A1 (ASM_RV32_REG_X11)
#define ASM_RV32_REG_A2 (ASM_RV32_REG_X12)
@@ -85,6 +82,9 @@
#define ASM_RV32_REG_A5 (ASM_RV32_REG_X15)
#define ASM_RV32_REG_A6 (ASM_RV32_REG_X16)
#define ASM_RV32_REG_A7 (ASM_RV32_REG_X17)
+#define ASM_RV32_REG_T0 (ASM_RV32_REG_X5)
+#define ASM_RV32_REG_T1 (ASM_RV32_REG_X6)
+#define ASM_RV32_REG_T2 (ASM_RV32_REG_X7)
#define ASM_RV32_REG_T3 (ASM_RV32_REG_X28)
#define ASM_RV32_REG_T4 (ASM_RV32_REG_X29)
#define ASM_RV32_REG_T5 (ASM_RV32_REG_X30)
@@ -103,6 +103,12 @@
#define ASM_RV32_REG_S10 (ASM_RV32_REG_X26)
#define ASM_RV32_REG_S11 (ASM_RV32_REG_X27)
+#define RV32_AVAILABLE_REGISTERS_COUNT 32
+#define RV32_MAP_IN_C_REGISTER_WINDOW(register_number) \
+ ((register_number) - ASM_RV32_REG_X8)
+#define RV32_IS_IN_C_REGISTER_WINDOW(register_number) \
+ (((register_number) >= ASM_RV32_REG_X8) && ((register_number) <= ASM_RV32_REG_X15))
+
typedef struct _asm_rv32_t {
// Opaque emitter state.
mp_asm_base_t base;
@@ -127,6 +133,10 @@ void asm_rv32_end_pass(asm_rv32_t *state);
((imm & 0x1E) << 7) | ((rs1 & 0x1F) << 15) | ((rs2 & 0x1F) << 20) | \
((imm & 0x7E0) << 20) | ((imm & 0x1000) << 19))
+#define RV32_ENCODE_TYPE_CSRI(op, ft3, rd, csr, imm) \
+ ((op & 0x7F) | ((rd & 0x1F) << 7) | ((ft3 & 0x07) << 12) | \
+ ((csr & 0xFFF) << 20) | ((imm & 0x1F) << 15))
+
#define RV32_ENCODE_TYPE_I(op, ft3, rd, rs, imm) \
((op & 0x7F) | ((rd & 0x1F) << 7) | ((ft3 & 0x07) << 12) | \
((rs & 0x1F) << 15) | ((imm & 0xFFF) << 20))
@@ -143,13 +153,16 @@ void asm_rv32_end_pass(asm_rv32_t *state);
((op & 0x7F) | ((imm & 0x1F) << 7) | ((ft3 & 0x07) << 12) | \
((rs1 & 0x1F) << 15) | ((rs2 & 0x1F) << 20) | ((imm & 0xFE0) << 20))
+#define RV32_ENCODE_TYPE_CA(op, ft6, ft2, rd, rs) \
+ ((op & 0x03) | ((ft6 & 0x3F) << 10) | ((ft2 & 0x03) << 5) | \
+ ((rd & 0x03) << 7) | ((rs & 0x03) << 2))
+
#define RV32_ENCODE_TYPE_U(op, rd, imm) \
((op & 0x7F) | ((rd & 0x1F) << 7) | (imm & 0xFFFFF000))
#define RV32_ENCODE_TYPE_CB(op, ft3, rs, imm) \
((op & 0x03) | ((ft3 & 0x07) << 13) | ((rs & 0x07) << 7) | \
- (((imm) & 0x100) << 4) | (((imm) & 0xC0) >> 1) | (((imm) & 0x20) >> 3) | \
- (((imm) & 0x18) << 7) | (((imm) & 0x06) << 2))
+ (((imm) & 0xE0) << 5) | (((imm) & 0x1F) << 2))
#define RV32_ENCODE_TYPE_CI(op, ft3, rd, imm) \
((op & 0x03) | ((ft3 & 0x07) << 13) | ((rd & 0x1F) << 7) | \
@@ -174,6 +187,11 @@ void asm_rv32_end_pass(asm_rv32_t *state);
#define RV32_ENCODE_TYPE_CR(op, ft4, rs1, rs2) \
((op & 0x03) | ((rs2 & 0x1F) << 2) | ((rs1 & 0x1F) << 7) | ((ft4 & 0x0F) << 12))
+#define RV32_ENCODE_TYPE_CS(op, ft3, rd, rs, imm) \
+ ((op & 0x03) | ((ft3 & 0x07) << 13) | ((rd & 0x07) << 2) | \
+ ((rs & 0x07) << 7) | ((imm & 0x40) >> 1) | ((imm & 0x38) << 7) | \
+ ((imm & 0x04) << 4))
+
#define RV32_ENCODE_TYPE_CSS(op, ft3, rs, imm) \
((op & 0x03) | ((ft3 & 0x07) << 13) | ((rs & 0x1F) << 2) | ((imm) & 0x3F) << 7)
@@ -198,6 +216,12 @@ static inline void asm_rv32_opcode_and(asm_rv32_t *state, mp_uint_t rd, mp_uint_
asm_rv32_emit_word_opcode(state, RV32_ENCODE_TYPE_R(0x33, 0x07, 0x00, rd, rs1, rs2));
}
+// ANDI RD, RS, IMMEDIATE
+static inline void asm_rv32_opcode_andi(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs, mp_int_t immediate) {
+ // I: ............ ..... 111 ..... 0010011
+ asm_rv32_emit_word_opcode(state, RV32_ENCODE_TYPE_I(0x13, 0x07, rd, rs, immediate));
+}
+
// AUIPC RD, offset
static inline void asm_rv32_opcode_auipc(asm_rv32_t *state, mp_uint_t rd, mp_int_t offset) {
// U: .................... ..... 0010111
@@ -210,6 +234,30 @@ static inline void asm_rv32_opcode_beq(asm_rv32_t *state, mp_uint_t rs1, mp_uint
asm_rv32_emit_word_opcode(state, RV32_ENCODE_TYPE_B(0x63, 0x00, rs1, rs2, offset));
}
+// BGE RS1, RS2, OFFSET
+static inline void asm_rv32_opcode_bge(asm_rv32_t *state, mp_uint_t rs1, mp_uint_t rs2, mp_int_t offset) {
+ // B: . ...... ..... ..... 101 .... . 1100011
+ asm_rv32_emit_word_opcode(state, RV32_ENCODE_TYPE_B(0x63, 0x05, rs1, rs2, offset));
+}
+
+// BGEU RS1, RS2, OFFSET
+static inline void asm_rv32_opcode_bgeu(asm_rv32_t *state, mp_uint_t rs1, mp_uint_t rs2, mp_int_t offset) {
+ // B: . ...... ..... ..... 111 .... . 1100011
+ asm_rv32_emit_word_opcode(state, RV32_ENCODE_TYPE_B(0x63, 0x07, rs1, rs2, offset));
+}
+
+// BLT RS1, RS2, OFFSET
+static inline void asm_rv32_opcode_blt(asm_rv32_t *state, mp_uint_t rs1, mp_uint_t rs2, mp_int_t offset) {
+ // B: . ...... ..... ..... 100 .... . 1100011
+ asm_rv32_emit_word_opcode(state, RV32_ENCODE_TYPE_B(0x63, 0x04, rs1, rs2, offset));
+}
+
+// BLTU RS1, RS2, OFFSET
+static inline void asm_rv32_opcode_bltu(asm_rv32_t *state, mp_uint_t rs1, mp_uint_t rs2, mp_int_t offset) {
+ // B: . ...... ..... ..... 110 .... . 1100011
+ asm_rv32_emit_word_opcode(state, RV32_ENCODE_TYPE_B(0x63, 0x06, rs1, rs2, offset));
+}
+
// BNE RS1, RS2, OFFSET
static inline void asm_rv32_opcode_bne(asm_rv32_t *state, mp_uint_t rs1, mp_uint_t rs2, mp_int_t offset) {
// B: . ...... ..... ..... 001 .... . 1100011
@@ -234,16 +282,39 @@ static inline void asm_rv32_opcode_caddi4spn(asm_rv32_t *state, mp_uint_t rd, mp
asm_rv32_emit_halfword_opcode(state, RV32_ENCODE_TYPE_CIW(0x00, 0x00, rd, immediate));
}
+// C.AND RD', RS'
+static inline void asm_rv32_opcode_cand(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs) {
+ // CA: 100011 ... 11 ... 01
+ asm_rv32_emit_halfword_opcode(state, RV32_ENCODE_TYPE_CA(0x01, 0x23, 0x03, rd, rs));
+}
+
+// C.ANDI RD', IMMEDIATE
+static inline void asm_rv32_opcode_candi(asm_rv32_t *state, mp_uint_t rd, mp_int_t immediate) {
+ // CB: 100 . 10 ... ..... 01
+ asm_rv32_emit_halfword_opcode(state, RV32_ENCODE_TYPE_CB(0x01, 0x04, rd,
+ (((immediate & 0x20) << 2) | (immediate & 0x1F) | 0x40)));
+}
+
// C.BEQZ RS', IMMEDIATE
static inline void asm_rv32_opcode_cbeqz(asm_rv32_t *state, mp_uint_t rs, mp_int_t offset) {
// CB: 110 ... ... ..... 01
- asm_rv32_emit_halfword_opcode(state, RV32_ENCODE_TYPE_CB(0x01, 0x06, rs, offset));
+ asm_rv32_emit_halfword_opcode(state, RV32_ENCODE_TYPE_CB(0x01, 0x06, rs,
+ (((offset & 0x100) >> 1) | ((offset & 0xC0) >> 3) | ((offset & 0x20) >> 5) |
+ ((offset & 0x18) << 2) | (offset & 0x06))));
}
// C.BNEZ RS', IMMEDIATE
static inline void asm_rv32_opcode_cbnez(asm_rv32_t *state, mp_uint_t rs, mp_int_t offset) {
// CB: 111 ... ... ..... 01
- asm_rv32_emit_halfword_opcode(state, RV32_ENCODE_TYPE_CB(0x01, 0x07, rs, offset));
+ asm_rv32_emit_halfword_opcode(state, RV32_ENCODE_TYPE_CB(0x01, 0x07, rs,
+ (((offset & 0x100) >> 1) | ((offset & 0xC0) >> 3) | ((offset & 0x20) >> 5) |
+ ((offset & 0x18) << 2) | (offset & 0x06))));
+}
+
+// C.EBREAK
+static inline void asm_rv32_opcode_cebreak(asm_rv32_t *state) {
+ // CA: 100 1 00000 00000 10
+ asm_rv32_emit_halfword_opcode(state, 0x9002);
}
// C.J OFFSET
@@ -252,6 +323,12 @@ static inline void asm_rv32_opcode_cj(asm_rv32_t *state, mp_int_t offset) {
asm_rv32_emit_halfword_opcode(state, RV32_ENCODE_TYPE_CJ(0x01, 0x05, offset));
}
+// C.JAL OFFSET
+static inline void asm_rv32_opcode_cjal(asm_rv32_t *state, mp_int_t offset) {
+ // CJ: 001 ........... 01
+ asm_rv32_emit_halfword_opcode(state, RV32_ENCODE_TYPE_CJ(0x01, 0x01, offset));
+}
+
// C.JALR RS
static inline void asm_rv32_opcode_cjalr(asm_rv32_t *state, mp_uint_t rs) {
// CR: 1001 ..... 00000 10
@@ -294,31 +371,159 @@ static inline void asm_rv32_opcode_cmv(asm_rv32_t *state, mp_uint_t rd, mp_uint_
asm_rv32_emit_halfword_opcode(state, RV32_ENCODE_TYPE_CR(0x02, 0x08, rd, rs));
}
+// C.NOP
+static inline void asm_rv32_opcode_cnop(asm_rv32_t *state) {
+ // CI: 000 . 00000 ..... 01
+ asm_rv32_emit_halfword_opcode(state, 0x0001);
+}
+
+// C.OR RD', RS'
+static inline void asm_rv32_opcode_cor(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs) {
+ // CA: 100011 ... 10 ... 01
+ asm_rv32_emit_halfword_opcode(state, RV32_ENCODE_TYPE_CA(0x01, 0x23, 0x02, rd, rs));
+}
+
+// C.SLLI RD, IMMEDIATE
+static inline void asm_rv32_opcode_cslli(asm_rv32_t *state, mp_uint_t rd, mp_int_t immediate) {
+ // CI: 000 . ..... ..... 10
+ asm_rv32_emit_halfword_opcode(state, RV32_ENCODE_TYPE_CI(0x02, 0x00, rd, immediate));
+}
+
+// C.SRAI RD, IMMEDIATE
+static inline void asm_rv32_opcode_csrai(asm_rv32_t *state, mp_uint_t rd, mp_int_t immediate) {
+ // CB: 100 . 01 ... ..... 01
+ asm_rv32_emit_halfword_opcode(state, RV32_ENCODE_TYPE_CB(0x01, 0x04, rd,
+ (((immediate & 0x20) << 2) | (immediate & 0x1F) | 0x20)));
+}
+
+// C.SRLI RD, IMMEDIATE
+static inline void asm_rv32_opcode_csrli(asm_rv32_t *state, mp_uint_t rd, mp_int_t immediate) {
+ // CB: 100 . 00 ... ..... 01
+ asm_rv32_emit_halfword_opcode(state, RV32_ENCODE_TYPE_CB(0x01, 0x04, rd,
+ (((immediate & 0x20) << 2) | (immediate & 0x1F))));
+}
+
+// C.SUB RD', RS'
+static inline void asm_rv32_opcode_csub(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs) {
+ // CA: 100011 ... 00 ... 01
+ asm_rv32_emit_halfword_opcode(state, RV32_ENCODE_TYPE_CA(0x01, 0x23, 0x00, rd, rs));
+}
+
+// C.SW RS1', OFFSET(RS2')
+static inline void asm_rv32_opcode_csw(asm_rv32_t *state, mp_uint_t rs1, mp_uint_t rs2, mp_int_t offset) {
+ // CS: 110 ... ... .. ... 00
+ asm_rv32_emit_halfword_opcode(state, RV32_ENCODE_TYPE_CL(0x00, 0x06, rs1, rs2, offset));
+}
+
// C.SWSP RS, OFFSET
static inline void asm_rv32_opcode_cswsp(asm_rv32_t *state, mp_uint_t rs, mp_uint_t offset) {
// CSS: 010 ...... ..... 10
asm_rv32_emit_halfword_opcode(state, RV32_ENCODE_TYPE_CSS(0x02, 0x06, rs, ((offset & 0xC0) >> 6) | (offset & 0x3C)));
}
-// JALR RD, RS, offset
+// C.XOR RD', RS'
+static inline void asm_rv32_opcode_cxor(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs) {
+ // CA: 100011 ... 01 ... 01
+ asm_rv32_emit_halfword_opcode(state, RV32_ENCODE_TYPE_CA(0x01, 0x23, 0x01, rd, rs));
+}
+
+// CSRRC RD, RS, IMMEDIATE
+static inline void asm_rv32_opcode_csrrc(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs, mp_int_t immediate) {
+ // I: ............ ..... 011 ..... 1110011
+ asm_rv32_emit_word_opcode(state, RV32_ENCODE_TYPE_I(0x73, 0x03, rd, rs, immediate));
+}
+
+// CSRRS RD, RS, IMMEDIATE
+static inline void asm_rv32_opcode_csrrs(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs, mp_int_t immediate) {
+ // I: ............ ..... 010 ..... 1110011
+ asm_rv32_emit_word_opcode(state, RV32_ENCODE_TYPE_I(0x73, 0x02, rd, rs, immediate));
+}
+
+// CSRRW RD, RS, IMMEDIATE
+static inline void asm_rv32_opcode_csrrw(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs, mp_int_t immediate) {
+ // I: ............ ..... 001 ..... 1110011
+ asm_rv32_emit_word_opcode(state, RV32_ENCODE_TYPE_I(0x73, 0x01, rd, rs, immediate));
+}
+
+// CSRRCI RD, CSR, IMMEDIATE
+static inline void asm_rv32_opcode_csrrci(asm_rv32_t *state, mp_uint_t rd, mp_uint_t csr, mp_int_t immediate) {
+ // CSRI: ............ ..... 111 ..... 1110011
+ asm_rv32_emit_word_opcode(state, RV32_ENCODE_TYPE_CSRI(0x73, 0x07, rd, csr, immediate));
+}
+
+// CSRRSI RD, CSR, IMMEDIATE
+static inline void asm_rv32_opcode_csrrsi(asm_rv32_t *state, mp_uint_t rd, mp_uint_t csr, mp_int_t immediate) {
+ // CSRI: ............ ..... 110 ..... 1110011
+ asm_rv32_emit_word_opcode(state, RV32_ENCODE_TYPE_CSRI(0x73, 0x06, rd, csr, immediate));
+}
+
+// CSRRWI RD, CSR, IMMEDIATE
+static inline void asm_rv32_opcode_csrrwi(asm_rv32_t *state, mp_uint_t rd, mp_uint_t csr, mp_int_t immediate) {
+ // CSRI: ............ ..... 101 ..... 1110011
+ asm_rv32_emit_word_opcode(state, RV32_ENCODE_TYPE_CSRI(0x73, 0x05, rd, csr, immediate));
+}
+
+// DIV RD, RS1, RS2
+static inline void asm_rv32_opcode_div(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs1, mp_uint_t rs2) {
+ // R: 0000001 ..... ..... 100 ..... 0110011
+ asm_rv32_emit_word_opcode(state, RV32_ENCODE_TYPE_R(0x33, 0x04, 0x01, rd, rs1, rs2));
+}
+
+// DIVU RD, RS1, RS2
+static inline void asm_rv32_opcode_divu(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs1, mp_uint_t rs2) {
+ // R: 0000001 ..... ..... 101 ..... 0110011
+ asm_rv32_emit_word_opcode(state, RV32_ENCODE_TYPE_R(0x33, 0x05, 0x01, rd, rs1, rs2));
+}
+
+// EBREAK
+static inline void asm_rv32_opcode_ebreak(asm_rv32_t *state) {
+ // I: 000000000001 00000 000 00000 1110011
+ asm_rv32_emit_word_opcode(state, 0x100073);
+}
+
+// ECALL
+static inline void asm_rv32_opcode_ecall(asm_rv32_t *state) {
+ // I: 000000000000 00000 000 00000 1110011
+ asm_rv32_emit_word_opcode(state, 0x73);
+}
+
+// JAL RD, OFFSET
+static inline void asm_rv32_opcode_jal(asm_rv32_t *state, mp_uint_t rd, mp_int_t offset) {
+ // J: ......................... 1101111
+ asm_rv32_emit_word_opcode(state, RV32_ENCODE_TYPE_J(0x6F, rd, offset));
+}
+
+// JALR RD, RS, OFFSET
static inline void asm_rv32_opcode_jalr(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs, mp_int_t offset) {
// I: ............ ..... 000 ..... 1100111
asm_rv32_emit_word_opcode(state, RV32_ENCODE_TYPE_I(0x67, 0x00, rd, rs, offset));
}
+// LB RD, OFFSET(RS)
+static inline void asm_rv32_opcode_lb(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs, mp_int_t offset) {
+ // I: ............ ..... 000 ..... 0000011
+ asm_rv32_emit_word_opcode(state, RV32_ENCODE_TYPE_I(0x03, 0x00, rd, rs, offset));
+}
+
// LBU RD, OFFSET(RS)
static inline void asm_rv32_opcode_lbu(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs, mp_int_t offset) {
// I: ............ ..... 100 ..... 0000011
asm_rv32_emit_word_opcode(state, RV32_ENCODE_TYPE_I(0x03, 0x04, rd, rs, offset));
}
+// LH RD, OFFSET(RS)
+static inline void asm_rv32_opcode_lh(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs, mp_int_t offset) {
+ // I: ............ ..... 001 ..... 0000011
+ asm_rv32_emit_word_opcode(state, RV32_ENCODE_TYPE_I(0x03, 0x01, rd, rs, offset));
+}
+
// LHU RD, OFFSET(RS)
static inline void asm_rv32_opcode_lhu(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs, mp_int_t offset) {
// I: ............ ..... 101 ..... 0000011
asm_rv32_emit_word_opcode(state, RV32_ENCODE_TYPE_I(0x03, 0x05, rd, rs, offset));
}
-// LUI RD, immediate
+// LUI RD, IMMEDIATE
static inline void asm_rv32_opcode_lui(asm_rv32_t *state, mp_uint_t rd, mp_int_t immediate) {
// U: .................... ..... 0110111
asm_rv32_emit_word_opcode(state, RV32_ENCODE_TYPE_U(0x37, rd, immediate));
@@ -336,12 +541,48 @@ static inline void asm_rv32_opcode_mul(asm_rv32_t *state, mp_uint_t rd, mp_uint_
asm_rv32_emit_word_opcode(state, RV32_ENCODE_TYPE_R(0x33, 0x00, 0x01, rd, rs1, rs2));
}
+// MULH RD, RS1, RS2
+static inline void asm_rv32_opcode_mulh(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs1, mp_uint_t rs2) {
+ // R: 0000001 ..... ..... 001 ..... 0110011
+ asm_rv32_emit_word_opcode(state, RV32_ENCODE_TYPE_R(0x33, 0x01, 0x01, rd, rs1, rs2));
+}
+
+// MULHSU RD, RS1, RS2
+static inline void asm_rv32_opcode_mulhsu(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs1, mp_uint_t rs2) {
+ // R: 0000001 ..... ..... 010 ..... 0110011
+ asm_rv32_emit_word_opcode(state, RV32_ENCODE_TYPE_R(0x33, 0x02, 0x01, rd, rs1, rs2));
+}
+
+// MULHU RD, RS1, RS2
+static inline void asm_rv32_opcode_mulhu(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs1, mp_uint_t rs2) {
+ // R: 0000001 ..... ..... 011 ..... 0110011
+ asm_rv32_emit_word_opcode(state, RV32_ENCODE_TYPE_R(0x33, 0x03, 0x01, rd, rs1, rs2));
+}
+
// OR RD, RS1, RS2
static inline void asm_rv32_opcode_or(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs1, mp_uint_t rs2) {
// R: 0000000 ..... ..... 110 ..... 0110011
asm_rv32_emit_word_opcode(state, RV32_ENCODE_TYPE_R(0x33, 0x06, 0x00, rd, rs1, rs2));
}
+// ORI RD, RS, IMMEDIATE
+static inline void asm_rv32_opcode_ori(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs, mp_int_t immediate) {
+ // I: ............ ..... 110 ..... 0010011
+ asm_rv32_emit_word_opcode(state, RV32_ENCODE_TYPE_I(0x13, 0x06, rd, rs, immediate));
+}
+
+// REM RD, RS1, RS2
+static inline void asm_rv32_opcode_rem(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs1, mp_uint_t rs2) {
+ // R: 0000001 ..... ..... 110 ..... 0110011
+ asm_rv32_emit_word_opcode(state, RV32_ENCODE_TYPE_R(0x33, 0x06, 0x01, rd, rs1, rs2));
+}
+
+// REMU RD, RS1, RS2
+static inline void asm_rv32_opcode_remu(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs1, mp_uint_t rs2) {
+ // R: 0000001 ..... ..... 111 ..... 0110011
+ asm_rv32_emit_word_opcode(state, RV32_ENCODE_TYPE_R(0x33, 0x07, 0x01, rd, rs1, rs2));
+}
+
// SLL RD, RS1, RS2
static inline void asm_rv32_opcode_sll(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs1, mp_uint_t rs2) {
// R: 0000000 ..... ..... 001 ..... 0110011
@@ -349,23 +590,29 @@ static inline void asm_rv32_opcode_sll(asm_rv32_t *state, mp_uint_t rd, mp_uint_
}
// SLLI RD, RS, IMMEDIATE
-static inline void asm_rv32_opcode_slli(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs, mp_uint_t immediate) {
+static inline void asm_rv32_opcode_slli(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs, mp_int_t immediate) {
// I: 0000000..... ..... 001 ..... 0010011
asm_rv32_emit_word_opcode(state, RV32_ENCODE_TYPE_I(0x13, 0x01, rd, rs, immediate & 0x1F));
}
-// SRL RD, RS1, RS2
-static inline void asm_rv32_opcode_srl(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs1, mp_uint_t rs2) {
- // R: 0000000 ..... ..... 101 ..... 0110011
- asm_rv32_emit_word_opcode(state, RV32_ENCODE_TYPE_R(0x33, 0x05, 0x00, rd, rs1, rs2));
-}
-
// SLT RD, RS1, RS2
static inline void asm_rv32_opcode_slt(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs1, mp_uint_t rs2) {
// R: 0000000 ..... ..... 010 ..... 0110011
asm_rv32_emit_word_opcode(state, RV32_ENCODE_TYPE_R(0x33, 0x02, 0x00, rd, rs1, rs2));
}
+// SLTI RD, RS, IMMEDIATE
+static inline void asm_rv32_opcode_slti(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs, mp_int_t immediate) {
+ // I: ............ ..... 010 ..... 0010011
+ asm_rv32_emit_word_opcode(state, RV32_ENCODE_TYPE_I(0x13, 0x02, rd, rs, immediate));
+}
+
+// SLTIU RD, RS, IMMEDIATE
+static inline void asm_rv32_opcode_sltiu(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs, mp_int_t immediate) {
+ // I: ............ ..... 011 ..... 0010011
+ asm_rv32_emit_word_opcode(state, RV32_ENCODE_TYPE_I(0x13, 0x03, rd, rs, immediate));
+}
+
// SLTU RD, RS1, RS2
static inline void asm_rv32_opcode_sltu(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs1, mp_uint_t rs2) {
// R: 0000000 ..... ..... 011 ..... 0110011
@@ -378,6 +625,24 @@ static inline void asm_rv32_opcode_sra(asm_rv32_t *state, mp_uint_t rd, mp_uint_
asm_rv32_emit_word_opcode(state, RV32_ENCODE_TYPE_R(0x33, 0x05, 0x20, rd, rs1, rs2));
}
+// SRAI RD, RS, IMMEDIATE
+static inline void asm_rv32_opcode_srai(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs, mp_int_t immediate) {
+ // I: 0100000..... ..... 101 ..... 0010011
+ asm_rv32_emit_word_opcode(state, RV32_ENCODE_TYPE_I(0x13, 0x05, rd, rs, ((immediate & 0x1F) | 0x400)));
+}
+
+// SRL RD, RS1, RS2
+static inline void asm_rv32_opcode_srl(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs1, mp_uint_t rs2) {
+ // R: 0000000 ..... ..... 101 ..... 0110011
+ asm_rv32_emit_word_opcode(state, RV32_ENCODE_TYPE_R(0x33, 0x05, 0x00, rd, rs1, rs2));
+}
+
+// SRLI RD, RS, IMMEDIATE
+static inline void asm_rv32_opcode_srli(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs, mp_int_t immediate) {
+ // I: 0000000..... ..... 101 ..... 0010011
+ asm_rv32_emit_word_opcode(state, RV32_ENCODE_TYPE_I(0x13, 0x05, rd, rs, immediate & 0x1F));
+}
+
// SUB RD, RS1, RS2
static inline void asm_rv32_opcode_sub(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs1, mp_uint_t rs2) {
// R: 0100000 ..... ..... 000 ..... 0110011
@@ -435,6 +700,8 @@ void asm_rv32_meta_comparison_ne(asm_rv32_t *state, mp_uint_t rs1, mp_uint_t rs2
void asm_rv32_meta_comparison_lt(asm_rv32_t *state, mp_uint_t rs1, mp_uint_t rs2, mp_uint_t rd, bool unsigned_comparison);
void asm_rv32_meta_comparison_le(asm_rv32_t *state, mp_uint_t rs1, mp_uint_t rs2, mp_uint_t rd, bool unsigned_comparison);
+void asm_rv32_emit_optimised_load_immediate(asm_rv32_t *state, mp_uint_t rd, mp_int_t immediate);
+
#ifdef GENERIC_ASM_API
void asm_rv32_emit_call_ind(asm_rv32_t *state, mp_uint_t index);
@@ -444,10 +711,9 @@ void asm_rv32_emit_jump_if_reg_nonzero(asm_rv32_t *state, mp_uint_t rs, mp_uint_
void asm_rv32_emit_load16_reg_reg_offset(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs, mp_int_t offset);
void asm_rv32_emit_load_reg_reg_offset(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs, mp_int_t offset);
void asm_rv32_emit_mov_local_reg(asm_rv32_t *state, mp_uint_t local, mp_uint_t rs);
-void asm_rv32_emit_mov_reg_local(asm_rv32_t *state, mp_uint_t rd, mp_uint_t local);
void asm_rv32_emit_mov_reg_local_addr(asm_rv32_t *state, mp_uint_t rd, mp_uint_t local);
+void asm_rv32_emit_mov_reg_local(asm_rv32_t *state, mp_uint_t rd, mp_uint_t local);
void asm_rv32_emit_mov_reg_pcrel(asm_rv32_t *state, mp_uint_t rd, mp_uint_t label);
-void asm_rv32_emit_optimised_load_immediate(asm_rv32_t *state, mp_uint_t rd, mp_int_t immediate);
void asm_rv32_emit_optimised_xor(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs);
void asm_rv32_emit_store_reg_reg_offset(asm_rv32_t *state, mp_uint_t source, mp_uint_t base, mp_int_t offset);
diff --git a/py/compile.c b/py/compile.c
index d2af6aaf2..60f06d777 100644
--- a/py/compile.c
+++ b/py/compile.c
@@ -147,6 +147,7 @@ static const emit_inline_asm_method_table_t *emit_asm_table[] = {
&emit_inline_thumb_method_table,
&emit_inline_xtensa_method_table,
NULL,
+ &emit_inline_rv32_method_table,
};
#elif MICROPY_EMIT_INLINE_ASM
@@ -157,6 +158,9 @@ static const emit_inline_asm_method_table_t *emit_asm_table[] = {
#elif MICROPY_EMIT_INLINE_XTENSA
#define ASM_DECORATOR_QSTR MP_QSTR_asm_xtensa
#define ASM_EMITTER(f) emit_inline_xtensa_##f
+#elif MICROPY_EMIT_INLINE_RV32
+#define ASM_DECORATOR_QSTR MP_QSTR_asm_rv32
+#define ASM_EMITTER(f) emit_inline_rv32_##f
#else
#error "unknown asm emitter"
#endif
@@ -855,6 +859,8 @@ static bool compile_built_in_decorator(compiler_t *comp, size_t name_len, mp_par
*emit_options = MP_EMIT_OPT_ASM;
} else if (attr == MP_QSTR_asm_xtensa) {
*emit_options = MP_EMIT_OPT_ASM;
+ } else if (attr == MP_QSTR_asm_rv32) {
+ *emit_options = MP_EMIT_OPT_ASM;
#else
} else if (attr == ASM_DECORATOR_QSTR) {
*emit_options = MP_EMIT_OPT_ASM;
diff --git a/py/emit.h b/py/emit.h
index 623b16349..033ac9c76 100644
--- a/py/emit.h
+++ b/py/emit.h
@@ -307,12 +307,15 @@ typedef struct _emit_inline_asm_method_table_t {
void (*op)(emit_inline_asm_t *emit, qstr op, mp_uint_t n_args, mp_parse_node_t *pn_args);
} emit_inline_asm_method_table_t;
+extern const emit_inline_asm_method_table_t emit_inline_rv32_method_table;
extern const emit_inline_asm_method_table_t emit_inline_thumb_method_table;
extern const emit_inline_asm_method_table_t emit_inline_xtensa_method_table;
+emit_inline_asm_t *emit_inline_rv32_new(mp_uint_t max_num_labels);
emit_inline_asm_t *emit_inline_thumb_new(mp_uint_t max_num_labels);
emit_inline_asm_t *emit_inline_xtensa_new(mp_uint_t max_num_labels);
+void emit_inline_rv32_free(emit_inline_asm_t *emit);
void emit_inline_thumb_free(emit_inline_asm_t *emit);
void emit_inline_xtensa_free(emit_inline_asm_t *emit);
diff --git a/py/emitinlinerv32.c b/py/emitinlinerv32.c
new file mode 100644
index 000000000..179e289c6
--- /dev/null
+++ b/py/emitinlinerv32.c
@@ -0,0 +1,818 @@
+/*
+ * This file is part of the MicroPython project, https://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2024 Alessandro Gatti
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdarg.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "py/asmrv32.h"
+#include "py/emit.h"
+#include "py/misc.h"
+
+#if MICROPY_EMIT_INLINE_RV32
+
+typedef enum {
+// define rules with a compile function
+#define DEF_RULE(rule, comp, kind, ...) PN_##rule,
+#define DEF_RULE_NC(rule, kind, ...)
+ #include "py/grammar.h"
+#undef DEF_RULE
+#undef DEF_RULE_NC
+ PN_const_object, // special node for a constant, generic Python object
+// define rules without a compile function
+#define DEF_RULE(rule, comp, kind, ...)
+#define DEF_RULE_NC(rule, kind, ...) PN_##rule,
+ #include "py/grammar.h"
+#undef DEF_RULE
+#undef DEF_RULE_NC
+} pn_kind_t;
+
+struct _emit_inline_asm_t {
+ asm_rv32_t as;
+ uint16_t pass;
+ mp_obj_t *error_slot;
+ mp_uint_t max_num_labels;
+ qstr *label_lookup;
+};
+
+static const qstr REGISTERS_QSTR_TABLE[] = {
+ MP_QSTR_zero, MP_QSTR_ra, MP_QSTR_sp, MP_QSTR_gp, MP_QSTR_tp, MP_QSTR_t0, MP_QSTR_t1, MP_QSTR_t2,
+ MP_QSTR_s0, MP_QSTR_s1, MP_QSTR_a0, MP_QSTR_a1, MP_QSTR_a2, MP_QSTR_a3, MP_QSTR_a4, MP_QSTR_a5,
+ MP_QSTR_a6, MP_QSTR_a7, MP_QSTR_s2, MP_QSTR_s3, MP_QSTR_s4, MP_QSTR_s5, MP_QSTR_s6, MP_QSTR_s7,
+ MP_QSTR_s8, MP_QSTR_s9, MP_QSTR_s10, MP_QSTR_s11, MP_QSTR_t3, MP_QSTR_t4, MP_QSTR_t5, MP_QSTR_t6,
+ MP_QSTR_x0, MP_QSTR_x1, MP_QSTR_x2, MP_QSTR_x3, MP_QSTR_x4, MP_QSTR_x5, MP_QSTR_x6, MP_QSTR_x7,
+ MP_QSTR_x8, MP_QSTR_x9, MP_QSTR_x10, MP_QSTR_x11, MP_QSTR_x12, MP_QSTR_x13, MP_QSTR_x14, MP_QSTR_x15,
+ MP_QSTR_x16, MP_QSTR_x17, MP_QSTR_x18, MP_QSTR_x19, MP_QSTR_x20, MP_QSTR_x21, MP_QSTR_x22, MP_QSTR_x23,
+ MP_QSTR_x24, MP_QSTR_x25, MP_QSTR_x26, MP_QSTR_x27, MP_QSTR_x28, MP_QSTR_x29, MP_QSTR_x30, MP_QSTR_x31,
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+static inline void emit_inline_rv32_error_msg(emit_inline_asm_t *emit, mp_rom_error_text_t msg) {
+ *emit->error_slot = mp_obj_new_exception_msg(&mp_type_SyntaxError, msg);
+}
+
+static inline void emit_inline_rv32_error_exc(emit_inline_asm_t *emit, mp_obj_t exc) {
+ *emit->error_slot = exc;
+}
+
+emit_inline_asm_t *emit_inline_rv32_new(mp_uint_t max_num_labels) {
+ emit_inline_asm_t *emit = m_new_obj(emit_inline_asm_t);
+ memset(&emit->as, 0, sizeof(emit->as));
+ mp_asm_base_init(&emit->as.base, max_num_labels);
+ emit->max_num_labels = max_num_labels;
+ emit->label_lookup = m_new(qstr, max_num_labels);
+ return emit;
+}
+
+void emit_inline_rv32_free(emit_inline_asm_t *emit) {
+ m_del(qstr, emit->label_lookup, emit->max_num_labels);
+ mp_asm_base_deinit(&emit->as.base, false);
+ m_del_obj(emit_inline_asm_t, emit);
+}
+
+static void emit_inline_rv32_start_pass(emit_inline_asm_t *emit, pass_kind_t pass, mp_obj_t *error_slot) {
+ emit->pass = pass;
+ emit->error_slot = error_slot;
+ if (emit->pass == MP_PASS_CODE_SIZE) {
+ memset(emit->label_lookup, 0, emit->max_num_labels * sizeof(qstr));
+ }
+ mp_asm_base_start_pass(&emit->as.base, pass == MP_PASS_EMIT ? MP_ASM_PASS_EMIT : MP_ASM_PASS_COMPUTE);
+}
+
+static void emit_inline_rv32_end_pass(emit_inline_asm_t *emit, mp_uint_t type_sig) {
+ // c.jr ra
+ asm_rv32_opcode_cjr(&emit->as, ASM_RV32_REG_RA);
+ asm_rv32_end_pass(&emit->as);
+}
+
+static bool parse_register_node(mp_parse_node_t node, mp_uint_t *register_number, bool compressed) {
+ assert(register_number != NULL && "Register number pointer is NULL.");
+
+ if (!MP_PARSE_NODE_IS_ID(node)) {
+ return false;
+ }
+
+ qstr node_qstr = MP_PARSE_NODE_LEAF_ARG(node);
+ for (mp_uint_t index = 0; index < MP_ARRAY_SIZE(REGISTERS_QSTR_TABLE); index++) {
+ if (node_qstr == REGISTERS_QSTR_TABLE[index]) {
+ mp_uint_t number = index % RV32_AVAILABLE_REGISTERS_COUNT;
+ if (!compressed || (compressed && RV32_IS_IN_C_REGISTER_WINDOW(number))) {
+ *register_number = compressed ? RV32_MAP_IN_C_REGISTER_WINDOW(number) : number;
+ return true;
+ }
+ break;
+ }
+ }
+
+ return false;
+}
+
+static mp_uint_t lookup_label(emit_inline_asm_t *emit, mp_parse_node_t node, qstr *qstring) {
+ assert(qstring && "qstring pointer is NULL");
+
+ *qstring = MP_PARSE_NODE_LEAF_ARG(node);
+ for (mp_uint_t label = 0; label < emit->max_num_labels; label++) {
+ if (emit->label_lookup[label] == *qstring) {
+ return label;
+ }
+ }
+
+ return emit->max_num_labels;
+}
+
+static inline ptrdiff_t label_code_offset(emit_inline_asm_t *emit, mp_uint_t label_index) {
+ return emit->as.base.label_offsets[label_index] - emit->as.base.code_offset;
+}
+
+static mp_uint_t emit_inline_rv32_count_params(emit_inline_asm_t *emit, mp_uint_t parameters_count, mp_parse_node_t *parameter_nodes) {
+ // TODO: Raise this up to 8? RV32I has 8 A-registers that are meant to
+ // be used for passing arguments.
+
+ if (parameters_count > 4) {
+ emit_inline_rv32_error_msg(emit, MP_ERROR_TEXT("can only have up to 4 parameters for RV32 assembly"));
+ return 0;
+ }
+
+ mp_uint_t register_index = 0;
+ for (mp_uint_t index = 0; index < parameters_count; index++) {
+ bool valid_register = parse_register_node(parameter_nodes[index], &register_index, false);
+ if (!valid_register || (register_index != (ASM_RV32_REG_A0 + index))) {
+ emit_inline_rv32_error_msg(emit, MP_ERROR_TEXT("parameters must be registers in sequence a0 to a3"));
+ return 0;
+ }
+ }
+
+ return parameters_count;
+}
+
+static bool emit_inline_rv32_label(emit_inline_asm_t *emit, mp_uint_t label_num, qstr label_id) {
+ assert(label_num < emit->max_num_labels);
+ if (emit->pass == MP_PASS_CODE_SIZE) {
+ for (mp_uint_t index = 0; index < emit->max_num_labels; index++) {
+ if (emit->label_lookup[index] == label_id) {
+ return false;
+ }
+ }
+ }
+ emit->label_lookup[label_num] = label_id;
+ mp_asm_base_label_assign(&emit->as.base, label_num);
+ return true;
+}
+
+#define CALL_RRR 0 // Register, Register, Register
+#define CALL_RR 1 // Register, Register
+#define CALL_RRI 2 // Register, Register, Immediate
+#define CALL_RRL 3 // Register, Register, Label
+#define CALL_RI 4 // Register, Immediate
+#define CALL_L 5 // Label
+#define CALL_R 6 // Register
+#define CALL_RL 7 // Register, Label
+#define CALL_N 8 // No arguments
+#define CALL_I 9 // Immediate
+#define CALL_RII 10 // Register, Immediate, Immediate
+#define CALL_RIR 11 // Register, Immediate(Register)
+
+#define N 0 // No argument
+#define R 1 // Register
+#define I 2 // Immediate
+#define L 3 // Label
+#define C (1 << 2) // Compressed register
+#define U (1 << 2) // Unsigned immediate
+#define Z (1 << 3) // Non-zero
+
+typedef void (*call_l_t)(asm_rv32_t *state, mp_uint_t label_index);
+typedef void (*call_ri_t)(asm_rv32_t *state, mp_uint_t rd, mp_int_t immediate);
+typedef void (*call_rri_t)(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs1, mp_int_t immediate);
+typedef void (*call_rii_t)(asm_rv32_t *state, mp_uint_t rd, mp_uint_t immediate1, mp_int_t immediate2);
+typedef void (*call_rrr_t)(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs1, mp_uint_t rs2);
+typedef void (*call_rr_t)(asm_rv32_t *state, mp_uint_t rd, mp_uint_t rs);
+typedef void (*call_i_t)(asm_rv32_t *state, mp_int_t immediate);
+typedef void (*call_r_t)(asm_rv32_t *state, mp_uint_t rd);
+typedef void (*call_n_t)(asm_rv32_t *state);
+
+typedef struct _opcode_t {
+ qstr qstring;
+ void *emitter;
+ uint32_t calling_convention : 4;
+ uint32_t argument1_kind : 4;
+ uint32_t argument1_shift : 5;
+ uint32_t argument2_kind : 4;
+ uint32_t argument2_shift : 5;
+ uint32_t argument3_kind : 4;
+ uint32_t argument3_shift : 5;
+ uint32_t argument1_mask;
+ uint32_t argument2_mask;
+ uint32_t argument3_mask;
+} opcode_t;
+
+#define opcode_li asm_rv32_emit_optimised_load_immediate
+
+static void opcode_la(asm_rv32_t *state, mp_uint_t rd, mp_int_t displacement) {
+ // This cannot be optimised for size, otherwise label addresses would move around.
+ mp_uint_t upper = (mp_uint_t)displacement & 0xFFFFF000;
+ mp_uint_t lower = (mp_uint_t)displacement & 0x00000FFF;
+ if ((lower & 0x800) != 0) {
+ upper += 0x1000;
+ }
+ asm_rv32_opcode_auipc(state, rd, upper);
+ asm_rv32_opcode_addi(state, rd, rd, lower);
+}
+
+#define RC (R | C)
+#define IU (I | U)
+#define IZ (I | Z)
+#define IUZ (I | U | Z)
+
+static const opcode_t OPCODES[] = {
+ { MP_QSTR_add, asm_rv32_opcode_add, CALL_RRR, R, 0, R, 0, R, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF },
+ { MP_QSTR_addi, asm_rv32_opcode_addi, CALL_RRI, R, 0, R, 0, I, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000FFF },
+ { MP_QSTR_and_, asm_rv32_opcode_and, CALL_RRR, R, 0, R, 0, R, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF },
+ { MP_QSTR_andi, asm_rv32_opcode_andi, CALL_RRI, R, 0, R, 0, I, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000FFF },
+ { MP_QSTR_auipc, asm_rv32_opcode_auipc, CALL_RI, R, 0, I, 12, N, 0, 0xFFFFFFFF, 0xFFFFF000, 0x00000000 },
+ { MP_QSTR_beq, asm_rv32_opcode_beq, CALL_RRL, R, 0, R, 0, L, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0x00001FFE },
+ { MP_QSTR_bge, asm_rv32_opcode_bge, CALL_RRL, R, 0, R, 0, L, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0x00001FFE },
+ { MP_QSTR_bgeu, asm_rv32_opcode_bgeu, CALL_RRL, R, 0, R, 0, L, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0x00001FFE },
+ { MP_QSTR_blt, asm_rv32_opcode_blt, CALL_RRL, R, 0, R, 0, L, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0x00001FFE },
+ { MP_QSTR_bltu, asm_rv32_opcode_bltu, CALL_RRL, R, 0, R, 0, L, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0x00001FFE },
+ { MP_QSTR_bne, asm_rv32_opcode_bne, CALL_RRL, R, 0, R, 0, L, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0x00001FFE },
+ { MP_QSTR_csrrc, asm_rv32_opcode_csrrc, CALL_RRI, R, 0, R, 0, IU, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000FFF },
+ { MP_QSTR_csrrs, asm_rv32_opcode_csrrs, CALL_RRI, R, 0, R, 0, IU, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000FFF },
+ { MP_QSTR_csrrw, asm_rv32_opcode_csrrw, CALL_RRI, R, 0, R, 0, IU, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000FFF },
+ { MP_QSTR_csrrci, asm_rv32_opcode_csrrci, CALL_RII, R, 0, IU, 0, IU, 0, 0xFFFFFFFF, 0x00000FFF, 0x0000001F },
+ { MP_QSTR_csrrsi, asm_rv32_opcode_csrrsi, CALL_RII, R, 0, IU, 0, IU, 0, 0xFFFFFFFF, 0x00000FFF, 0x0000001F },
+ { MP_QSTR_csrrwi, asm_rv32_opcode_csrrwi, CALL_RII, R, 0, IU, 0, IU, 0, 0xFFFFFFFF, 0x00000FFF, 0x0000001F },
+ { MP_QSTR_c_add, asm_rv32_opcode_cadd, CALL_RR, R, 0, R, 0, N, 0, 0xFFFFFFFE, 0xFFFFFFFE, 0x00000000 },
+ { MP_QSTR_c_addi, asm_rv32_opcode_caddi, CALL_RI, R, 0, IZ, 0, N, 0, 0xFFFFFFFE, 0x0000003F, 0x00000000 },
+ { MP_QSTR_c_addi4spn, asm_rv32_opcode_caddi4spn, CALL_RI, R, 0, IUZ, 0, N, 0, 0x0000FF00, 0x000003FC, 0x00000000 },
+ { MP_QSTR_c_and, asm_rv32_opcode_cand, CALL_RR, RC, 0, RC, 0, N, 0, 0x0000FF00, 0x0000FF00, 0x00000000 },
+ { MP_QSTR_c_andi, asm_rv32_opcode_candi, CALL_RI, RC, 0, I, 0, N, 0, 0x0000FF00, 0x0000003F, 0x00000000 },
+ { MP_QSTR_c_beqz, asm_rv32_opcode_cbeqz, CALL_RL, RC, 0, L, 0, N, 0, 0x0000FF00, 0x000001FE, 0x00000000 },
+ { MP_QSTR_c_bnez, asm_rv32_opcode_cbnez, CALL_RL, RC, 0, L, 0, N, 0, 0x0000FF00, 0x000001FE, 0x00000000 },
+ { MP_QSTR_c_ebreak, asm_rv32_opcode_cebreak, CALL_N, N, 0, N, 0, N, 0, 0x00000000, 0x00000000, 0x00000000 },
+ { MP_QSTR_c_j, asm_rv32_opcode_cj, CALL_L, L, 0, N, 0, N, 0, 0x00000FFE, 0x00000000, 0x00000000 },
+ { MP_QSTR_c_jal, asm_rv32_opcode_cjal, CALL_L, L, 0, N, 0, N, 0, 0x00000FFE, 0x00000000, 0x00000000 },
+ { MP_QSTR_c_jalr, asm_rv32_opcode_cjalr, CALL_R, R, 0, N, 0, N, 0, 0xFFFFFFFE, 0x00000000, 0x00000000 },
+ { MP_QSTR_c_jr, asm_rv32_opcode_cjr, CALL_R, R, 0, N, 0, N, 0, 0xFFFFFFFE, 0x00000000, 0x00000000 },
+ { MP_QSTR_c_li, asm_rv32_opcode_cli, CALL_RI, R, 0, I, 0, N, 0, 0xFFFFFFFE, 0x0000003F, 0x00000000 },
+ { MP_QSTR_c_lui, asm_rv32_opcode_clui, CALL_RI, R, 0, IUZ, 12, N, 0, 0xFFFFFFFA, 0x0001F800, 0x00000000 },
+ { MP_QSTR_c_lw, asm_rv32_opcode_clw, CALL_RIR, RC, 0, I, 0, RC, 0, 0x0000FF00, 0x0000007C, 0x0000FF00 },
+ { MP_QSTR_c_lwsp, asm_rv32_opcode_clwsp, CALL_RI, R, 0, I, 0, N, 0, 0xFFFFFFFE, 0x000000FC, 0x00000000 },
+ { MP_QSTR_c_mv, asm_rv32_opcode_cmv, CALL_RR, R, 0, R, 0, N, 0, 0xFFFFFFFE, 0xFFFFFFFE, 0x00000000 },
+ { MP_QSTR_c_nop, asm_rv32_opcode_cnop, CALL_N, N, 0, N, 0, N, 0, 0x00000000, 0x00000000, 0x00000000 },
+ { MP_QSTR_c_or, asm_rv32_opcode_cor, CALL_RR, RC, 0, RC, 0, N, 0, 0x0000FF00, 0x0000FF00, 0x00000000 },
+ { MP_QSTR_c_slli, asm_rv32_opcode_cslli, CALL_RI, R, 0, IU, 0, N, 0, 0xFFFFFFFE, 0x0000001F, 0x00000000 },
+ { MP_QSTR_c_srai, asm_rv32_opcode_csrai, CALL_RI, RC, 0, IU, 0, N, 0, 0x0000FF00, 0x0000001F, 0x00000000 },
+ { MP_QSTR_c_srli, asm_rv32_opcode_csrli, CALL_RI, RC, 0, IU, 0, N, 0, 0x0000FF00, 0x0000001F, 0x00000000 },
+ { MP_QSTR_c_sub, asm_rv32_opcode_csub, CALL_RR, RC, 0, RC, 0, N, 0, 0x0000FF00, 0x0000FF00, 0x00000000 },
+ { MP_QSTR_c_sw, asm_rv32_opcode_csw, CALL_RIR, RC, 0, I, 0, RC, 0, 0x0000FF00, 0x0000007C, 0x0000FF00 },
+ { MP_QSTR_c_swsp, asm_rv32_opcode_cswsp, CALL_RI, R, 0, I, 0, N, 0, 0xFFFFFFFF, 0x000000FC, 0x00000000 },
+ { MP_QSTR_c_xor, asm_rv32_opcode_cxor, CALL_RR, RC, 0, RC, 0, N, 0, 0x0000FF00, 0x0000FF00, 0x00000000 },
+ { MP_QSTR_div, asm_rv32_opcode_div, CALL_RRR, R, 0, R, 0, R, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF },
+ { MP_QSTR_divu, asm_rv32_opcode_divu, CALL_RRR, R, 0, R, 0, R, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF },
+ { MP_QSTR_ebreak, asm_rv32_opcode_ebreak, CALL_N, N, 0, N, 0, N, 0, 0x00000000, 0x00000000, 0x00000000 },
+ { MP_QSTR_ecall, asm_rv32_opcode_ecall, CALL_N, N, 0, N, 0, N, 0, 0x00000000, 0x00000000, 0x00000000 },
+ { MP_QSTR_jal, asm_rv32_opcode_jal, CALL_RL, R, 0, L, 0, N, 0, 0xFFFFFFFF, 0x001FFFFE, 0x00000000 },
+ { MP_QSTR_jalr, asm_rv32_opcode_jalr, CALL_RRI, R, 0, R, 0, I, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000FFF },
+ { MP_QSTR_la, opcode_la, CALL_RL, R, 0, L, 0, N, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000 },
+ { MP_QSTR_lb, asm_rv32_opcode_lb, CALL_RIR, R, 0, I, 0, R, 0, 0xFFFFFFFF, 0x00000FFF, 0xFFFFFFFF },
+ { MP_QSTR_lbu, asm_rv32_opcode_lbu, CALL_RIR, R, 0, I, 0, R, 0, 0xFFFFFFFF, 0x00000FFF, 0xFFFFFFFF },
+ { MP_QSTR_lh, asm_rv32_opcode_lh, CALL_RIR, R, 0, I, 0, R, 0, 0xFFFFFFFF, 0x00000FFF, 0xFFFFFFFF },
+ { MP_QSTR_lhu, asm_rv32_opcode_lhu, CALL_RIR, R, 0, I, 0, R, 0, 0xFFFFFFFF, 0x00000FFF, 0xFFFFFFFF },
+ { MP_QSTR_li, opcode_li, CALL_RI, R, 0, I, 0, N, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000 },
+ { MP_QSTR_lui, asm_rv32_opcode_lui, CALL_RI, R, 0, I, 12, N, 0, 0xFFFFFFFF, 0xFFFFF000, 0x00000000 },
+ { MP_QSTR_lw, asm_rv32_opcode_lw, CALL_RIR, R, 0, I, 0, R, 0, 0xFFFFFFFF, 0x00000FFF, 0xFFFFFFFF },
+ { MP_QSTR_mv, asm_rv32_opcode_cmv, CALL_RR, R, 0, R, 0, N, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000 },
+ { MP_QSTR_mul, asm_rv32_opcode_mul, CALL_RRR, R, 0, R, 0, R, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF },
+ { MP_QSTR_mulh, asm_rv32_opcode_mulh, CALL_RRR, R, 0, R, 0, R, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF },
+ { MP_QSTR_mulhsu, asm_rv32_opcode_mulhsu, CALL_RRR, R, 0, R, 0, R, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF },
+ { MP_QSTR_mulhu, asm_rv32_opcode_mulhu, CALL_RRR, R, 0, R, 0, R, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF },
+ { MP_QSTR_or_, asm_rv32_opcode_or, CALL_RRR, R, 0, R, 0, R, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF },
+ { MP_QSTR_ori, asm_rv32_opcode_ori, CALL_RRI, R, 0, R, 0, I, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000FFF },
+ { MP_QSTR_rem, asm_rv32_opcode_rem, CALL_RRR, R, 0, R, 0, R, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF },
+ { MP_QSTR_remu, asm_rv32_opcode_remu, CALL_RRR, R, 0, R, 0, R, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF },
+ { MP_QSTR_sb, asm_rv32_opcode_sb, CALL_RIR, R, 0, I, 0, R, 0, 0xFFFFFFFF, 0x00000FFF, 0xFFFFFFFF },
+ { MP_QSTR_sh, asm_rv32_opcode_sh, CALL_RIR, R, 0, I, 0, R, 0, 0xFFFFFFFF, 0x00000FFF, 0xFFFFFFFF },
+ { MP_QSTR_sll, asm_rv32_opcode_sll, CALL_RRR, R, 0, R, 0, R, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF },
+ { MP_QSTR_slli, asm_rv32_opcode_slli, CALL_RRI, R, 0, R, 0, IU, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0x0000001F },
+ { MP_QSTR_slt, asm_rv32_opcode_slt, CALL_RRR, R, 0, R, 0, R, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF },
+ { MP_QSTR_slti, asm_rv32_opcode_slti, CALL_RRI, R, 0, R, 0, I, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000FFF },
+ { MP_QSTR_sltiu, asm_rv32_opcode_sltiu, CALL_RRI, R, 0, R, 0, I, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000FFF },
+ { MP_QSTR_sltu, asm_rv32_opcode_sltu, CALL_RRR, R, 0, R, 0, R, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF },
+ { MP_QSTR_sra, asm_rv32_opcode_sra, CALL_RRR, R, 0, R, 0, R, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF },
+ { MP_QSTR_srai, asm_rv32_opcode_srai, CALL_RRI, R, 0, R, 0, IU, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0x0000001F },
+ { MP_QSTR_srl, asm_rv32_opcode_srl, CALL_RRR, R, 0, R, 0, R, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF },
+ { MP_QSTR_srli, asm_rv32_opcode_srli, CALL_RRI, R, 0, R, 0, IU, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0x0000001F },
+ { MP_QSTR_sub, asm_rv32_opcode_sub, CALL_RRR, R, 0, R, 0, R, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF },
+ { MP_QSTR_sw, asm_rv32_opcode_sw, CALL_RIR, R, 0, I, 0, R, 0, 0xFFFFFFFF, 0x00000FFF, 0xFFFFFFFF },
+ { MP_QSTR_xor, asm_rv32_opcode_xor, CALL_RRR, R, 0, R, 0, R, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF },
+ { MP_QSTR_xori, asm_rv32_opcode_xori, CALL_RRI, R, 0, R, 0, I, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000FFF },
+};
+
+#undef RC
+#undef IU
+#undef IZ
+#undef IUZ
+
+// These two checks assume the bitmasks are contiguous.
+
+static bool is_in_signed_mask(mp_uint_t mask, mp_uint_t value) {
+ mp_uint_t leading_zeroes = mp_clz(mask);
+ if (leading_zeroes == 0 || leading_zeroes > 32) {
+ return true;
+ }
+ mp_uint_t positive_mask = ~(mask & ~(1U << (31 - leading_zeroes)));
+ if ((value & positive_mask) == 0) {
+ return true;
+ }
+ mp_uint_t negative_mask = ~(mask >> 1);
+ mp_uint_t trailing_zeroes = mp_ctz(mask);
+ if (trailing_zeroes > 0) {
+ mp_uint_t trailing_mask = (1U << trailing_zeroes) - 1;
+ if ((value & trailing_mask) != 0) {
+ return false;
+ }
+ negative_mask &= ~trailing_mask;
+ }
+ return (value & negative_mask) == negative_mask;
+}
+
+static inline bool is_in_unsigned_mask(mp_uint_t mask, mp_uint_t value) {
+ return (value & ~mask) == 0;
+}
+
+static bool validate_integer(mp_uint_t value, mp_uint_t mask, mp_uint_t flags) {
+ if (flags & U) {
+ if (!is_in_unsigned_mask(mask, value)) {
+ return false;
+ }
+ } else {
+ if (!is_in_signed_mask(mask, value)) {
+ return false;
+ }
+ }
+
+ if ((flags & Z) && (value == 0)) {
+ return false;
+ }
+
+ return true;
+}
+
+static bool validate_argument(emit_inline_asm_t *emit, qstr opcode_qstr,
+ const opcode_t *opcode, mp_parse_node_t node, mp_uint_t node_index) {
+ assert((node_index < 3) && "Invalid argument node number.");
+
+ uint32_t kind = 0;
+ uint32_t shift = 0;
+ uint32_t mask = 0;
+
+ switch (node_index) {
+ case 0:
+ kind = opcode->argument1_kind;
+ shift = opcode->argument1_shift;
+ mask = opcode->argument1_mask;
+ break;
+
+ case 1:
+ kind = opcode->argument2_kind;
+ shift = opcode->argument2_shift;
+ mask = opcode->argument2_mask;
+ break;
+
+ case 2:
+ kind = opcode->argument3_kind;
+ shift = opcode->argument3_shift;
+ mask = opcode->argument3_mask;
+ break;
+
+ default:
+ break;
+ }
+
+ switch (kind & 0x03) {
+ case N:
+ return true;
+
+ case R: {
+ mp_uint_t register_index;
+ if (!parse_register_node(node, &register_index, false)) {
+ emit_inline_rv32_error_exc(emit,
+ mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
+ MP_ERROR_TEXT("opcode '%q' argument %d must be a register"),
+ opcode_qstr, node_index + 1));
+ return false;
+ }
+
+ if ((mask & (1U << register_index)) == 0) {
+ emit_inline_rv32_error_exc(emit,
+ mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
+ MP_ERROR_TEXT("opcode '%q' argument %d is an invalid register"),
+ opcode_qstr, node_index + 1));
+ return false;
+ }
+
+ return true;
+ }
+ break;
+
+ case I: {
+ mp_obj_t object;
+ if (!mp_parse_node_get_int_maybe(node, &object)) {
+ emit_inline_rv32_error_exc(emit,
+ mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
+ MP_ERROR_TEXT("opcode '%q' argument %d must be an integer"),
+ opcode_qstr, node_index + 1));
+ return false;
+ }
+
+ mp_uint_t immediate = mp_obj_get_int_truncated(object) << shift;
+ if (kind & U) {
+ if (!is_in_unsigned_mask(mask, immediate)) {
+ goto out_of_range;
+ }
+ } else {
+ if (!is_in_signed_mask(mask, immediate)) {
+ goto out_of_range;
+ }
+ }
+
+ if ((kind & Z) && (immediate == 0)) {
+ goto zero_immediate;
+ }
+
+ return true;
+ }
+ break;
+
+ case L: {
+ if (!MP_PARSE_NODE_IS_ID(node)) {
+ emit_inline_rv32_error_exc(emit,
+ mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
+ MP_ERROR_TEXT("opcode '%q' argument %d must be a label"),
+ opcode_qstr, node_index + 1));
+ return false;
+ }
+
+ qstr qstring;
+ mp_uint_t label_index = lookup_label(emit, node, &qstring);
+ if (label_index >= emit->max_num_labels && emit->pass == MP_PASS_EMIT) {
+ emit_inline_rv32_error_exc(emit,
+ mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
+ MP_ERROR_TEXT("opcode '%q' argument %d label '%q' is undefined"),
+ opcode_qstr, node_index + 1, qstring));
+ return false;
+ }
+
+ mp_uint_t displacement = (mp_uint_t)(label_code_offset(emit, label_index));
+ if (kind & U) {
+ if (!is_in_unsigned_mask(mask, displacement)) {
+ goto out_of_range;
+ }
+ } else {
+ if (!is_in_signed_mask(mask, displacement)) {
+ goto out_of_range;
+ }
+ }
+ return true;
+ }
+ break;
+
+ default:
+ assert(!"Unknown argument kind");
+ break;
+ }
+
+ return false;
+
+out_of_range:
+ emit_inline_rv32_error_exc(emit,
+ mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
+ MP_ERROR_TEXT("opcode '%q' argument %d is out of range"),
+ opcode_qstr, node_index + 1));
+ return false;
+
+zero_immediate:
+ emit_inline_rv32_error_exc(emit,
+ mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
+ MP_ERROR_TEXT("opcode '%q' argument %d must not be zero"),
+ opcode_qstr, node_index + 1));
+ return false;
+}
+
+static bool parse_register_offset_node(emit_inline_asm_t *emit, qstr opcode_qstr, const opcode_t *opcode_data, mp_parse_node_t node, mp_uint_t node_index, mp_parse_node_t *register_node, mp_parse_node_t *offset_node, bool *negative) {
+ assert(register_node != NULL && "Register node pointer is NULL.");
+ assert(offset_node != NULL && "Offset node pointer is NULL.");
+ assert(negative != NULL && "Negative pointer is NULL.");
+
+ if (!MP_PARSE_NODE_IS_STRUCT_KIND(node, PN_atom_expr_normal) && !MP_PARSE_NODE_IS_STRUCT_KIND(node, PN_factor_2)) {
+ goto invalid_structure;
+ }
+ mp_parse_node_struct_t *node_struct = (mp_parse_node_struct_t *)node;
+ *negative = false;
+ if (MP_PARSE_NODE_IS_STRUCT_KIND(node, PN_factor_2)) {
+ if (MP_PARSE_NODE_IS_TOKEN_KIND(node_struct->nodes[0], MP_TOKEN_OP_MINUS)) {
+ *negative = true;
+ } else {
+ if (!MP_PARSE_NODE_IS_TOKEN_KIND(node_struct->nodes[0], MP_TOKEN_OP_PLUS)) {
+ goto invalid_structure;
+ }
+ }
+ if (!MP_PARSE_NODE_IS_STRUCT_KIND(node_struct->nodes[1], PN_atom_expr_normal)) {
+ goto invalid_structure;
+ }
+ node_struct = (mp_parse_node_struct_t *)node_struct->nodes[1];
+ }
+
+ if (*negative) {
+ // If the value is negative, RULE_atom_expr_normal's first token will be the
+ // offset stripped of its negative marker; range check will then fail if the
+ // default method is used, so a custom check is used instead.
+ mp_obj_t object;
+ if (!mp_parse_node_get_int_maybe(node_struct->nodes[0], &object)) {
+ emit_inline_rv32_error_exc(emit,
+ mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
+ MP_ERROR_TEXT("opcode '%q' argument %d must be an integer"),
+ opcode_qstr, 2));
+ return false;
+ }
+ mp_uint_t value = mp_obj_get_int_truncated(object);
+ value = (~value + 1) & (mp_uint_t)-1;
+ if (!validate_integer(value << opcode_data->argument2_shift, opcode_data->argument2_mask, opcode_data->argument2_kind)) {
+ emit_inline_rv32_error_exc(emit,
+ mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
+ MP_ERROR_TEXT("opcode '%q' argument %d is out of range"),
+ opcode_qstr, 2));
+ return false;
+ }
+ } else {
+ if (!validate_argument(emit, opcode_qstr, opcode_data, node_struct->nodes[0], 1)) {
+ return false;
+ }
+ }
+
+ *offset_node = node_struct->nodes[0];
+ node_struct = (mp_parse_node_struct_t *)node_struct->nodes[1];
+ if (!validate_argument(emit, opcode_qstr, opcode_data, node_struct->nodes[0], 2)) {
+ return false;
+ }
+ *register_node = node_struct->nodes[0];
+ return true;
+
+invalid_structure:
+ emit_inline_rv32_error_exc(emit,
+ mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
+ MP_ERROR_TEXT("opcode '%q' argument %d must be an integer offset to a register"),
+ opcode_qstr, node_index + 1));
+ return false;
+}
+
+static void handle_opcode(emit_inline_asm_t *emit, qstr opcode, const opcode_t *opcode_data, mp_parse_node_t *arguments) {
+ mp_uint_t rd = 0;
+ mp_uint_t rs1 = 0;
+ mp_uint_t rs2 = 0;
+
+ switch (opcode_data->calling_convention) {
+ case CALL_RRR: {
+ parse_register_node(arguments[0], &rd, opcode_data->argument1_kind & C);
+ parse_register_node(arguments[1], &rs1, opcode_data->argument2_kind & C);
+ parse_register_node(arguments[2], &rs2, opcode_data->argument3_kind & C);
+ ((call_rrr_t)opcode_data->emitter)(&emit->as, rd, rs1, rs2);
+ break;
+ }
+
+ case CALL_RR: {
+ parse_register_node(arguments[0], &rd, opcode_data->argument1_kind & C);
+ parse_register_node(arguments[1], &rs1, opcode_data->argument2_kind & C);
+ ((call_rr_t)opcode_data->emitter)(&emit->as, rd, rs1);
+ break;
+ }
+
+ case CALL_RRI: {
+ parse_register_node(arguments[0], &rd, opcode_data->argument1_kind & C);
+ parse_register_node(arguments[1], &rs1, opcode_data->argument2_kind & C);
+ mp_obj_t object;
+ mp_parse_node_get_int_maybe(arguments[2], &object);
+ mp_uint_t immediate = mp_obj_get_int_truncated(object) << opcode_data->argument3_shift;
+ ((call_rri_t)opcode_data->emitter)(&emit->as, rd, rs1, immediate);
+ break;
+ }
+
+ case CALL_RI: {
+ parse_register_node(arguments[0], &rd, opcode_data->argument1_kind & C);
+ mp_obj_t object;
+ mp_parse_node_get_int_maybe(arguments[1], &object);
+ mp_uint_t immediate = mp_obj_get_int_truncated(object) << opcode_data->argument2_shift;
+ ((call_ri_t)opcode_data->emitter)(&emit->as, rd, immediate);
+ break;
+ }
+
+ case CALL_R: {
+ parse_register_node(arguments[0], &rd, opcode_data->argument1_kind & C);
+ ((call_r_t)opcode_data->emitter)(&emit->as, rd);
+ break;
+ }
+
+ case CALL_RRL: {
+ parse_register_node(arguments[0], &rd, opcode_data->argument1_kind & C);
+ parse_register_node(arguments[1], &rs1, opcode_data->argument2_kind & C);
+ qstr qstring;
+ mp_uint_t label_index = lookup_label(emit, arguments[2], &qstring);
+ ptrdiff_t displacement = label_code_offset(emit, label_index);
+ ((call_rri_t)opcode_data->emitter)(&emit->as, rd, rs1, displacement);
+ break;
+ }
+
+ case CALL_RL: {
+ parse_register_node(arguments[0], &rd, opcode_data->argument1_kind & C);
+ qstr qstring;
+ mp_uint_t label_index = lookup_label(emit, arguments[1], &qstring);
+ ptrdiff_t displacement = label_code_offset(emit, label_index);
+ ((call_ri_t)opcode_data->emitter)(&emit->as, rd, displacement);
+ break;
+ }
+
+ case CALL_L: {
+ qstr qstring;
+ mp_uint_t label_index = lookup_label(emit, arguments[0], &qstring);
+ ptrdiff_t displacement = label_code_offset(emit, label_index);
+ ((call_i_t)opcode_data->emitter)(&emit->as, displacement);
+ break;
+ }
+
+ case CALL_N:
+ ((call_n_t)opcode_data->emitter)(&emit->as);
+ break;
+
+ case CALL_I: {
+ mp_obj_t object;
+ mp_parse_node_get_int_maybe(arguments[0], &object);
+ mp_uint_t immediate = mp_obj_get_int_truncated(object) << opcode_data->argument1_shift;
+ ((call_i_t)opcode_data->emitter)(&emit->as, immediate);
+ break;
+ }
+
+ case CALL_RII: {
+ parse_register_node(arguments[0], &rd, opcode_data->argument1_kind & C);
+ mp_obj_t object;
+ mp_parse_node_get_int_maybe(arguments[1], &object);
+ mp_uint_t immediate1 = mp_obj_get_int_truncated(object) << opcode_data->argument2_shift;
+ mp_parse_node_get_int_maybe(arguments[2], &object);
+ mp_uint_t immediate2 = mp_obj_get_int_truncated(object) << opcode_data->argument3_shift;
+ ((call_rii_t)opcode_data->emitter)(&emit->as, rd, immediate1, immediate2);
+ break;
+ }
+
+ case CALL_RIR:
+ assert(!"Should not get here.");
+ break;
+
+ default:
+ assert(!"Unhandled call convention.");
+ break;
+ }
+}
+
+static bool handle_load_store_opcode_with_offset(emit_inline_asm_t *emit, qstr opcode, const opcode_t *opcode_data, mp_parse_node_t *argument_nodes) {
+ mp_parse_node_t nodes[3] = {0};
+ if (!validate_argument(emit, opcode, opcode_data, argument_nodes[0], 0)) {
+ return false;
+ }
+ nodes[0] = argument_nodes[0];
+ bool negative = false;
+ if (!parse_register_offset_node(emit, opcode, opcode_data, argument_nodes[1], 1, &nodes[1], &nodes[2], &negative)) {
+ return false;
+ }
+
+ mp_uint_t rd;
+ mp_uint_t rs1;
+ parse_register_node(nodes[0], &rd, opcode_data->argument1_kind & C);
+ if (!parse_register_node(nodes[1], &rs1, opcode_data->argument3_kind & C)) {
+ return false;
+ }
+
+ mp_obj_t object;
+ mp_parse_node_get_int_maybe(nodes[2], &object);
+ mp_uint_t immediate = mp_obj_get_int_truncated(object) << opcode_data->argument2_shift;
+ if (negative) {
+ immediate = (~immediate + 1) & (mp_uint_t)-1;
+ }
+ if (!is_in_signed_mask(opcode_data->argument2_mask, immediate)) {
+ emit_inline_rv32_error_exc(emit,
+ mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
+ MP_ERROR_TEXT("opcode '%q' argument %d is out of range"),
+ opcode, 2));
+ return false;
+ }
+
+ ((call_rri_t)opcode_data->emitter)(&emit->as, rd, rs1, immediate);
+ return true;
+}
+
+static void emit_inline_rv32_opcode(emit_inline_asm_t *emit, qstr opcode, mp_uint_t arguments_count, mp_parse_node_t *argument_nodes) {
+ const opcode_t *opcode_data = NULL;
+ for (mp_uint_t index = 0; index < MP_ARRAY_SIZE(OPCODES); index++) {
+ if (OPCODES[index].qstring == opcode) {
+ opcode_data = &OPCODES[index];
+ break;
+ }
+ }
+
+ if (!opcode_data) {
+ emit_inline_rv32_error_exc(emit,
+ mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
+ MP_ERROR_TEXT("unsupported or unknown RV32 instruction '%q'."), opcode));
+ return;
+ }
+
+ size_t required_arguments = 0;
+ if (opcode_data->argument1_kind != N) {
+ required_arguments++;
+ }
+ if (opcode_data->argument2_kind != N) {
+ required_arguments++;
+ }
+ if (opcode_data->argument3_kind != N) {
+ required_arguments++;
+ }
+
+ if (opcode_data->calling_convention != CALL_RIR) {
+ if (required_arguments != arguments_count) {
+ emit_inline_rv32_error_exc(emit,
+ mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
+ MP_ERROR_TEXT("RV32 instruction '%q' requires %d arguments."), opcode, required_arguments));
+ return;
+ }
+ if (required_arguments >= 1 && !validate_argument(emit, opcode, opcode_data, argument_nodes[0], 0)) {
+ return;
+ }
+ if (required_arguments >= 2 && !validate_argument(emit, opcode, opcode_data, argument_nodes[1], 1)) {
+ return;
+ }
+ if (required_arguments >= 3 && !validate_argument(emit, opcode, opcode_data, argument_nodes[2], 2)) {
+ return;
+ }
+ handle_opcode(emit, opcode, opcode_data, argument_nodes);
+ return;
+ }
+
+ assert(required_arguments == 3 && "Invalid arguments count for calling convention.");
+ assert((opcode_data->argument2_kind & U) == 0 && "Offset must not be unsigned.");
+ assert((opcode_data->argument2_kind & Z) == 0 && "Offset can be zero.");
+
+ if (arguments_count != 2) {
+ emit_inline_rv32_error_exc(emit,
+ mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
+ MP_ERROR_TEXT("RV32 instruction '%q' requires %d arguments."), opcode, 2));
+ return;
+ }
+
+ handle_load_store_opcode_with_offset(emit, opcode, opcode_data, argument_nodes);
+}
+
+#undef N
+#undef R
+#undef I
+#undef L
+#undef C
+#undef U
+
+const emit_inline_asm_method_table_t emit_inline_rv32_method_table = {
+ #if MICROPY_DYNAMIC_COMPILER
+ emit_inline_rv32_new,
+ emit_inline_rv32_free,
+ #endif
+
+ emit_inline_rv32_start_pass,
+ emit_inline_rv32_end_pass,
+ emit_inline_rv32_count_params,
+ emit_inline_rv32_label,
+ emit_inline_rv32_opcode,
+};
+
+#endif // MICROPY_EMIT_INLINE_RV32
diff --git a/py/mpconfig.h b/py/mpconfig.h
index 8598eaa5b..e84d258a1 100644
--- a/py/mpconfig.h
+++ b/py/mpconfig.h
@@ -411,6 +411,11 @@
#define MICROPY_EMIT_RV32 (0)
#endif
+// Whether to enable the RISC-V RV32 inline assembler
+#ifndef MICROPY_EMIT_INLINE_RV32
+#define MICROPY_EMIT_INLINE_RV32 (0)
+#endif
+
// Convenience definition for whether any native emitter is enabled
#define MICROPY_EMIT_NATIVE (MICROPY_EMIT_X64 || MICROPY_EMIT_X86 || MICROPY_EMIT_THUMB || MICROPY_EMIT_ARM || MICROPY_EMIT_XTENSA || MICROPY_EMIT_XTENSAWIN || MICROPY_EMIT_RV32 || MICROPY_EMIT_NATIVE_DEBUG)
@@ -420,7 +425,7 @@
#define MICROPY_EMIT_NATIVE_PRELUDE_SEPARATE_FROM_MACHINE_CODE (MICROPY_EMIT_XTENSAWIN)
// Convenience definition for whether any inline assembler emitter is enabled
-#define MICROPY_EMIT_INLINE_ASM (MICROPY_EMIT_INLINE_THUMB || MICROPY_EMIT_INLINE_XTENSA)
+#define MICROPY_EMIT_INLINE_ASM (MICROPY_EMIT_INLINE_THUMB || MICROPY_EMIT_INLINE_XTENSA || MICROPY_EMIT_INLINE_RV32)
// Convenience definition for whether any native or inline assembler emitter is enabled
#define MICROPY_EMIT_MACHINE_CODE (MICROPY_EMIT_NATIVE || MICROPY_EMIT_INLINE_ASM)
diff --git a/py/py.cmake b/py/py.cmake
index dd94f6a59..0fee74ddc 100644
--- a/py/py.cmake
+++ b/py/py.cmake
@@ -24,6 +24,7 @@ set(MICROPY_SOURCE_PY
${MICROPY_PY_DIR}/emitbc.c
${MICROPY_PY_DIR}/emitcommon.c
${MICROPY_PY_DIR}/emitglue.c
+ ${MICROPY_PY_DIR}/emitinlinerv32.c
${MICROPY_PY_DIR}/emitinlinethumb.c
${MICROPY_PY_DIR}/emitinlinextensa.c
${MICROPY_PY_DIR}/emitnarm.c
diff --git a/py/py.mk b/py/py.mk
index 9592fbb91..c0b7e1ac8 100644
--- a/py/py.mk
+++ b/py/py.mk
@@ -123,6 +123,7 @@ PY_CORE_O_BASENAME = $(addprefix py/,\
emitnxtensawin.o \
asmrv32.o \
emitnrv32.o \
+ emitinlinerv32.o \
emitndebug.o \
formatfloat.o \
parsenumbase.o \
diff --git a/tests/inlineasm/rv32/asmargs.py b/tests/inlineasm/rv32/asmargs.py
new file mode 100644
index 000000000..78afd5111
--- /dev/null
+++ b/tests/inlineasm/rv32/asmargs.py
@@ -0,0 +1,44 @@
+# test passing arguments
+
+
+@micropython.asm_rv32
+def arg0():
+ c_li(a0, 1)
+
+
+print(arg0())
+
+
+@micropython.asm_rv32
+def arg1(a0):
+ addi(a0, a0, 1)
+
+
+print(arg1(1))
+
+
+@micropython.asm_rv32
+def arg2(a0, a1):
+ add(a0, a0, a1)
+
+
+print(arg2(1, 2))
+
+
+@micropython.asm_rv32
+def arg3(a0, a1, a2):
+ add(a0, a0, a1)
+ add(a0, a0, a2)
+
+
+print(arg3(1, 2, 3))
+
+
+@micropython.asm_rv32
+def arg4(a0, a1, a2, a3):
+ add(a0, a0, a1)
+ add(a0, a0, a2)
+ add(a0, a0, a3)
+
+
+print(arg4(1, 2, 3, 4))
diff --git a/tests/inlineasm/rv32/asmargs.py.exp b/tests/inlineasm/rv32/asmargs.py.exp
new file mode 100644
index 000000000..e33a6964f
--- /dev/null
+++ b/tests/inlineasm/rv32/asmargs.py.exp
@@ -0,0 +1,5 @@
+1
+2
+3
+6
+10
diff --git a/tests/inlineasm/rv32/asmarith.py b/tests/inlineasm/rv32/asmarith.py
new file mode 100644
index 000000000..8b864c0b3
--- /dev/null
+++ b/tests/inlineasm/rv32/asmarith.py
@@ -0,0 +1,79 @@
+# test arithmetic opcodes
+
+
+@micropython.asm_rv32
+def f1():
+ li(a0, 0x100)
+ li(a1, 1)
+ add(a0, a0, a1)
+ addi(a0, a0, 1)
+ addi(a0, a0, -2)
+ sub(a0, a0, a1)
+ c_add(a0, a1)
+ c_addi(a0, -1)
+ c_sub(a0, a1)
+
+
+print(hex(f1()))
+
+
+@micropython.asm_rv32
+def f2():
+ li(a0, 0x10FF)
+ li(a1, 1)
+ and_(a2, a0, a1)
+ andi(a3, a0, 0x10)
+ or_(a2, a2, a3)
+ ori(a2, a2, 8)
+ li(a1, 0x200)
+ c_or(a2, a1)
+ li(a1, 0xF0)
+ mv(a0, a2)
+ c_and(a0, a1)
+ li(a1, 0x101)
+ xor(a0, a0, a1)
+ xori(a0, a0, 0x101)
+ c_xor(a0, a1)
+
+
+print(hex(f2()))
+
+
+@micropython.asm_rv32
+def f3(a0, a1):
+ slt(a0, a0, a1)
+
+
+print(f3(0xFFFFFFF0, 0xFFFFFFF1))
+print(f3(0x0, 0xFFFFFFF1))
+print(f3(0xFFFFFFF1, 0xFFFFFFF1))
+print(f3(0xFFFFFFF1, 0xFFFFFFF0))
+
+
+@micropython.asm_rv32
+def f4(a0, a1):
+ sltu(a0, a0, a1)
+
+
+print(f4(0xFFFFFFF0, 0xFFFFFFF1))
+print(f4(0x0, 0xFFFFFFF1))
+print(f4(0xFFFFFFF1, 0xFFFFFFF1))
+print(f4(0xFFFFFFF1, 0xFFFFFFF0))
+
+
+@micropython.asm_rv32
+def f5(a0):
+ slti(a0, a0, -2)
+
+
+print(f5(-1))
+print(f5(-3))
+
+
+@micropython.asm_rv32
+def f6(a0):
+ sltiu(a0, a0, -2)
+
+
+print(f6(-1))
+print(f6(-3))
diff --git a/tests/inlineasm/rv32/asmarith.py.exp b/tests/inlineasm/rv32/asmarith.py.exp
new file mode 100644
index 000000000..7da4dd5c9
--- /dev/null
+++ b/tests/inlineasm/rv32/asmarith.py.exp
@@ -0,0 +1,14 @@
+0xfe
+0x111
+1
+0
+0
+0
+1
+1
+0
+0
+0
+1
+0
+1
diff --git a/tests/inlineasm/rv32/asmbranch.py b/tests/inlineasm/rv32/asmbranch.py
new file mode 100644
index 000000000..d7d059d40
--- /dev/null
+++ b/tests/inlineasm/rv32/asmbranch.py
@@ -0,0 +1,161 @@
+# test branch instructions
+
+
+@micropython.asm_rv32
+def tbeq(a0):
+ mv(a1, a0)
+
+ li(a0, 10)
+ li(a2, 1)
+ beq(a1, a2, end)
+
+ li(a0, 20)
+ li(a2, 2)
+ beq(a1, a2, end)
+
+ li(a0, 30)
+ li(a2, 3)
+ beq(a1, a2, end)
+
+ li(a0, 0)
+
+ label(end)
+
+
+print(tbeq(0))
+print(tbeq(1))
+print(tbeq(2))
+print(tbeq(3))
+
+
+@micropython.asm_rv32
+def tbne(a0):
+ mv(a1, a0)
+
+ li(a0, 10)
+ li(a2, 1)
+ bne(a1, a2, end)
+
+ li(a0, 20)
+ li(a2, 2)
+ bne(a1, a2, end)
+
+ li(a0, 30)
+ li(a2, 3)
+ bne(a1, a2, end)
+
+ li(a0, 0)
+
+ label(end)
+
+
+print(tbne(0))
+print(tbne(1))
+print(tbne(2))
+print(tbne(3))
+
+
+@micropython.asm_rv32
+def tbgeu(a0):
+ mv(a1, a0)
+
+ li(a0, 1)
+ li(a2, 2)
+ bgeu(a1, a2, end)
+ li(a0, 0)
+
+ label(end)
+
+
+print(tbgeu(0))
+print(tbgeu(1))
+print(tbgeu(2))
+print(tbgeu(3))
+
+
+@micropython.asm_rv32
+def tbltu(a0):
+ mv(a1, a0)
+
+ li(a0, 1)
+ li(a2, 2)
+ bltu(a1, a2, end)
+ li(a0, 0)
+
+ label(end)
+
+
+print(tbltu(0))
+print(tbltu(1))
+print(tbltu(2))
+print(tbltu(3))
+
+
+@micropython.asm_rv32
+def tbge(a0):
+ mv(a1, a0)
+
+ li(a0, 1)
+ li(a2, -2)
+ bge(a1, a2, end)
+ li(a0, 0)
+
+ label(end)
+
+
+print(tbge(-3))
+print(tbge(-2))
+print(tbge(-1))
+print(tbge(0))
+
+
+@micropython.asm_rv32
+def tblt(a0):
+ mv(a1, a0)
+
+ li(a0, 1)
+ li(a2, -2)
+ blt(a1, a2, end)
+ li(a0, 0)
+
+ label(end)
+
+
+print(tblt(-3))
+print(tblt(-2))
+print(tblt(-1))
+print(tblt(0))
+
+
+@micropython.asm_rv32
+def tcbeqz(a0):
+ mv(a1, a0)
+
+ li(a0, 1)
+ c_beqz(a1, end)
+ li(a0, 0)
+
+ label(end)
+
+
+print(tcbeqz(0))
+print(tcbeqz(1))
+print(tcbeqz(2))
+print(tcbeqz(3))
+
+
+@micropython.asm_rv32
+def tcbnez(a0):
+ mv(a1, a0)
+
+ li(a0, 1)
+ c_bnez(a1, end)
+ li(a0, 0)
+
+ label(end)
+
+
+print(tcbnez(0))
+print(tcbnez(1))
+print(tcbnez(2))
+print(tcbnez(3))
diff --git a/tests/inlineasm/rv32/asmbranch.py.exp b/tests/inlineasm/rv32/asmbranch.py.exp
new file mode 100644
index 000000000..baae69149
--- /dev/null
+++ b/tests/inlineasm/rv32/asmbranch.py.exp
@@ -0,0 +1,32 @@
+0
+10
+20
+30
+10
+20
+10
+10
+0
+0
+1
+1
+1
+1
+0
+0
+0
+1
+1
+1
+1
+0
+0
+0
+1
+0
+0
+0
+0
+1
+1
+1
diff --git a/tests/inlineasm/rv32/asmconst.py b/tests/inlineasm/rv32/asmconst.py
new file mode 100644
index 000000000..2b6363a43
--- /dev/null
+++ b/tests/inlineasm/rv32/asmconst.py
@@ -0,0 +1,49 @@
+# test constants in assembler
+
+
+@micropython.asm_rv32
+def c1():
+ li(a0, 0xFFFFFFFF)
+ li(a1, 0xF0000000)
+ sub(a0, a0, a1)
+
+
+print(hex(c1()))
+
+
+@micropython.asm_rv32
+def c2():
+ lui(a0, 0x12345)
+ li(a1, 0x678)
+ add(a0, a0, a1)
+
+
+print(hex(c2()))
+
+
+@micropython.asm_rv32
+def c3() -> uint:
+ lui(a0, 0)
+ addi(a0, a0, 0x7FF)
+
+
+print(hex(c3()))
+
+
+@micropython.asm_rv32
+def c4() -> uint:
+ lui(a0, 0)
+ addi(a0, a0, -1)
+
+
+print(hex(c4()))
+
+
+@micropython.asm_rv32
+def c5():
+ c_lui(a0, 1)
+ c_li(a1, 1)
+ c_add(a0, a1)
+
+
+print(hex(c5()))
diff --git a/tests/inlineasm/rv32/asmconst.py.exp b/tests/inlineasm/rv32/asmconst.py.exp
new file mode 100644
index 000000000..0c713a841
--- /dev/null
+++ b/tests/inlineasm/rv32/asmconst.py.exp
@@ -0,0 +1,5 @@
+0xfffffff
+0x12345678
+0x7ff
+0xffffffff
+0x1001
diff --git a/tests/inlineasm/rv32/asmcsr.py b/tests/inlineasm/rv32/asmcsr.py
new file mode 100644
index 000000000..f27e2aa5e
--- /dev/null
+++ b/tests/inlineasm/rv32/asmcsr.py
@@ -0,0 +1,65 @@
+# test csr instructions
+
+# CSR 0x340 is `mscratch`. This test suite is only safe to run on a system
+# where it is known that there is no other code that can read from or write
+# to that register. The qemu port is one such system, as the CSR is only
+# accessed when a machine exception occurs, and at that point it doesn't matter
+# anymore whether these tests are running or not.
+
+
+@micropython.asm_rv32
+def csr():
+ li(a0, 0)
+ csrrw(zero, zero, 0x340) # All zeroes
+ csrrs(a1, zero, 0x340) # Read zeroes
+ c_bnez(a1, end)
+ addi(a0, a0, 1)
+ li(a1, 0xA5A5A5A5)
+ li(a2, 0x5A5A5A5A)
+ csrrs(a2, a1, 0x340) # Read zeroes, set 0xA5A5A5A5
+ c_bnez(a2, end)
+ addi(a0, a0, 1)
+ csrrs(a3, zero, 0x340) # Read 0xA5A5A5A5
+ bne(a3, a1, end)
+ addi(a0, a0, 1)
+ li(a2, 0xF0F0F0F0)
+ csrrc(zero, a2, 0x340) # Clear upper half
+ csrrs(a3, zero, 0x340) # Read 0x05050505
+ xori(a2, a2, -1)
+ and_(a2, a1, a2)
+ bne(a2, a3, end)
+ addi(a0, a0, 1)
+ label(end)
+
+
+print(csr())
+
+
+@micropython.asm_rv32
+def csri():
+ li(a0, 0)
+ csrrwi(zero, 0x340, 15) # Write 0xF
+ csrrs(a1, zero, 0x340) # Read 0xF
+ csrrsi(a2, 0x340, 0) # Read
+ bne(a1, a2, end)
+ addi(a0, a0, 1)
+ csrrci(a2, 0x340, 0) # Read
+ bne(a1, a2, end)
+ addi(a0, a0, 1)
+ li(a2, 15)
+ bne(a1, a2, end)
+ addi(a0, a0, 1)
+ csrrci(zero, 0x340, 1) # Clear bit 1
+ csrrs(a1, zero, 0x340) # Read 0xE
+ li(a2, 14)
+ bne(a1, a2, end)
+ addi(a0, a0, 1)
+ csrrsi(zero, 0x340, 1) # Set bit 1
+ csrrs(a1, zero, 0x340) # Read 0xF
+ li(a2, 15)
+ bne(a1, a2, end)
+ addi(a0, a0, 1)
+ label(end)
+
+
+print(csri())
diff --git a/tests/inlineasm/rv32/asmcsr.py.exp b/tests/inlineasm/rv32/asmcsr.py.exp
new file mode 100644
index 000000000..61c83cba4
--- /dev/null
+++ b/tests/inlineasm/rv32/asmcsr.py.exp
@@ -0,0 +1,2 @@
+4
+5
diff --git a/tests/inlineasm/rv32/asmdata.py b/tests/inlineasm/rv32/asmdata.py
new file mode 100644
index 000000000..5e555ef4b
--- /dev/null
+++ b/tests/inlineasm/rv32/asmdata.py
@@ -0,0 +1,33 @@
+# test the "data" directive
+
+
+@micropython.asm_rv32
+def ret_num(a0) -> uint:
+ slli(a0, a0, 2)
+ addi(a0, a0, 16)
+ auipc(a1, 0)
+ add(a1, a1, a0)
+ lw(a0, 0(a1))
+ jal(zero, HERE)
+ data(4, 0x12345678, 0x20000000, 0x40000000, 0x7FFFFFFF + 1, (1 << 32) - 2)
+ label(HERE)
+
+
+for i in range(5):
+ print(hex(ret_num(i)))
+
+
+@micropython.asm_rv32
+def ret_num_la(a0) -> uint:
+ slli(a0, a0, 2)
+ la(a1, DATA)
+ add(a1, a1, a0)
+ lw(a0, 0(a1))
+ jal(zero, HERE)
+ label(DATA)
+ data(4, 0x12345678, 0x20000000, 0x40000000, 0x7FFFFFFF + 1, (1 << 32) - 2)
+ label(HERE)
+
+
+for i in range(5):
+ print(hex(ret_num_la(i)))
diff --git a/tests/inlineasm/rv32/asmdata.py.exp b/tests/inlineasm/rv32/asmdata.py.exp
new file mode 100644
index 000000000..79e92bdfa
--- /dev/null
+++ b/tests/inlineasm/rv32/asmdata.py.exp
@@ -0,0 +1,10 @@
+0x12345678
+0x20000000
+0x40000000
+0x80000000
+0xfffffffe
+0x12345678
+0x20000000
+0x40000000
+0x80000000
+0xfffffffe
diff --git a/tests/inlineasm/rv32/asmdivmul.py b/tests/inlineasm/rv32/asmdivmul.py
new file mode 100644
index 000000000..e1120c6f6
--- /dev/null
+++ b/tests/inlineasm/rv32/asmdivmul.py
@@ -0,0 +1,63 @@
+@micropython.asm_rv32
+def sdiv(a0, a1):
+ div(a0, a0, a1)
+
+
+@micropython.asm_rv32
+def udiv(a0, a1):
+ divu(a0, a0, a1)
+
+
+@micropython.asm_rv32
+def srem(a0, a1):
+ rem(a0, a0, a1)
+
+
+@micropython.asm_rv32
+def urem(a0, a1):
+ remu(a0, a0, a1)
+
+
+print(sdiv(1234, 3))
+print(sdiv(-1234, 3))
+print(sdiv(1234, -3))
+print(sdiv(-1234, -3))
+
+print(udiv(1234, 3))
+print(udiv(0xFFFFFFFF, 0x7FFFFFFF))
+print(udiv(0xFFFFFFFF, 0xFFFFFFFF))
+
+print(srem(1234, 3))
+print(srem(-1234, 3))
+print(srem(1234, -3))
+print(srem(-1234, -3))
+
+print(urem(1234, 3))
+print(urem(0xFFFFFFFF, 0x7FFFFFFF))
+print(urem(0xFFFFFFFF, 0xFFFFFFFF))
+
+
+@micropython.asm_rv32
+def m1(a0, a1):
+ mul(a0, a0, a1)
+
+
+@micropython.asm_rv32
+def m2(a0, a1):
+ mulh(a0, a0, a1)
+
+
+@micropython.asm_rv32
+def m3(a0, a1):
+ mulhu(a0, a0, a1)
+
+
+@micropython.asm_rv32
+def m4(a0, a1):
+ mulhsu(a0, a0, a1)
+
+
+print(m1(0xFFFFFFFF, 2))
+print(m2(0xFFFFFFFF, 0xFFFFFFF0))
+print(m3(0xFFFFFFFF, 0xFFFFFFF0))
+print(m4(0xFFFFFFFF, 0xFFFFFFF0))
diff --git a/tests/inlineasm/rv32/asmdivmul.py.exp b/tests/inlineasm/rv32/asmdivmul.py.exp
new file mode 100644
index 000000000..60d28635f
--- /dev/null
+++ b/tests/inlineasm/rv32/asmdivmul.py.exp
@@ -0,0 +1,18 @@
+411
+-411
+-411
+411
+411
+2
+1
+1
+-1
+1
+-1
+1
+1
+0
+-2
+0
+-17
+-1
diff --git a/tests/inlineasm/rv32/asmjump.py b/tests/inlineasm/rv32/asmjump.py
new file mode 100644
index 000000000..fe87d3f96
--- /dev/null
+++ b/tests/inlineasm/rv32/asmjump.py
@@ -0,0 +1,115 @@
+@micropython.asm_rv32
+def f1():
+ li(a0, 0)
+ la(a1, END)
+ c_jr(a1)
+ c_addi(a0, 1)
+ c_addi(a0, 1)
+ c_addi(a0, 1)
+ c_addi(a0, 1)
+ c_addi(a0, 1)
+ label(END)
+
+
+print(f1())
+
+
+@micropython.asm_rv32
+def f2():
+ addi(sp, sp, -4)
+ c_swsp(ra, 0)
+ li(ra, 0)
+ li(a0, 0)
+ c_jal(END)
+ c_addi(a0, 1)
+ c_addi(a0, 1)
+ c_addi(a0, 1)
+ c_addi(a0, 1)
+ c_addi(a0, 1)
+ label(END)
+ bne(ra, zero, SUCCESS)
+ c_addi(a0, 2)
+ label(SUCCESS)
+ c_lwsp(ra, 0)
+ addi(sp, sp, 4)
+
+
+print(f2())
+
+
+@micropython.asm_rv32
+def f3():
+ li(a0, 0)
+ c_j(END)
+ c_addi(a0, 1)
+ c_addi(a0, 1)
+ c_addi(a0, 1)
+ c_addi(a0, 1)
+ c_addi(a0, 1)
+ label(END)
+
+
+print(f3())
+
+
+@micropython.asm_rv32
+def f4():
+ addi(sp, sp, -4)
+ c_swsp(ra, 0)
+ li(ra, 0)
+ li(a0, 0)
+ la(a1, END)
+ c_jalr(a1)
+ c_addi(a0, 1)
+ c_addi(a0, 1)
+ c_addi(a0, 1)
+ c_addi(a0, 1)
+ c_addi(a0, 1)
+ label(END)
+ bne(ra, zero, SUCCESS)
+ c_addi(a0, 2)
+ label(SUCCESS)
+ c_lwsp(ra, 0)
+ addi(sp, sp, 4)
+
+
+print(f4())
+
+
+@micropython.asm_rv32
+def f5():
+ li(a0, 0)
+ li(a1, 0)
+ jal(a1, END)
+ c_addi(a0, 1)
+ c_addi(a0, 1)
+ c_addi(a0, 1)
+ c_addi(a0, 1)
+ c_addi(a0, 1)
+ label(END)
+ bne(a1, zero, SUCCESS)
+ c_addi(a0, 2)
+ label(SUCCESS)
+
+
+print(f5())
+
+
+@micropython.asm_rv32
+def f6():
+ li(a0, 0)
+ la(a1, JUMP)
+ li(a2, 0)
+ jalr(a2, a1, 10)
+ label(JUMP)
+ c_addi(a0, 1)
+ c_addi(a0, 1)
+ c_addi(a0, 1)
+ c_addi(a0, 1)
+ c_addi(a0, 1)
+ bne(a2, zero, SUCCESS)
+ c_addi(a0, 2)
+ label(SUCCESS)
+
+
+print(f6())
diff --git a/tests/inlineasm/rv32/asmjump.py.exp b/tests/inlineasm/rv32/asmjump.py.exp
new file mode 100644
index 000000000..f7eb44d66
--- /dev/null
+++ b/tests/inlineasm/rv32/asmjump.py.exp
@@ -0,0 +1,6 @@
+0
+0
+0
+0
+0
+0
diff --git a/tests/inlineasm/rv32/asmloadstore.py b/tests/inlineasm/rv32/asmloadstore.py
new file mode 100644
index 000000000..2c49e07b4
--- /dev/null
+++ b/tests/inlineasm/rv32/asmloadstore.py
@@ -0,0 +1,86 @@
+# test load/store opcodes
+
+
+@micropython.asm_rv32
+def l():
+ li(a5, 4)
+ addi(sp, sp, -12)
+ li(a0, 0x123)
+ c_swsp(a0, 0)
+ addi(a1, a0, 0x111)
+ c_swsp(a1, 4)
+ addi(a2, a1, 0x111)
+ c_swsp(a2, 8)
+ mv(a4, sp)
+ c_lw(a3, 0(a4))
+ bne(a3, a0, END)
+ addi(a5, a5, -1)
+ lw(a3, 4(a4))
+ bne(a3, a1, END)
+ addi(a5, a5, -1)
+ lhu(a3, 8(a4))
+ bne(a3, a2, END)
+ addi(a5, a5, -1)
+ lbu(a0, 8(a4))
+ addi(a0, a0, 0x300)
+ bne(a0, a2, END)
+ addi(a5, a5, -1)
+ label(END)
+ addi(sp, sp, 12)
+ mv(a0, a5)
+
+
+print(l())
+
+
+@micropython.asm_rv32
+def s():
+ li(a5, 4)
+ addi(sp, sp, -12)
+ c_swsp(zero, 0)
+ c_swsp(zero, 4)
+ c_swsp(zero, 8)
+ li(a0, 0x12345)
+ mv(a4, sp)
+ c_sw(a0, 0(a4))
+ sh(a0, 4(a4))
+ sb(a0, 8(a4))
+ li(a1, 0xFFFF)
+ and_(a1, a0, a1)
+ andi(a2, a0, 0xFF)
+ lw(a3, 0(sp))
+ bne(a3, a0, END)
+ addi(a5, a5, -1)
+ lw(a3, 4(sp))
+ bne(a3, a1, END)
+ addi(a5, a5, -1)
+ lw(a3, 8(sp))
+ bne(a3, a2, END)
+ addi(a5, a5, -1)
+ label(END)
+ addi(sp, sp, 12)
+ mv(a0, a5)
+
+
+print(s())
+
+
+@micropython.asm_rv32
+def lu():
+ li(a5, 4)
+ addi(sp, sp, -8)
+ li(a0, 0xF1234567)
+ c_swsp(a0, 0)
+ c_swsp(a0, 4)
+ lh(a1, 0(sp))
+ blt(a1, zero, END)
+ addi(a5, a5, -1)
+ lb(a2, 4(sp))
+ blt(a2, zero, END)
+ addi(a5, a5, -1)
+ label(END)
+ addi(sp, sp, 8)
+ mv(a0, a5)
+
+
+print(lu())
diff --git a/tests/inlineasm/rv32/asmloadstore.py.exp b/tests/inlineasm/rv32/asmloadstore.py.exp
new file mode 100644
index 000000000..4539bbf2d
--- /dev/null
+++ b/tests/inlineasm/rv32/asmloadstore.py.exp
@@ -0,0 +1,3 @@
+0
+1
+2
diff --git a/tests/inlineasm/rv32/asmrettype.py b/tests/inlineasm/rv32/asmrettype.py
new file mode 100644
index 000000000..fc7ae61d1
--- /dev/null
+++ b/tests/inlineasm/rv32/asmrettype.py
@@ -0,0 +1,33 @@
+# test return type of inline asm
+
+
+@micropython.asm_rv32
+def ret_obj(a0) -> object:
+ pass
+
+
+ret_obj(print)(1)
+
+
+@micropython.asm_rv32
+def ret_bool(a0) -> bool:
+ pass
+
+
+print(ret_bool(0), ret_bool(1))
+
+
+@micropython.asm_rv32
+def ret_int(a0) -> int:
+ slli(a0, a0, 29)
+
+
+print(ret_int(0), hex(ret_int(1)), hex(ret_int(2)), hex(ret_int(4)))
+
+
+@micropython.asm_rv32
+def ret_uint(a0) -> uint:
+ slli(a0, a0, 29)
+
+
+print(ret_uint(0), hex(ret_uint(1)), hex(ret_uint(2)), hex(ret_uint(4)))
diff --git a/tests/inlineasm/rv32/asmrettype.py.exp b/tests/inlineasm/rv32/asmrettype.py.exp
new file mode 100644
index 000000000..cbb49d247
--- /dev/null
+++ b/tests/inlineasm/rv32/asmrettype.py.exp
@@ -0,0 +1,4 @@
+1
+False True
+0 0x20000000 0x40000000 -0x80000000
+0 0x20000000 0x40000000 0x80000000
diff --git a/tests/inlineasm/rv32/asmsanity.py b/tests/inlineasm/rv32/asmsanity.py
new file mode 100644
index 000000000..1a16d3504
--- /dev/null
+++ b/tests/inlineasm/rv32/asmsanity.py
@@ -0,0 +1,204 @@
+TEMPLATE3 = """
+@micropython.asm_rv32
+def f():
+ {}({}, {}, {})
+"""
+
+TEMPLATE2 = """
+@micropython.asm_rv32
+def f():
+ {}({}, {})
+"""
+
+TEMPLATE1 = """
+@micropython.asm_rv32
+def f():
+ {}({})
+"""
+
+
+REGISTERS = [
+ "zero",
+ "s0",
+ "s1",
+ "s2",
+ "s3",
+ "s4",
+ "s5",
+ "s6",
+ "s7",
+ "s8",
+ "s9",
+ "s10",
+ "s11",
+ "a0",
+ "a1",
+ "a2",
+ "a3",
+ "a4",
+ "a5",
+ "a6",
+ "a7",
+ "tp",
+ "gp",
+ "sp",
+ "ra",
+ "t0",
+ "t1",
+ "t2",
+ "t3",
+ "t4",
+ "t5",
+ "t6",
+ "x0",
+ "x1",
+ "x2",
+ "x3",
+ "x4",
+ "x5",
+ "x6",
+ "x7",
+ "x8",
+ "x9",
+ "x10",
+ "x11",
+ "x12",
+ "x13",
+ "x14",
+ "x15",
+ "x16",
+ "x17",
+ "x18",
+ "x19",
+ "x20",
+ "x21",
+ "x22",
+ "x23",
+ "x24",
+ "x25",
+ "x26",
+ "x27",
+ "x28",
+ "x29",
+ "x30",
+ "x31",
+]
+
+
+def harness(opcode, fragment, tag):
+ try:
+ exec(fragment)
+ except SyntaxError:
+ print(tag, opcode)
+
+
+for opcode in ("slli", "srli", "srai"):
+ harness(opcode, TEMPLATE3.format(opcode, "a0", "a0", -1), "-")
+ harness(opcode, TEMPLATE3.format(opcode, "a0", "a0", 33), "+")
+
+for opcode in ("c_slli", "c_srli", "c_srai"):
+ harness(opcode, TEMPLATE2.format(opcode, "a0", -1), "-")
+ harness(opcode, TEMPLATE2.format(opcode, "a0", 33), "+")
+
+harness("c_slli", TEMPLATE2.format("c_slli", "zero", 0), "0")
+harness("c_slli", TEMPLATE2.format("c_slli", "x0", 0), "0")
+
+for opcode in ("c_srli", "c_srai"):
+ for register in REGISTERS:
+ harness(opcode, TEMPLATE2.format(opcode, register, 0), register)
+
+for opcode in ("c_mv", "c_add"):
+ harness(opcode, TEMPLATE2.format(opcode, "a0", "zero"), "0l")
+ harness(opcode, TEMPLATE2.format(opcode, "zero", "a0"), "0r")
+ harness(opcode, TEMPLATE2.format(opcode, "zero", "zero"), "0b")
+
+harness("c_jr", TEMPLATE1.format("c_jr", "zero"), "0")
+
+for opcode in ("addi", "andi", "ori", "slti", "sltiu", "xori"):
+ harness(opcode, TEMPLATE3.format(opcode, "a0", "a0", 0x7FF), ">=s")
+ harness(opcode, TEMPLATE3.format(opcode, "a0", "a0", 0x800), ">s")
+ harness(opcode, TEMPLATE3.format(opcode, "a0", "a0", -2048), "<=s")
+ harness(opcode, TEMPLATE3.format(opcode, "a0", "a0", -2049), "<s")
+
+for opcode in ("lb", "lbu", "lh", "lhu", "lw", "sb", "sh", "sw"):
+ TEMPLATE = """
+@micropython.asm_rv32
+def f():
+ {}(a0, {}(a0))
+"""
+ harness(opcode, TEMPLATE.format(opcode, 0x7FF), ">=s")
+ harness(opcode, TEMPLATE.format(opcode, 0x800), ">s")
+ harness(opcode, TEMPLATE.format(opcode, -2048), "<=s")
+ harness(opcode, TEMPLATE.format(opcode, -2049), "<s")
+
+harness("c_addi", TEMPLATE2.format("c_andi", "zero", 0), "00")
+harness("c_addi", TEMPLATE2.format("c_andi", "zero", 512), ">0")
+harness("c_addi", TEMPLATE2.format("c_andi", "zero", -512), "<0")
+harness("c_addi", TEMPLATE2.format("c_andi", "s0", 0), "s0")
+harness("c_addi", TEMPLATE2.format("c_andi", "s0", -100), "<s")
+harness("c_addi", TEMPLATE2.format("c_andi", "s0", 100), ">s")
+
+harness("c_andi", TEMPLATE2.format("c_andi", "zero", 0), "00")
+harness("c_andi", TEMPLATE2.format("c_andi", "zero", 512), ">0")
+harness("c_andi", TEMPLATE2.format("c_andi", "zero", -512), "<0")
+harness("c_andi", TEMPLATE2.format("c_andi", "s0", 0), "s0")
+harness("c_andi", TEMPLATE2.format("c_andi", "s0", -100), "<s")
+harness("c_andi", TEMPLATE2.format("c_andi", "s0", 100), ">s")
+
+C_REGISTERS = (
+ "a0",
+ "a1",
+ "a2",
+ "a3",
+ "a4",
+ "a5",
+ "s0",
+ "s1",
+ "x8",
+ "x9",
+ "x10",
+ "x11",
+ "x12",
+ "x13",
+ "x14",
+ "x15",
+)
+
+for opcode in ("c_and", "c_or", "c_xor"):
+ for source in REGISTERS:
+ for destination in REGISTERS:
+ if source in C_REGISTERS and destination in C_REGISTERS:
+ try:
+ exec(
+ """
+@micropython.asm_rv32
+def f():
+ {}({}, {})
+""".format(opcode, source, destination)
+ )
+ except SyntaxError:
+ print(source, destination, opcode)
+ else:
+ try:
+ exec(
+ """
+@micropython.asm_rv32
+def f():
+ {}({}, {})
+""".format(opcode, source, destination)
+ )
+ print(source, destination, opcode)
+ except SyntaxError:
+ pass
+ print(opcode)
+
+for opcode in ("c_lw", "c_sw"):
+ TEMPLATE = """
+@micropython.asm_rv32
+def f():
+ {}(a0, {}(a0))
+"""
+ harness(opcode, TEMPLATE.format(opcode, 60), ">=s")
+ harness(opcode, TEMPLATE.format(opcode, 61), ">s")
+ harness(opcode, TEMPLATE.format(opcode, -60), "<=s")
+ harness(opcode, TEMPLATE.format(opcode, -61), "<s")
diff --git a/tests/inlineasm/rv32/asmsanity.py.exp b/tests/inlineasm/rv32/asmsanity.py.exp
new file mode 100644
index 000000000..c9d9b69d2
--- /dev/null
+++ b/tests/inlineasm/rv32/asmsanity.py.exp
@@ -0,0 +1,162 @@
+- slli
++ slli
+- srli
++ srli
+- srai
++ srai
+- c_slli
++ c_slli
+- c_srli
++ c_srli
+- c_srai
++ c_srai
+0 c_slli
+0 c_slli
+zero c_srli
+s2 c_srli
+s3 c_srli
+s4 c_srli
+s5 c_srli
+s6 c_srli
+s7 c_srli
+s8 c_srli
+s9 c_srli
+s10 c_srli
+s11 c_srli
+a6 c_srli
+a7 c_srli
+tp c_srli
+gp c_srli
+sp c_srli
+ra c_srli
+t0 c_srli
+t1 c_srli
+t2 c_srli
+t3 c_srli
+t4 c_srli
+t5 c_srli
+t6 c_srli
+x0 c_srli
+x1 c_srli
+x2 c_srli
+x3 c_srli
+x4 c_srli
+x5 c_srli
+x6 c_srli
+x7 c_srli
+x16 c_srli
+x17 c_srli
+x18 c_srli
+x19 c_srli
+x20 c_srli
+x21 c_srli
+x22 c_srli
+x23 c_srli
+x24 c_srli
+x25 c_srli
+x26 c_srli
+x27 c_srli
+x28 c_srli
+x29 c_srli
+x30 c_srli
+x31 c_srli
+zero c_srai
+s2 c_srai
+s3 c_srai
+s4 c_srai
+s5 c_srai
+s6 c_srai
+s7 c_srai
+s8 c_srai
+s9 c_srai
+s10 c_srai
+s11 c_srai
+a6 c_srai
+a7 c_srai
+tp c_srai
+gp c_srai
+sp c_srai
+ra c_srai
+t0 c_srai
+t1 c_srai
+t2 c_srai
+t3 c_srai
+t4 c_srai
+t5 c_srai
+t6 c_srai
+x0 c_srai
+x1 c_srai
+x2 c_srai
+x3 c_srai
+x4 c_srai
+x5 c_srai
+x6 c_srai
+x7 c_srai
+x16 c_srai
+x17 c_srai
+x18 c_srai
+x19 c_srai
+x20 c_srai
+x21 c_srai
+x22 c_srai
+x23 c_srai
+x24 c_srai
+x25 c_srai
+x26 c_srai
+x27 c_srai
+x28 c_srai
+x29 c_srai
+x30 c_srai
+x31 c_srai
+0l c_mv
+0r c_mv
+0b c_mv
+0l c_add
+0r c_add
+0b c_add
+0 c_jr
+>s addi
+<s addi
+>s andi
+<s andi
+>s ori
+<s ori
+>s slti
+<s slti
+>s sltiu
+<s sltiu
+>s xori
+<s xori
+>s lb
+<s lb
+>s lbu
+<s lbu
+>s lh
+<s lh
+>s lhu
+<s lhu
+>s lw
+<s lw
+>s sb
+<s sb
+>s sh
+<s sh
+>s sw
+<s sw
+00 c_addi
+>0 c_addi
+<0 c_addi
+<s c_addi
+>s c_addi
+00 c_andi
+>0 c_andi
+<0 c_andi
+<s c_andi
+>s c_andi
+c_and
+c_or
+c_xor
+>s c_lw
+<s c_lw
+>s c_sw
+<s c_sw
diff --git a/tests/inlineasm/rv32/asmshift.py b/tests/inlineasm/rv32/asmshift.py
new file mode 100644
index 000000000..89a231f4d
--- /dev/null
+++ b/tests/inlineasm/rv32/asmshift.py
@@ -0,0 +1,121 @@
+@micropython.asm_rv32
+def lsl1(a0):
+ slli(a0, a0, 1)
+
+
+print(hex(lsl1(0x123)))
+
+
+@micropython.asm_rv32
+def lsl23(a0):
+ slli(a0, a0, 23)
+
+
+print(hex(lsl23(1)))
+
+
+@micropython.asm_rv32
+def lsr1(a0):
+ srli(a0, a0, 1)
+
+
+print(hex(lsr1(0x123)))
+
+
+@micropython.asm_rv32
+def lsr31(a0):
+ srli(a0, a0, 31)
+
+
+print(hex(lsr31(0x80000000)))
+
+
+@micropython.asm_rv32
+def asr1(a0):
+ srai(a0, a0, 1)
+
+
+print(hex(asr1(0x123)))
+
+
+@micropython.asm_rv32
+def asr31(a0):
+ srai(a0, a0, 31)
+
+
+print(hex(asr31(0x80000000)))
+
+
+@micropython.asm_rv32
+def clsl1(a0):
+ c_slli(a0, 1)
+
+
+print(hex(clsl1(0x123)))
+
+
+@micropython.asm_rv32
+def clsl23(a0):
+ c_slli(a0, 23)
+
+
+print(hex(clsl23(1)))
+
+
+@micropython.asm_rv32
+def clsr1(a0):
+ c_srli(a0, 1)
+
+
+print(hex(clsr1(0x123)))
+
+
+@micropython.asm_rv32
+def clsr31(a0):
+ c_srli(a0, 31)
+
+
+print(hex(clsr31(0x80000000)))
+
+
+@micropython.asm_rv32
+def casr1(a0):
+ c_srai(a0, 1)
+
+
+print(hex(casr1(0x123)))
+
+
+@micropython.asm_rv32
+def casr31(a0):
+ c_srai(a0, 31)
+
+
+print(hex(casr31(0x80000000)))
+
+
+@micropython.asm_rv32
+def lsl1r(a0):
+ li(a1, 1)
+ sll(a0, a0, a1)
+
+
+print(hex(lsl1r(0x123)))
+
+
+@micropython.asm_rv32
+def lsr1r(a0):
+ li(a1, 1)
+ srl(a0, a0, a1)
+
+
+print(hex(lsr1r(0x123)))
+
+
+@micropython.asm_rv32
+def asr1r(a0):
+ li(a1, 1)
+ sra(a0, a0, a1)
+
+
+print(hex(asr1r(0x123)))
diff --git a/tests/inlineasm/rv32/asmshift.py.exp b/tests/inlineasm/rv32/asmshift.py.exp
new file mode 100644
index 000000000..feb4cc707
--- /dev/null
+++ b/tests/inlineasm/rv32/asmshift.py.exp
@@ -0,0 +1,15 @@
+0x246
+0x800000
+0x91
+0x1
+0x91
+-0x1
+0x246
+0x800000
+0x91
+0x1
+0x91
+-0x1
+0x246
+0x91
+0x91
diff --git a/tests/inlineasm/rv32/asmstack.py b/tests/inlineasm/rv32/asmstack.py
new file mode 100644
index 000000000..9035f8728
--- /dev/null
+++ b/tests/inlineasm/rv32/asmstack.py
@@ -0,0 +1,65 @@
+# test stack manipulation
+
+
+@micropython.asm_rv32
+def l():
+ li(a2, 3)
+ addi(sp, sp, -8)
+ li(a0, 0x123)
+ sw(a0, 0(sp))
+ addi(a1, a0, 0x111)
+ sw(a1, 4(sp))
+ c_lwsp(a3, 0)
+ bne(a3, a0, END)
+ addi(a2, a2, -1)
+ c_lwsp(a3, 4)
+ bne(a3, a1, END)
+ addi(a2, a2, -1)
+ label(END)
+ addi(sp, sp, 8)
+ mv(a0, a2)
+
+
+print(l())
+
+
+@micropython.asm_rv32
+def s():
+ li(a2, 3)
+ addi(sp, sp, -8)
+ li(a0, 0x123)
+ c_swsp(a0, 0)
+ addi(a1, a0, 0x111)
+ c_swsp(a1, 4)
+ lw(a3, 0(sp))
+ bne(a3, a0, END)
+ addi(a2, a2, -1)
+ lw(a3, 4(sp))
+ bne(a3, a1, END)
+ addi(a2, a2, -1)
+ label(END)
+ addi(sp, sp, 8)
+ mv(a0, a2)
+
+
+print(s())
+
+
+@micropython.asm_rv32
+def a():
+ li(a2, 3)
+ addi(sp, sp, -8)
+ li(a0, 0x123)
+ sw(a0, 0(sp))
+ addi(a1, a0, 0x111)
+ sw(a1, 4(sp))
+ c_addi4spn(a3, 4)
+ lw(a3, -4(a3))
+ bne(a3, a0, END)
+ addi(a2, a2, -1)
+ label(END)
+ addi(sp, sp, 8)
+ mv(a0, a2)
+
+
+print(a())
diff --git a/tests/inlineasm/rv32/asmstack.py.exp b/tests/inlineasm/rv32/asmstack.py.exp
new file mode 100644
index 000000000..33280629d
--- /dev/null
+++ b/tests/inlineasm/rv32/asmstack.py.exp
@@ -0,0 +1,3 @@
+1
+1
+2
diff --git a/tests/inlineasm/rv32/asmsum.py b/tests/inlineasm/rv32/asmsum.py
new file mode 100644
index 000000000..5d805553b
--- /dev/null
+++ b/tests/inlineasm/rv32/asmsum.py
@@ -0,0 +1,59 @@
+@micropython.asm_rv32
+def asm_sum_words(a0, a1):
+ # a0 = len
+ # a1 = ptr
+ # a2 = sum
+ # a3 = dummy
+ li(a2, 0)
+
+ jal(zero, loop_entry)
+
+ label(loop1)
+ lw(a3, 0(a1))
+ add(a2, a2, a3)
+
+ addi(a1, a1, 4)
+ addi(a0, a0, -1)
+
+ label(loop_entry)
+ bne(a0, zero, loop1)
+
+ mv(a0, a2)
+
+
+@micropython.asm_rv32
+def asm_sum_bytes(a0, a1):
+ # a0 = len
+ # a1 = ptr
+ # a2 = sum
+ # a3 = dummy
+ li(a2, 0)
+
+ jal(zero, loop_entry)
+
+ label(loop1)
+ lbu(a3, 0(a1))
+ add(a2, a2, a3)
+
+ addi(a1, a1, 1)
+ addi(a0, a0, -1)
+
+ label(loop_entry)
+ bne(a0, zero, loop1)
+
+ mv(a0, a2)
+
+
+import array
+
+b = array.array("l", (100, 200, 300, 400))
+n = asm_sum_words(len(b), b)
+print(b, n)
+
+b = array.array("b", (10, 20, 30, 40, 50, 60, 70, 80))
+n = asm_sum_bytes(len(b), b)
+print(b, n)
+
+b = b"\x01\x02\x03\x04"
+n = asm_sum_bytes(len(b), b)
+print(b, n)
diff --git a/tests/inlineasm/rv32/asmsum.py.exp b/tests/inlineasm/rv32/asmsum.py.exp
new file mode 100644
index 000000000..3c83da367
--- /dev/null
+++ b/tests/inlineasm/rv32/asmsum.py.exp
@@ -0,0 +1,3 @@
+array('l', [100, 200, 300, 400]) 1000
+array('b', [10, 20, 30, 40, 50, 60, 70, 80]) 360
+b'\x01\x02\x03\x04' 10
diff --git a/tests/ports/qemu/asm_test.py b/tests/ports/qemu/asm_test.py
index 57238c629..ab5ce6905 100644
--- a/tests/ports/qemu/asm_test.py
+++ b/tests/ports/qemu/asm_test.py
@@ -1,4 +1,10 @@
-import frozen_asm_thumb as frozen_asm
+try:
+ import frozen_asm_thumb as frozen_asm
+except ImportError:
+ try:
+ import frozen_asm_rv32 as frozen_asm
+ except ImportError:
+ raise ImportError
print(frozen_asm.asm_add(1, 2))
print(frozen_asm.asm_add1(3))
diff --git a/tests/run-tests.py b/tests/run-tests.py
index f00510f2d..db5ebe34c 100755
--- a/tests/run-tests.py
+++ b/tests/run-tests.py
@@ -1245,9 +1245,12 @@ the last matching regex is used:
elif args.platform == "qemu":
test_dirs += (
"float",
- "inlineasm/thumb",
"ports/qemu",
)
+ if args.arch == "rv32imc":
+ test_dirs += ("inlineasm/rv32",)
+ else:
+ test_dirs += ("inlineasm/thumb",)
elif args.platform == "webassembly":
test_dirs += ("float", "ports/webassembly")
else: