-rw-r--r--  py/asmarm.h    |  1 -
-rw-r--r--  py/asmthumb.c  | 15 ---------------
-rw-r--r--  py/asmthumb.h  |  2 --
-rw-r--r--  py/asmx64.c    |  9 ---------
-rw-r--r--  py/asmx64.h    |  2 --
-rw-r--r--  py/asmx86.c    |  9 ---------
-rw-r--r--  py/asmx86.h    |  2 --
-rw-r--r--  py/asmxtensa.h |  1 -
8 files changed, 0 insertions, 41 deletions
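Every file in the stat above loses the same thing: the ASM_MOV_REG_ALIGNED_IMM macro and the per-port *_aligned mov helpers behind it, evidently unused (note the 0 insertions). A short sketch of the alignment idiom these helpers implemented follows the diff.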
diff --git a/py/asmarm.h b/py/asmarm.h
index a825dc524..f72a7f732 100644
--- a/py/asmarm.h
+++ b/py/asmarm.h
@@ -172,7 +172,6 @@ void asm_arm_bx_reg(asm_arm_t *as, uint reg_src);
 
 #define ASM_MOV_LOCAL_REG(as, local_num, reg_src) asm_arm_mov_local_reg((as), (local_num), (reg_src))
 #define ASM_MOV_REG_IMM(as, reg_dest, imm) asm_arm_mov_reg_i32((as), (reg_dest), (imm))
-#define ASM_MOV_REG_ALIGNED_IMM(as, reg_dest, imm) asm_arm_mov_reg_i32((as), (reg_dest), (imm))
 #define ASM_MOV_REG_LOCAL(as, reg_dest, local_num) asm_arm_mov_reg_local((as), (reg_dest), (local_num))
 #define ASM_MOV_REG_REG(as, reg_dest, reg_src) asm_arm_mov_reg_reg((as), (reg_dest), (reg_src))
 #define ASM_MOV_REG_LOCAL_ADDR(as, reg_dest, local_num) asm_arm_mov_reg_local_addr((as), (reg_dest), (local_num))
diff --git a/py/asmthumb.c b/py/asmthumb.c
index 555c21a1d..49700fbdb 100644
--- a/py/asmthumb.c
+++ b/py/asmthumb.c
@@ -269,21 +269,6 @@ void asm_thumb_mov_reg_i32_optimised(asm_thumb_t *as, uint reg_dest, int i32) {
     }
 }
 
-// i32 is stored as a full word in the code, and aligned to machine-word boundary
-// TODO this is very inefficient, improve it!
-void asm_thumb_mov_reg_i32_aligned(asm_thumb_t *as, uint reg_dest, int i32) {
-    // align on machine-word + 2
-    if ((as->base.code_offset & 3) == 0) {
-        asm_thumb_op16(as, ASM_THUMB_OP_NOP);
-    }
-    // jump over the i32 value (instruction prefetch adds 2 to PC)
-    asm_thumb_op16(as, OP_B_N(2));
-    // store i32 on machine-word aligned boundary
-    mp_asm_base_data(&as->base, 4, i32);
-    // do the actual load of the i32 value
-    asm_thumb_mov_reg_i32_optimised(as, reg_dest, i32);
-}
-
 #define OP_STR_TO_SP_OFFSET(rlo_dest, word_offset) (0x9000 | ((rlo_dest) << 8) | ((word_offset) & 0x00ff))
 #define OP_LDR_FROM_SP_OFFSET(rlo_dest, word_offset) (0x9800 | ((rlo_dest) << 8) | ((word_offset) & 0x00ff))
 
diff --git a/py/asmthumb.h b/py/asmthumb.h
index fb42a76ac..f06e6900d 100644
--- a/py/asmthumb.h
+++ b/py/asmthumb.h
@@ -249,7 +249,6 @@ bool asm_thumb_bl_label(asm_thumb_t *as, uint label);
 
 void asm_thumb_mov_reg_i32(asm_thumb_t *as, uint reg_dest, mp_uint_t i32_src); // convenience
 void asm_thumb_mov_reg_i32_optimised(asm_thumb_t *as, uint reg_dest, int i32_src); // convenience
-void asm_thumb_mov_reg_i32_aligned(asm_thumb_t *as, uint reg_dest, int i32); // convenience
 void asm_thumb_mov_local_reg(asm_thumb_t *as, int local_num_dest, uint rlo_src); // convenience
 void asm_thumb_mov_reg_local(asm_thumb_t *as, uint rlo_dest, int local_num); // convenience
 void asm_thumb_mov_reg_local_addr(asm_thumb_t *as, uint rlo_dest, int local_num); // convenience
@@ -308,7 +307,6 @@ void asm_thumb_bl_ind(asm_thumb_t *as, void *fun_ptr, uint fun_id, uint reg_temp
 
 #define ASM_MOV_LOCAL_REG(as, local_num, reg) asm_thumb_mov_local_reg((as), (local_num), (reg))
 #define ASM_MOV_REG_IMM(as, reg_dest, imm) asm_thumb_mov_reg_i32_optimised((as), (reg_dest), (imm))
-#define ASM_MOV_REG_ALIGNED_IMM(as, reg_dest, imm) asm_thumb_mov_reg_i32_aligned((as), (reg_dest), (imm))
 #define ASM_MOV_REG_LOCAL(as, reg_dest, local_num) asm_thumb_mov_reg_local((as), (reg_dest), (local_num))
 #define ASM_MOV_REG_REG(as, reg_dest, reg_src) asm_thumb_mov_reg_reg((as), (reg_dest), (reg_src))
 #define ASM_MOV_REG_LOCAL_ADDR(as, reg_dest, local_num) asm_thumb_mov_reg_local_addr((as), (reg_dest), (local_num))
diff --git a/py/asmx64.c b/py/asmx64.c
index c7702942d..3e0aa4970 100644
--- a/py/asmx64.c
+++ b/py/asmx64.c
@@ -363,15 +363,6 @@ void asm_x64_mov_i64_to_r64_optimised(asm_x64_t *as, int64_t src_i64, int dest_r
     }
 }
 
-// src_i64 is stored as a full word in the code, and aligned to machine-word boundary
-void asm_x64_mov_i64_to_r64_aligned(asm_x64_t *as, int64_t src_i64, int dest_r64) {
-    // mov instruction uses 2 bytes for the instruction, before the i64
-    while (((as->base.code_offset + 2) & (WORD_SIZE - 1)) != 0) {
-        asm_x64_nop(as);
-    }
-    asm_x64_mov_i64_to_r64(as, src_i64, dest_r64);
-}
-
 void asm_x64_and_r64_r64(asm_x64_t *as, int dest_r64, int src_r64) {
     asm_x64_generic_r64_r64(as, dest_r64, src_r64, OPCODE_AND_R64_TO_RM64);
 }
diff --git a/py/asmx64.h b/py/asmx64.h
index b05ed9bde..e2ab1f855 100644
--- a/py/asmx64.h
+++ b/py/asmx64.h
@@ -85,7 +85,6 @@ void asm_x64_pop_r64(asm_x64_t* as, int dest_r64);
 void asm_x64_mov_r64_r64(asm_x64_t* as, int dest_r64, int src_r64);
 void asm_x64_mov_i64_to_r64(asm_x64_t* as, int64_t src_i64, int dest_r64);
 void asm_x64_mov_i64_to_r64_optimised(asm_x64_t *as, int64_t src_i64, int dest_r64);
-void asm_x64_mov_i64_to_r64_aligned(asm_x64_t *as, int64_t src_i64, int dest_r64);
 void asm_x64_mov_r8_to_mem8(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp);
 void asm_x64_mov_r16_to_mem16(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp);
 void asm_x64_mov_r32_to_mem32(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp);
@@ -176,7 +175,6 @@ void asm_x64_call_ind(asm_x64_t* as, void* ptr, int temp_r32);
 
 #define ASM_MOV_LOCAL_REG(as, local_num, reg_src) asm_x64_mov_r64_to_local((as), (reg_src), (local_num))
 #define ASM_MOV_REG_IMM(as, reg_dest, imm) asm_x64_mov_i64_to_r64_optimised((as), (imm), (reg_dest))
-#define ASM_MOV_REG_ALIGNED_IMM(as, reg_dest, imm) asm_x64_mov_i64_to_r64_aligned((as), (imm), (reg_dest))
 #define ASM_MOV_REG_LOCAL(as, reg_dest, local_num) asm_x64_mov_local_to_r64((as), (local_num), (reg_dest))
 #define ASM_MOV_REG_REG(as, reg_dest, reg_src) asm_x64_mov_r64_r64((as), (reg_dest), (reg_src))
 #define ASM_MOV_REG_LOCAL_ADDR(as, reg_dest, local_num) asm_x64_mov_local_addr_to_r64((as), (local_num), (reg_dest))
diff --git a/py/asmx86.c b/py/asmx86.c
index 9d96ae06a..09c11c82f 100644
--- a/py/asmx86.c
+++ b/py/asmx86.c
@@ -231,15 +231,6 @@ void asm_x86_mov_i32_to_r32(asm_x86_t *as, int32_t src_i32, int dest_r32) {
     asm_x86_write_word32(as, src_i32);
 }
 
-// src_i32 is stored as a full word in the code, and aligned to machine-word boundary
-void asm_x86_mov_i32_to_r32_aligned(asm_x86_t *as, int32_t src_i32, int dest_r32) {
-    // mov instruction uses 1 byte for the instruction, before the i32
-    while (((as->base.code_offset + 1) & (WORD_SIZE - 1)) != 0) {
-        asm_x86_nop(as);
-    }
-    asm_x86_mov_i32_to_r32(as, src_i32, dest_r32);
-}
-
 void asm_x86_and_r32_r32(asm_x86_t *as, int dest_r32, int src_r32) {
     asm_x86_generic_r32_r32(as, dest_r32, src_r32, OPCODE_AND_R32_TO_RM32);
 }
diff --git a/py/asmx86.h b/py/asmx86.h
index 5b8a69b49..15518d98c 100644
--- a/py/asmx86.h
+++ b/py/asmx86.h
@@ -84,7 +84,6 @@ static inline void asm_x86_end_pass(asm_x86_t *as) {
 
 void asm_x86_mov_r32_r32(asm_x86_t* as, int dest_r32, int src_r32);
 void asm_x86_mov_i32_to_r32(asm_x86_t *as, int32_t src_i32, int dest_r32);
-void asm_x86_mov_i32_to_r32_aligned(asm_x86_t *as, int32_t src_i32, int dest_r32);
 void asm_x86_mov_r8_to_mem8(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp);
 void asm_x86_mov_r16_to_mem16(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp);
 void asm_x86_mov_r32_to_mem32(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp);
@@ -174,7 +173,6 @@ void asm_x86_call_ind(asm_x86_t* as, void* ptr, mp_uint_t n_args, int temp_r32);
 
 #define ASM_MOV_LOCAL_REG(as, local_num, reg_src) asm_x86_mov_r32_to_local((as), (reg_src), (local_num))
 #define ASM_MOV_REG_IMM(as, reg_dest, imm) asm_x86_mov_i32_to_r32((as), (imm), (reg_dest))
-#define ASM_MOV_REG_ALIGNED_IMM(as, reg_dest, imm) asm_x86_mov_i32_to_r32_aligned((as), (imm), (reg_dest))
 #define ASM_MOV_REG_LOCAL(as, reg_dest, local_num) asm_x86_mov_local_to_r32((as), (local_num), (reg_dest))
 #define ASM_MOV_REG_REG(as, reg_dest, reg_src) asm_x86_mov_r32_r32((as), (reg_dest), (reg_src))
 #define ASM_MOV_REG_LOCAL_ADDR(as, reg_dest, local_num) asm_x86_mov_local_addr_to_r32((as), (local_num), (reg_dest))
diff --git a/py/asmxtensa.h b/py/asmxtensa.h
index ad39f421c..07c3aa819 100644
--- a/py/asmxtensa.h
+++ b/py/asmxtensa.h
@@ -285,7 +285,6 @@ void asm_xtensa_call_ind(asm_xtensa_t *as, uint idx);
 
 #define ASM_MOV_LOCAL_REG(as, local_num, reg_src) asm_xtensa_mov_local_reg((as), (local_num), (reg_src))
 #define ASM_MOV_REG_IMM(as, reg_dest, imm) asm_xtensa_mov_reg_i32((as), (reg_dest), (imm))
-#define ASM_MOV_REG_ALIGNED_IMM(as, reg_dest, imm) asm_xtensa_mov_reg_i32((as), (reg_dest), (imm))
 #define ASM_MOV_REG_LOCAL(as, reg_dest, local_num) asm_xtensa_mov_reg_local((as), (reg_dest), (local_num))
 #define ASM_MOV_REG_REG(as, reg_dest, reg_src) asm_xtensa_op_mov_n((as), (reg_dest), (reg_src))
 #define ASM_MOV_REG_LOCAL_ADDR(as, reg_dest, local_num) asm_xtensa_mov_reg_local_addr((as), (reg_dest), (local_num))
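What the deleted helpers did: each one padded the instruction stream so that a full-word immediate embedded in the generated code would start on a machine-word boundary. Below is a minimal standalone C sketch of that idiom, assuming a 4-byte word; emit_bytes() and the global code_offset are hypothetical stand-ins for the assembler's byte emitter and as->base.code_offset, but the padding arithmetic is taken from the removed asm_x86_mov_i32_to_r32_aligned.

#include <stdio.h>
#include <stdint.h>

#define WORD_SIZE 4  /* 4-byte machine word, as in the removed x86 helper */

static uint32_t code_offset = 0;  /* stand-in for as->base.code_offset */

/* Pretend to emit n bytes of machine code, tracing each emission. */
static void emit_bytes(uint32_t n, const char *desc) {
    printf("offset %2u: %s (%u bytes)\n", (unsigned)code_offset, desc, (unsigned)n);
    code_offset += n;
}

/* Padding arithmetic from the removed asm_x86_mov_i32_to_r32_aligned:
 * the mov opcode occupies 1 byte before the i32, so emit NOPs until
 * code_offset + 1 is a multiple of WORD_SIZE; the immediate itself
 * then starts on a machine-word boundary. */
static void emit_mov_i32_aligned(void) {
    while (((code_offset + 1) & (WORD_SIZE - 1)) != 0) {
        emit_bytes(1, "nop");
    }
    emit_bytes(1, "mov r32, imm32 opcode");
    emit_bytes(4, "i32 immediate, now word-aligned");
}

int main(void) {
    emit_bytes(2, "some preceding instruction");
    emit_mov_i32_aligned();  /* emits one nop; the immediate starts at offset 4 */
    return 0;
}

The removed Thumb variant needed two extra tricks, visible in the asmthumb.c hunk: an optional NOP so that after the 2-byte branch the literal lands word-aligned ("align on machine-word + 2"), and a B over the embedded word, since the CPU must not execute the data. It then loaded the register with asm_thumb_mov_reg_i32_optimised anyway, which is why its own TODO comment called it very inefficient.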