From 12f36cc13c126770d81bc95daf695f3aa05bc5cb Mon Sep 17 00:00:00 2001
From: Alessandro Gatti
Date: Tue, 10 Jun 2025 19:54:28 +0200
Subject: [PATCH] py/asmxtensa: Implement the full set of Viper load/store
 operations.

This commit expands the implementation of Viper load/store operations
that are optimised for the Xtensa platform. Both the load and the store
emitters should now generate the shortest possible opcode sequence in
all cases. Redundant specialised operation emitters (the integer-indexed
load/store operations with a fixed offset of zero) have been turned into
aliases of the general case implementation.

Signed-off-by: Alessandro Gatti
---
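As a rough illustration of the cases the general-purpose emitters now have
to cover, consider a Viper function along the lines of the sketch below
(the function and its name are made up for illustration and are not part
of this patch). On Xtensa, its three byte stores exercise the zero-offset
path, the path where the offset fits the instruction's 8-bit immediate
field, and the path where the offset has to be materialised in a register
and added to the base pointer first:

    @micropython.viper
    def fill(buf: ptr8, value: int):
        buf[0] = value    # zero offset, now routed through the general emitter
        buf[64] = value   # fits the 8-bit unscaled offset field of S8I
        buf[512] = value  # too large: offset is built in a register first
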
 py/asmxtensa.c  | 50 ++++++++++++++++++++++++++++++++++++--------------
 py/asmxtensa.h  | 36 ++++++++++++++++++++++------------
 py/emitnative.c | 18 ------------------
 3 files changed, 60 insertions(+), 44 deletions(-)

diff --git a/py/asmxtensa.c b/py/asmxtensa.c
index 85a8cfef55..bc3e717d9f 100644
--- a/py/asmxtensa.c
+++ b/py/asmxtensa.c
@@ -299,25 +299,47 @@ void asm_xtensa_l32i_optimised(asm_xtensa_t *as, uint reg_dest, uint reg_base, uint word_offset) {
     }
 }
 
-void asm_xtensa_s32i_optimised(asm_xtensa_t *as, uint reg_src, uint reg_base, uint word_offset) {
-    if (word_offset < 16) {
-        asm_xtensa_op_s32i_n(as, reg_src, reg_base, word_offset);
-    } else if (word_offset < 256) {
-        asm_xtensa_op_s32i(as, reg_src, reg_base, word_offset);
+void asm_xtensa_load_reg_reg_offset(asm_xtensa_t *as, uint reg_dest, uint reg_base, uint offset, uint operation_size) {
+    assert(operation_size <= 2 && "Operation size value out of range.");
+
+    if (operation_size == 2 && MP_FIT_UNSIGNED(4, offset)) {
+        asm_xtensa_op_l32i_n(as, reg_dest, reg_base, offset);
+        return;
+    }
+
+    if (MP_FIT_UNSIGNED(8, offset)) {
+        asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRI8(2, operation_size, reg_base, reg_dest, offset));
+        return;
+    }
+
+    asm_xtensa_mov_reg_i32_optimised(as, reg_dest, offset << operation_size);
+    asm_xtensa_op_add_n(as, reg_dest, reg_base, reg_dest);
+    if (operation_size == 2) {
+        asm_xtensa_op_l32i_n(as, reg_dest, reg_dest, 0);
     } else {
-        asm_xtensa_mov_reg_i32_optimised(as, REG_TEMP, word_offset * 4);
-        asm_xtensa_op_add_n(as, REG_TEMP, reg_base, REG_TEMP);
-        asm_xtensa_op_s32i_n(as, reg_src, REG_TEMP, 0);
+        asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRI8(2, operation_size, reg_dest, reg_dest, 0));
     }
 }
 
-void asm_xtensa_l16ui_optimised(asm_xtensa_t *as, uint reg_dest, uint reg_base, uint halfword_offset) {
-    if (halfword_offset < 256) {
-        asm_xtensa_op_l16ui(as, reg_dest, reg_base, halfword_offset);
+void asm_xtensa_store_reg_reg_offset(asm_xtensa_t *as, uint reg_src, uint reg_base, uint offset, uint operation_size) {
+    assert(operation_size <= 2 && "Operation size value out of range.");
+
+    if (operation_size == 2 && MP_FIT_UNSIGNED(4, offset)) {
+        asm_xtensa_op_s32i_n(as, reg_src, reg_base, offset);
+        return;
+    }
+
+    if (MP_FIT_UNSIGNED(8, offset)) {
+        asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRI8(2, 0x04 | operation_size, reg_base, reg_src, offset));
+        return;
+    }
+
+    asm_xtensa_mov_reg_i32_optimised(as, REG_TEMP, offset << operation_size);
+    asm_xtensa_op_add_n(as, REG_TEMP, reg_base, REG_TEMP);
+    if (operation_size == 2) {
+        asm_xtensa_op_s32i_n(as, reg_src, REG_TEMP, 0);
     } else {
-        asm_xtensa_mov_reg_i32_optimised(as, reg_dest, halfword_offset * 2);
-        asm_xtensa_op_add_n(as, reg_dest, reg_base, reg_dest);
-        asm_xtensa_op_l16ui(as, reg_dest, reg_dest, 0);
+        asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRI8(2, 0x04 | operation_size, REG_TEMP, reg_src, 0));
     }
 }
 
diff --git a/py/asmxtensa.h b/py/asmxtensa.h
index d90aef3c53..7f113ca12e 100644
--- a/py/asmxtensa.h
+++ b/py/asmxtensa.h
@@ -293,9 +293,8 @@ void asm_xtensa_mov_local_reg(asm_xtensa_t *as, int local_num, uint reg_src);
 void asm_xtensa_mov_reg_local(asm_xtensa_t *as, uint reg_dest, int local_num);
 void asm_xtensa_mov_reg_local_addr(asm_xtensa_t *as, uint reg_dest, int local_num);
 void asm_xtensa_mov_reg_pcrel(asm_xtensa_t *as, uint reg_dest, uint label);
-void asm_xtensa_l32i_optimised(asm_xtensa_t *as, uint reg_dest, uint reg_base, uint word_offset);
-void asm_xtensa_s32i_optimised(asm_xtensa_t *as, uint reg_src, uint reg_base, uint word_offset);
-void asm_xtensa_l16ui_optimised(asm_xtensa_t *as, uint reg_dest, uint reg_base, uint halfword_offset);
+void asm_xtensa_load_reg_reg_offset(asm_xtensa_t *as, uint reg_dest, uint reg_base, uint offset, uint operation_size);
+void asm_xtensa_store_reg_reg_offset(asm_xtensa_t *as, uint reg_src, uint reg_base, uint offset, uint operation_size);
 void asm_xtensa_call_ind(asm_xtensa_t *as, uint idx);
 void asm_xtensa_call_ind_win(asm_xtensa_t *as, uint idx);
 void asm_xtensa_bit_branch(asm_xtensa_t *as, mp_uint_t reg, mp_uint_t bit, mp_uint_t label, mp_uint_t condition);
@@ -420,16 +419,22 @@ void asm_xtensa_l32r(asm_xtensa_t *as, mp_uint_t reg, mp_uint_t label);
 #define ASM_MUL_REG_REG(as, reg_dest, reg_src) asm_xtensa_op_mull((as), (reg_dest), (reg_dest), (reg_src))
 
 #define ASM_LOAD_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) ASM_LOAD32_REG_REG_OFFSET((as), (reg_dest), (reg_base), (word_offset))
-#define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) asm_xtensa_op_l8ui((as), (reg_dest), (reg_base), 0)
-#define ASM_LOAD16_REG_REG(as, reg_dest, reg_base) asm_xtensa_op_l16ui((as), (reg_dest), (reg_base), 0)
-#define ASM_LOAD16_REG_REG_OFFSET(as, reg_dest, reg_base, uint16_offset) asm_xtensa_l16ui_optimised((as), (reg_dest), (reg_base), (uint16_offset))
+#define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) ASM_LOAD8_REG_REG_OFFSET((as), (reg_dest), (reg_base), 0)
+#define ASM_LOAD8_REG_REG_OFFSET(as, reg_dest, reg_base, byte_offset) asm_xtensa_load_reg_reg_offset((as), (reg_dest), (reg_base), (byte_offset), 0)
+#define ASM_LOAD8_REG_REG_REG(as, reg_dest, reg_base, reg_index) \
+    do { \
+        asm_xtensa_op_add_n((as), (reg_base), (reg_index), (reg_base)); \
+        asm_xtensa_op_l8ui((as), (reg_dest), (reg_base), 0); \
+    } while (0)
+#define ASM_LOAD16_REG_REG(as, reg_dest, reg_base) ASM_LOAD16_REG_REG_OFFSET((as), (reg_dest), (reg_base), 0)
+#define ASM_LOAD16_REG_REG_OFFSET(as, reg_dest, reg_base, halfword_offset) asm_xtensa_load_reg_reg_offset((as), (reg_dest), (reg_base), (halfword_offset), 1)
 #define ASM_LOAD16_REG_REG_REG(as, reg_dest, reg_base, reg_index) \
     do { \
         asm_xtensa_op_addx2((as), (reg_base), (reg_index), (reg_base)); \
         asm_xtensa_op_l16ui((as), (reg_dest), (reg_base), 0); \
     } while (0)
-#define ASM_LOAD32_REG_REG(as, reg_dest, reg_base) asm_xtensa_op_l32i_n((as), (reg_dest), (reg_base), 0)
-#define ASM_LOAD32_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_xtensa_l32i_optimised((as), (reg_dest), (reg_base), (word_offset))
+#define ASM_LOAD32_REG_REG(as, reg_dest, reg_base) ASM_LOAD32_REG_REG_OFFSET((as), (reg_dest), (reg_base), 0)
+#define ASM_LOAD32_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_xtensa_load_reg_reg_offset((as), (reg_dest), (reg_base), (word_offset), 2)
 #define ASM_LOAD32_REG_REG_REG(as, reg_dest, reg_base, reg_index) \
     do { \
         asm_xtensa_op_addx4((as), (reg_base), (reg_index), (reg_base)); \
@@ -437,15 +442,22 @@ void asm_xtensa_l32r(asm_xtensa_t *as, mp_uint_t reg, mp_uint_t label);
     } while (0)
 
 #define ASM_STORE_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) ASM_STORE32_REG_REG_OFFSET((as), (reg_dest), (reg_base), (word_offset))
-#define ASM_STORE8_REG_REG(as, reg_src, reg_base) asm_xtensa_op_s8i((as), (reg_src), (reg_base), 0)
-#define ASM_STORE16_REG_REG(as, reg_src, reg_base) asm_xtensa_op_s16i((as), (reg_src), (reg_base), 0)
+#define ASM_STORE8_REG_REG(as, reg_src, reg_base) ASM_STORE8_REG_REG_OFFSET((as), (reg_src), (reg_base), 0)
+#define ASM_STORE8_REG_REG_OFFSET(as, reg_src, reg_base, byte_offset) asm_xtensa_store_reg_reg_offset((as), (reg_src), (reg_base), (byte_offset), 0)
+#define ASM_STORE8_REG_REG_REG(as, reg_val, reg_base, reg_index) \
+    do { \
+        asm_xtensa_op_add_n((as), (reg_base), (reg_index), (reg_base)); \
+        asm_xtensa_op_s8i((as), (reg_val), (reg_base), 0); \
+    } while (0)
+#define ASM_STORE16_REG_REG(as, reg_src, reg_base) ASM_STORE16_REG_REG_OFFSET((as), (reg_src), (reg_base), 0)
+#define ASM_STORE16_REG_REG_OFFSET(as, reg_src, reg_base, halfword_offset) asm_xtensa_store_reg_reg_offset((as), (reg_src), (reg_base), (halfword_offset), 1)
 #define ASM_STORE16_REG_REG_REG(as, reg_val, reg_base, reg_index) \
     do { \
         asm_xtensa_op_addx2((as), (reg_base), (reg_index), (reg_base)); \
         asm_xtensa_op_s16i((as), (reg_val), (reg_base), 0); \
     } while (0)
-#define ASM_STORE32_REG_REG(as, reg_src, reg_base) asm_xtensa_op_s32i_n((as), (reg_src), (reg_base), 0)
-#define ASM_STORE32_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_xtensa_s32i_optimised((as), (reg_dest), (reg_base), (word_offset))
+#define ASM_STORE32_REG_REG(as, reg_src, reg_base) ASM_STORE32_REG_REG_OFFSET((as), (reg_src), (reg_base), 0)
+#define ASM_STORE32_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_xtensa_store_reg_reg_offset((as), (reg_dest), (reg_base), (word_offset), 2)
 #define ASM_STORE32_REG_REG_REG(as, reg_val, reg_base, reg_index) \
     do { \
         asm_xtensa_op_addx4((as), (reg_base), (reg_index), (reg_base)); \
diff --git a/py/emitnative.c b/py/emitnative.c
index 36e9719db4..f3ab483e8a 100644
--- a/py/emitnative.c
+++ b/py/emitnative.c
@@ -1534,12 +1534,6 @@ static void emit_native_load_subscr(emit_t *emit) {
                     #ifdef ASM_LOAD8_REG_REG_OFFSET
                     ASM_LOAD8_REG_REG_OFFSET(emit->as, REG_RET, reg_base, index_value);
                     #else
-                    #if N_XTENSA || N_XTENSAWIN
-                    if (index_value >= 0 && index_value < 256) {
-                        asm_xtensa_op_l8ui(emit->as, REG_RET, reg_base, index_value);
-                        break;
-                    }
-                    #endif
                     if (index_value != 0) {
                         // index is non-zero
                         need_reg_single(emit, reg_index, 0);
@@ -1776,12 +1770,6 @@ static void emit_native_store_subscr(emit_t *emit) {
                     #ifdef ASM_STORE8_REG_REG_OFFSET
                     ASM_STORE8_REG_REG_OFFSET(emit->as, reg_value, reg_base, index_value);
                     #else
-                    #if N_XTENSA || N_XTENSAWIN
-                    if (index_value >= 0 && index_value < 256) {
-                        asm_xtensa_op_s8i(emit->as, reg_value, reg_base, index_value);
-                        break;
-                    }
-                    #endif
                     if (index_value != 0) {
                         // index is non-zero
                         ASM_MOV_REG_IMM(emit->as, reg_index, index_value);
@@ -1797,12 +1785,6 @@ static void emit_native_store_subscr(emit_t *emit) {
                     #ifdef ASM_STORE16_REG_REG_OFFSET
                     ASM_STORE16_REG_REG_OFFSET(emit->as, reg_value, reg_base, index_value);
                     #else
-                    #if N_XTENSA || N_XTENSAWIN
-                    if (index_value >= 0 && index_value < 256) {
-                        asm_xtensa_op_s16i(emit->as, reg_value, reg_base, index_value);
-                        break;
-                    }
-                    #endif
                     if (index_value != 0) {
                         // index is a non-zero immediate
                         ASM_MOV_REG_IMM(emit->as, reg_index, index_value << 1);