py/emitnative: Refactor Viper register-indexed load/stores.

This commit cleans up the Viper code generation blocks for
register-indexed load and store operations.

An attempt is made to simplify the common code generator block by moving
architecture-specific code to the appropriate native generation backends
whenever possible.  This should make that part of the Viper generator
clearer and easier to maintain in the long term.

To achieve this, six generic assembler meta-opcodes have been introduced,
named `ASM_{LOAD,STORE}{8,16,32}_REG_REG_REG`.  A platform-independent
implementation of these operations is provided as a fallback, so backends
that cannot emit a shorter sequence for the requested operation, or are
happy with the generic implementation, simply do not define the
corresponding meta-opcodes.

Signed-off-by: Alessandro Gatti <a.gatti@frob.it>
Author: Alessandro Gatti
Date: 2025-05-07 22:41:33 +02:00
Parent: 04c6b99cb9
Commit: b6d269ee32
6 changed files with 98 additions and 95 deletions

@@ -411,12 +411,32 @@ void asm_xtensa_call_ind_win(asm_xtensa_t *as, uint idx);
#define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) asm_xtensa_op_l8ui((as), (reg_dest), (reg_base), 0)
#define ASM_LOAD16_REG_REG(as, reg_dest, reg_base) asm_xtensa_op_l16ui((as), (reg_dest), (reg_base), 0)
#define ASM_LOAD16_REG_REG_OFFSET(as, reg_dest, reg_base, uint16_offset) asm_xtensa_op_l16ui((as), (reg_dest), (reg_base), (uint16_offset))
#define ASM_LOAD16_REG_REG_REG(as, reg_dest, reg_base, reg_index) \
    do { \
        asm_xtensa_op_addx2((as), (reg_base), (reg_index), (reg_base)); \
        asm_xtensa_op_l16ui((as), (reg_dest), (reg_base), 0); \
    } while (0)
#define ASM_LOAD32_REG_REG(as, reg_dest, reg_base) asm_xtensa_op_l32i_n((as), (reg_dest), (reg_base), 0)
#define ASM_LOAD32_REG_REG_REG(as, reg_dest, reg_base, reg_index) \
    do { \
        asm_xtensa_op_addx4((as), (reg_base), (reg_index), (reg_base)); \
        asm_xtensa_op_l32i_n((as), (reg_dest), (reg_base), 0); \
    } while (0)
#define ASM_STORE_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_xtensa_s32i_optimised((as), (reg_dest), (reg_base), (word_offset))
#define ASM_STORE8_REG_REG(as, reg_src, reg_base) asm_xtensa_op_s8i((as), (reg_src), (reg_base), 0)
#define ASM_STORE16_REG_REG(as, reg_src, reg_base) asm_xtensa_op_s16i((as), (reg_src), (reg_base), 0)
#define ASM_STORE16_REG_REG_REG(as, reg_val, reg_base, reg_index) \
    do { \
        asm_xtensa_op_addx2((as), (reg_base), (reg_index), (reg_base)); \
        asm_xtensa_op_s16i((as), (reg_val), (reg_base), 0); \
    } while (0)
#define ASM_STORE32_REG_REG(as, reg_src, reg_base) asm_xtensa_op_s32i_n((as), (reg_src), (reg_base), 0)
#define ASM_STORE32_REG_REG_REG(as, reg_val, reg_base, reg_index) \
    do { \
        asm_xtensa_op_addx4((as), (reg_base), (reg_index), (reg_base)); \
        asm_xtensa_op_s32i_n((as), (reg_val), (reg_base), 0); \
    } while (0)
#endif // GENERIC_ASM_API
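
For reference, the new meta-opcode is intended to be invoked from the common
Viper emitter roughly as follows when a 16-bit pointer is indexed with a
register; the call site and register names here are illustrative only and
are not taken verbatim from emitnative.c.

// Illustrative call site: reg_base holds the buffer address and reg_index
// the element index; on Xtensa this expands to the addx2 + l16ui pair above.
ASM_LOAD16_REG_REG_REG(emit->as, reg_dest, reg_base, reg_index);
//   addx2 reg_base, reg_index, reg_base   ; reg_base = reg_index * 2 + reg_base
//   l16ui reg_dest, reg_base, 0           ; reg_dest = *(uint16_t *)reg_base

Note that the Xtensa sequences reuse reg_base to hold the effective address,
so the original base pointer is not preserved across the macro and callers
that still need it must keep a copy elsewhere.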