py/emitnative: Let Viper int-indexed code use appropriate operands.

This commit extends the generic ASM API by adding the rest of the
ASM_{LOAD,STORE}[size]_REG_REG_OFFSET macros where applicable.

The Viper int-indexed load/store code generator was changed to use those
macros when they are available, falling back to backend-specific opcode
sequences where possible, and ultimately to a generic implementation that
adds the scaled index to the base register before a plain load or store.
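
Condensed from the VTYPE_PTR16 load case in the diff below, the dispatch
now has this shape (the elided part adds the scaled index to the base
register before the plain register-based load):

    #ifdef ASM_LOAD16_REG_REG_OFFSET
    // Preferred: the backend exposes a load with an immediate offset.
    ASM_LOAD16_REG_REG_OFFSET(emit->as, REG_RET, reg_base, index_value);
    #else
    #if N_THUMB
    // Backend-specific fast path, taken only if the offset fits the encoding.
    if (index_value >= 0 && index_value < 32) {
        asm_thumb_ldrh_rlo_rlo_i5(emit->as, REG_RET, reg_base, index_value);
        break;
    }
    #endif
    // Generic fallback: fold base+2*index into reg_base, then load.
    ...
    ASM_LOAD16_REG_REG(emit->as, REG_RET, reg_base); // load from (base+2*index)
    #endif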

Right now all backends implement the load16, load32, and store32
operations, with the exception of x64, which only implements load16.
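
As an illustration only, a backend with a halfword load that takes an
immediate offset could provide the new macro along these lines.  The name
asm_mybackend_ldrh_reg_reg_offset is made up for this sketch (the real
emitters live in the per-backend asm headers, e.g. py/asmthumb.h), and
index_value counts 16-bit elements, so the backend performs any scaling
to a byte offset itself:

    // Hypothetical sketch, not code from this commit.
    #define ASM_LOAD16_REG_REG_OFFSET(as, reg_dest, reg_base, uint16_offset) \
        asm_mybackend_ldrh_reg_reg_offset((as), (reg_dest), (reg_base), (uint16_offset))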

Signed-off-by: Alessandro Gatti <a.gatti@frob.it>

Author: Alessandro Gatti
Date: 2025-05-22 12:20:43 +02:00
Committed-by: Damien George
Parent: e43a3849d9
Commit: 78ee1bac60

6 changed files with 46 additions and 12 deletions


@@ -1537,6 +1537,9 @@ static void emit_native_load_subscr(emit_t *emit) {
             switch (vtype_base) {
                 case VTYPE_PTR8: {
                     // pointer to 8-bit memory
+                    #ifdef ASM_LOAD8_REG_REG_OFFSET
+                    ASM_LOAD8_REG_REG_OFFSET(emit->as, REG_RET, reg_base, index_value);
+                    #else
                     #if N_THUMB
                     if (index_value >= 0 && index_value < 32) {
                         asm_thumb_ldrb_rlo_rlo_i5(emit->as, REG_RET, reg_base, index_value);
@@ -1561,10 +1564,14 @@ static void emit_native_load_subscr(emit_t *emit) {
                         reg_base = reg_index;
                     }
                     ASM_LOAD8_REG_REG(emit->as, REG_RET, reg_base); // load from (base+index)
+                    #endif
                     break;
                 }
                 case VTYPE_PTR16: {
                     // pointer to 16-bit memory
+                    #ifdef ASM_LOAD16_REG_REG_OFFSET
+                    ASM_LOAD16_REG_REG_OFFSET(emit->as, REG_RET, reg_base, index_value);
+                    #else
                     #if N_THUMB
                     if (index_value >= 0 && index_value < 32) {
                         asm_thumb_ldrh_rlo_rlo_i5(emit->as, REG_RET, reg_base, index_value);
@@ -1589,10 +1596,14 @@ static void emit_native_load_subscr(emit_t *emit) {
                         reg_base = reg_index;
                     }
                     ASM_LOAD16_REG_REG(emit->as, REG_RET, reg_base); // load from (base+2*index)
+                    #endif
                     break;
                 }
                 case VTYPE_PTR32: {
                     // pointer to 32-bit memory
+                    #ifdef ASM_LOAD32_REG_REG_OFFSET
+                    ASM_LOAD32_REG_REG_OFFSET(emit->as, REG_RET, reg_base, index_value);
+                    #else
                     #if N_THUMB
                     if (index_value >= 0 && index_value < 32) {
                         asm_thumb_ldr_rlo_rlo_i5(emit->as, REG_RET, reg_base, index_value);
@@ -1617,6 +1628,7 @@ static void emit_native_load_subscr(emit_t *emit) {
                         reg_base = reg_index;
                     }
                     ASM_LOAD32_REG_REG(emit->as, REG_RET, reg_base); // load from (base+4*index)
+                    #endif
                     break;
                 }
                 default:
@@ -1809,6 +1821,9 @@ static void emit_native_store_subscr(emit_t *emit) {
             switch (vtype_base) {
                 case VTYPE_PTR8: {
                     // pointer to 8-bit memory
+                    #ifdef ASM_STORE8_REG_REG_OFFSET
+                    ASM_STORE8_REG_REG_OFFSET(emit->as, reg_value, reg_base, index_value);
+                    #else
                     // TODO optimise to use thumb strb r1, [r2, r3]
                     #if N_THUMB
                     if (index_value >= 0 && index_value < 32) {
@@ -1837,10 +1852,14 @@ static void emit_native_store_subscr(emit_t *emit) {
                         reg_base = reg_index;
                     }
                     ASM_STORE8_REG_REG(emit->as, reg_value, reg_base); // store value to (base+index)
+                    #endif
                     break;
                 }
                 case VTYPE_PTR16: {
                     // pointer to 16-bit memory
+                    #ifdef ASM_STORE16_REG_REG_OFFSET
+                    ASM_STORE16_REG_REG_OFFSET(emit->as, reg_value, reg_base, index_value);
+                    #else
                     #if N_THUMB
                     if (index_value >= 0 && index_value < 32) {
                         asm_thumb_strh_rlo_rlo_i5(emit->as, reg_value, reg_base, index_value);
@@ -1864,10 +1883,14 @@ static void emit_native_store_subscr(emit_t *emit) {
                         reg_base = reg_index;
                     }
                     ASM_STORE16_REG_REG(emit->as, reg_value, reg_base); // store value to (base+2*index)
+                    #endif
                     break;
                 }
                 case VTYPE_PTR32: {
                     // pointer to 32-bit memory
+                    #ifdef ASM_STORE32_REG_REG_OFFSET
+                    ASM_STORE32_REG_REG_OFFSET(emit->as, reg_value, reg_base, index_value);
+                    #else
                     #if N_THUMB
                     if (index_value >= 0 && index_value < 32) {
                         asm_thumb_str_rlo_rlo_i5(emit->as, reg_value, reg_base, index_value);
@@ -1896,6 +1919,7 @@ static void emit_native_store_subscr(emit_t *emit) {
                         reg_base = reg_index;
                     }
                     ASM_STORE32_REG_REG(emit->as, reg_value, reg_base); // store value to (base+4*index)
+                    #endif
                     break;
                 }
                 default: