8000 py/asm: Fix x86 and ARM assemblers due to recent code refactoring. · Felipeasg/micropython@93ee660 · GitHub
[go: up one dir, main page]

Skip to content

Commit 93ee660

Browse files
committed
py/asm: Fix x86 and ARM assemblers due to recent code refactoring.
1 parent 155fdc7 commit 93ee660

File tree

2 files changed

+10
-10
lines changed

2 files changed

+10
-10
lines changed

py/asmarm.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@
3939
#define SIGNED_FIT24(x) (((x) & 0xff800000) == 0) || (((x) & 0xff000000) == 0xff000000)
4040

4141
void asm_arm_end_pass(asm_arm_t *as) {
42-
if (as->pass == ASM_ARM_PASS_EMIT) {
42+
if (as->base.pass == ASM_ARM_PASS_EMIT) {
4343
#ifdef __arm__
4444
// flush I- and D-cache
4545
asm volatile(
@@ -333,9 +333,9 @@ void asm_arm_strb_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn) {
333333
}
334334

335335
void asm_arm_bcc_label(asm_arm_t *as, int cond, uint label) {
336-
assert(label < as->max_num_labels);
337-
mp_uint_t dest = as->label_offsets[label];
338-
mp_int_t rel = dest - as->code_offset;
336+
assert(label < as->base.max_num_labels);
337+
mp_uint_t dest = as->base.label_offsets[label];
338+
mp_int_t rel = dest - as->base.code_offset;
339339
rel -= 8; // account for instruction prefetch, PC is 8 bytes ahead of this instruction
340340
rel >>= 2; // in ARM mode the branch target is 32-bit aligned, so the 2 LSB are omitted
341341

py/asmx86.c

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -232,7 +232,7 @@ void asm_x86_mov_i32_to_r32(asm_x86_t *as, int32_t src_i32, int dest_r32) {
232232
// src_i32 is stored as a full word in the code, and aligned to machine-word boundary
233233
void asm_x86_mov_i32_to_r32_aligned(asm_x86_t *as, int32_t src_i32, int dest_r32) {
234234
// mov instruction uses 1 byte for the instruction, before the i32
235-
while (((as->code_offset + 1) & (WORD_SIZE - 1)) != 0) {
235+
while (((as->base.code_offset + 1) & (WORD_SIZE - 1)) != 0) {
236236
asm_x86_nop(as);
237237
}
238238
asm_x86_mov_i32_to_r32(as, src_i32, dest_r32);
@@ -339,13 +339,13 @@ void asm_x86_setcc_r8(asm_x86_t *as, mp_uint_t jcc_type, int dest_r8) {
339339
}
340340

341341
STATIC mp_uint_t get_label_dest(asm_x86_t *as, mp_uint_t label) {
342-
assert(label < as->max_num_labels);
343-
return as->label_offsets[label];
342+
assert(label < as->base.max_num_labels);
343+
return as->base.label_offsets[label];
344344
}
345345

346346
void asm_x86_jmp_label(asm_x86_t *as, mp_uint_t label) {
347347
mp_uint_t dest = get_label_dest(as, label);
348-
mp_int_t rel = dest - as->code_offset;
348+
mp_int_t rel = dest - as->base.code_offset;
349349
if (dest != (mp_uint_t)-1 && rel < 0) {
350350
// is a backwards jump, so we know the size of the jump on the first pass
351351
// calculate rel assuming 8 bit relative jump
@@ -367,7 +367,7 @@ void asm_x86_jmp_label(asm_x86_t *as, mp_uint_t label) {
367367

368368
void asm_x86_jcc_label(asm_x86_t *as, mp_uint_t jcc_type, mp_uint_t label) {
369369
mp_uint_t dest = get_label_dest(as, label);
370-
mp_int_t rel = dest - as->code_offset;
370+
mp_int_t rel = dest - as->base.code_offset;
371371
if (dest != (mp_uint_t)-1 && rel < 0) {
372372
// is a backwards jump, so we know the size of the jump on the first pass
373373
// calculate rel assuming 8 bit relative jump
@@ -499,7 +499,7 @@ void asm_x86_call_ind(asm_x86_t *as, void *ptr, mp_uint_t n_args, int temp_r32)
499499
// this reduces code size by 2 bytes per call, but doesn't seem to speed it up at all
500500
/*
501501
asm_x86_write_byte_1(as, OPCODE_CALL_REL32);
502-
asm_x86_write_word32(as, ptr - (void*)(as->code_base + as->code_offset + 4));
502+
asm_x86_write_word32(as, ptr - (void*)(as->code_base + as->base.code_offset + 4));
503503
*/
504504

505505
// the caller must clean up the stack

0 commit comments

Comments (0)