diff --git a/src/dynarec/arm64/dynarec_arm64_66.c b/src/dynarec/arm64/dynarec_arm64_66.c
index 1119f0a12..1e1daeaf0 100644
--- a/src/dynarec/arm64/dynarec_arm64_66.c
+++ b/src/dynarec/arm64/dynarec_arm64_66.c
@@ -706,13 +706,11 @@ uintptr_t dynarec64_66(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
             if(MODREG) {   // reg <= reg? that's an invalid operation
                 DEFAULT;
             } else {                    // mem <= reg
-                addr = geted(dyn, addr, ninst, nextop, &ed, gd, &fixedaddress, NULL, 0, 0, rex, NULL, 0, 0);
-                if(gd!=ed) {    // it's sometimes used as a 3 bytes NOP
-                    if(rex.w)
-                        MOVx_REG(gd, ed);
-                    else
-                        BFIx(gd, ed, 0, 16);
-                }
+                addr = geted(dyn, addr, ninst, nextop, &ed, x3, &fixedaddress, NULL, 0, 0, rex, NULL, 0, 0);
+                if (rex.w)
+                    MOVx_REG(gd, ed);
+                else
+                    BFIx(gd, ed, 0, 16);
             }
             break;
         case 0x8E:
diff --git a/src/dynarec/arm64/dynarec_arm64_f0.c b/src/dynarec/arm64/dynarec_arm64_f0.c
index 491bbe3c9..b90aaa3b9 100644
--- a/src/dynarec/arm64/dynarec_arm64_f0.c
+++ b/src/dynarec/arm64/dynarec_arm64_f0.c
@@ -380,7 +380,7 @@ uintptr_t dynarec64_F0(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
                     // done
                     if (!rex.w) { B_MARK_nocond; }
                     MOVxw_REG(xRAX, x1);
-                    if(!ALIGNED_ATOMICxw && rex.w) {
+                    if (!ALIGNED_ATOMICxw) {
                         B_MARK_nocond;
                     }
                 }
diff --git a/src/dynarec/la64/dynarec_la64_f0.c b/src/dynarec/la64/dynarec_la64_f0.c
index a45508bee..a65947027 100644
--- a/src/dynarec/la64/dynarec_la64_f0.c
+++ b/src/dynarec/la64/dynarec_la64_f0.c
@@ -194,7 +194,7 @@ uintptr_t dynarec64_F0(dynarec_la64_t* dyn, uintptr_t addr, uintptr_t ip, int ni
                     BEQZ_MARKLOCK(x4);
                     if (!rex.w) { B_MARK_nocond; }
                     MVxw(xRAX, x1);
-                    if (rex.w) { B_MARK_nocond; }
+                    B_MARK_nocond;
                     MARK3;
                     // Unaligned
                     ADDI_D(x5, xZR, -(1 << (rex.w + 2)));
diff --git a/src/dynarec/rv64/dynarec_rv64_f0.c b/src/dynarec/rv64/dynarec_rv64_f0.c
index 5f1db176e..544a8c119 100644
--- a/src/dynarec/rv64/dynarec_rv64_f0.c
+++ b/src/dynarec/rv64/dynarec_rv64_f0.c
@@ -267,7 +267,7 @@ uintptr_t dynarec64_F0(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni
                     BNEZ_MARKLOCK(x4);
                     if (!rex.w) { B_MARK_nocond; }
                     MVxw(xRAX, x1);
-                    if (rex.w) { B_MARK_nocond; }
+                    B_MARK_nocond;
                     MARK3;
                     // Unaligned
                     ANDI(x5, wback, -(1 << (rex.w + 2)));