[DYNAREC] Added ranged Dynablock dump (#2570)

Yang Liu 2025-04-24 16:37:24 +08:00 committed by GitHub
parent 4903177bab
commit 926e4b2da8
26 changed files with 141 additions and 114 deletions

View File

@ -390,6 +390,12 @@ Enable DynaRec dump.
* 1: Dump DynaRec blocks.
* 2: Dump DynaRec blocks with some colors.
### BOX64_DYNAREC_DUMP_RANGE
Dump DynaRec blocks in the specified range.
* 0xXXXXXXXX-0xYYYYYYYY: Define the address range in which Dynablocks are dumped (start inclusive, end exclusive); see the sketch below.
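Editor's illustration, not part of the diff: a minimal C sketch of the inclusive-exclusive check this option implies, assuming the range has already been parsed into start/end addresses (the helper name is hypothetical):

#include <stdint.h>

/* Hypothetical helper, for illustration: start is inclusive, end is exclusive.
   An end of 0 means no range was configured, so every block is dumped. */
static int in_dump_range(uintptr_t ip, uintptr_t start, uintptr_t end)
{
    if (!end)
        return 1;
    return (ip >= start) && (ip < end);
}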
### BOX64_DYNAREC_GDBJIT
GDBJIT debugging support, only available in builds configured with `-DGDBJIT=ON`. Enable it with the gdb command: jit-reader-load /usr/local/lib/libbox64gdbjitreader.so.

View File

@ -197,6 +197,13 @@ Enable DynaRec dump.
* 2 : Dump DynaRec blocks with some colors.
=item B<BOX64_DYNAREC_DUMP_RANGE> =I<0xXXXXXXXX-0xYYYYYYYY>
Dump DynaRec blocks in the specified range.
* 0xXXXXXXXX-0xYYYYYYYY : Define the address range in which Dynablocks are dumped (start inclusive, end exclusive).
=item B<BOX64_DYNAREC_FASTNAN> =I<0|1>
Enable or disable fast NaN handling.

View File

@ -325,6 +325,18 @@
}
]
},
{
"name": "BOX64_DYNAREC_DUMP_RANGE",
"description": "Dump DynaRec blocks in the specified range.",
"category": "Debugging",
"options": [
{
"key": "0xXXXXXXXX-0xYYYYYYYY",
"description": "Define the range where dynablock gets dumped (inclusive-exclusive).",
"default": false
}
]
},
{
"name": "BOX64_DYNAREC_FASTNAN",
"description": "Enable or disable fast NaN handling.",

View File

@ -145,8 +145,8 @@ uintptr_t dynarec64_67_AVX(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int
}
else {DEFAULT;}
if((*ok==-1) && (BOX64ENV(dynarec_log)>=LOG_INFO || BOX64DRENV(dynarec_dump) || BOX64ENV(dynarec_missing)==1))
if(!dyn->size || BOX64ENV(dynarec_log)>LOG_INFO || BOX64DRENV(dynarec_dump)) {
if((*ok==-1) && (BOX64ENV(dynarec_log)>=LOG_INFO || dyn->need_dump || BOX64ENV(dynarec_missing)==1))
if(!dyn->size || BOX64ENV(dynarec_log)>LOG_INFO || dyn->need_dump) {
dynarec_log(LOG_NONE, "Dynarec unimplemented AVX opcode size %d prefix %s map %s opcode %02X ", 128<<vex.l, avx_prefix_string(vex.p), avx_map_string(vex.m), opcode);
}
return addr;

View File

@ -69,8 +69,8 @@ uintptr_t dynarec64_AVX(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int ni
addr = dynarec64_AVX_F3_0F38(dyn, addr, ip, ninst, vex, ok, need_epilog);
else {DEFAULT;}
if((*ok==-1) && (BOX64ENV(dynarec_log)>=LOG_INFO || BOX64DRENV(dynarec_dump) || BOX64ENV(dynarec_missing)==1))
if(!dyn->size || BOX64ENV(dynarec_log)>LOG_INFO || BOX64DRENV(dynarec_dump)) {
if((*ok==-1) && (BOX64ENV(dynarec_log)>=LOG_INFO || dyn->need_dump || BOX64ENV(dynarec_missing)==1))
if(!dyn->size || BOX64ENV(dynarec_log)>LOG_INFO || dyn->need_dump) {
dynarec_log(LOG_NONE, "Dynarec unimplemented AVX opcode size %d prefix %s map %s opcode %02X ", 128<<vex.l, avx_prefix_string(vex.p), avx_map_string(vex.m), opcode);
}
return addr;

View File

@ -295,7 +295,7 @@ static void neoncache_promote_double_combined(dynarec_arm_t* dyn, int ninst, int
} else
a = dyn->insts[ninst].n.combined1;
int i = neoncache_get_st_f_i64_noback(dyn, ninst, a);
//if(BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, "neoncache_promote_double_combined, ninst=%d combined%c %d i=%d (stack:%d/%d)\n", ninst, (a == dyn->insts[ninst].n.combined2)?'2':'1', a ,i, dyn->insts[ninst].n.stack_push, -dyn->insts[ninst].n.stack_pop);
//if(dyn->need_dump) dynarec_log(LOG_NONE, "neoncache_promote_double_combined, ninst=%d combined%c %d i=%d (stack:%d/%d)\n", ninst, (a == dyn->insts[ninst].n.combined2)?'2':'1', a ,i, dyn->insts[ninst].n.stack_push, -dyn->insts[ninst].n.stack_pop);
if(i>=0) {
dyn->insts[ninst].n.neoncache[i].t = NEON_CACHE_ST_D;
if(dyn->insts[ninst].x87precision) dyn->need_x87check = 2;
@ -313,20 +313,20 @@ static void neoncache_promote_double_internal(dynarec_arm_t* dyn, int ninst, int
while(ninst>=0) {
a+=dyn->insts[ninst].n.stack_pop; // adjust Stack depth: add pop'd ST (going backward)
int i = neoncache_get_st_f_i64(dyn, ninst, a);
//if(BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, "neoncache_promote_double_internal, ninst=%d, a=%d st=%d:%d, i=%d\n", ninst, a, dyn->insts[ninst].n.stack, dyn->insts[ninst].n.stack_next, i);
//if(dyn->need_dump) dynarec_log(LOG_NONE, "neoncache_promote_double_internal, ninst=%d, a=%d st=%d:%d, i=%d\n", ninst, a, dyn->insts[ninst].n.stack, dyn->insts[ninst].n.stack_next, i);
if(i<0) return;
dyn->insts[ninst].n.neoncache[i].t = NEON_CACHE_ST_D;
if(dyn->insts[ninst].x87precision) dyn->need_x87check = 2;
// check combined propagation too
if(dyn->insts[ninst].n.combined1 || dyn->insts[ninst].n.combined2) {
if(dyn->insts[ninst].n.swapped) {
//if(BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, "neoncache_promote_double_internal, ninst=%d swapped %d/%d vs %d with st %d\n", ninst, dyn->insts[ninst].n.combined1 ,dyn->insts[ninst].n.combined2, a, dyn->insts[ninst].n.stack);
//if(dyn->need_dump) dynarec_log(LOG_NONE, "neoncache_promote_double_internal, ninst=%d swapped %d/%d vs %d with st %d\n", ninst, dyn->insts[ninst].n.combined1 ,dyn->insts[ninst].n.combined2, a, dyn->insts[ninst].n.stack);
if (a==dyn->insts[ninst].n.combined1)
a = dyn->insts[ninst].n.combined2;
else if (a==dyn->insts[ninst].n.combined2)
a = dyn->insts[ninst].n.combined1;
} else {
//if(BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, "neoncache_promote_double_internal, ninst=%d combined %d/%d vs %d with st %d\n", ninst, dyn->insts[ninst].n.combined1 ,dyn->insts[ninst].n.combined2, a, dyn->insts[ninst].n.stack);
//if(dyn->need_dump) dynarec_log(LOG_NONE, "neoncache_promote_double_internal, ninst=%d combined %d/%d vs %d with st %d\n", ninst, dyn->insts[ninst].n.combined1 ,dyn->insts[ninst].n.combined2, a, dyn->insts[ninst].n.stack);
neoncache_promote_double_combined(dyn, ninst, maxinst, a);
}
}
@ -342,20 +342,20 @@ static void neoncache_promote_double_forward(dynarec_arm_t* dyn, int ninst, int
while((ninst!=-1) && (ninst<maxinst) && (a>=0)) {
a+=dyn->insts[ninst].n.stack_push; // adjust Stack depth: add push'd ST (going forward)
if((dyn->insts[ninst].n.combined1 || dyn->insts[ninst].n.combined2) && dyn->insts[ninst].n.swapped) {
//if(BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, "neoncache_promote_double_forward, ninst=%d swapped %d/%d vs %d with st %d\n", ninst, dyn->insts[ninst].n.combined1 ,dyn->insts[ninst].n.combined2, a, dyn->insts[ninst].n.stack);
//if(dyn->need_dump) dynarec_log(LOG_NONE, "neoncache_promote_double_forward, ninst=%d swapped %d/%d vs %d with st %d\n", ninst, dyn->insts[ninst].n.combined1 ,dyn->insts[ninst].n.combined2, a, dyn->insts[ninst].n.stack);
if (a==dyn->insts[ninst].n.combined1)
a = dyn->insts[ninst].n.combined2;
else if (a==dyn->insts[ninst].n.combined2)
a = dyn->insts[ninst].n.combined1;
}
int i = neoncache_get_st_f_i64_noback(dyn, ninst, a);
//if(BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, "neoncache_promote_double_forward, ninst=%d, a=%d st=%d:%d(%d/%d), i=%d\n", ninst, a, dyn->insts[ninst].n.stack, dyn->insts[ninst].n.stack_next, dyn->insts[ninst].n.stack_push, -dyn->insts[ninst].n.stack_pop, i);
//if(dyn->need_dump) dynarec_log(LOG_NONE, "neoncache_promote_double_forward, ninst=%d, a=%d st=%d:%d(%d/%d), i=%d\n", ninst, a, dyn->insts[ninst].n.stack, dyn->insts[ninst].n.stack_next, dyn->insts[ninst].n.stack_push, -dyn->insts[ninst].n.stack_pop, i);
if(i<0) return;
dyn->insts[ninst].n.neoncache[i].t = NEON_CACHE_ST_D;
if(dyn->insts[ninst].x87precision) dyn->need_x87check = 2;
// check combined propagation too
if((dyn->insts[ninst].n.combined1 || dyn->insts[ninst].n.combined2) && !dyn->insts[ninst].n.swapped) {
//if(BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, "neoncache_promote_double_forward, ninst=%d combined %d/%d vs %d with st %d\n", ninst, dyn->insts[ninst].n.combined1 ,dyn->insts[ninst].n.combined2, a, dyn->insts[ninst].n.stack);
//if(dyn->need_dump) dynarec_log(LOG_NONE, "neoncache_promote_double_forward, ninst=%d combined %d/%d vs %d with st %d\n", ninst, dyn->insts[ninst].n.combined1 ,dyn->insts[ninst].n.combined2, a, dyn->insts[ninst].n.stack);
neoncache_promote_double_combined(dyn, ninst, maxinst, a);
}
a-=dyn->insts[ninst].n.stack_pop; // adjust Stack depth: remove pop'd ST (going forward)
@ -371,7 +371,7 @@ static void neoncache_promote_double_forward(dynarec_arm_t* dyn, int ninst, int
void neoncache_promote_double(dynarec_arm_t* dyn, int ninst, int a)
{
int i = neoncache_get_current_st_f_i64(dyn, a);
//if(BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, "neoncache_promote_double, ninst=%d a=%d st=%d i=%d\n", ninst, a, dyn->n.stack, i);
//if(dyn->need_dump) dynarec_log(LOG_NONE, "neoncache_promote_double, ninst=%d a=%d st=%d i=%d\n", ninst, a, dyn->n.stack, i);
if(i<0) return;
dyn->n.neoncache[i].t = NEON_CACHE_ST_D;
dyn->insts[ninst].n.neoncache[i].t = NEON_CACHE_ST_D;
@ -379,13 +379,13 @@ void neoncache_promote_double(dynarec_arm_t* dyn, int ninst, int a)
// check combined propagation too
if(dyn->n.combined1 || dyn->n.combined2) {
if(dyn->n.swapped) {
//if(BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, "neoncache_promote_double, ninst=%d swapped! %d/%d vs %d\n", ninst, dyn->n.combined1 ,dyn->n.combined2, a);
//if(dyn->need_dump) dynarec_log(LOG_NONE, "neoncache_promote_double, ninst=%d swapped! %d/%d vs %d\n", ninst, dyn->n.combined1 ,dyn->n.combined2, a);
if(dyn->n.combined1 == a)
a = dyn->n.combined2;
else if(dyn->n.combined2 == a)
a = dyn->n.combined1;
} else {
//if(BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, "neoncache_promote_double, ninst=%d combined! %d/%d vs %d\n", ninst, dyn->n.combined1 ,dyn->n.combined2, a);
//if(dyn->need_dump) dynarec_log(LOG_NONE, "neoncache_promote_double, ninst=%d combined! %d/%d vs %d\n", ninst, dyn->n.combined1 ,dyn->n.combined2, a);
if(dyn->n.combined1 == a)
neoncache_promote_double(dyn, ninst, dyn->n.combined2);
else if(dyn->n.combined2 == a)
@ -756,7 +756,7 @@ static register_mapping_t register_mappings[] = {
void printf_x64_instruction(dynarec_native_t* dyn, zydis_dec_t* dec, instruction_x64_t* inst, const char* name);
void inst_name_pass3(dynarec_native_t* dyn, int ninst, const char* name, rex_t rex)
{
if (!BOX64DRENV(dynarec_dump) && !BOX64ENV(dynarec_gdbjit) && !BOX64ENV(dynarec_perf_map)) return;
if (!dyn->need_dump && !BOX64ENV(dynarec_gdbjit) && !BOX64ENV(dynarec_perf_map)) return;
static char buf[256];
int length = sprintf(buf, "barrier=%d state=%d/%d/%d(%d:%d->%d:%d), %s=%X/%X, use=%X, need=%X/%X, sm=%d(%d/%d)",
@ -827,13 +827,13 @@ void inst_name_pass3(dynarec_native_t* dyn, int ninst, const char* name, rex_t r
}
}
if (memcmp(dyn->insts[ninst].n.neoncache, dyn->n.neoncache, sizeof(dyn->n.neoncache))) {
length += sprintf(buf + length, " %s(Change:", (BOX64DRENV(dynarec_dump) > 1) ? "\e[1;91m" : "");
length += sprintf(buf + length, " %s(Change:", (dyn->need_dump > 1) ? "\e[1;91m" : "");
for (int ii = 0; ii < 32; ++ii)
if (dyn->insts[ninst].n.neoncache[ii].v != dyn->n.neoncache[ii].v) {
length += sprintf(buf + length, " V%d:%s", ii, getCacheName(dyn->n.neoncache[ii].t, dyn->n.neoncache[ii].n));
length += sprintf(buf + length, "->%s", getCacheName(dyn->insts[ninst].n.neoncache[ii].t, dyn->insts[ninst].n.neoncache[ii].n));
}
length += sprintf(buf + length, ")%s", (BOX64DRENV(dynarec_dump) > 1) ? "\e[0;32m" : "");
length += sprintf(buf + length, ")%s", (dyn->need_dump > 1) ? "\e[0;32m" : "");
}
if (dyn->insts[ninst].n.ymm_used) {
length += sprintf(buf + length, " ymmUsed=%04x", dyn->insts[ninst].n.ymm_used);
@ -850,11 +850,11 @@ void inst_name_pass3(dynarec_native_t* dyn, int ninst, const char* name, rex_t r
if (dyn->insts[ninst].n.combined1 || dyn->insts[ninst].n.combined2) {
length += sprintf(buf + length, " %s:%d/%d", dyn->insts[ninst].n.swapped ? "SWP" : "CMB", dyn->insts[ninst].n.combined1, dyn->insts[ninst].n.combined2);
}
if (BOX64DRENV(dynarec_dump)) {
if (dyn->need_dump) {
printf_x64_instruction(dyn, rex.is32bits ? my_context->dec32 : my_context->dec, &dyn->insts[ninst].x64, name);
dynarec_log(LOG_NONE, "%s%p: %d emitted opcodes, inst=%d, %s%s\n",
(BOX64DRENV(dynarec_dump) > 1) ? "\e[32m" : "",
(void*)(dyn->native_start + dyn->insts[ninst].address), dyn->insts[ninst].size / 4, ninst, buf, (BOX64DRENV(dynarec_dump) > 1) ? "\e[m" : "");
(dyn->need_dump > 1) ? "\e[32m" : "",
(void*)(dyn->native_start + dyn->insts[ninst].address), dyn->insts[ninst].size / 4, ninst, buf, (dyn->need_dump > 1) ? "\e[m" : "");
}
if (BOX64ENV(dynarec_gdbjit)) {
static char buf2[512];

View File

@ -2682,10 +2682,10 @@ void fpu_reset_cache(dynarec_arm_t* dyn, int ninst, int reset_n)
dyn->ymm_zero = dyn->insts[reset_n].ymm0_out;
#endif
#if STEP == 0
if(BOX64DRENV(dynarec_dump) && dyn->n.x87stack) dynarec_log(LOG_NONE, "New x87stack=%d at ResetCache in inst %d with %d\n", dyn->n.x87stack, ninst, reset_n);
if(dyn->need_dump && dyn->n.x87stack) dynarec_log(LOG_NONE, "New x87stack=%d at ResetCache in inst %d with %d\n", dyn->n.x87stack, ninst, reset_n);
#endif
#if defined(HAVE_TRACE) && (STEP>2)
if(BOX64DRENV(dynarec_dump) && 0) //disable for now, need more work
if(dyn->need_dump && 0) //disable for now, need more work
if(memcmp(&dyn->n, &dyn->insts[reset_n].n, sizeof(neoncache_t))) {
MESSAGE(LOG_DEBUG, "Warning, difference in neoncache: reset=");
for(int i=0; i<32; ++i)

View File

@ -1095,7 +1095,7 @@
#else
#define X87_PUSH_OR_FAIL(var, dyn, ninst, scratch, t) \
if ((dyn->n.x87stack==8) || (dyn->n.pushed==8)) { \
if(BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, " Warning, suspicious x87 Push, stack=%d/%d on inst %d\n", dyn->n.x87stack, dyn->n.pushed, ninst); \
if(dyn->need_dump) dynarec_log(LOG_NONE, " Warning, suspicious x87 Push, stack=%d/%d on inst %d\n", dyn->n.x87stack, dyn->n.pushed, ninst); \
dyn->abort = 1; \
return addr; \
} \
@ -1103,7 +1103,7 @@
#define X87_PUSH_EMPTY_OR_FAIL(dyn, ninst, scratch) \
if ((dyn->n.x87stack==8) || (dyn->n.pushed==8)) { \
if(BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, " Warning, suspicious x87 Push, stack=%d/%d on inst %d\n", dyn->n.x87stack, dyn->n.pushed, ninst); \
if(dyn->need_dump) dynarec_log(LOG_NONE, " Warning, suspicious x87 Push, stack=%d/%d on inst %d\n", dyn->n.x87stack, dyn->n.pushed, ninst); \
dyn->abort = 1; \
return addr; \
} \
@ -1111,7 +1111,7 @@
#define X87_POP_OR_FAIL(dyn, ninst, scratch) \
if ((dyn->n.x87stack==-8) || (dyn->n.poped==8)) { \
if(BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, " Warning, suspicious x87 Pop, stack=%d/%d on inst %d\n", dyn->n.x87stack, dyn->n.poped, ninst); \
if(dyn->need_dump) dynarec_log(LOG_NONE, " Warning, suspicious x87 Pop, stack=%d/%d on inst %d\n", dyn->n.x87stack, dyn->n.poped, ninst); \
dyn->abort = 1; \
return addr; \
} \

View File

@ -39,8 +39,8 @@
--dyn->size; \
*ok = -1; \
if (ninst) { dyn->insts[ninst - 1].x64.size = ip - dyn->insts[ninst - 1].x64.addr; } \
if (BOX64ENV(dynarec_log) >= LOG_INFO || BOX64DRENV(dynarec_dump) || BOX64ENV(dynarec_missing) == 1) \
if (!dyn->size || BOX64ENV(dynarec_log) > LOG_INFO || BOX64DRENV(dynarec_dump)) { \
if (BOX64ENV(dynarec_log) >= LOG_INFO || dyn->need_dump || BOX64ENV(dynarec_missing) == 1) \
if (!dyn->size || BOX64ENV(dynarec_log) > LOG_INFO || dyn->need_dump) { \
dynarec_log(LOG_NONE, "%p: Dynarec stopped because of %s Opcode ", (void*)ip, rex.is32bits ? "x86" : "x64"); \
zydis_dec_t* dec = rex.is32bits ? my_context->dec32 : my_context->dec; \
if (dec) { \

View File

@ -5,7 +5,7 @@
addInst(dyn->instsize, &dyn->insts_size, 0, 0);
#define EMIT(A) \
do{ \
if(BOX64DRENV(dynarec_dump)) print_opcode(dyn, ninst, (uint32_t)(A)); \
if(dyn->need_dump) print_opcode(dyn, ninst, (uint32_t)(A)); \
if((uintptr_t)dyn->block<dyn->tablestart) \
*(uint32_t*)(dyn->block) = (uint32_t)(A); \
dyn->block += 4; dyn->native_size += 4; \
@ -14,7 +14,7 @@
#define MESSAGE(A, ...) \
do { \
if (BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, __VA_ARGS__); \
if (dyn->need_dump) dynarec_log(LOG_NONE, __VA_ARGS__); \
} while (0)
#define NEW_INST \
if(ninst) { \
@ -27,4 +27,4 @@
#define TABLE64(A, V) {int val64offset = Table64(dyn, (V), 3); MESSAGE(LOG_DUMP, " Table64: 0x%lx\n", (V)); LDRx_literal(A, val64offset);}
#define FTABLE64(A, V) {mmx87_regs_t v = {.d = V}; int val64offset = Table64(dyn, v.q, 3); MESSAGE(LOG_DUMP, " FTable64: %g\n", v.d); VLDR64_literal(A, val64offset);}
#define CALLRET_RET() do {dyn->callrets[dyn->callret_size].type = 0; dyn->callrets[dyn->callret_size++].offs = dyn->native_size; EMIT(ARCH_NOP); } while(0)
#define CALLRET_LOOP() do {dyn->callrets[dyn->callret_size].type = 1; dyn->callrets[dyn->callret_size++].offs = dyn->native_size; EMIT(ARCH_NOP); } while(0)

View File

@ -173,6 +173,7 @@ typedef struct dynarec_arm_s {
uint8_t abort; // abort the creation of the block
void* gdbjit_block;
uint32_t need_x87check; // needs x87 precision control check if non-null, or 0 if not
uint32_t need_dump; // need to dump the block
} dynarec_arm_t;
void add_next(dynarec_arm_t *dyn, uintptr_t addr);

View File

@ -29,15 +29,15 @@ void printf_x64_instruction(dynarec_native_t* dyn, zydis_dec_t* dec, instruction
if (ip[0] == 0xcc && IsBridgeSignature(ip[1], ip[2])) {
uintptr_t a = *(uintptr_t*)(ip+3);
if(a==0) {
dynarec_log(LOG_NONE, "%s%p: Exit x64emu%s\n", (BOX64DRENV(dynarec_dump)>1)?"\e[01;33m":"", (void*)ip, (BOX64DRENV(dynarec_dump)>1)?"\e[m":"");
dynarec_log(LOG_NONE, "%s%p: Exit x64emu%s\n", (dyn->need_dump>1)?"\e[01;33m":"", (void*)ip, (dyn->need_dump>1)?"\e[m":"");
} else {
dynarec_log(LOG_NONE, "%s%p: Native call to %p%s\n", (BOX64DRENV(dynarec_dump)>1)?"\e[01;33m":"", (void*)ip, (void*)a, (BOX64DRENV(dynarec_dump)>1)?"\e[m":"");
dynarec_log(LOG_NONE, "%s%p: Native call to %p%s\n", (dyn->need_dump>1)?"\e[01;33m":"", (void*)ip, (void*)a, (dyn->need_dump>1)?"\e[m":"");
}
} else {
if(dec) {
dynarec_log(LOG_NONE, "%s%p: %s", (BOX64DRENV(dynarec_dump) > 1) ? "\e[01;33m" : "", ip, DecodeX64Trace(dec, inst->addr, 1));
dynarec_log(LOG_NONE, "%s%p: %s", (dyn->need_dump > 1) ? "\e[01;33m" : "", ip, DecodeX64Trace(dec, inst->addr, 1));
} else {
dynarec_log(LOG_NONE, "%s%p: ", (BOX64DRENV(dynarec_dump)>1)?"\e[01;33m":"", ip);
dynarec_log(LOG_NONE, "%s%p: ", (dyn->need_dump>1)?"\e[01;33m":"", ip);
for(int i=0; i<inst->size; ++i) {
dynarec_log_prefix(0, LOG_NONE, "%02X ", ip[i]);
}
@ -54,7 +54,7 @@ void printf_x64_instruction(dynarec_native_t* dyn, zydis_dec_t* dec, instruction
}
}
// end of line and colors
dynarec_log_prefix(0, LOG_NONE, "%s\n", (BOX64DRENV(dynarec_dump)>1)?"\e[m":"");
dynarec_log_prefix(0, LOG_NONE, "%s\n", (dyn->need_dump>1)?"\e[m":"");
}
}
@ -421,7 +421,7 @@ static void updateYmm0s(dynarec_native_t* dyn, int ninst, int max_ninst_reached)
int can_incr = ninst == max_ninst_reached; // Are we the top-level call?
int ok = 1;
while ((can_incr || ok) && ninst<dyn->size) {
//if(BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, "update ninst=%d (%d): can_incr=%d\n", ninst, max_ninst_reached, can_incr);
//if(dyn->need_dump) dynarec_log(LOG_NONE, "update ninst=%d (%d): can_incr=%d\n", ninst, max_ninst_reached, can_incr);
uint16_t new_purge_ymm, new_ymm0_in, new_ymm0_out;
if (dyn->insts[ninst].pred_sz && dyn->insts[ninst].x64.alive) {
@ -432,18 +432,18 @@ static void updateYmm0s(dynarec_native_t* dyn, int ninst, int max_ninst_reached)
uint16_t ymm0_inter = (ninst && !(dyn->insts[ninst].x64.barrier & BARRIER_FLOAT)) ? ((uint16_t)-1) : (uint16_t)0;
for (int i = 0; i < dyn->insts[ninst].pred_sz; ++i) {
int pred = dyn->insts[ninst].pred[i];
//if(BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, "\twith pred[%d] = %d", i, pred);
//if(dyn->need_dump) dynarec_log(LOG_NONE, "\twith pred[%d] = %d", i, pred);
if (pred >= max_ninst_reached) {
//if(BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, " (skipped)\n");
//if(dyn->need_dump) dynarec_log(LOG_NONE, " (skipped)\n");
continue;
}
int pred_out = dyn->insts[pred].x64.has_callret ? 0 : dyn->insts[pred].ymm0_out;
//if(BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, " ~> %04X\n", pred_out);
//if(dyn->need_dump) dynarec_log(LOG_NONE, " ~> %04X\n", pred_out);
ymm0_union |= pred_out;
ymm0_inter &= pred_out;
}
//if(BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, "\t=> %04X,%04X\n", ymm0_union, ymm0_inter);
//if(dyn->need_dump) dynarec_log(LOG_NONE, "\t=> %04X,%04X\n", ymm0_union, ymm0_inter);
// Notice the default values yield something coherent here (if all pred are after ninst)
new_purge_ymm = ymm0_union & ~ymm0_inter;
new_ymm0_in = ymm0_inter;
@ -465,7 +465,7 @@ static void updateYmm0s(dynarec_native_t* dyn, int ninst, int max_ninst_reached)
int jmp = (dyn->insts[ninst].x64.jmp)?dyn->insts[ninst].x64.jmp_insts:-1;
if((jmp!=-1) && (jmp < max_ninst_reached)) {
//if(BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, "\t! jump to %d\n", jmp);
//if(dyn->need_dump) dynarec_log(LOG_NONE, "\t! jump to %d\n", jmp);
// The jump goes before the last instruction reached, update the destination
// If this is the top level call, this means the jump goes backward (jmp != ninst)
// Otherwise, since we don't update all instructions, we may miss the update (don't use jmp < ninst)
@ -479,7 +479,7 @@ static void updateYmm0s(dynarec_native_t* dyn, int ninst, int max_ninst_reached)
// Also update jumps to before (they are skipped otherwise)
int jmp = (dyn->insts[ninst].x64.jmp)?dyn->insts[ninst].x64.jmp_insts:-1;
if((jmp!=-1) && (jmp < max_ninst_reached)) {
//if(BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, "\t! jump to %d\n", jmp);
//if(dyn->need_dump) dynarec_log(LOG_NONE, "\t! jump to %d\n", jmp);
updateYmm0s(dyn, jmp, max_ninst_reached);
}
} else {
@ -632,7 +632,7 @@ void* FillBlock64(dynablock_t* block, uintptr_t addr, int alternate, int is32bit
// pass 0, addresses, x64 jump addresses, overall size of the block
uintptr_t end = native_pass0(&helper, addr, alternate, is32bits, inst_max);
if(helper.abort) {
if(BOX64DRENV(dynarec_dump) || BOX64ENV(dynarec_log))dynarec_log(LOG_NONE, "Abort dynablock on pass0\n");
if(dyn->need_dump || BOX64ENV(dynarec_log))dynarec_log(LOG_NONE, "Abort dynablock on pass0\n");
CancelBlock64(0);
return NULL;
}
@ -767,7 +767,7 @@ void* FillBlock64(dynablock_t* block, uintptr_t addr, int alternate, int is32bit
// pass 1, float optimizations, first pass for flags
native_pass1(&helper, addr, alternate, is32bits, inst_max);
if(helper.abort) {
if(BOX64DRENV(dynarec_dump) || BOX64ENV(dynarec_log))dynarec_log(LOG_NONE, "Abort dynablock on pass1\n");
if(dyn->need_dump || BOX64ENV(dynarec_log))dynarec_log(LOG_NONE, "Abort dynablock on pass1\n");
CancelBlock64(0);
return NULL;
}
@ -779,7 +779,7 @@ void* FillBlock64(dynablock_t* block, uintptr_t addr, int alternate, int is32bit
helper.callrets = static_callrets;
native_pass2(&helper, addr, alternate, is32bits, inst_max);
if(helper.abort) {
if(BOX64DRENV(dynarec_dump) || BOX64ENV(dynarec_log))dynarec_log(LOG_NONE, "Abort dynablock on pass2\n");
if(dyn->need_dump || BOX64ENV(dynarec_log))dynarec_log(LOG_NONE, "Abort dynablock on pass2\n");
CancelBlock64(0);
return NULL;
}
@ -795,7 +795,7 @@ void* FillBlock64(dynablock_t* block, uintptr_t addr, int alternate, int is32bit
}
if(!imax) return NULL; //that should never happen
--imax;
if(BOX64DRENV(dynarec_dump) || BOX64ENV(dynarec_log))dynarec_log(LOG_NONE, "Dynablock oversized, with %zu (max=%zd), recomputing cutting at %d from %d\n", native_size, MAXBLOCK_SIZE, imax, helper.size);
if(dyn->need_dump || BOX64ENV(dynarec_log))dynarec_log(LOG_NONE, "Dynablock oversized, with %zu (max=%zd), recomputing cutting at %d from %d\n", native_size, MAXBLOCK_SIZE, imax, helper.size);
CancelBlock64(0);
return FillBlock64(block, addr, alternate, is32bits, imax);
}
@ -832,10 +832,10 @@ void* FillBlock64(dynablock_t* block, uintptr_t addr, int alternate, int is32bit
memcpy(helper.callrets, static_callrets, helper.callret_size*sizeof(callret_t));
helper.callret_size = 0;
// pass 3, emit (log emit native opcode)
if(BOX64DRENV(dynarec_dump)) {
dynarec_log(LOG_NONE, "%s%04d|Emitting %zu bytes for %u %s bytes (native=%zu, table64=%zu, instsize=%zu, arch=%zu, callrets=%zu)", (BOX64DRENV(dynarec_dump)>1)?"\e[01;36m":"", GetTID(), helper.native_size, helper.isize, is32bits?"x86":"x64", native_size, helper.table64size*sizeof(uint64_t), insts_rsize, arch_size, callret_size);
if(dyn->need_dump) {
dynarec_log(LOG_NONE, "%s%04d|Emitting %zu bytes for %u %s bytes (native=%zu, table64=%zu, instsize=%zu, arch=%zu, callrets=%zu)", (dyn->need_dump>1)?"\e[01;36m":"", GetTID(), helper.native_size, helper.isize, is32bits?"x86":"x64", native_size, helper.table64size*sizeof(uint64_t), insts_rsize, arch_size, callret_size);
PrintFunctionAddr(helper.start, " => ");
dynarec_log(LOG_NONE, "%s\n", (BOX64DRENV(dynarec_dump)>1)?"\e[m":"");
dynarec_log(LOG_NONE, "%s\n", (dyn->need_dump>1)?"\e[m":"");
}
if (BOX64ENV(dynarec_gdbjit) && (!BOX64ENV(dynarec_gdbjit_end) || (addr >= BOX64ENV(dynarec_gdbjit_start) && addr < BOX64ENV(dynarec_gdbjit_end)))) {
GdbJITNewBlock(helper.gdbjit_block, (GDB_CORE_ADDR)block->actual_block, (GDB_CORE_ADDR)block->actual_block + native_size, helper.start);
@ -849,7 +849,7 @@ void* FillBlock64(dynablock_t* block, uintptr_t addr, int alternate, int is32bit
helper.insts_size = 0; // reset
native_pass3(&helper, addr, alternate, is32bits, inst_max);
if(helper.abort) {
if(BOX64DRENV(dynarec_dump) || BOX64ENV(dynarec_log))dynarec_log(LOG_NONE, "Abort dynablock on pass3\n");
if(dyn->need_dump || BOX64ENV(dynarec_log))dynarec_log(LOG_NONE, "Abort dynablock on pass3\n");
CancelBlock64(0);
return NULL;
}

View File

@ -146,6 +146,9 @@ uintptr_t native_pass(dynarec_native_t* dyn, uintptr_t addr, int alternate, int
fpu_unwind_restore(dyn, ninst, &save);
MESSAGE(LOG_DUMP, "----------\n");
}
if (BOX64DRENV(dynarec_dump) && (!BOX64ENV(dynarec_dump_range_end) || (ip >= BOX64ENV(dynarec_dump_range_start) && ip < BOX64ENV(dynarec_dump_range_end)))) {
dyn->need_dump = BOX64DRENV(dynarec_dump);
}
#ifdef HAVE_TRACE
else if(my_context->dec && BOX64ENV(dynarec_trace)) {
if((trace_end == 0)
@ -242,7 +245,7 @@ uintptr_t native_pass(dynarec_native_t* dyn, uintptr_t addr, int alternate, int
#else
// check if the block needs to be stopped because it's a 00 00 opcode (unreadable is already checked earlier)
if((ok>0) && !dyn->forward && !(*(uint32_t*)addr)) {
if(BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, "Stopping block at %p reason: %s\n", (void*)addr, "Next opcode is 00 00 00 00");
if (dyn->need_dump) dynarec_log(LOG_NONE, "Stopping block at %p reason: %s\n", (void*)addr, "Next opcode is 00 00 00 00");
ok = 0;
need_epilog = 1;
dyn->insts[ninst].x64.need_after |= X_PEND;
@ -251,7 +254,7 @@ uintptr_t native_pass(dynarec_native_t* dyn, uintptr_t addr, int alternate, int
if(dyn->forward_to == addr && !need_epilog && ok>=0) {
// we made it!
reset_n = get_first_jump_addr(dyn, addr);
if(BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, "Forward extend block for %d bytes %s%p -> %p (ninst %d - %d)\n", dyn->forward_to-dyn->forward, dyn->insts[dyn->forward_ninst].x64.has_callret?"(opt. call) ":"", (void*)dyn->forward, (void*)dyn->forward_to, reset_n, ninst);
if (dyn->need_dump) dynarec_log(LOG_NONE, "Forward extend block for %d bytes %s%p -> %p (ninst %d - %d)\n", dyn->forward_to - dyn->forward, dyn->insts[dyn->forward_ninst].x64.has_callret ? "(opt. call) " : "", (void*)dyn->forward, (void*)dyn->forward_to, reset_n, ninst);
if(dyn->insts[dyn->forward_ninst].x64.has_callret && !dyn->insts[dyn->forward_ninst].x64.has_next)
dyn->insts[dyn->forward_ninst].x64.has_next = 1; // this block actually continue
dyn->forward = 0;
@ -261,7 +264,7 @@ uintptr_t native_pass(dynarec_native_t* dyn, uintptr_t addr, int alternate, int
ok = 1; // in case it was 0
} else if ((dyn->forward_to < addr) || ok<=0) {
// something went wrong! rollback
if(BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, "Could not forward extend block for %d bytes %p -> %p\n", dyn->forward_to-dyn->forward, (void*)dyn->forward, (void*)dyn->forward_to);
if (dyn->need_dump) dynarec_log(LOG_NONE, "Could not forward extend block for %d bytes %p -> %p\n", dyn->forward_to - dyn->forward, (void*)dyn->forward, (void*)dyn->forward_to);
ok = 0;
dyn->size = dyn->forward_size;
ninst = dyn->forward_ninst;
@ -287,7 +290,7 @@ uintptr_t native_pass(dynarec_native_t* dyn, uintptr_t addr, int alternate, int
// and pred table is not ready yet
reset_n = get_first_jump(dyn, next);
}
if(BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, "Extend block %p, %s%p -> %p (ninst=%d, jump from %d)\n", dyn, dyn->insts[ninst].x64.has_callret?"(opt. call) ":"", (void*)addr, (void*)next, ninst+1, dyn->insts[ninst].x64.has_callret?ninst:reset_n);
if (dyn->need_dump) dynarec_log(LOG_NONE, "Extend block %p, %s%p -> %p (ninst=%d, jump from %d)\n", dyn, dyn->insts[ninst].x64.has_callret ? "(opt. call) " : "", (void*)addr, (void*)next, ninst + 1, dyn->insts[ninst].x64.has_callret ? ninst : reset_n);
} else if (next && (int)(next - addr) < BOX64ENV(dynarec_forward) && (getProtection(next) & PROT_READ) /*BOX64DRENV(dynarec_bigblock)>=stopblock*/) {
if (!((BOX64DRENV(dynarec_bigblock) < stopblock) && !isJumpTableDefault64((void*)next))) {
if(dyn->forward) {
@ -355,7 +358,7 @@ uintptr_t native_pass(dynarec_native_t* dyn, uintptr_t addr, int alternate, int
}
#endif
MESSAGE(LOG_DEBUG, "Stopping block %p (%d / %d)\n",(void*)init_addr, ninst, dyn->size);
if(!BOX64DRENV(dynarec_dump) && addr>=BOX64ENV(nodynarec_start) && addr<BOX64ENV(nodynarec_end))
if (!dyn->need_dump && addr >= BOX64ENV(nodynarec_start) && addr < BOX64ENV(nodynarec_end))
dynarec_log(LOG_INFO, "Stopping block in no-dynarec zone\n");
--ninst;
if(!dyn->insts[ninst].x64.barrier) {

View File

@ -350,7 +350,7 @@ static register_mapping_t register_mappings[] = {
void printf_x64_instruction(dynarec_native_t* dyn, zydis_dec_t* dec, instruction_x64_t* inst, const char* name);
void inst_name_pass3(dynarec_native_t* dyn, int ninst, const char* name, rex_t rex)
{
if (!BOX64DRENV(dynarec_dump) && !BOX64ENV(dynarec_gdbjit) && !BOX64ENV(dynarec_perf_map)) return;
if (!dyn->need_dump && !BOX64ENV(dynarec_gdbjit) && !BOX64ENV(dynarec_perf_map)) return;
static char buf[256];
int length = sprintf(buf, "barrier=%d state=%d/%d(%d), %s=%X/%X, use=%X, need=%X/%X, fuse=%d, sm=%d(%d/%d)",
@ -395,11 +395,11 @@ void inst_name_pass3(dynarec_native_t* dyn, int ninst, const char* name, rex_t r
if (dyn->insts[ninst].lsx.combined1 || dyn->insts[ninst].lsx.combined2)
length += sprintf(buf + length, " %s:%d/%d", dyn->insts[ninst].lsx.swapped ? "SWP" : "CMB", dyn->insts[ninst].lsx.combined1, dyn->insts[ninst].lsx.combined2);
if (BOX64DRENV(dynarec_dump)) {
if (dyn->need_dump) {
printf_x64_instruction(dyn, rex.is32bits ? my_context->dec32 : my_context->dec, &dyn->insts[ninst].x64, name);
dynarec_log(LOG_NONE, "%s%p: %d emitted opcodes, inst=%d, %s%s\n",
(BOX64DRENV(dynarec_dump) > 1) ? "\e[32m" : "",
(void*)(dyn->native_start + dyn->insts[ninst].address), dyn->insts[ninst].size / 4, ninst, buf, (BOX64DRENV(dynarec_dump) > 1) ? "\e[m" : "");
(dyn->need_dump > 1) ? "\e[32m" : "",
(void*)(dyn->native_start + dyn->insts[ninst].address), dyn->insts[ninst].size / 4, ninst, buf, (dyn->need_dump > 1) ? "\e[m" : "");
}
if (BOX64ENV(dynarec_gdbjit)) {
static char buf2[512];

View File

@ -55,7 +55,7 @@
--dyn->size; \
*ok = -1; \
if (ninst) { dyn->insts[ninst - 1].x64.size = ip - dyn->insts[ninst - 1].x64.addr; } \
if (BOX64ENV(dynarec_log) >= LOG_INFO || BOX64DRENV(dynarec_dump) || BOX64ENV(dynarec_missing) == 1) { \
if (BOX64ENV(dynarec_log) >= LOG_INFO || dyn->need_dump || BOX64ENV(dynarec_missing) == 1) { \
dynarec_log(LOG_NONE, "%p: Dynarec stopped because of %s Opcode ", (void*)ip, rex.is32bits ? "x86" : "x64"); \
zydis_dec_t* dec = rex.is32bits ? my_context->dec32 : my_context->dec; \
if (dec) { \

View File

@ -5,7 +5,7 @@
addInst(dyn->instsize, &dyn->insts_size, 0, 0);
#define EMIT(A) \
do { \
if (BOX64DRENV(dynarec_dump)) print_opcode(dyn, ninst, (uint32_t)(A)); \
if (dyn->need_dump) print_opcode(dyn, ninst, (uint32_t)(A)); \
if ((uintptr_t)dyn->block < dyn->tablestart) \
*(uint32_t*)(dyn->block) = (uint32_t)(A); \
dyn->block += 4; \
@ -15,7 +15,7 @@
#define MESSAGE(A, ...) \
do { \
if (BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, __VA_ARGS__); \
if (dyn->need_dump) dynarec_log(LOG_NONE, __VA_ARGS__); \
} while (0)
#define NEW_INST \
if (ninst) { \

View File

@ -148,6 +148,7 @@ typedef struct dynarec_la64_s {
uint8_t abort;
void* gdbjit_block;
uint32_t need_x87check; // x87 low precision check
uint32_t need_dump; // need to dump the block
} dynarec_la64_t;
void add_next(dynarec_la64_t *dyn, uintptr_t addr);

View File

@ -54,7 +54,7 @@ uintptr_t dynarec64_AVX(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int n
DEFAULT;
}
if ((*ok == -1) && (BOX64ENV(dynarec_log) >= LOG_INFO || BOX64DRENV(dynarec_dump) || BOX64ENV(dynarec_missing) == 1)) {
if ((*ok == -1) && (BOX64ENV(dynarec_log) >= LOG_INFO || dyn->need_dump || BOX64ENV(dynarec_missing) == 1)) {
dynarec_log(LOG_NONE, "Dynarec unimplemented AVX opcode size %d prefix %s map %s opcode %02X ", 128 << vex.l, avx_prefix_string(vex.p), avx_map_string(vex.m), opcode);
}
return addr;

View File

@ -217,7 +217,7 @@ static void extcache_promote_double_combined(dynarec_rv64_t* dyn, int ninst, int
} else
a = dyn->insts[ninst].e.combined1;
int i = extcache_get_st_f_i64_noback(dyn, ninst, a);
// if(BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, "extcache_promote_double_combined, ninst=%d combined%c %d i=%d (stack:%d/%d)\n", ninst, (a == dyn->insts[ninst].e.combined2)?'2':'1', a ,i, dyn->insts[ninst].e.stack_push, -dyn->insts[ninst].e.stack_pop);
// if(dyn->need_dump) dynarec_log(LOG_NONE, "extcache_promote_double_combined, ninst=%d combined%c %d i=%d (stack:%d/%d)\n", ninst, (a == dyn->insts[ninst].e.combined2)?'2':'1', a ,i, dyn->insts[ninst].e.stack_push, -dyn->insts[ninst].e.stack_pop);
if (i >= 0) {
dyn->insts[ninst].e.extcache[i].t = EXT_CACHE_ST_D;
if (dyn->insts[ninst].x87precision) dyn->need_x87check = 2;
@ -237,20 +237,20 @@ static void extcache_promote_double_internal(dynarec_rv64_t* dyn, int ninst, int
while (ninst >= 0) {
a += dyn->insts[ninst].e.stack_pop; // adjust Stack depth: add pop'd ST (going backward)
int i = extcache_get_st_f_i64(dyn, ninst, a);
// if(BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, "extcache_promote_double_internal, ninst=%d, a=%d st=%d:%d, i=%d\n", ninst, a, dyn->insts[ninst].e.stack, dyn->insts[ninst].e.stack_next, i);
// if(dyn->need_dump) dynarec_log(LOG_NONE, "extcache_promote_double_internal, ninst=%d, a=%d st=%d:%d, i=%d\n", ninst, a, dyn->insts[ninst].e.stack, dyn->insts[ninst].e.stack_next, i);
if (i < 0) return;
dyn->insts[ninst].e.extcache[i].t = EXT_CACHE_ST_D;
if (dyn->insts[ninst].x87precision) dyn->need_x87check = 2;
// check combined propagation too
if (dyn->insts[ninst].e.combined1 || dyn->insts[ninst].e.combined2) {
if (dyn->insts[ninst].e.swapped) {
// if(BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, "extcache_promote_double_internal, ninst=%d swapped %d/%d vs %d with st %d\n", ninst, dyn->insts[ninst].e.combined1 ,dyn->insts[ninst].e.combined2, a, dyn->insts[ninst].e.stack);
// if(dyn->need_dump) dynarec_log(LOG_NONE, "extcache_promote_double_internal, ninst=%d swapped %d/%d vs %d with st %d\n", ninst, dyn->insts[ninst].e.combined1 ,dyn->insts[ninst].e.combined2, a, dyn->insts[ninst].e.stack);
if (a == dyn->insts[ninst].e.combined1)
a = dyn->insts[ninst].e.combined2;
else if (a == dyn->insts[ninst].e.combined2)
a = dyn->insts[ninst].e.combined1;
} else {
// if(BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, "extcache_promote_double_internal, ninst=%d combined %d/%d vs %d with st %d\n", ninst, dyn->insts[ninst].e.combined1 ,dyn->insts[ninst].e.combined2, a, dyn->insts[ninst].e.stack);
// if(dyn->need_dump) dynarec_log(LOG_NONE, "extcache_promote_double_internal, ninst=%d combined %d/%d vs %d with st %d\n", ninst, dyn->insts[ninst].e.combined1 ,dyn->insts[ninst].e.combined2, a, dyn->insts[ninst].e.stack);
extcache_promote_double_combined(dyn, ninst, maxinst, a);
}
}
@ -266,20 +266,20 @@ static void extcache_promote_double_forward(dynarec_rv64_t* dyn, int ninst, int
while ((ninst != -1) && (ninst < maxinst) && (a >= 0)) {
a += dyn->insts[ninst].e.stack_push; // adjust Stack depth: add push'd ST (going forward)
if ((dyn->insts[ninst].e.combined1 || dyn->insts[ninst].e.combined2) && dyn->insts[ninst].e.swapped) {
// if(BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, "extcache_promote_double_forward, ninst=%d swapped %d/%d vs %d with st %d\n", ninst, dyn->insts[ninst].e.combined1 ,dyn->insts[ninst].e.combined2, a, dyn->insts[ninst].e.stack);
// if(dyn->need_dump) dynarec_log(LOG_NONE, "extcache_promote_double_forward, ninst=%d swapped %d/%d vs %d with st %d\n", ninst, dyn->insts[ninst].e.combined1 ,dyn->insts[ninst].e.combined2, a, dyn->insts[ninst].e.stack);
if (a == dyn->insts[ninst].e.combined1)
a = dyn->insts[ninst].e.combined2;
else if (a == dyn->insts[ninst].e.combined2)
a = dyn->insts[ninst].e.combined1;
}
int i = extcache_get_st_f_i64_noback(dyn, ninst, a);
// if(BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, "extcache_promote_double_forward, ninst=%d, a=%d st=%d:%d(%d/%d), i=%d\n", ninst, a, dyn->insts[ninst].e.stack, dyn->insts[ninst].e.stack_next, dyn->insts[ninst].e.stack_push, -dyn->insts[ninst].e.stack_pop, i);
// if(dyn->need_dump) dynarec_log(LOG_NONE, "extcache_promote_double_forward, ninst=%d, a=%d st=%d:%d(%d/%d), i=%d\n", ninst, a, dyn->insts[ninst].e.stack, dyn->insts[ninst].e.stack_next, dyn->insts[ninst].e.stack_push, -dyn->insts[ninst].e.stack_pop, i);
if (i < 0) return;
dyn->insts[ninst].e.extcache[i].t = EXT_CACHE_ST_D;
if (dyn->insts[ninst].x87precision) dyn->need_x87check = 2;
// check combined propagation too
if ((dyn->insts[ninst].e.combined1 || dyn->insts[ninst].e.combined2) && !dyn->insts[ninst].e.swapped) {
// if(BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, "extcache_promote_double_forward, ninst=%d combined %d/%d vs %d with st %d\n", ninst, dyn->insts[ninst].e.combined1 ,dyn->insts[ninst].e.combined2, a, dyn->insts[ninst].e.stack);
// if(dyn->need_dump) dynarec_log(LOG_NONE, "extcache_promote_double_forward, ninst=%d combined %d/%d vs %d with st %d\n", ninst, dyn->insts[ninst].e.combined1 ,dyn->insts[ninst].e.combined2, a, dyn->insts[ninst].e.stack);
extcache_promote_double_combined(dyn, ninst, maxinst, a);
}
a -= dyn->insts[ninst].e.stack_pop; // adjust Stack depth: remove pop'd ST (going forward)
@ -295,7 +295,7 @@ static void extcache_promote_double_forward(dynarec_rv64_t* dyn, int ninst, int
void extcache_promote_double(dynarec_rv64_t* dyn, int ninst, int a)
{
int i = extcache_get_current_st_f_i64(dyn, a);
// if(BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, "extcache_promote_double, ninst=%d a=%d st=%d i=%d\n", ninst, a, dyn->e.stack, i);
// if(dyn->need_dump) dynarec_log(LOG_NONE, "extcache_promote_double, ninst=%d a=%d st=%d i=%d\n", ninst, a, dyn->e.stack, i);
if (i < 0) return;
dyn->e.extcache[i].t = EXT_CACHE_ST_D;
dyn->insts[ninst].e.extcache[i].t = EXT_CACHE_ST_D;
@ -303,13 +303,13 @@ void extcache_promote_double(dynarec_rv64_t* dyn, int ninst, int a)
// check combined propagation too
if (dyn->e.combined1 || dyn->e.combined2) {
if (dyn->e.swapped) {
// if(BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, "extcache_promote_double, ninst=%d swapped! %d/%d vs %d\n", ninst, dyn->e.combined1 ,dyn->e.combined2, a);
// if(dyn->need_dump) dynarec_log(LOG_NONE, "extcache_promote_double, ninst=%d swapped! %d/%d vs %d\n", ninst, dyn->e.combined1 ,dyn->e.combined2, a);
if (dyn->e.combined1 == a)
a = dyn->e.combined2;
else if (dyn->e.combined2 == a)
a = dyn->e.combined1;
} else {
// if(BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, "extcache_promote_double, ninst=%d combined! %d/%d vs %d\n", ninst, dyn->e.combined1 ,dyn->e.combined2, a);
// if(dyn->need_dump) dynarec_log(LOG_NONE, "extcache_promote_double, ninst=%d combined! %d/%d vs %d\n", ninst, dyn->e.combined1 ,dyn->e.combined2, a);
if (dyn->e.combined1 == a)
extcache_promote_double(dyn, ninst, dyn->e.combined2);
else if (dyn->e.combined2 == a)
@ -702,7 +702,7 @@ static register_mapping_t register_mappings[] = {
void printf_x64_instruction(dynarec_native_t* dyn, zydis_dec_t* dec, instruction_x64_t* inst, const char* name);
void inst_name_pass3(dynarec_native_t* dyn, int ninst, const char* name, rex_t rex)
{
if (!BOX64DRENV(dynarec_dump) && !BOX64ENV(dynarec_gdbjit) && !BOX64ENV(dynarec_perf_map)) return;
if (!dyn->need_dump && !BOX64ENV(dynarec_gdbjit) && !BOX64ENV(dynarec_perf_map)) return;
static char buf[256];
int length = sprintf(buf, "barrier=%d state=%d/%d(%d), %s=%X/%X, use=%X, need=%X/%X, fuse=%d, sm=%d(%d/%d), sew@entry=%d, sew@exit=%d",
@ -755,11 +755,11 @@ void inst_name_pass3(dynarec_native_t* dyn, int ninst, const char* name, rex_t r
if (dyn->insts[ninst].e.combined1 || dyn->insts[ninst].e.combined2)
length += sprintf(buf + length, " %s:%d/%d", dyn->insts[ninst].e.swapped ? "SWP" : "CMB", dyn->insts[ninst].e.combined1, dyn->insts[ninst].e.combined2);
if (BOX64DRENV(dynarec_dump)) {
if (dyn->need_dump) {
printf_x64_instruction(dyn, rex.is32bits ? my_context->dec32 : my_context->dec, &dyn->insts[ninst].x64, name);
dynarec_log(LOG_NONE, "%s%p: %d emitted opcodes, inst=%d, %s%s\n",
(BOX64DRENV(dynarec_dump) > 1) ? "\e[32m" : "",
(void*)(dyn->native_start + dyn->insts[ninst].address), dyn->insts[ninst].size / 4, ninst, buf, (BOX64DRENV(dynarec_dump) > 1) ? "\e[m" : "");
(dyn->need_dump > 1) ? "\e[32m" : "",
(void*)(dyn->native_start + dyn->insts[ninst].address), dyn->insts[ninst].size / 4, ninst, buf, (dyn->need_dump > 1) ? "\e[m" : "");
}
if (BOX64ENV(dynarec_gdbjit)) {
static char buf2[512];
@ -789,9 +789,9 @@ void print_opcode(dynarec_native_t* dyn, int ninst, uint32_t opcode)
void print_newinst(dynarec_native_t* dyn, int ninst)
{
dynarec_log(LOG_NONE, "%sNew instruction %d, native=%p (0x%x)%s\n",
(BOX64DRENV(dynarec_dump) > 1) ? "\e[4;32m" : "",
(dyn->need_dump > 1) ? "\e[4;32m" : "",
ninst, dyn->block, dyn->native_size,
(BOX64DRENV(dynarec_dump) > 1) ? "\e[m" : "");
(dyn->need_dump > 1) ? "\e[m" : "");
}
// x87 stuffs

View File

@ -2913,10 +2913,10 @@ void fpu_reset_cache(dynarec_rv64_t* dyn, int ninst, int reset_n)
#endif
extcacheUnwind(&dyn->e);
#if STEP == 0
if (BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, "New x87stack=%d\n", dyn->e.x87stack);
if (dyn->need_dump) dynarec_log(LOG_NONE, "New x87stack=%d\n", dyn->e.x87stack);
#endif
#if defined(HAVE_TRACE) && (STEP > 2)
if (BOX64DRENV(dynarec_dump))
if (dyn->need_dump)
if (memcmp(&dyn->e, &dyn->insts[reset_n].e, sizeof(ext_cache_t))) {
MESSAGE(LOG_DEBUG, "Warning, difference in extcache: reset=");
for (int i = 0; i < 24; ++i)

View File

@ -1008,7 +1008,7 @@
#else
#define X87_PUSH_OR_FAIL(var, dyn, ninst, scratch, t) \
if ((dyn->e.x87stack == 8) || (dyn->e.pushed == 8)) { \
if (BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, " Warning, suspicious x87 Push, stack=%d/%d on inst %d\n", dyn->e.x87stack, dyn->e.pushed, ninst); \
if (dyn->need_dump) dynarec_log(LOG_NONE, " Warning, suspicious x87 Push, stack=%d/%d on inst %d\n", dyn->e.x87stack, dyn->e.pushed, ninst); \
dyn->abort = 1; \
return addr; \
} \
@ -1016,7 +1016,7 @@
#define X87_PUSH_EMPTY_OR_FAIL(dyn, ninst, scratch) \
if ((dyn->e.x87stack == 8) || (dyn->e.pushed == 8)) { \
if (BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, " Warning, suspicious x87 Push, stack=%d/%d on inst %d\n", dyn->e.x87stack, dyn->e.pushed, ninst); \
if (dyn->need_dump) dynarec_log(LOG_NONE, " Warning, suspicious x87 Push, stack=%d/%d on inst %d\n", dyn->e.x87stack, dyn->e.pushed, ninst); \
dyn->abort = 1; \
return addr; \
} \
@ -1024,7 +1024,7 @@
#define X87_POP_OR_FAIL(dyn, ninst, scratch) \
if ((dyn->e.x87stack == -8) || (dyn->e.poped == 8)) { \
if (BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, " Warning, suspicious x87 Pop, stack=%d/%d on inst %d\n", dyn->e.x87stack, dyn->e.poped, ninst); \
if (dyn->need_dump) dynarec_log(LOG_NONE, " Warning, suspicious x87 Pop, stack=%d/%d on inst %d\n", dyn->e.x87stack, dyn->e.poped, ninst); \
dyn->abort = 1; \
return addr; \
} \

View File

@ -74,8 +74,8 @@
--dyn->size; \
*ok = -1; \
if (ninst) { dyn->insts[ninst - 1].x64.size = ip - dyn->insts[ninst - 1].x64.addr; } \
if (BOX64ENV(dynarec_log) >= LOG_INFO || BOX64DRENV(dynarec_dump) || BOX64ENV(dynarec_missing) == 1) \
if (!dyn->size || BOX64ENV(dynarec_log) > LOG_INFO || BOX64DRENV(dynarec_dump)) { \
if (BOX64ENV(dynarec_log) >= LOG_INFO || dyn->need_dump || BOX64ENV(dynarec_missing) == 1) \
if (!dyn->size || BOX64ENV(dynarec_log) > LOG_INFO || dyn->need_dump) { \
dynarec_log(LOG_NONE, "%p: Dynarec stopped because of %s Opcode ", (void*)ip, rex.is32bits ? "x86" : "x64"); \
zydis_dec_t* dec = rex.is32bits ? my_context->dec32 : my_context->dec; \
if (dec) { \

View File

@ -5,7 +5,7 @@
addInst(dyn->instsize, &dyn->insts_size, 0, 0);
#define EMIT(A) \
do { \
if (BOX64DRENV(dynarec_dump)) print_opcode(dyn, ninst, (uint32_t)A); \
if (dyn->need_dump) print_opcode(dyn, ninst, (uint32_t)A); \
*(uint32_t*)(dyn->block) = (uint32_t)(A); \
dyn->block += 4; \
dyn->native_size += 4; \
@ -14,14 +14,14 @@
#define MESSAGE(A, ...) \
do { \
if (BOX64DRENV(dynarec_dump)) dynarec_log(LOG_NONE, __VA_ARGS__); \
if (dyn->need_dump) dynarec_log(LOG_NONE, __VA_ARGS__); \
} while (0)
#define NEW_INST \
dyn->vector_sew = dyn->insts[ninst].vector_sew_entry; \
dyn->inst_sew = dyn->vector_sew; \
dyn->inst_vlmul = VECTOR_LMUL1; \
dyn->inst_vl = 0; \
if (BOX64DRENV(dynarec_dump)) print_newinst(dyn, ninst); \
if (dyn->need_dump) print_newinst(dyn, ninst); \
if (ninst) { \
addInst(dyn->instsize, &dyn->insts_size, dyn->insts[ninst - 1].x64.size, dyn->insts[ninst - 1].size / 4); \
dyn->insts[ninst].ymm0_pass3 = dyn->ymm_zero; \
@ -46,7 +46,7 @@
}
#define DEFAULT_VECTOR \
if (BOX64ENV(dynarec_log) >= LOG_INFO || BOX64DRENV(dynarec_dump) || BOX64ENV(dynarec_missing) == 2) { \
if (BOX64ENV(dynarec_log) >= LOG_INFO || dyn->need_dump || BOX64ENV(dynarec_missing) == 2) { \
dynarec_log(LOG_NONE, "%p: Dynarec fallback to scalar version because of %s Opcode ", (void*)ip, rex.is32bits ? "x86" : "x64"); \
zydis_dec_t* dec = rex.is32bits ? my_context->dec32 : my_context->dec; \
if (dec) { \

View File

@ -189,6 +189,7 @@ typedef struct dynarec_rv64_s {
uint8_t inst_vlmul; // vlmul inside current instruction
void* gdbjit_block;
uint32_t need_x87check; // x87 low precision check
uint32_t need_dump; // need to dump the block
} dynarec_rv64_t;
// v0 is hardware wired to vector mask register, which should be always reserved

View File

@ -43,6 +43,7 @@ extern char* ftrace_name;
INTEGER(BOX64_DYNAREC_DIRTY, dynarec_dirty, 0, 0, 2) \
BOOLEAN(BOX64_DYNAREC_DIV0, dynarec_div0, 0) \
INTEGER(BOX64_DYNAREC_DUMP, dynarec_dump, 0, 0, 2) \
STRING(BOX64_DYNAREC_DUMP_RANGE, dynarec_dump_range) \
BOOLEAN(BOX64_DYNAREC_FASTNAN, dynarec_fastnan, 1) \
INTEGER(BOX64_DYNAREC_FASTROUND, dynarec_fastround, 1, 0, 2) \
INTEGER(BOX64_DYNAREC_FORWARD, dynarec_forward, 128, 0, 1024) \
@ -195,6 +196,8 @@ typedef struct box64env_s {
uintptr_t nodynarec_end;
uintptr_t dynarec_gdbjit_start;
uintptr_t dynarec_gdbjit_end;
uintptr_t dynarec_dump_range_start;
uintptr_t dynarec_dump_range_end;
uint64_t is_any_overridden : 1;
uint64_t is_dynarec_perf_map_fd_overridden : 1;

View File

@ -85,6 +85,16 @@ static void addNewEnvVar(const char* s)
box_free(p);
}
static void parseRange(const char* s, uintptr_t* start, uintptr_t* end)
{
if (!s) return;
if (!strchr(s, '-')) return;
if (sscanf(s, "%ld-%ld", start, end) == 2) return;
if (sscanf(s, "0x%lX-0x%lX", start, end) == 2) return;
if (sscanf(s, "0x%lx-0x%lx", start, end) == 2) return;
sscanf(s, "%lx-%lx", start, end);
}
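Editor's illustration, not part of the diff: a hedged usage sketch of the parseRange helper above, fed a hex range of the kind BOX64_DYNAREC_DUMP_RANGE accepts (the addresses are made up):

#include <stdint.h>

/* Hypothetical call site, for illustration only. */
static void example_dump_range(void)
{
    uintptr_t start = 0, end = 0;
    parseRange("0x401000-0x402000", &start, &end);
    /* The "%ld-%ld" attempt stops at the 'x', then the "0x%lX-0x%lX" form
       matches, leaving start == 0x401000 and end == 0x402000 (end is exclusive). */
}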
static void applyCustomRules()
{
if (BOX64ENV(log) == LOG_NEVER) {
@ -113,12 +123,7 @@ static void applyCustomRules()
box64env.dynarec_test_start = 0x0;
box64env.dynarec_test_end = 0x0;
} else if (strchr(box64env.dynarec_test_str, '-')) {
if (sscanf(box64env.dynarec_test_str, "%ld-%ld", &box64env.dynarec_test_start, &box64env.dynarec_test_end) != 2) {
if (sscanf(box64env.dynarec_test_str, "0x%lX-0x%lX", &box64env.dynarec_test_start, &box64env.dynarec_test_end) != 2) {
if (sscanf(box64env.dynarec_test_str, "0x%lx-0x%lx", &box64env.dynarec_test_start, &box64env.dynarec_test_end) != 2)
sscanf(box64env.dynarec_test_str, "%lx-%lx", &box64env.dynarec_test_start, &box64env.dynarec_test_end);
}
}
parseRange(box64env.dynarec_test_str, &box64env.dynarec_test_start, &box64env.dynarec_test_end);
if (box64env.dynarec_test_end > box64env.dynarec_test_start) {
box64env.dynarec_test = 1;
} else {
@ -135,12 +140,7 @@ static void applyCustomRules()
box64env.dynarec_gdbjit_start = 0x0;
box64env.dynarec_gdbjit_end = 0x0;
} else if (strchr(box64env.dynarec_gdbjit_str, '-')) {
if (sscanf(box64env.dynarec_gdbjit_str, "%ld-%ld", &box64env.dynarec_gdbjit_start, &box64env.dynarec_gdbjit_end) != 2) {
if (sscanf(box64env.dynarec_gdbjit_str, "0x%lX-0x%lX", &box64env.dynarec_gdbjit_start, &box64env.dynarec_gdbjit_end) != 2) {
if (sscanf(box64env.dynarec_gdbjit_str, "0x%lx-0x%lx", &box64env.dynarec_gdbjit_start, &box64env.dynarec_gdbjit_end) != 2)
sscanf(box64env.dynarec_gdbjit_str, "%lx-%lx", &box64env.dynarec_gdbjit_start, &box64env.dynarec_gdbjit_end);
}
}
parseRange(box64env.dynarec_gdbjit_str, &box64env.dynarec_gdbjit_start, &box64env.dynarec_gdbjit_end);
if (box64env.dynarec_gdbjit_end > box64env.dynarec_gdbjit_start) {
box64env.dynarec_gdbjit = 2;
} else {
@ -149,18 +149,11 @@ static void applyCustomRules()
}
}
if (box64env.is_nodynarec_overridden) {
if(box64env.nodynarec) {
if (strchr(box64env.nodynarec,'-')) {
if(sscanf(box64env.nodynarec, "%ld-%ld", &box64env.nodynarec_start, &box64env.nodynarec_end)!=2) {
if(sscanf(box64env.nodynarec, "0x%lX-0x%lX", &box64env.nodynarec_start, &box64env.nodynarec_end)!=2) {
if(sscanf(box64env.nodynarec, "0x%lx-0x%lx", &box64env.nodynarec_start, &box64env.nodynarec_end)!=2)
sscanf(box64env.nodynarec, "%lx-%lx", &box64env.nodynarec_start, &box64env.nodynarec_end);
}
}
}
}
}
if (box64env.is_nodynarec_overridden)
parseRange(box64env.nodynarec, &box64env.nodynarec_start, &box64env.nodynarec_end);
if (box64env.is_dynarec_dump_range_overridden)
parseRange(box64env.dynarec_dump_range, &box64env.dynarec_dump_range_start, &box64env.dynarec_dump_range_end);
if (box64env.dynarec_test) {
SET_BOX64ENV(dynarec_fastnan, 0);