mirror of
https://github.com/ptitSeb/box64.git
synced 2025-10-14 02:38:54 +08:00
[DYNAREC] Change method to detect dead code in block (fixes Spintires on Wow64, maybe some others too)
This commit is contained in:
@@ -298,23 +298,35 @@ int Table64(dynarec_native_t *dyn, uint64_t val, int pass)
|
||||
return delta;
|
||||
}
|
||||
|
||||
// Flood-fill reachability from instruction i: mark it and everything
// reachable from it (intra-block jump targets and fall-through
// successors) as alive. Instructions never marked are dead code that
// later passes can skip.
// NOTE(review): the original recursed on i+1 for the fall-through case,
// making stack depth proportional to block length; here the fall-through
// chain is walked iteratively, so recursion depth is bounded by the
// number of intra-block jumps instead.
static void recurse_mark_alive(dynarec_native_t* dyn, int i)
{
    while(i<dyn->size) {
        if(dyn->insts[i].x64.alive)
            return; // already visited: everything past here is done too
        dyn->insts[i].x64.alive = 1;
        // follow an intra-block jump target (jmp_insts==-1 means the
        // jump leaves the block, so there is nothing to mark)
        if(dyn->insts[i].x64.jmp && dyn->insts[i].x64.jmp_insts!=-1)
            recurse_mark_alive(dyn, dyn->insts[i].x64.jmp_insts);
        // continue along the fall-through chain only if this opcode can
        // reach the next one
        if(i<dyn->size-1 && dyn->insts[i].x64.has_next)
            ++i;
        else
            return;
    }
}
|
||||
|
||||
static int sizePredecessors(dynarec_native_t* dyn)
|
||||
{
|
||||
int pred_sz = 1; // to be safe
|
||||
// compute total size of predecessor to allocate the array
|
||||
// mark alive...
|
||||
recurse_mark_alive(dyn, 0);
|
||||
// first compute the jumps
|
||||
int jmpto;
|
||||
for(int i=0; i<dyn->size; ++i) {
|
||||
if(dyn->insts[i].x64.jmp && (dyn->insts[i].x64.jmp_insts!=-1)) {
|
||||
if(dyn->insts[i].x64.alive && dyn->insts[i].x64.jmp && ((jmpto=dyn->insts[i].x64.jmp_insts)!=-1)) {
|
||||
pred_sz++;
|
||||
dyn->insts[dyn->insts[i].x64.jmp_insts].pred_sz++;
|
||||
dyn->insts[jmpto].pred_sz++;
|
||||
}
|
||||
}
|
||||
// remove "has_next" from orphan branch
|
||||
for(int i=0; i<dyn->size-1; ++i) {
|
||||
if(!dyn->insts[i].x64.has_next) {
|
||||
if(dyn->insts[i+1].x64.has_next && !dyn->insts[i+1].pred_sz)
|
||||
dyn->insts[i+1].x64.has_next = 0;
|
||||
}
|
||||
if(dyn->insts[i].x64.has_next && !dyn->insts[i+1].x64.alive)
|
||||
dyn->insts[i].x64.has_next = 0;
|
||||
}
|
||||
// second the "has_next"
|
||||
for(int i=0; i<dyn->size-1; ++i) {
|
||||
@@ -335,7 +347,7 @@ static void fillPredecessors(dynarec_native_t* dyn)
|
||||
dyn->insts[i].pred_sz=0; // reset size, it's reused to actually fill pred[]
|
||||
}
|
||||
// fill pred
|
||||
for(int i=0; i<dyn->size; ++i) {
|
||||
for(int i=0; i<dyn->size; ++i) if(dyn->insts[i].x64.alive) {
|
||||
if((i!=dyn->size-1) && dyn->insts[i].x64.has_next)
|
||||
dyn->insts[i+1].pred[dyn->insts[i+1].pred_sz++] = i;
|
||||
if(dyn->insts[i].x64.jmp && (dyn->insts[i].x64.jmp_insts!=-1)) {
|
||||
|
@@ -190,8 +190,8 @@ uintptr_t native_pass(dynarec_native_t* dyn, uintptr_t addr, int alternate, int
|
||||
ok = 1;
|
||||
// we use the 1st predecessor here
|
||||
int ii = ninst+1;
|
||||
if(ii<dyn->size && !dyn->insts[ii].pred_sz) {
|
||||
while(ii<dyn->size && !dyn->insts[ii].pred_sz) {
|
||||
if(ii<dyn->size && !dyn->insts[ii].x64.alive) {
|
||||
while(ii<dyn->size && !dyn->insts[ii].x64.alive) {
|
||||
// may need to skip opcodes to advance
|
||||
++ninst;
|
||||
NEW_INST;
|
||||
|
@@ -34,11 +34,12 @@ typedef struct instruction_x64_s {
|
||||
int32_t size; // size of the instruction
|
||||
uintptr_t jmp; // offset to jump to, even if conditionnal (0 if not), no relative offset here
|
||||
int jmp_insts; // instuction to jump to (-1 if out of the block)
|
||||
uint8_t jmp_cond; // 1 of conditionnal jump
|
||||
uint8_t has_next; // does this opcode can continue to the next?
|
||||
uint8_t jmp_cond:1; // 1 of conditionnal jump
|
||||
uint8_t has_next:1; // does this opcode can continue to the next?
|
||||
uint8_t has_callret:1; // this instruction have an optimised call setup
|
||||
uint8_t alive:1; // this opcode gets executed (0 if dead code in that block)
|
||||
uint8_t barrier; // next instruction is a jump point, so no optim allowed
|
||||
uint8_t barrier_next; // next instruction needs a barrier
|
||||
uint8_t has_callret; // this instruction have an optimised call setup
|
||||
uint8_t state_flags;// One of SF_XXX state
|
||||
uint8_t use_flags; // 0 or combination of X_?F
|
||||
uint8_t set_flags; // 0 or combination of X_?F
|
||||
|
Reference in New Issue
Block a user