Mirror of https://github.com/ptitSeb/box64.git (synced 2025-05-09 00:21:32 +08:00)
Commit 82f60fee35
@@ -27,46 +27,46 @@ extern void arm64_lock_read_dq(uint64_t * a, uint64_t* b, void* addr);
 // STLXRD of ADDR, return 0 if ok, 1 if not
 extern int arm64_lock_write_dq(uint64_t a, uint64_t b, void* addr);
 
-// Atomicaly exchange value at [p] with val, return old p
+// Atomically exchange value at [p] with val, return old p
 extern uintptr_t arm64_lock_xchg_dd(void* p, uintptr_t val);
 
-// Atomicaly exchange value at [p] with val, return old p
+// Atomically exchange value at [p] with val, return old p
 extern uint32_t arm64_lock_xchg_d(void* p, uint32_t val);
 
-// Atomicaly exchange value at [p] with val, return old p
+// Atomically exchange value at [p] with val, return old p
 extern uint32_t arm64_lock_xchg_h(void* p, uint32_t val);
 
-// Atomicaly exchange value at [p] with val, return old p
+// Atomically exchange value at [p] with val, return old p
 extern uint32_t arm64_lock_xchg_b(void* p, uint32_t val);
 
-// Atomicaly store value to [p] only if [p] is NULL. Return old [p] value
+// Atomically store value to [p] only if [p] is NULL. Return old [p] value
 extern uint32_t arm64_lock_storeifnull_d(void*p, uint32_t val);
 
-// Atomicaly store value to [p] only if [p] is NULL. Return old [p] value
+// Atomically store value to [p] only if [p] is NULL. Return old [p] value
 extern void* arm64_lock_storeifnull(void*p, void* val);
 
-// Atomicaly store value to [p] only if [p] is ref. Return new [p] value (so val or old)
+// Atomically store value to [p] only if [p] is ref. Return new [p] value (so val or old)
 extern void* arm64_lock_storeifref(void*p, void* val, void* ref);
 
-// Atomicaly store value to [p] only if [p] is ref. Return new [p] value (so val or old)
+// Atomically store value to [p] only if [p] is ref. Return new [p] value (so val or old)
 extern uint32_t arm64_lock_storeifref_d(void*p, uint32_t val, uint32_t ref);
 
-// Atomicaly store value to [p] only if [p] is ref. Return new [p] value (so val or old)
+// Atomically store value to [p] only if [p] is ref. Return new [p] value (so val or old)
 extern uint32_t arm64_lock_storeifref2_d(void*p, uint32_t val, uint32_t ref);
 
-// decrement atomicaly the byte at [p] (but only if p not 0)
+// decrement atomically the byte at [p] (but only if p not 0)
 extern void arm64_lock_decifnot0b(void*p);
 
 // atomic store (with memory barrier)
 extern void arm64_lock_storeb(void*p, uint8_t b);
 
-// increment atomicaly the int at [p] only if it was 0. Return the old value of [p]
+// increment atomically the int at [p] only if it was 0. Return the old value of [p]
 extern int arm64_lock_incif0(void*p);
 
-// decrement atomicaly the int at [p] (but only if p not 0)
+// decrement atomically the int at [p] (but only if p not 0)
 extern int arm64_lock_decifnot0(void*p);
 
 // atomic store (with memory barrier)
 extern void arm64_lock_store(void*p, uint32_t v);
 
 #endif //__ARM64_LOCK__H__
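
As a side note on these declarations: they are implemented in hand-written LDXR/STXR assembly, but the comments fully specify the contracts. A minimal portable sketch of two of them, using GCC/Clang __atomic builtins and hypothetical names (an illustration, not the box64 implementation):

    #include <stdint.h>

    // "Atomically exchange value at [p] with val, return old p"
    // (the contract documented for arm64_lock_xchg_dd).
    static uintptr_t sketch_xchg_dd(void* p, uintptr_t val)
    {
        return __atomic_exchange_n((uintptr_t*)p, val, __ATOMIC_SEQ_CST);
    }

    // "Atomically store value to [p] only if [p] is NULL. Return old [p] value"
    // (the contract documented for arm64_lock_storeifnull).
    static void* sketch_storeifnull(void* p, void* val)
    {
        void* expected = NULL;
        // On success *p becomes val and expected stays NULL (the old value);
        // on failure expected is updated to the current *p. Either way it is
        // the value [p] held before the attempt.
        __atomic_compare_exchange_n((void**)p, &expected, val,
                                    0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
        return expected;
    }
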
@@ -36,7 +36,7 @@ void neoncache_promote_double(dynarec_arm_t* dyn, int ninst, int a);
 // Combine and propagate if needed (pass 1 only)
 int neoncache_combine_st(dynarec_arm_t* dyn, int ninst, int a, int b); // with stack current dyn->n_stack*
 
-// FPU Cache transformation (for loops) // Specific, need to be written par backend
+// FPU Cache transformation (for loops) // Specific, need to be written by backend
 int fpuCacheNeedsTransform(dynarec_arm_t* dyn, int ninst);
 
 // Undo the changes of a neoncache to get the status before the instruction
@@ -34,12 +34,12 @@
 
 // Strong mem emulation helpers
 // Sequence of Read will trigger a DMB on "first" read if strongmem is 2
-// Squence of Write will trigger a DMB on "last" write if strongmem is 1
+// Sequence of Write will trigger a DMB on "last" write if strongmem is 1
 // Opcode will read
 #define SMREAD() if(!dyn->smread && box64_dynarec_strongmem>1) {SMDMB();}
 // Opcode will read with option forced lock
 #define SMREADLOCK(lock) if(lock || (!dyn->smread && box64_dynarec_strongmem>1)) {SMDMB();}
-// Opcode migh read (depend on nextop)
+// Opcode might read (depend on nextop)
 #define SMMIGHTREAD() if(!MODREG) {SMREAD();}
 // Opcode has wrote
 #define SMWRITE() dyn->smwrite=1
@@ -47,7 +47,7 @@
 #define SMWRITE2() if(box64_dynarec_strongmem>1) dyn->smwrite=1
 // Opcode has wrote with option forced lock
 #define SMWRITELOCK(lock) if(lock) {SMDMB();} else dyn->smwrite=1
-// Opcode migh have wrote (depend on nextop)
+// Opcode might have wrote (depend on nextop)
 #define SMMIGHTWRITE() if(!MODREG) {SMWRITE();}
 // Start of sequence
 #define SMSTART() SMEND()
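
These SMREAD/SMWRITE macros implement the lazy part of the strong-memory-model emulation: they only track whether the current sequence is already covered by a barrier, and SMDMB() is emitted when the configured strongmem level requires ordering. A standalone toy model of that bookkeeping (hypothetical names and simplified rules, not the box64 code):

    #include <stdio.h>

    typedef struct {
        int smread;     // a read sequence is already covered by a barrier
        int smwrite;    // writes were emitted since the last barrier
        int strongmem;  // emulation level, like BOX64_DYNAREC_STRONGMEM
    } sm_state_t;

    static void emit_dmb(sm_state_t* st)
    {
        printf("DMB ISH\n");   // stand-in for the real barrier opcode
        st->smread = 1;        // the current sequence is now covered
        st->smwrite = 0;
    }

    // Analogous to SMREAD(): only the first read of a sequence pays for a
    // barrier, and only when strongmem > 1.
    static void opcode_reads(sm_state_t* st)
    {
        if (!st->smread && st->strongmem > 1)
            emit_dmb(st);
    }

    // Analogous to SMWRITE(): just remember that a write happened.
    static void opcode_writes(sm_state_t* st)
    {
        st->smwrite = 1;
    }

    int main(void)
    {
        sm_state_t st = { 0, 0, 2 };
        opcode_writes(&st);   // no barrier yet
        opcode_reads(&st);    // first read at level 2: emits one DMB
        opcode_reads(&st);    // same sequence: no extra barrier
        return 0;
    }
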
@@ -365,7 +365,7 @@
 } \
 gd = i; \
 UBFXx(gd, gb1, gb2, 8);
-//GETSGB signe extend GB, will use i for gd
+//GETSGB sign extend GB, will use i for gd
 #define GETSGB(i) if(rex.rex) { \
 gb1 = xRAX+((nextop&0x38)>>3)+(rex.r<<3); \
 gb2 = 0; \
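
GETGB/GETSGB extract an 8-bit x86 byte register (the AL/AH family) that lives at some bit offset inside a 64-bit host register: UBFXx does an unsigned bit-field extract, and the "S" variant needs the byte sign-extended instead. The same bit manipulation in plain C (hypothetical helpers, for illustration only):

    #include <stdint.h>

    // Unsigned 8-bit field extract, what UBFX does: bits [off, off+8) of reg.
    static uint64_t extract_byte(uint64_t reg, unsigned off)
    {
        return (reg >> off) & 0xffu;
    }

    // Signed variant, what GETSGB needs: same field, then sign-extend.
    static int64_t extract_byte_signed(uint64_t reg, unsigned off)
    {
        return (int64_t)(int8_t)((reg >> off) & 0xffu);
    }
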
@@ -1100,7 +1100,7 @@ void emit_shld32c(dynarec_arm_t* dyn, int ninst, rex_t rex, int s1, int s2, uint
 void emit_pf(dynarec_arm_t* dyn, int ninst, int s1, int s3, int s4);
 
 // x87 helper
-// cache of the local stack counter, to avoid upadte at every call
+// cache of the local stack counter, to avoid update at every call
 void x87_stackcount(dynarec_arm_t* dyn, int ninst, int scratch);
 // fpu push. Return the Dd value to be used
 int x87_do_push(dynarec_arm_t* dyn, int ninst, int s1, int t);
@@ -77,8 +77,8 @@ typedef struct instruction_arm64_s {
 uintptr_t natcall;
 int retn;
 int barrier_maybe;
-flagcache_t f_exit; // flags status at end of intruction
-neoncache_t n; // neoncache at end of intruction (but before poping)
+flagcache_t f_exit; // flags status at end of instruction
+neoncache_t n; // neoncache at end of instruction (but before poping)
 flagcache_t f_entry; // flags status before the instruction begin
 } instruction_arm64_t;
 
@@ -87,12 +87,12 @@ typedef struct dynarec_arm_s {
 int32_t size;
 int32_t cap;
 uintptr_t start; // start of the block
-uint32_t isize; // size in byte of x64 instructions included
+uint32_t isize; // size in bytes of x64 instructions included
 void* block; // memory pointer where next instruction is emitted
 uintptr_t native_start; // start of the arm code
 size_t native_size; // size of emitted arm code
 uintptr_t last_ip; // last set IP in RIP (or NULL if unclean state) TODO: move to a cache something
-uint64_t* table64; // table of 64bits value
+uint64_t* table64; // table of 64bits values
 int table64size;// size of table (will be appended at end of executable code)
 int table64cap;
 uintptr_t tablestart;
@@ -106,7 +106,7 @@ typedef struct dynarec_arm_s {
 dynablock_t* dynablock;
 instsize_t* instsize;
 size_t insts_size; // size of the instruction size array (calculated)
-uint8_t smread; // for strongmem model emulation
+uint8_t smread; // for strongmem model emulation
 uint8_t smwrite; // for strongmem model emulation
 uintptr_t forward; // address of the last end of code while testing forward
 uintptr_t forward_to; // address of the next jump to (to check if everything is ok)
@@ -122,7 +122,7 @@ uintptr_t get_closest_next(dynarec_arm_t *dyn, uintptr_t addr);
 int is_nops(dynarec_arm_t *dyn, uintptr_t addr, int n);
 int is_instructions(dynarec_arm_t *dyn, uintptr_t addr, int n);
 
-int Table64(dynarec_arm_t *dyn, uint64_t val, int pass); // add a value to etable64 (if needed) and gives back the imm19 to use in LDR_literal
+int Table64(dynarec_arm_t *dyn, uint64_t val, int pass); // add a value to table64 (if needed) and gives back the imm19 to use in LDR_literal
 
 void CreateJmpNext(void* addr, void* next);
 
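
Table64, per its (fixed) comment, appends a 64-bit constant to a per-block literal table emitted right after the generated code, so a PC-relative LDR (literal) can load it; the caller gets back what it needs to encode the imm19 displacement. A simplified, self-contained sketch of such a deduplicating literal pool (not the box64 implementation; error handling omitted):

    #include <stdint.h>
    #include <stdlib.h>

    typedef struct {
        uint64_t* vals;
        int size, cap;
    } table64_t;

    // Return the index of val in the pool, adding it only if it is new;
    // the emitter can then turn (index, pool start) into an imm19 offset.
    static int table64_add(table64_t* t, uint64_t val)
    {
        for (int i = 0; i < t->size; ++i)   // reuse an existing entry
            if (t->vals[i] == val)
                return i;
        if (t->size == t->cap) {            // grow the pool (checks omitted)
            t->cap = t->cap ? t->cap * 2 : 16;
            t->vals = realloc(t->vals, t->cap * sizeof(uint64_t));
        }
        t->vals[t->size] = val;
        return t->size++;
    }
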
@@ -209,7 +209,7 @@ static dynablock_t* internalDBGetBlock(x64emu_t* emu, uintptr_t addr, uintptr_t
 // fill the block
 block->x64_addr = (void*)addr;
 if(sigsetjmp(&dynarec_jmpbuf, 1)) {
-printf_log(LOG_INFO, "FillBlock at %p triggered a segfault, cancelling\n", (void*)addr);
+printf_log(LOG_INFO, "FillBlock at %p triggered a segfault, canceling\n", (void*)addr);
 FreeDynablock(block, 0);
 if(need_lock)
 mutex_unlock(&my_context->mutex_dyndump);
@@ -78,7 +78,7 @@ void add_next(dynarec_native_t *dyn, uintptr_t addr) {
 dyn->next[dyn->next_sz++] = addr;
 }
 uintptr_t get_closest_next(dynarec_native_t *dyn, uintptr_t addr) {
-// get closest, but no addresses befores
+// get closest, but no addresses before
 uintptr_t best = 0;
 int i = 0;
 while((i<dyn->next_sz) && (best!=addr)) {
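
The fixed comment describes the search get_closest_next performs: among the recorded candidate addresses, pick the one closest to addr without going below it, so decoding resumes at the nearest not-yet-handled spot. A self-contained sketch of that selection (illustration only, not the actual function):

    #include <stdint.h>

    // Smallest recorded address that is >= addr, or 0 if there is none:
    // "closest, but no addresses before".
    static uintptr_t closest_next(const uintptr_t* next, int n, uintptr_t addr)
    {
        uintptr_t best = 0;
        for (int i = 0; i < n; ++i)
            if (next[i] >= addr && (best == 0 || next[i] < best))
                best = next[i];
        return best;
    }
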
@@ -124,8 +124,8 @@ int is_nops(dynarec_native_t *dyn, uintptr_t addr, int n)
 return 0;
 }
 
-// return size of next instuciton, -1 is unknown
-// not all instrction are setup
+// return size of next instruction, -1 is unknown
+// not all instructions are setup
 int next_instruction(dynarec_native_t *dyn, uintptr_t addr)
 {
 uint8_t opcode = PK(0);
@@ -293,7 +293,7 @@ int Table64(dynarec_native_t *dyn, uint64_t val, int pass)
 static void fillPredecessors(dynarec_native_t* dyn)
 {
 int pred_sz = 1; // to be safe
-// compute total size of predecessor to alocate the array
+// compute total size of predecessor to allocate the array
 // first compute the jumps
 for(int i=0; i<dyn->size; ++i) {
 if(dyn->insts[i].x64.jmp && (dyn->insts[i].x64.jmp_insts!=-1)) {
@@ -301,7 +301,7 @@ static void fillPredecessors(dynarec_native_t* dyn)
 dyn->insts[dyn->insts[i].x64.jmp_insts].pred_sz++;
 }
 }
-// remove "has_next" from orphean branch
+// remove "has_next" from orphan branch
 for(int i=0; i<dyn->size-1; ++i) {
 if(!dyn->insts[i].x64.has_next) {
 if(dyn->insts[i+1].x64.has_next && !dyn->insts[i+1].pred_sz)
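
fillPredecessors first sizes the problem: every instruction with a resolved jump target adds one predecessor to that target, and every fall-through adds one to the following instruction, which gives both the per-instruction counts and the total length of the shared predecessor array to allocate. A condensed sketch of that counting pass (hypothetical struct, not the box64 types):

    typedef struct {
        int jmp_insts;   // index of the jump target, or -1 if unresolved
        int has_next;    // instruction falls through to the next one
        int pred_sz;     // number of predecessors, filled below
    } inst_t;

    static int count_predecessors(inst_t* insts, int size)
    {
        int total = 0;
        for (int i = 0; i < size; ++i) {
            if (insts[i].jmp_insts != -1) {           // edge from a jump
                insts[insts[i].jmp_insts].pred_sz++;
                ++total;
            }
            if (insts[i].has_next && i + 1 < size) {  // fall-through edge
                insts[i + 1].pred_sz++;
                ++total;
            }
        }
        return total;   // size to allocate for the flat predecessor array
    }
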
@@ -335,7 +335,7 @@ static void fillPredecessors(dynarec_native_t* dyn)
 }
 }
 
-// updateNeed goes backward, from last intruction to top
+// updateNeed goes backward, from last instruction to top
 static int updateNeed(dynarec_native_t* dyn, int ninst, uint8_t need) {
 while (ninst>=0) {
 // need pending but instruction is only a subset: remove pend and use an X_ALL instead
@@ -417,7 +417,7 @@ void* CreateEmptyBlock(dynablock_t* block, uintptr_t addr) {
 void* actual_p = (void*)AllocDynarecMap(sz);
 void* p = actual_p + sizeof(void*);
 if(actual_p==NULL) {
-dynarec_log(LOG_INFO, "AllocDynarecMap(%p, %zu) failed, cancelling block\n", block, sz);
+dynarec_log(LOG_INFO, "AllocDynarecMap(%p, %zu) failed, canceling block\n", block, sz);
 CancelBlock64(0);
 return NULL;
 }
@@ -444,11 +444,11 @@ void* FillBlock64(dynablock_t* block, uintptr_t addr, int alternate, int is32bit
 B+8 .. B+15 : 2 Native code for jmpnext (or jmp epilog in case of empty block)
 B+16 .. B+23 : jmpnext (or jmp_epilog) address
 B+24 .. B+31 : empty (in case an architecture needs more than 2 opcodes)
-B+32 .. B+32+sz : instsize (compressed array with each instruction lenght on x64 and native side)
+B+32 .. B+32+sz : instsize (compressed array with each instruction length on x64 and native side)
 
 */
 if(IsInHotPage(addr)) {
-dynarec_log(LOG_DEBUG, "Cancelling dynarec FillBlock on hotpage for %p\n", (void*)addr);
+dynarec_log(LOG_DEBUG, "Canceling dynarec FillBlock on hotpage for %p\n", (void*)addr);
 return NULL;
 }
 if(addr>=box64_nodynarec_start && addr<box64_nodynarec_end) {
@@ -456,7 +456,7 @@ void* FillBlock64(dynablock_t* block, uintptr_t addr, int alternate, int is32bit
 return CreateEmptyBlock(block, addr);
 }
 if(current_helper) {
-dynarec_log(LOG_DEBUG, "Cancelling dynarec FillBlock at %p as anothor one is going on\n", (void*)addr);
+dynarec_log(LOG_DEBUG, "Canceling dynarec FillBlock at %p as another one is going on\n", (void*)addr);
 return NULL;
 }
 // protect the 1st page
@@ -520,7 +520,7 @@
 while (pos>=0)
 pos = updateNeed(&helper, pos, 0);
 
-// pass 1, float optimisations, first pass for flags
+// pass 1, float optimizations, first pass for flags
 native_pass1(&helper, addr, alternate, is32bits);
 
 // pass 2, instruction size
@@ -538,7 +538,7 @@
 void* next = tablestart + helper.table64size*sizeof(uint64_t);
 void* instsize = next + 4*sizeof(void*);
 if(actual_p==NULL) {
-dynarec_log(LOG_INFO, "AllocDynarecMap(%p, %zu) failed, cancelling block\n", block, sz);
+dynarec_log(LOG_INFO, "AllocDynarecMap(%p, %zu) failed, canceling block\n", block, sz);
 CancelBlock64(0);
 return NULL;
 }
@@ -588,7 +588,7 @@
 // all done...
 __clear_cache(actual_p, actual_p+sz); // need to clear the cache before execution...
 block->hash = X31_hash_code(block->x64_addr, block->x64_size);
-// Check if something changed, to abbort if it as
+// Check if something changed, to abort if it is
 if((block->hash != hash)) {
 dynarec_log(LOG_DEBUG, "Warning, a block changed while being processed hash(%p:%ld)=%x/%x\n", block->x64_addr, block->x64_size, block->hash, hash);
 AddHotPage(addr);
@@ -611,7 +611,7 @@
 return NULL;
 }
 if(insts_rsize/sizeof(instsize_t)<helper.insts_size) {
-printf_log(LOG_NONE, "BOX64: Warning, ists_size difference in block between pass2 (%zu) and pass3 (%zu), allocated: %zu\n", oldinstsize, helper.insts_size, insts_rsize/sizeof(instsize_t));
+printf_log(LOG_NONE, "BOX64: Warning, insts_size difference in block between pass2 (%zu) and pass3 (%zu), allocated: %zu\n", oldinstsize, helper.insts_size, insts_rsize/sizeof(instsize_t));
 }
 if(!isprotectedDB(addr, end-addr)) {
 dynarec_log(LOG_DEBUG, "Warning, block unprotected while being processed %p:%ld, marking as need_test\n", block->x64_addr, block->x64_size);
@@ -138,7 +138,7 @@ void native_fistp64(x64emu_t* emu, int64_t* ed)
 
 void native_fistt64(x64emu_t* emu, int64_t* ed)
 {
-// used of memcpy to avoid aligments issues
+// used of memcpy to avoid alignments issues
 int64_t tmp = ST0.d;
 memcpy(ed, &tmp, sizeof(tmp));
 }
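
The comment fixed here points at a common pattern: the destination of the x87 store may not be 8-byte aligned, so the converted value goes into an aligned local first and is then copied with memcpy, which the compiler lowers to whatever unaligned-safe accesses the target supports. A generic illustration of the idiom (not box64 code):

    #include <stdint.h>
    #include <string.h>

    // Store a 64-bit integer to a possibly misaligned destination without
    // undefined behaviour: build it in a local, then memcpy the bytes.
    static void store_i64_unaligned(void* dst, double value)
    {
        int64_t tmp = (int64_t)value;     // truncating conversion, as fistt does
        memcpy(dst, &tmp, sizeof(tmp));   // valid for any alignment of dst
    }
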
@@ -47,7 +47,7 @@ uintptr_t native_pass(dynarec_native_t* dyn, uintptr_t addr, int alternate, int
 ARCH_INIT();
 int reset_n = -1;
 dyn->last_ip = (alternate || (dyn->insts && dyn->insts[0].pred_sz))?0:ip; // RIP is always set at start of block unless there is a predecessor!
-int stopblock = 2+(FindElfAddress(my_context, addr)?0:1); // if block is in elf_memory, it can be extended with bligblocks==2, else it needs 3
+int stopblock = 2+(FindElfAddress(my_context, addr)?0:1); // if block is in elf_memory, it can be extended with box64_dynarec_bigblock==2, else it needs 3
 // ok, go now
 INIT;
 while(ok) {
@@ -74,7 +74,7 @@ uintptr_t native_pass(dynarec_native_t* dyn, uintptr_t addr, int alternate, int
 }
 reset_n = -1;
 } else if(ninst && (dyn->insts[ninst].pred_sz>1 || (dyn->insts[ninst].pred_sz==1 && dyn->insts[ninst].pred[0]!=ninst-1)))
-dyn->last_ip = 0; // reset IP if some jump are comming here
+dyn->last_ip = 0; // reset IP if some jump are coming here
 fpu_propagate_stack(dyn, ninst);
 NEW_INST;
 if(!ninst) {
@@ -166,7 +166,7 @@
 if((dyn->insts[ii].x64.barrier&BARRIER_FULL)==BARRIER_FULL)
 reset_n = -2; // hack to say Barrier!
 else {
-reset_n = getNominalPred(dyn, ii); // may get -1 if no predecessor are availble
+reset_n = getNominalPred(dyn, ii); // may get -1 if no predecessor are available
 if(reset_n==-1) {
 reset_n = -2;
 MESSAGE(LOG_DEBUG, "Warning, Reset Caches mark not found\n");
@@ -296,7 +296,7 @@
 if(need_epilog) {
 NOTEST(x3);
 fpu_purgecache(dyn, ninst, 0, x1, x2, x3);
-jump_to_epilog(dyn, ip, 0, ninst); // no linker here, it's an unknow instruction
+jump_to_epilog(dyn, ip, 0, ninst); // no linker here, it's an unknown instruction
 }
 FINI;
 MESSAGE(LOG_DUMP, "---- END OF BLOCK ---- (%d)\n", dyn->size);
@@ -135,7 +135,7 @@ uintptr_t get_closest_next(dynarec_rv64_t *dyn, uintptr_t addr);
 int is_nops(dynarec_rv64_t *dyn, uintptr_t addr, int n);
 int is_instructions(dynarec_rv64_t *dyn, uintptr_t addr, int n);
 
-int Table64(dynarec_rv64_t *dyn, uint64_t val, int pass); // add a value to etable64 (if needed) and gives back the imm19 to use in LDR_literal
+int Table64(dynarec_rv64_t *dyn, uint64_t val, int pass); // add a value to table64 (if needed) and gives back the imm19 to use in LDR_literal
 
 void CreateJmpNext(void* addr, void* next);
 
@@ -2,49 +2,49 @@
 #define __RV64_LOCK__H__
 #include <stdint.h>
 
-// Atomicaly store val at [p] if old [p] is ref. Return 0 if OK, 1 is not. p needs to be aligned
+// Atomically store val at [p] if old [p] is ref. Return 0 if OK, 1 is not. p needs to be aligned
 extern int rv64_lock_cas_d(void* p, int32_t ref, int32_t val);
 
-// Atomicaly store val at [p] if old [p] is ref. Return 0 if OK, 1 is not. p needs to be aligned
+// Atomically store val at [p] if old [p] is ref. Return 0 if OK, 1 is not. p needs to be aligned
 extern int rv64_lock_cas_dd(void* p, int64_t ref, int64_t val);
 
-// Atomicaly exchange value at [p] with val, return old p
+// Atomically exchange value at [p] with val, return old p
 extern uintptr_t rv64_lock_xchg_dd(void* p, uintptr_t val);
 
-// Atomicaly exchange value at [p] with val, return old p
+// Atomically exchange value at [p] with val, return old p
 extern uint32_t rv64_lock_xchg_d(void* p, uint32_t val);
 
-// Atomicaly store value to [p] only if [p] is NULL. Return old [p] value
+// Atomically store value to [p] only if [p] is NULL. Return old [p] value
 extern uint32_t rv64_lock_storeifnull_d(void*p, uint32_t val);
 
-// Atomicaly store value to [p] only if [p] is NULL. Return old [p] value
+// Atomically store value to [p] only if [p] is NULL. Return old [p] value
 extern void* rv64_lock_storeifnull(void*p, void* val);
 
-// Atomicaly store value to [p] only if [p] is ref. Return new [p] value (so val or old)
+// Atomically store value to [p] only if [p] is ref. Return new [p] value (so val or old)
 extern void* rv64_lock_storeifref(void*p, void* val, void* ref);
 
-// Atomicaly store value to [p] only if [p] is ref. Return new [p] value (so val or old)
+// Atomically store value to [p] only if [p] is ref. Return new [p] value (so val or old)
 extern uint32_t rv64_lock_storeifref_d(void*p, uint32_t val, uint32_t ref);
 
-// Atomicaly store value to [p] only if [p] is ref. Return new [p] value (so val or old)
+// Atomically store value to [p] only if [p] is ref. Return new [p] value (so val or old)
 extern uint32_t rv64_lock_storeifref2_d(void*p, uint32_t val, uint32_t ref);
 
-// decrement atomicaly the byte at [p] (but only if p not 0)
+// decrement atomically the byte at [p] (but only if p not 0)
 extern void rv64_lock_decifnot0b(void*p);
 
 // atomic store (with memory barrier)
 extern void rv64_lock_storeb(void*p, uint8_t b);
 
-// increment atomicaly the int at [p] only if it was 0. Return the old value of [p]
+// increment atomically the int at [p] only if it was 0. Return the old value of [p]
 extern int rv64_lock_incif0(void*p);
 
-// decrement atomicaly the int at [p] (but only if p not 0)
+// decrement atomically the int at [p] (but only if p not 0)
 extern int rv64_lock_decifnot0(void*p);
 
 // atomic store (with memory barrier)
 extern void rv64_lock_store(void*p, uint32_t v);
 
-// (mostly) Atomicaly store val1 and val2 at [p] if old [p] is ref. Return 0 if OK, 1 is not. p needs to be aligned
+// (mostly) Atomically store val1 and val2 at [p] if old [p] is ref. Return 0 if OK, 1 is not. p needs to be aligned
 extern int rv64_lock_cas_dq(void* p, uint64_t ref, uint64_t val1, uint64_t val2);
 
 // Not defined in assembler but in dynarec_rv64_functions
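
For the CAS entries above, the documented contract is "store val at [p] if old [p] is ref, return 0 if OK, 1 if not". A portable sketch of that contract with a GCC/Clang builtin, assuming the same 0/1 return convention (illustration only, not the RV64 assembly):

    #include <stdint.h>

    // Write val to *p only if *p still holds ref; 0 on success, 1 on failure.
    static int sketch_cas_d(void* p, int32_t ref, int32_t val)
    {
        return __atomic_compare_exchange_n((int32_t*)p, &ref, val,
                                           0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
                   ? 0 : 1;
    }
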
@@ -48,7 +48,7 @@ typedef struct x64emu_s {
 mmx87_regs_t mmx[8];
 x87control_t cw;
 x87flags_t sw;
-uint32_t top; // top is part of sw, but it's faster to have it separatly
+uint32_t top; // top is part of sw, but it's faster to have it separately
 int fpu_stack;
 mmxcontrol_t mxcsr;
 fpu_ld_t fpu_ld[8]; // for long double emulation / 80bits fld fst
@@ -93,7 +93,7 @@ typedef struct x64emu_s {
 #ifdef HAVE_TRACE
 sse_regs_t old_xmm[16];
 #endif
-// scratch stack, used for alignement of double and 64bits ints on arm. 200 elements should be enough
+// scratch stack, used for alignment of double and 64bits ints on arm. 200 elements should be enough
 uint64_t scratch[200];
 // local stack, do be deleted when emu is freed
 void* stack2free; // this is the stack to free (can be NULL)
@@ -129,14 +129,14 @@ static inline void fpu_fxam(x64emu_t* emu) {
 return;
 }
 if(isinf(ST0.d))
-{ // TODO: Unsuported and denormal not analysed...
+{ // TODO: Unsupported and denormal not analysed...
 emu->sw.f.F87_C3 = 0;
 emu->sw.f.F87_C2 = 1;
 emu->sw.f.F87_C0 = 1;
 return;
 }
 if(isnan(ST0.d))
-{ // TODO: Unsuported and denormal not analysed...
+{ // TODO: Unsupported and denormal not analysed...
 emu->sw.f.F87_C3 = 0;
 emu->sw.f.F87_C2 = 0;
 emu->sw.f.F87_C0 = 1;
@@ -159,7 +159,7 @@ static inline void fpu_fxam(x64emu_t* emu) {
 static inline void fpu_ftst(x64emu_t* emu) {
 emu->sw.f.F87_C1 = 0;
 if(isinf(ST0.d) || isnan(ST0.d))
-{ // TODO: Unsuported and denormal not analysed...
+{ // TODO: Unsupported and denormal not analysed...
 emu->sw.f.F87_C3 = 1;
 emu->sw.f.F87_C2 = 1;
 emu->sw.f.F87_C0 = 1;
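
The fpu_fxam fragments above set the x87 condition codes C3/C2/C0 from the class of ST0; the visible cases match the FXAM encoding (infinity: C3=0, C2=1, C0=1; NaN: C3=0, C2=0, C0=1). A standalone sketch of that classification for the common classes, using <math.h> (hypothetical helper; empty and unsupported operands are left out, as the TODO notes they are not analysed):

    #include <math.h>

    typedef struct { int c3, c2, c1, c0; } x87_cc_t;

    static x87_cc_t fxam_classify(double st0)
    {
        x87_cc_t cc = { 0, 0, signbit(st0) ? 1 : 0, 0 };  // C1 gets the sign
        if (isnan(st0))      { cc.c3 = 0; cc.c2 = 0; cc.c0 = 1; }  // NaN
        else if (isinf(st0)) { cc.c3 = 0; cc.c2 = 1; cc.c0 = 1; }  // infinity
        else if (st0 == 0.0) { cc.c3 = 1; cc.c2 = 0; cc.c0 = 0; }  // zero
        else                 { cc.c3 = 0; cc.c2 = 1; cc.c0 = 0; }  // other finite values
        return cc;
    }
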