text stringlengths 9 39.2M | dir stringlengths 26 295 | lang stringclasses 185 values | created_date timestamp[us] | updated_date timestamp[us] | repo_name stringlengths 1 97 | repo_full_name stringlengths 7 106 | star int64 1k 183k | len_tokens int64 1 13.8M |
|---|---|---|---|---|---|---|---|---|
```objective-c
uint32_t ropMOVD_r_d(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropMOVD_d_r(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropMOVQ_r_q(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropMOVQ_q_r(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
``` | /content/code_sandbox/src/codegen_new/codegen_ops_mmx_loadstore.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 177 |
```c
#if defined __aarch64__ || defined _M_ARM64
# include <stdlib.h>
# include <stdint.h>
# include <86box/86box.h>
# include "cpu.h"
# include <86box/mem.h>
# include "codegen.h"
# include "codegen_allocator.h"
# include "codegen_backend.h"
# include "codegen_backend_arm64_defs.h"
# include "codegen_backend_arm64_ops.h"
# include "codegen_reg.h"
# include "x86.h"
# include "x86seg_common.h"
# include "x86seg.h"
# include "x87_sf.h"
# include "x87.h"
# if defined(__linux__) || defined(__APPLE__)
# include <sys/mman.h>
# include <unistd.h>
# endif
# if defined WIN32 || defined _WIN32 || defined _WIN32
# include <windows.h>
# endif
# include <string.h>
void *codegen_mem_load_byte;
void *codegen_mem_load_word;
void *codegen_mem_load_long;
void *codegen_mem_load_quad;
void *codegen_mem_load_single;
void *codegen_mem_load_double;
void *codegen_mem_store_byte;
void *codegen_mem_store_word;
void *codegen_mem_store_long;
void *codegen_mem_store_quad;
void *codegen_mem_store_single;
void *codegen_mem_store_double;
void *codegen_fp_round;
void *codegen_fp_round_quad;
void *codegen_gpf_rout;
void *codegen_exit_rout;
host_reg_def_t codegen_host_reg_list[CODEGEN_HOST_REGS] = {
{ REG_X19, 0},
{ REG_X20, 0},
{ REG_X21, 0},
{ REG_X22, 0},
{ REG_X23, 0},
{ REG_X24, 0},
{ REG_X25, 0},
{ REG_X26, 0},
{ REG_X27, 0},
{ REG_X28, 0}
};
host_reg_def_t codegen_host_fp_reg_list[CODEGEN_HOST_FP_REGS] = {
{ REG_V8, 0},
{ REG_V9, 0},
{ REG_V10, 0},
{ REG_V11, 0},
{ REG_V12, 0},
{ REG_V13, 0},
{ REG_V14, 0},
{ REG_V15, 0}
};
static void
build_load_routine(codeblock_t *block, int size, int is_float)
{
    uint32_t *branch_offset;     /* forward branch taken when the page is unmapped in readlookup2 */
    uint32_t *misaligned_offset; /* forward branch taken when a multi-byte access is misaligned */
    /*Emits one guest-memory load helper of the given access size (1/2/4/8 bytes).
      In  - W0 = guest linear address
      Out - W0 (integer loads) or REG_V_TEMP (float/double loads) = data, W1 = cpu_state.abrt
      Fast path:
        MOV W1, W0, LSR #12        ; page index
        MOV X2, #readlookup2
        LDR X1, [X2, X1, LSL #3]   ; host page mapping, or -1 if unmapped
        CMP X1, #-1
        BEQ slow
        LDRB W0, [X1, X0]          ; (size/type-appropriate load)
        MOV W1, #0
        RET
      Slow path (unmapped page or misaligned access):
        STP X29, X30, [SP, #-16]!
        BL readmembl               ; (size-appropriate C helper)
        LDRB W1, cpu_state.abrt
        LDP X29, X30, [SP], #16
        RET
    */
    codegen_alloc(block, 80);
    host_arm64_MOV_REG_LSR(block, REG_W1, REG_W0, 12);
    host_arm64_MOVX_IMM(block, REG_X2, (uint64_t) readlookup2);
    host_arm64_LDRX_REG_LSL3(block, REG_X1, REG_X2, REG_X1);
    if (size != 1) {
        /* Multi-byte accesses go to the slow path when not naturally aligned. */
        host_arm64_TST_IMM(block, REG_W0, size - 1);
        misaligned_offset = host_arm64_BNE_(block);
    }
    host_arm64_CMPX_IMM(block, REG_X1, -1);
    branch_offset = host_arm64_BEQ_(block);
    if (size == 1 && !is_float)
        host_arm64_LDRB_REG(block, REG_W0, REG_W1, REG_W0);
    else if (size == 2 && !is_float)
        host_arm64_LDRH_REG(block, REG_W0, REG_W1, REG_W0);
    else if (size == 4 && !is_float)
        host_arm64_LDR_REG(block, REG_W0, REG_W1, REG_W0);
    else if (size == 4 && is_float)
        host_arm64_LDR_REG_F32(block, REG_V_TEMP, REG_W1, REG_W0);
    else if (size == 8)
        host_arm64_LDR_REG_F64(block, REG_V_TEMP, REG_W1, REG_W0);
    host_arm64_MOVZ_IMM(block, REG_W1, 0); /* fast path never aborts: W1 (abrt) = 0 */
    host_arm64_RET(block, REG_X30);
    /* Patch the forward branches to land on the slow path emitted below. */
    host_arm64_branch_set_offset(branch_offset, &block_write_data[block_pos]);
    if (size != 1)
        host_arm64_branch_set_offset(misaligned_offset, &block_write_data[block_pos]);
    host_arm64_STP_PREIDX_X(block, REG_X29, REG_X30, REG_XSP, -16);
    if (size == 1)
        host_arm64_call(block, (void *) readmembl);
    else if (size == 2)
        host_arm64_call(block, (void *) readmemwl);
    else if (size == 4)
        host_arm64_call(block, (void *) readmemll);
    else if (size == 8)
        host_arm64_call(block, (void *) readmemql);
    else
        fatal("build_load_routine - unknown size %i\n", size);
    codegen_direct_read_8(block, REG_W1, &cpu_state.abrt);
    /* The C helpers return in integer registers; move FP results into V_TEMP. */
    if (size == 4 && is_float)
        host_arm64_FMOV_S_W(block, REG_V_TEMP, REG_W0);
    else if (size == 8)
        host_arm64_FMOV_D_Q(block, REG_V_TEMP, REG_X0);
    host_arm64_LDP_POSTIDX_X(block, REG_X29, REG_X30, REG_XSP, 16);
    host_arm64_RET(block, REG_X30);
}
static void
build_store_routine(codeblock_t *block, int size, int is_float)
{
    uint32_t *branch_offset;     /* forward branch taken when the page is unmapped in writelookup2 */
    uint32_t *misaligned_offset; /* forward branch taken when a multi-byte access is misaligned */
    /*Emits one guest-memory store helper of the given access size (1/2/4/8 bytes).
      In  - W0 = guest linear address, W1 (integer) or REG_V_TEMP (float/double) = data
      Out - W1 = cpu_state.abrt
      Fast path:
        MOV W2, W0, LSR #12        ; page index
        MOV X3, #writelookup2
        LDR X2, [X3, X2, LSL #3]   ; host page mapping, or -1 if unmapped
        CMP X2, #-1
        BEQ slow
        STRB W1, [X2, X0]          ; (size/type-appropriate store)
        MOV W1, #0
        RET
      Slow path (unmapped page or misaligned access):
        STP X29, X30, [SP, #-16]!
        BL writemembl              ; (size-appropriate C helper)
        LDRB W1, cpu_state.abrt
        LDP X29, X30, [SP], #16
        RET
    */
    codegen_alloc(block, 80);
    host_arm64_MOV_REG_LSR(block, REG_W2, REG_W0, 12);
    host_arm64_MOVX_IMM(block, REG_X3, (uint64_t) writelookup2);
    host_arm64_LDRX_REG_LSL3(block, REG_X2, REG_X3, REG_X2);
    if (size != 1) {
        /* Multi-byte accesses go to the slow path when not naturally aligned. */
        host_arm64_TST_IMM(block, REG_W0, size - 1);
        misaligned_offset = host_arm64_BNE_(block);
    }
    host_arm64_CMPX_IMM(block, REG_X2, -1);
    branch_offset = host_arm64_BEQ_(block);
    if (size == 1 && !is_float)
        host_arm64_STRB_REG(block, REG_X1, REG_X2, REG_X0);
    else if (size == 2 && !is_float)
        host_arm64_STRH_REG(block, REG_X1, REG_X2, REG_X0);
    else if (size == 4 && !is_float)
        host_arm64_STR_REG(block, REG_X1, REG_X2, REG_X0);
    else if (size == 4 && is_float)
        host_arm64_STR_REG_F32(block, REG_V_TEMP, REG_X2, REG_X0);
    else if (size == 8)
        host_arm64_STR_REG_F64(block, REG_V_TEMP, REG_X2, REG_X0);
    host_arm64_MOVZ_IMM(block, REG_X1, 0); /* fast path never aborts: W1 (abrt) = 0 */
    host_arm64_RET(block, REG_X30);
    /* Patch the forward branches to land on the slow path emitted below. */
    host_arm64_branch_set_offset(branch_offset, &block_write_data[block_pos]);
    if (size != 1)
        host_arm64_branch_set_offset(misaligned_offset, &block_write_data[block_pos]);
    host_arm64_STP_PREIDX_X(block, REG_X29, REG_X30, REG_XSP, -16);
    /* The C helpers take the data in integer registers; move FP operands out of V_TEMP. */
    if (size == 4 && is_float)
        host_arm64_FMOV_W_S(block, REG_W1, REG_V_TEMP);
    else if (size == 8)
        host_arm64_FMOV_Q_D(block, REG_X1, REG_V_TEMP);
    if (size == 1)
        host_arm64_call(block, (void *) writemembl);
    else if (size == 2)
        host_arm64_call(block, (void *) writememwl);
    else if (size == 4)
        host_arm64_call(block, (void *) writememll);
    else if (size == 8)
        host_arm64_call(block, (void *) writememql);
    else
        fatal("build_store_routine - unknown size %i\n", size);
    codegen_direct_read_8(block, REG_W1, &cpu_state.abrt);
    host_arm64_LDP_POSTIDX_X(block, REG_X29, REG_X30, REG_XSP, 16);
    host_arm64_RET(block, REG_X30);
}
static void
build_loadstore_routines(codeblock_t *block)
{
    /* Emit every guest-memory access helper and record its entry point.
       The emission order matches the original hand-unrolled sequence:
       integer byte/word/long/quad, then float single/double, loads before
       stores. */
    static const struct {
        void **entry;    /* global that receives the routine's address */
        int    size;     /* access size in bytes */
        int    is_float; /* non-zero for FP loads/stores via V_TEMP */
    } load_defs[] = {
        { &codegen_mem_load_byte,    1, 0 },
        { &codegen_mem_load_word,    2, 0 },
        { &codegen_mem_load_long,    4, 0 },
        { &codegen_mem_load_quad,    8, 0 },
        { &codegen_mem_load_single,  4, 1 },
        { &codegen_mem_load_double,  8, 1 }
    }, store_defs[] = {
        { &codegen_mem_store_byte,   1, 0 },
        { &codegen_mem_store_word,   2, 0 },
        { &codegen_mem_store_long,   4, 0 },
        { &codegen_mem_store_quad,   8, 0 },
        { &codegen_mem_store_single, 4, 1 },
        { &codegen_mem_store_double, 8, 1 }
    };

    for (int i = 0; i < (int) (sizeof(load_defs) / sizeof(load_defs[0])); i++) {
        *load_defs[i].entry = &block_write_data[block_pos];
        build_load_routine(block, load_defs[i].size, load_defs[i].is_float);
    }
    for (int i = 0; i < (int) (sizeof(store_defs) / sizeof(store_defs[0])); i++) {
        *store_defs[i].entry = &block_write_data[block_pos];
        build_store_routine(block, store_defs[i].size, store_defs[i].is_float);
    }
}
static void
build_fp_round_routine(codeblock_t *block, int is_quad)
{
    /* Emits a helper that converts the double in REG_V_TEMP to a signed
       integer in REG_TEMP (64-bit when is_quad, 32-bit otherwise),
       honouring the guest x87 rounding mode.  cpu_state.new_fp_control
       holds the mode pre-shifted left by 3 (see codegen_set_rounding_mode),
       so it indexes the 8-byte jump-table entries directly. */
    uint64_t *jump_table;
    codegen_alloc(block, 80);
    host_arm64_LDR_IMM_W(block, REG_TEMP, REG_CPUSTATE, (uintptr_t) &cpu_state.new_fp_control - (uintptr_t) &cpu_state);
    host_arm64_ADR(block, REG_TEMP2, 12); /* TEMP2 = address of the jump table below (3 insns ahead) */
    host_arm64_LDR_REG_X(block, REG_TEMP2, REG_TEMP2, REG_TEMP);
    host_arm64_BR(block, REG_TEMP2);
    jump_table = (uint64_t *) &block_write_data[block_pos];
    block_pos += 4 * 8; /* reserve four 8-byte jump-table slots inline in the code stream */
    jump_table[X87_ROUNDING_NEAREST] = (uint64_t) (uintptr_t) &block_write_data[block_pos]; // round to nearest, ties to even
    if (is_quad)
        host_arm64_FCVTNS_X_D(block, REG_TEMP, REG_V_TEMP);
    else
        host_arm64_FCVTNS_W_D(block, REG_TEMP, REG_V_TEMP);
    host_arm64_RET(block, REG_X30);
    jump_table[X87_ROUNDING_UP] = (uint64_t) (uintptr_t) &block_write_data[block_pos]; // round towards +inf
    if (is_quad)
        host_arm64_FCVTPS_X_D(block, REG_TEMP, REG_V_TEMP);
    else
        host_arm64_FCVTPS_W_D(block, REG_TEMP, REG_V_TEMP);
    host_arm64_RET(block, REG_X30);
    jump_table[X87_ROUNDING_DOWN] = (uint64_t) (uintptr_t) &block_write_data[block_pos]; // round towards -inf
    if (is_quad)
        host_arm64_FCVTMS_X_D(block, REG_TEMP, REG_V_TEMP);
    else
        host_arm64_FCVTMS_W_D(block, REG_TEMP, REG_V_TEMP);
    host_arm64_RET(block, REG_X30);
    jump_table[X87_ROUNDING_CHOP] = (uint64_t) (uintptr_t) &block_write_data[block_pos]; // truncate towards zero
    if (is_quad)
        host_arm64_FCVTZS_X_D(block, REG_TEMP, REG_V_TEMP);
    else
        host_arm64_FCVTZS_W_D(block, REG_TEMP, REG_V_TEMP);
    host_arm64_RET(block, REG_X30);
}
void
codegen_backend_init(void)
{
codeblock_t *block;
codeblock = malloc(BLOCK_SIZE * sizeof(codeblock_t));
codeblock_hash = malloc(HASH_SIZE * sizeof(codeblock_t *));
memset(codeblock, 0, BLOCK_SIZE * sizeof(codeblock_t));
memset(codeblock_hash, 0, HASH_SIZE * sizeof(codeblock_t *));
for (int c = 0; c < BLOCK_SIZE; c++) {
codeblock[c].pc = BLOCK_PC_INVALID;
}
block_current = 0;
block_pos = 0;
block = &codeblock[block_current];
block->head_mem_block = codegen_allocator_allocate(NULL, block_current);
block->data = codeblock_allocator_get_ptr(block->head_mem_block);
block_write_data = block->data;
build_loadstore_routines(block);
codegen_fp_round = &block_write_data[block_pos];
build_fp_round_routine(block, 0);
codegen_fp_round_quad = &block_write_data[block_pos];
build_fp_round_routine(block, 1);
codegen_alloc(block, 80);
codegen_gpf_rout = &block_write_data[block_pos];
host_arm64_mov_imm(block, REG_ARG0, 0);
host_arm64_mov_imm(block, REG_ARG1, 0);
host_arm64_call(block, (void *) x86gpf);
codegen_exit_rout = &block_write_data[block_pos];
host_arm64_LDP_POSTIDX_X(block, REG_X19, REG_X20, REG_XSP, 64);
host_arm64_LDP_POSTIDX_X(block, REG_X21, REG_X22, REG_XSP, 16);
host_arm64_LDP_POSTIDX_X(block, REG_X23, REG_X24, REG_XSP, 16);
host_arm64_LDP_POSTIDX_X(block, REG_X25, REG_X26, REG_XSP, 16);
host_arm64_LDP_POSTIDX_X(block, REG_X27, REG_X28, REG_XSP, 16);
host_arm64_LDP_POSTIDX_X(block, REG_X29, REG_X30, REG_XSP, 16);
host_arm64_RET(block, REG_X30);
block_write_data = NULL;
codegen_allocator_clean_blocks(block->head_mem_block);
asm("mrs %0, fpcr\n"
: "=r"(cpu_state.old_fp_control));
}
void
codegen_set_rounding_mode(int mode)
{
    /* Record the guest x87 rounding-control value (0..3) for the FP round
       helpers; it is stored pre-shifted by 3 so it can directly index the
       8-byte jump-table entries in build_fp_round_routine. */
    if ((mode & ~3) != 0)
        fatal("codegen_set_rounding_mode - invalid mode\n");
    cpu_state.new_fp_control = mode << 3;
}
/*R10 - cpu_state*/
void
codegen_backend_prologue(codeblock_t *block)
{
    /* Emits the entry code of a translated block: saves all callee-saved
       registers the backend may clobber and loads REG_CPUSTATE. */
    block_pos = BLOCK_START; /* code starts at BLOCK_START within the block's buffer */
    /*Entry code.  The X19/X20 pair is pushed with a 64-byte frame; the
      extra 48 bytes serve as per-block scratch stack space (e.g. the
      IREG_TOP_diff slot written below).  The matching epilogue pops the
      same layout.*/
    host_arm64_STP_PREIDX_X(block, REG_X29, REG_X30, REG_XSP, -16);
    host_arm64_STP_PREIDX_X(block, REG_X27, REG_X28, REG_XSP, -16);
    host_arm64_STP_PREIDX_X(block, REG_X25, REG_X26, REG_XSP, -16);
    host_arm64_STP_PREIDX_X(block, REG_X23, REG_X24, REG_XSP, -16);
    host_arm64_STP_PREIDX_X(block, REG_X21, REG_X22, REG_XSP, -16);
    host_arm64_STP_PREIDX_X(block, REG_X19, REG_X20, REG_XSP, -64);
    host_arm64_MOVX_IMM(block, REG_CPUSTATE, (uint64_t) &cpu_state);
    if (block->flags & CODEBLOCK_HAS_FPU) {
        /* Cache (runtime TOP - compile-time TOP) on the stack so FP register
           indexing inside the block stays cheap. */
        host_arm64_LDR_IMM_W(block, REG_TEMP, REG_CPUSTATE, (uintptr_t) &cpu_state.TOP - (uintptr_t) &cpu_state);
        host_arm64_SUB_IMM(block, REG_TEMP, REG_TEMP, block->TOP);
        host_arm64_STR_IMM_W(block, REG_TEMP, REG_XSP, IREG_TOP_diff_stack_offset);
    }
}
void
codegen_backend_epilogue(codeblock_t *block)
{
    /* Emits the exit code of a translated block: restores the callee-saved
       registers in the exact reverse layout of codegen_backend_prologue
       (X19/X20 pop the 64-byte frame that included the scratch area) and
       returns to the dispatcher.  codegen_allocator_clean_blocks then
       finalises the block's backing memory (presumably an i-cache/d-cache
       sync - confirm in codegen_allocator). */
    host_arm64_LDP_POSTIDX_X(block, REG_X19, REG_X20, REG_XSP, 64);
    host_arm64_LDP_POSTIDX_X(block, REG_X21, REG_X22, REG_XSP, 16);
    host_arm64_LDP_POSTIDX_X(block, REG_X23, REG_X24, REG_XSP, 16);
    host_arm64_LDP_POSTIDX_X(block, REG_X25, REG_X26, REG_XSP, 16);
    host_arm64_LDP_POSTIDX_X(block, REG_X27, REG_X28, REG_XSP, 16);
    host_arm64_LDP_POSTIDX_X(block, REG_X29, REG_X30, REG_XSP, 16);
    host_arm64_RET(block, REG_X30);
    codegen_allocator_clean_blocks(block->head_mem_block);
}
#endif
``` | /content/code_sandbox/src/codegen_new/codegen_backend_arm64.c | c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 4,110 |
```c
#include <stdint.h>
#include <86box/86box.h>
#include "cpu.h"
#include <86box/mem.h>
#include "x86.h"
#include "x86_flags.h"
#include "x86seg_common.h"
#include "x86seg.h"
#include "386_common.h"
#include "codegen.h"
#include "codegen_accumulate.h"
#include "codegen_ir.h"
#include "codegen_ops.h"
#include "codegen_ops_mmx_cmp.h"
#include "codegen_ops_helpers.h"
/* ropPcmp(func) expands to the recompiler front-end for one MMX compare
   instruction (PCMPEQB/W/D, PCMPGTB/W/D).  The destination MMX register
   comes from bits 3-5 of the modr/m byte in fetchdat.  Register form:
   emit the compare uop directly on two MMX registers.  Memory form:
   compute the effective address, check the segment for readability, load
   the 64-bit source into IREG_temp0_Q, then emit the compare.  Returns
   op_pc + 1, i.e. the address just past the modr/m byte. */
#define ropPcmp(func)                                                                                                 \
    uint32_t rop##func(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
    {                                                                                                                 \
        int dest_reg = (fetchdat >> 3) & 7;                                                                           \
                                                                                                                      \
        uop_MMX_ENTER(ir);                                                                                            \
        codegen_mark_code_present(block, cs + op_pc, 1);                                                              \
        if ((fetchdat & 0xc0) == 0xc0) {                                                                              \
            int src_reg = fetchdat & 7;                                                                               \
            uop_##func(ir, IREG_MM(dest_reg), IREG_MM(dest_reg), IREG_MM(src_reg));                                   \
        } else {                                                                                                      \
            x86seg *target_seg;                                                                                       \
                                                                                                                      \
            uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc);                                                             \
            target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0);                    \
            codegen_check_seg_read(block, ir, target_seg);                                                            \
            uop_MEM_LOAD_REG(ir, IREG_temp0_Q, ireg_seg_base(target_seg), IREG_eaaddr);                               \
            uop_##func(ir, IREG_MM(dest_reg), IREG_MM(dest_reg), IREG_temp0_Q);                                       \
        }                                                                                                             \
                                                                                                                      \
        return op_pc + 1;                                                                                             \
    }
// clang-format off
ropPcmp(PCMPEQB)
ropPcmp(PCMPEQW)
ropPcmp(PCMPEQD)
ropPcmp(PCMPGTB)
ropPcmp(PCMPGTW)
ropPcmp(PCMPGTD)
// clang-format on
``` | /content/code_sandbox/src/codegen_new/codegen_ops_mmx_cmp.c | c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 502 |
```c
#if defined __amd64__ || defined _M_X64
# include <stdint.h>
# include <86box/86box.h>
# include "cpu.h"
# include <86box/mem.h>
# include <86box/plat_unused.h>
# include "codegen.h"
# include "codegen_allocator.h"
# include "codegen_backend.h"
# include "codegen_backend_x86-64_defs.h"
# include "codegen_backend_x86-64_ops_sse.h"
# include "codegen_backend_x86-64_ops_helpers.h"
void
host_x86_ADDPS_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 3);
codegen_addbyte3(block, 0x0f, 0x58, 0xc0 | src_reg | (dst_reg << 3)); /*ADDPS dst_reg, src_reg*/
}
void
host_x86_ADDSD_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 4);
codegen_addbyte4(block, 0xf2, 0x0f, 0x58, 0xc0 | src_reg | (dst_reg << 3));
}
void
host_x86_CMPPS_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg, int type)
{
codegen_alloc_bytes(block, 4);
codegen_addbyte4(block, 0x0f, 0xc2, 0xc0 | src_reg | (dst_reg << 3), type); /*CMPPS dst_reg, src_reg, type*/
}
void
host_x86_COMISD_XREG_XREG(codeblock_t *block, int src_reg_a, int src_reg_b)
{
codegen_alloc_bytes(block, 4);
codegen_addbyte4(block, 0x66, 0x0f, 0x2e, 0xc0 | src_reg_b | (src_reg_a << 3));
}
void
host_x86_CVTDQ2PS_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 3);
codegen_addbyte3(block, 0x0f, 0x5b, 0xc0 | src_reg | (dst_reg << 3)); /*CVTDQ2PS dst_reg, src_reg*/
}
void
host_x86_CVTPS2DQ_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 4);
codegen_addbyte4(block, 0x66, 0x0f, 0x5b, 0xc0 | src_reg | (dst_reg << 3)); /*CVTPS2DQ dst_reg, src_reg*/
}
void
host_x86_CVTSD2SI_REG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 4);
codegen_addbyte4(block, 0xf2, 0x0f, 0x2d, 0xc0 | src_reg | (dst_reg << 3)); /*CVTSD2SI dst_reg, src_reg*/
}
void
host_x86_CVTSD2SI_REG64_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 5);
codegen_addbyte4(block, 0xf2, 0x48, 0x0f, 0x2d); /*CVTSD2SI dst_reg, src_reg*/
codegen_addbyte(block, 0xc0 | src_reg | (dst_reg << 3));
}
void
host_x86_CVTSD2SS_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 4);
codegen_addbyte4(block, 0xf2, 0x0f, 0x5a, 0xc0 | src_reg | (dst_reg << 3));
}
void
host_x86_CVTSI2SD_XREG_REG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 4);
codegen_addbyte4(block, 0xf2, 0x0f, 0x2a, 0xc0 | src_reg | (dst_reg << 3)); /*CVTSI2SD dst_reg, src_reg*/
}
void
host_x86_CVTSI2SS_XREG_REG(codeblock_t *block, int dst_reg, int src_reg)
{
    codegen_alloc_bytes(block, 4);
    codegen_addbyte4(block, 0xf3, 0x0f, 0x2a, 0xc0 | src_reg | (dst_reg << 3)); /*CVTSI2SS dst_reg, src_reg*/
}
void
host_x86_CVTSI2SD_XREG_REG64(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 5);
codegen_addbyte4(block, 0xf2, 0x48, 0x0f, 0x2a); /*CVTSI2SD dst_reg, src_reg*/
codegen_addbyte(block, 0xc0 | src_reg | (dst_reg << 3));
}
void
host_x86_CVTSS2SD_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 4);
codegen_addbyte4(block, 0xf3, 0x0f, 0x5a, 0xc0 | src_reg | (dst_reg << 3));
}
void
host_x86_CVTSS2SD_XREG_BASE_INDEX(codeblock_t *block, int dst_reg, int base_reg, int idx_reg)
{
codegen_alloc_bytes(block, 4);
codegen_addbyte4(block, 0xf3, 0x0f, 0x5a, 0x04 | (dst_reg << 3)); /*CVTSS2SD XMMx, [base_reg + idx_reg]*/
codegen_addbyte(block, base_reg | (idx_reg << 3));
}
void
host_x86_DIVSD_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 4);
codegen_addbyte4(block, 0xf2, 0x0f, 0x5e, 0xc0 | src_reg | (dst_reg << 3));
}
void
host_x86_DIVSS_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 4);
codegen_addbyte4(block, 0xf3, 0x0f, 0x5e, 0xc0 | src_reg | (dst_reg << 3)); /*DIVSS dst_reg, src_reg*/
}
void
host_x86_LDMXCSR(codeblock_t *block, void *p)
{
int offset = (uintptr_t) p - (((uintptr_t) &cpu_state) + 128);
if (offset >= -128 && offset < 127) {
codegen_alloc_bytes(block, 4);
codegen_addbyte4(block, 0x0f, 0xae, 0x50 | REG_EBP, offset); /*LDMXCSR offset[EBP]*/
} else if (offset < (1ULL << 32)) {
codegen_alloc_bytes(block, 7);
codegen_addbyte3(block, 0x0f, 0xae, 0x90 | REG_EBP); /*LDMXCSR offset[EBP]*/
codegen_addlong(block, offset);
} else {
fatal("host_x86_LDMXCSR - out of range %p\n", p);
}
}
void
host_x86_MAXSD_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 4);
codegen_addbyte4(block, 0xf2, 0x0f, 0x5f, 0xc0 | src_reg | (dst_reg << 3)); /*MAXSD dst_reg, src_reg*/
}
void
host_x86_MOVD_BASE_INDEX_XREG(codeblock_t *block, int base_reg, int idx_reg, int src_reg)
{
    /* Store direction: opcode 66 0F 7E writes the low dword of src_reg to memory. */
    codegen_alloc_bytes(block, 5);
    codegen_addbyte4(block, 0x66, 0x0f, 0x7e, 0x04 | (src_reg << 3)); /*MOVD [base_reg + idx_reg], XMMx*/
    codegen_addbyte(block, base_reg | (idx_reg << 3));
}
void
host_x86_MOVD_REG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 4);
codegen_addbyte4(block, 0x66, 0x0f, 0x7e, 0xc0 | dst_reg | (src_reg << 3));
}
void
host_x86_MOVD_XREG_BASE_INDEX(codeblock_t *block, int dst_reg, int base_reg, int idx_reg)
{
codegen_alloc_bytes(block, 5);
codegen_addbyte4(block, 0x66, 0x0f, 0x6e, 0x04 | (dst_reg << 3)); /*MOVD XMMx, [base_reg + idx_reg]*/
codegen_addbyte(block, base_reg | (idx_reg << 3));
}
void
host_x86_MOVD_XREG_REG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 4);
codegen_addbyte4(block, 0x66, 0x0f, 0x6e, 0xc0 | src_reg | (dst_reg << 3));
}
void
host_x86_MOVQ_ABS_XREG(codeblock_t *block, void *p, int src_reg)
{
int offset = (uintptr_t) p - (((uintptr_t) &cpu_state) + 128);
if (src_reg & 8)
fatal("host_x86_MOVQ_ABS_REG reg & 8\n");
if (offset >= -128 && offset < 127) {
codegen_alloc_bytes(block, 5);
codegen_addbyte4(block, 0x66, 0x0f, 0xd6, 0x45 | (src_reg << 3)); /*MOVQ offset[EBP], src_reg*/
codegen_addbyte(block, offset);
} else {
if ((uintptr_t) p >> 32)
fatal("host_x86_MOVQ_ABS_REG - out of range %p\n", p);
codegen_alloc_bytes(block, 9);
codegen_addbyte4(block, 0x66, 0x0f, 0xd6, 0x04 | (src_reg << 3)); /*MOVQ [p], src_reg*/
codegen_addbyte(block, 0x25);
codegen_addlong(block, (uint32_t) (uintptr_t) p);
}
}
void
host_x86_MOVQ_ABS_REG_REG_SHIFT_XREG(codeblock_t *block, uint32_t addr, int src_reg_a, int src_reg_b, int shift, int src_reg)
{
if ((src_reg & 8) || (src_reg_a & 8) || (src_reg_b & 8))
fatal("host_x86_MOVQ_ABS_REG_REG_SHIFT_REG - bad reg\n");
if (addr < 0x80 || addr >= 0xffffff80) {
codegen_alloc_bytes(block, 6);
codegen_addbyte3(block, 0x66, 0x0f, 0xd6); /*MOVQ addr[src_reg_a + src_reg_b << shift], XMMx*/
codegen_addbyte3(block, 0x44 | (src_reg << 3), src_reg_a | (src_reg_b << 3) | (shift << 6), addr & 0xff);
} else {
codegen_alloc_bytes(block, 9);
codegen_addbyte3(block, 0x66, 0x0f, 0xd6); /*MOVQ addr[src_reg_a + src_reg_b << shift], XMMx*/
codegen_addbyte2(block, 0x84 | (src_reg << 3), src_reg_a | (src_reg_b << 3) | (shift << 6));
codegen_addlong(block, addr);
}
}
void
host_x86_MOVQ_BASE_INDEX_XREG(codeblock_t *block, int base_reg, int idx_reg, int src_reg)
{
    /* Store direction: opcode 66 0F D6 writes the low qword of src_reg to memory. */
    codegen_alloc_bytes(block, 5);
    codegen_addbyte4(block, 0x66, 0x0f, 0xd6, 0x04 | (src_reg << 3)); /*MOVQ [base_reg + idx_reg], XMMx*/
    codegen_addbyte(block, base_reg | (idx_reg << 3));
}
void
host_x86_MOVQ_BASE_OFFSET_XREG(codeblock_t *block, int base_reg, int offset, int src_reg)
{
if (offset >= -128 && offset < 127) {
if (base_reg == REG_RSP) {
codegen_alloc_bytes(block, 6);
codegen_addbyte4(block, 0x66, 0x0f, 0xd6, 0x44 | (src_reg << 3)); /*MOVQ [RSP + offset], XMMx*/
codegen_addbyte2(block, 0x24, offset);
} else {
codegen_alloc_bytes(block, 5);
codegen_addbyte4(block, 0x66, 0x0f, 0xd6, 0x40 | base_reg | (src_reg << 3)); /*MOVQ [base_reg + offset], XMMx*/
codegen_addbyte(block, offset);
}
} else
fatal("MOVQ_BASE_OFFSET_XREG - offset %i\n", offset);
}
void
host_x86_MOVQ_XREG_ABS(codeblock_t *block, int dst_reg, void *p)
{
    /* Load direction: opcode F3 0F 7E reads a qword from memory into dst_reg.
       Prefers the RBP-relative disp8 form (RBP implied to hold
       &cpu_state + 128 by the offset computation), else a 32-bit absolute
       address. */
    int offset = (uintptr_t) p - (((uintptr_t) &cpu_state) + 128);
    if (dst_reg & 8)
        fatal("host_x86_MOVQ_REG_ABS reg & 8\n");
    if (offset >= -128 && offset < 127) {
        codegen_alloc_bytes(block, 5);
        codegen_addbyte4(block, 0xf3, 0x0f, 0x7e, 0x45 | (dst_reg << 3)); /*MOVQ dst_reg, offset[EBP]*/
        codegen_addbyte(block, offset);
    } else {
        if ((uintptr_t) p >> 32)
            fatal("host_x86_MOVQ_REG_ABS - out of range %p\n", p);
        codegen_alloc_bytes(block, 9);
        codegen_addbyte4(block, 0xf3, 0x0f, 0x7e, 0x04 | (dst_reg << 3)); /*MOVQ dst_reg, [p]*/
        codegen_addbyte(block, 0x25);
        codegen_addlong(block, (uint32_t) (uintptr_t) p);
    }
}
void
host_x86_MOVQ_XREG_ABS_REG_REG_SHIFT(codeblock_t *block, int dst_reg, uint32_t addr, int src_reg_a, int src_reg_b, int shift)
{
if ((dst_reg & 8) || (src_reg_a & 8) || (src_reg_b & 8))
fatal("host_x86_MOVQ_REG_ABS_REG_REG_SHIFT - bad reg\n");
if (addr < 0x80 || addr >= 0xffffff80) {
codegen_alloc_bytes(block, 6);
codegen_addbyte3(block, 0xf3, 0x0f, 0x7e); /*MOVQ XMMx, addr[src_reg_a + src_reg_b << shift]*/
codegen_addbyte3(block, 0x44 | (dst_reg << 3), src_reg_a | (src_reg_b << 3) | (shift << 6), addr & 0xff);
} else {
codegen_alloc_bytes(block, 9);
codegen_addbyte3(block, 0xf3, 0x0f, 0x7e); /*MOVQ XMMx, addr[src_reg_a + src_reg_b << shift]*/
codegen_addbyte2(block, 0x84 | (dst_reg << 3), src_reg_a | (src_reg_b << 3) | (shift << 6));
codegen_addlong(block, addr);
}
}
void
host_x86_MOVQ_XREG_BASE_INDEX(codeblock_t *block, int dst_reg, int base_reg, int idx_reg)
{
codegen_alloc_bytes(block, 5);
codegen_addbyte4(block, 0xf3, 0x0f, 0x7e, 0x04 | (dst_reg << 3)); /*MOVQ XMMx, [base_reg + idx_reg]*/
codegen_addbyte(block, base_reg | (idx_reg << 3));
}
void
host_x86_MOVQ_XREG_BASE_OFFSET(codeblock_t *block, int dst_reg, int base_reg, int offset)
{
if (offset >= -128 && offset < 127) {
if (base_reg == REG_ESP) {
codegen_alloc_bytes(block, 6);
codegen_addbyte4(block, 0xf3, 0x0f, 0x7e, 0x44 | (dst_reg << 3)); /*MOVQ XMMx, [ESP + offset]*/
codegen_addbyte2(block, 0x24, offset);
} else {
codegen_alloc_bytes(block, 5);
codegen_addbyte4(block, 0xf3, 0x0f, 0x7e, 0x40 | base_reg | (dst_reg << 3)); /*MOVQ XMMx, [base_reg + offset]*/
codegen_addbyte(block, offset);
}
} else
fatal("MOVQ_REG_BASE_OFFSET - offset %i\n", offset);
}
void
host_x86_MOVQ_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 4);
codegen_addbyte4(block, 0xf3, 0x0f, 0x7e, 0xc0 | src_reg | (dst_reg << 3)); /*MOVQ dst_reg, src_reg*/
}
void
host_x86_MOVQ_REG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 5);
codegen_addbyte4(block, 0x66, 0x48, 0x0f, 0x7e); /*MOVQ dst_reg, src_reg*/
codegen_addbyte(block, 0xc0 | dst_reg | (src_reg << 3));
}
void
host_x86_MOVQ_XREG_REG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 5);
codegen_addbyte4(block, 0x66, 0x48, 0x0f, 0x6e); /*MOVQ dst_reg, src_reg*/
codegen_addbyte(block, 0xc0 | src_reg | (dst_reg << 3));
}
void
host_x86_MAXPS_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 3);
codegen_addbyte3(block, 0x0f, 0x5f, 0xc0 | src_reg | (dst_reg << 3)); /*MAXPS dst_reg, src_reg*/
}
void
host_x86_MINPS_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 3);
codegen_addbyte3(block, 0x0f, 0x5d, 0xc0 | src_reg | (dst_reg << 3)); /*MINPS dst_reg, src_reg*/
}
void
host_x86_MULPS_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 3);
codegen_addbyte3(block, 0x0f, 0x59, 0xc0 | src_reg | (dst_reg << 3)); /*MULPS dst_reg, src_reg*/
}
void
host_x86_MULSD_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 4);
codegen_addbyte4(block, 0xf2, 0x0f, 0x59, 0xc0 | src_reg | (dst_reg << 3));
}
void
host_x86_PACKSSWB_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 9);
codegen_addbyte4(block, 0x66, 0x0f, 0x63, 0xc0 | src_reg | (dst_reg << 3)); /*PACKSSWB dst_reg, src_reg*/
codegen_addbyte4(block, 0x66, 0x0f, 0x70, 0xc0 | dst_reg | (dst_reg << 3)); /*PSHUFD dst_reg, dst_reg, 0x88 (move bits 64-95 to 32-63)*/
codegen_addbyte(block, 0x88);
}
void
host_x86_PACKSSDW_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 9);
codegen_addbyte4(block, 0x66, 0x0f, 0x6b, 0xc0 | src_reg | (dst_reg << 3)); /*PACKSSDW dst_reg, src_reg*/
codegen_addbyte4(block, 0x66, 0x0f, 0x70, 0xc0 | dst_reg | (dst_reg << 3)); /*PSHUFD dst_reg, dst_reg, 0x88 (move bits 64-95 to 32-63)*/
codegen_addbyte(block, 0x88);
}
void
host_x86_PACKUSWB_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 9);
codegen_addbyte4(block, 0x66, 0x0f, 0x67, 0xc0 | src_reg | (dst_reg << 3)); /*PACKUSWB dst_reg, src_reg*/
codegen_addbyte4(block, 0x66, 0x0f, 0x70, 0xc0 | dst_reg | (dst_reg << 3)); /*PSHUFD dst_reg, dst_reg, 0x88 (move bits 64-95 to 32-63)*/
codegen_addbyte(block, 0x88);
}
void
host_x86_PADDB_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 4);
codegen_addbyte4(block, 0x66, 0x0f, 0xfc, 0xc0 | src_reg | (dst_reg << 3)); /*PADDB dst_reg, src_reg*/
}
void
host_x86_PADDW_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 4);
codegen_addbyte4(block, 0x66, 0x0f, 0xfd, 0xc0 | src_reg | (dst_reg << 3)); /*PADDW dst_reg, src_reg*/
}
void
host_x86_PADDD_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 4);
codegen_addbyte4(block, 0x66, 0x0f, 0xfe, 0xc0 | src_reg | (dst_reg << 3)); /*PADDD dst_reg, src_reg*/
}
void
host_x86_PADDSB_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 4);
codegen_addbyte4(block, 0x66, 0x0f, 0xec, 0xc0 | src_reg | (dst_reg << 3)); /*PADDSB dst_reg, src_reg*/
}
void
host_x86_PADDSW_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 4);
codegen_addbyte4(block, 0x66, 0x0f, 0xed, 0xc0 | src_reg | (dst_reg << 3)); /*PADDSW dst_reg, src_reg*/
}
void
host_x86_PADDUSB_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 4);
codegen_addbyte4(block, 0x66, 0x0f, 0xdc, 0xc0 | src_reg | (dst_reg << 3)); /*PADDUSB dst_reg, src_reg*/
}
void
host_x86_PADDUSW_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 4);
codegen_addbyte4(block, 0x66, 0x0f, 0xdd, 0xc0 | src_reg | (dst_reg << 3)); /*PADDUSW dst_reg, src_reg*/
}
void
host_x86_PAND_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 4);
codegen_addbyte4(block, 0x66, 0x0f, 0xdb, 0xc0 | src_reg | (dst_reg << 3)); /*PAND dst_reg, src_reg*/
}
void
host_x86_PANDN_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 4);
codegen_addbyte4(block, 0x66, 0x0f, 0xdf, 0xc0 | src_reg | (dst_reg << 3)); /*PANDN dst_reg, src_reg*/
}
void
host_x86_POR_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 4);
codegen_addbyte4(block, 0x66, 0x0f, 0xeb, 0xc0 | src_reg | (dst_reg << 3)); /*POR dst_reg, src_reg*/
}
void
host_x86_PXOR_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 4);
codegen_addbyte4(block, 0x66, 0x0f, 0xef, 0xc0 | src_reg | (dst_reg << 3)); /*PXOR dst_reg, src_reg*/
}
void
host_x86_PCMPEQB_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 4);
codegen_addbyte4(block, 0x66, 0x0f, 0x74, 0xc0 | src_reg | (dst_reg << 3)); /*PCMPEQB dst_reg, src_reg*/
}
void
host_x86_PCMPEQW_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 4);
codegen_addbyte4(block, 0x66, 0x0f, 0x75, 0xc0 | src_reg | (dst_reg << 3)); /*PCMPEQW dst_reg, src_reg*/
}
void
host_x86_PCMPEQD_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 4);
codegen_addbyte4(block, 0x66, 0x0f, 0x76, 0xc0 | src_reg | (dst_reg << 3)); /*PCMPEQD dst_reg, src_reg*/
}
void
host_x86_PCMPGTB_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 4);
codegen_addbyte4(block, 0x66, 0x0f, 0x64, 0xc0 | src_reg | (dst_reg << 3)); /*PCMPGTB dst_reg, src_reg*/
}
void
host_x86_PCMPGTW_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 4);
codegen_addbyte4(block, 0x66, 0x0f, 0x65, 0xc0 | src_reg | (dst_reg << 3)); /*PCMPGTW dst_reg, src_reg*/
}
void
host_x86_PCMPGTD_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 4);
codegen_addbyte4(block, 0x66, 0x0f, 0x66, 0xc0 | src_reg | (dst_reg << 3)); /*PCMPGTD dst_reg, src_reg*/
}
void
host_x86_PMADDWD_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
    codegen_alloc_bytes(block, 4);
    codegen_addbyte4(block, 0x66, 0x0f, 0xf5, 0xc0 | src_reg | (dst_reg << 3)); /*PMADDWD dst_reg, src_reg*/
}
void
host_x86_PMULHW_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
    codegen_alloc_bytes(block, 4);
    codegen_addbyte4(block, 0x66, 0x0f, 0xe5, 0xc0 | src_reg | (dst_reg << 3)); /*PMULHW dst_reg, src_reg*/
}
void
host_x86_PMULLW_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
codegen_alloc_bytes(block, 4);
codegen_addbyte4(block, 0x66, 0x0f, 0xd5, 0xc0 | src_reg | (dst_reg << 3)); /*PMULLW dst_reg, src_reg*/
}
void
host_x86_PSLLW_XREG_IMM(codeblock_t *block, int dst_reg, int shift)
{
codegen_alloc_bytes(block, 5);
codegen_addbyte4(block, 0x66, 0x0f, 0x71, 0xc0 | 0x30 | dst_reg); /*PSLLW dst_reg, imm*/
codegen_addbyte(block, shift);
}
void
host_x86_PSLLD_XREG_IMM(codeblock_t *block, int dst_reg, int shift)
{
codegen_alloc_bytes(block, 5);
codegen_addbyte4(block, 0x66, 0x0f, 0x72, 0xc0 | 0x30 | dst_reg); /*PSLLD dst_reg, imm*/
codegen_addbyte(block, shift);
}
void
host_x86_PSLLQ_XREG_IMM(codeblock_t *block, int dst_reg, int shift)
{
    codegen_alloc_bytes(block, 5);
    codegen_addbyte4(block, 0x66, 0x0f, 0x73, 0xc0 | 0x30 | dst_reg); /*PSLLQ dst_reg, imm (66 0F 73 /6)*/
    codegen_addbyte(block, shift);
}
/* Emit 66 0F 71 /4 ib - PSRAW xmm(dst_reg), imm8: packed word arithmetic shift right. */
void
host_x86_PSRAW_XREG_IMM(codeblock_t *block, int dst_reg, int shift)
{
    const int modrm = 0xc0 | 0x20 | dst_reg; /*reg field /4 selects PSRAW*/

    codegen_alloc_bytes(block, 5);
    codegen_addbyte4(block, 0x66, 0x0f, 0x71, modrm);
    codegen_addbyte(block, shift);
}
/* Emit 66 0F 72 /4 ib - PSRAD xmm(dst_reg), imm8: packed dword arithmetic shift right. */
void
host_x86_PSRAD_XREG_IMM(codeblock_t *block, int dst_reg, int shift)
{
    const int modrm = 0xc0 | 0x20 | dst_reg; /*reg field /4 selects PSRAD*/

    codegen_alloc_bytes(block, 5);
    codegen_addbyte4(block, 0x66, 0x0f, 0x72, modrm);
    codegen_addbyte(block, shift);
}
/* Emit 66 0F 73 /4 ib - intended as a quadword arithmetic shift right by immediate.
   NOTE(review): 66 0F 73 /4 has no legacy-SSE encoding (PSRAQ exists only as an
   EVEX/AVX-512 instruction) — confirm this emitter is unreachable or handled
   specially by the callers. */
void
host_x86_PSRAQ_XREG_IMM(codeblock_t *block, int dst_reg, int shift)
{
    codegen_alloc_bytes(block, 5);
    codegen_addbyte4(block, 0x66, 0x0f, 0x73, 0xc0 | 0x20 | dst_reg); /*PSRAQ dst_reg, imm*/
    codegen_addbyte(block, shift);
}
/* Emit 66 0F 71 /2 ib - PSRLW xmm(dst_reg), imm8: packed word logical shift right. */
void
host_x86_PSRLW_XREG_IMM(codeblock_t *block, int dst_reg, int shift)
{
    const int modrm = 0xc0 | 0x10 | dst_reg; /*reg field /2 selects PSRLW*/

    codegen_alloc_bytes(block, 5);
    codegen_addbyte4(block, 0x66, 0x0f, 0x71, modrm);
    codegen_addbyte(block, shift);
}
/* Emit 66 0F 72 /2 ib - PSRLD xmm(dst_reg), imm8: packed dword logical shift right. */
void
host_x86_PSRLD_XREG_IMM(codeblock_t *block, int dst_reg, int shift)
{
    const int modrm = 0xc0 | 0x10 | dst_reg; /*reg field /2 selects PSRLD*/

    codegen_alloc_bytes(block, 5);
    codegen_addbyte4(block, 0x66, 0x0f, 0x72, modrm);
    codegen_addbyte(block, shift);
}
/* Emit 66 0F 73 /2 ib - PSRLQ: packed quadword logical shift right by immediate. */
void
host_x86_PSRLQ_XREG_IMM(codeblock_t *block, int dst_reg, int shift)
{
    codegen_alloc_bytes(block, 5);
    codegen_addbyte4(block, 0x66, 0x0f, 0x73, 0xc0 | 0x10 | dst_reg); /*PSRLQ dst_reg, imm*/
    codegen_addbyte(block, shift);
}
/* Emit 66 0F F8 /r - PSUBB: packed byte subtract. */
void
host_x86_PSUBB_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
    codegen_alloc_bytes(block, 4);
    codegen_addbyte4(block, 0x66, 0x0f, 0xf8, 0xc0 | src_reg | (dst_reg << 3)); /*PSUBB dst_reg, src_reg*/
}
/* Emit 66 0F F9 /r - PSUBW: packed word subtract. */
void
host_x86_PSUBW_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
    codegen_alloc_bytes(block, 4);
    codegen_addbyte4(block, 0x66, 0x0f, 0xf9, 0xc0 | src_reg | (dst_reg << 3)); /*PSUBW dst_reg, src_reg*/
}
/* Emit 66 0F FA /r - PSUBD: packed dword subtract. */
void
host_x86_PSUBD_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
    codegen_alloc_bytes(block, 4);
    codegen_addbyte4(block, 0x66, 0x0f, 0xfa, 0xc0 | src_reg | (dst_reg << 3)); /*PSUBD dst_reg, src_reg*/
}
/* Emit 66 0F E8 /r - PSUBSB xmm(dst_reg), xmm(src_reg): packed byte subtract, signed saturation. */
void
host_x86_PSUBSB_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
    const int modrm = 0xc0 | src_reg | (dst_reg << 3);

    codegen_alloc_bytes(block, 4);
    codegen_addbyte4(block, 0x66, 0x0f, 0xe8, modrm);
}
/* Emit 66 0F E9 /r - PSUBSW xmm(dst_reg), xmm(src_reg): packed word subtract, signed saturation. */
void
host_x86_PSUBSW_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
    const int modrm = 0xc0 | src_reg | (dst_reg << 3);

    codegen_alloc_bytes(block, 4);
    codegen_addbyte4(block, 0x66, 0x0f, 0xe9, modrm);
}
/* Emit 66 0F D8 /r - PSUBUSB xmm(dst_reg), xmm(src_reg): packed byte subtract, unsigned saturation. */
void
host_x86_PSUBUSB_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
    const int modrm = 0xc0 | src_reg | (dst_reg << 3);

    codegen_alloc_bytes(block, 4);
    codegen_addbyte4(block, 0x66, 0x0f, 0xd8, modrm);
}
/* Emit 66 0F D9 /r - PSUBUSW xmm(dst_reg), xmm(src_reg): packed word subtract, unsigned saturation. */
void
host_x86_PSUBUSW_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
    const int modrm = 0xc0 | src_reg | (dst_reg << 3);

    codegen_alloc_bytes(block, 4);
    codegen_addbyte4(block, 0x66, 0x0f, 0xd9, modrm);
}
/* Emulates MMX PUNPCKHBW on XMM registers: PUNPCKLBW interleaves the low 8
   bytes of both registers into 16 bytes; the high qword of that result is
   exactly the MMX PUNPCKHBW result, so PSHUFD 0xEE copies it down into the
   low qword where the emulated MMX value lives. */
void
host_x86_PUNPCKHBW_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
    codegen_alloc_bytes(block, 9);
    codegen_addbyte4(block, 0x66, 0x0f, 0x60, 0xc0 | src_reg | (dst_reg << 3)); /*PUNPCKLBW dst_reg, src_reg*/
    codegen_addbyte4(block, 0x66, 0x0f, 0x70, 0xc0 | dst_reg | (dst_reg << 3)); /*PSHUFD dst_reg, dst_reg, 0xee (move top 64-bits to low 64-bits)*/
    codegen_addbyte(block, 0xee);
}
/* Emulates MMX PUNPCKHWD: same trick as above, word granularity. */
void
host_x86_PUNPCKHWD_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
    codegen_alloc_bytes(block, 9);
    codegen_addbyte4(block, 0x66, 0x0f, 0x61, 0xc0 | src_reg | (dst_reg << 3)); /*PUNPCKLWD dst_reg, src_reg*/
    codegen_addbyte4(block, 0x66, 0x0f, 0x70, 0xc0 | dst_reg | (dst_reg << 3)); /*PSHUFD dst_reg, dst_reg, 0xee (move top 64-bits to low 64-bits)*/
    codegen_addbyte(block, 0xee);
}
/* Emulates MMX PUNPCKHDQ: same trick as above, dword granularity. */
void
host_x86_PUNPCKHDQ_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
    codegen_alloc_bytes(block, 9);
    codegen_addbyte4(block, 0x66, 0x0f, 0x62, 0xc0 | src_reg | (dst_reg << 3)); /*PUNPCKLDQ dst_reg, src_reg*/
    codegen_addbyte4(block, 0x66, 0x0f, 0x70, 0xc0 | dst_reg | (dst_reg << 3)); /*PSHUFD dst_reg, dst_reg, 0xee (move top 64-bits to low 64-bits)*/
    codegen_addbyte(block, 0xee);
}
/* Emit 66 0F 60 /r - PUNPCKLBW xmm(dst_reg), xmm(src_reg): interleave low bytes. */
void
host_x86_PUNPCKLBW_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
    const int modrm = 0xc0 | src_reg | (dst_reg << 3);

    codegen_alloc_bytes(block, 4);
    codegen_addbyte4(block, 0x66, 0x0f, 0x60, modrm);
}
/* Emit 66 0F 61 /r - PUNPCKLWD xmm(dst_reg), xmm(src_reg): interleave low words. */
void
host_x86_PUNPCKLWD_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
    const int modrm = 0xc0 | src_reg | (dst_reg << 3);

    codegen_alloc_bytes(block, 4);
    codegen_addbyte4(block, 0x66, 0x0f, 0x61, modrm);
}
/* Emit 66 0F 62 /r - PUNPCKLDQ xmm(dst_reg), xmm(src_reg): interleave low dwords. */
void
host_x86_PUNPCKLDQ_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
    const int modrm = 0xc0 | src_reg | (dst_reg << 3);

    codegen_alloc_bytes(block, 4);
    codegen_addbyte4(block, 0x66, 0x0f, 0x62, modrm);
}
/* Emit F2 0F 51 /r - SQRTSD xmm(dst_reg), xmm(src_reg): scalar double-precision square root. */
void
host_x86_SQRTSD_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
    const int modrm = 0xc0 | src_reg | (dst_reg << 3);

    codegen_alloc_bytes(block, 4);
    codegen_addbyte4(block, 0xf2, 0x0f, 0x51, modrm);
}
/* Emit F3 0F 51 /r - SQRTSS xmm(dst_reg), xmm(src_reg): scalar single-precision square root. */
void
host_x86_SQRTSS_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
    const int modrm = 0xc0 | src_reg | (dst_reg << 3);

    codegen_alloc_bytes(block, 4);
    codegen_addbyte4(block, 0xf3, 0x0f, 0x51, modrm);
}
/* Emit 0F 5C /r - SUBPS xmm(dst_reg), xmm(src_reg): packed single-precision subtract. */
void
host_x86_SUBPS_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
    const int modrm = 0xc0 | src_reg | (dst_reg << 3);

    codegen_alloc_bytes(block, 3);
    codegen_addbyte3(block, 0x0f, 0x5c, modrm);
}
/* Emit F2 0F 5C /r - SUBSD: scalar double-precision subtract. */
void
host_x86_SUBSD_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
    codegen_alloc_bytes(block, 4);
    codegen_addbyte4(block, 0xf2, 0x0f, 0x5c, 0xc0 | src_reg | (dst_reg << 3)); /*SUBSD dst_reg, src_reg*/
}
/* Emit 0F 14 /r - UNPCKLPS: interleave low single-precision floats. */
void
host_x86_UNPCKLPS_XREG_XREG(codeblock_t *block, int dst_reg, int src_reg)
{
    codegen_alloc_bytes(block, 3);
    codegen_addbyte3(block, 0x0f, 0x14, 0xc0 | src_reg | (dst_reg << 3)); /*UNPCKLPS dst_reg, src_reg*/
}
#endif
``` | /content/code_sandbox/src/codegen_new/codegen_backend_x86-64_ops_sse.c | c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 9,517 |
```objective-c
#ifndef _CODEGEN_IR_DEFS_
#define _CODEGEN_IR_DEFS_
#include "codegen_reg.h"
#define UOP_REG(reg, size, version) ((reg) | (size) | (version << 8))
/*uOP is a barrier. All previous uOPs must have completed before this one executes.
All registers must have been written back or discarded.
This should be used when calling external functions that may change any emulated
registers.*/
#define UOP_TYPE_BARRIER (1u << 31)
/*uOP is a barrier. All previous uOPs must have completed before this one executes.
All registers must have been written back, but do not have to be discarded.
This should be used when calling functions that preserve registers, but can cause
the code block to exit (eg memory load/store functions).*/
#define UOP_TYPE_ORDER_BARRIER (1 << 27)
/*uOP uses source and dest registers*/
#define UOP_TYPE_PARAMS_REGS (1 << 28)
/*uOP uses pointer*/
#define UOP_TYPE_PARAMS_POINTER (1 << 29)
/*uOP uses immediate data*/
#define UOP_TYPE_PARAMS_IMM (1 << 30)
/*uOP is a jump, with the destination uOP in uop->jump_dest_uop. The compiler must
set jump_dest in the destination uOP to the address of the branch offset to be
written when known.*/
#define UOP_TYPE_JUMP (1 << 26)
/*uOP is the destination of a jump, and must set the destination offset of the jump
at compile time.*/
#define UOP_TYPE_JUMP_DEST (1 << 25)
#define UOP_LOAD_FUNC_ARG_0 (UOP_TYPE_PARAMS_REGS | 0x00)
#define UOP_LOAD_FUNC_ARG_1 (UOP_TYPE_PARAMS_REGS | 0x01)
#define UOP_LOAD_FUNC_ARG_2 (UOP_TYPE_PARAMS_REGS | 0x02)
#define UOP_LOAD_FUNC_ARG_3 (UOP_TYPE_PARAMS_REGS | 0x03)
#define UOP_LOAD_FUNC_ARG_0_IMM (UOP_TYPE_PARAMS_IMM | 0x08 | UOP_TYPE_BARRIER)
#define UOP_LOAD_FUNC_ARG_1_IMM (UOP_TYPE_PARAMS_IMM | 0x09 | UOP_TYPE_BARRIER)
#define UOP_LOAD_FUNC_ARG_2_IMM (UOP_TYPE_PARAMS_IMM | 0x0a | UOP_TYPE_BARRIER)
#define UOP_LOAD_FUNC_ARG_3_IMM (UOP_TYPE_PARAMS_IMM | 0x0b | UOP_TYPE_BARRIER)
#define UOP_CALL_FUNC (UOP_TYPE_PARAMS_POINTER | 0x10 | UOP_TYPE_BARRIER)
/*UOP_CALL_INSTRUCTION_FUNC - call instruction handler at p, check return value and exit block if non-zero*/
#define UOP_CALL_INSTRUCTION_FUNC (UOP_TYPE_PARAMS_POINTER | 0x11 | UOP_TYPE_BARRIER)
#define UOP_STORE_P_IMM (UOP_TYPE_PARAMS_IMM | 0x12)
#define UOP_STORE_P_IMM_8 (UOP_TYPE_PARAMS_IMM | 0x13)
/*UOP_LOAD_SEG - load segment in src_reg_a to segment p via loadseg(), check return value and exit block if non-zero*/
#define UOP_LOAD_SEG (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_POINTER | 0x14 | UOP_TYPE_BARRIER)
/*UOP_JMP - jump to ptr*/
#define UOP_JMP (UOP_TYPE_PARAMS_POINTER | 0x15 | UOP_TYPE_ORDER_BARRIER)
/*UOP_CALL_FUNC - call instruction handler at p, dest_reg = return value*/
#define UOP_CALL_FUNC_RESULT (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_POINTER | 0x16 | UOP_TYPE_BARRIER)
/*UOP_JMP_DEST - jump to ptr*/
#define UOP_JMP_DEST (UOP_TYPE_PARAMS_IMM | UOP_TYPE_PARAMS_POINTER | 0x17 | UOP_TYPE_ORDER_BARRIER | UOP_TYPE_JUMP)
#define UOP_NOP_BARRIER (UOP_TYPE_BARRIER | 0x18)
#define UOP_STORE_P_IMM_16 (UOP_TYPE_PARAMS_IMM | 0x19)
#ifdef DEBUG_EXTRA
/*UOP_LOG_INSTR - log non-recompiled instruction in imm_data*/
# define UOP_LOG_INSTR (UOP_TYPE_PARAMS_IMM | 0x1f)
#endif
/*UOP_MOV_PTR - dest_reg = p*/
#define UOP_MOV_PTR (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_POINTER | 0x20)
/*UOP_MOV_IMM - dest_reg = imm_data*/
#define UOP_MOV_IMM (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | 0x21)
/*UOP_MOV - dest_reg = src_reg_a*/
#define UOP_MOV (UOP_TYPE_PARAMS_REGS | 0x22)
/*UOP_MOVZX - dest_reg = zero_extend(src_reg_a)*/
#define UOP_MOVZX (UOP_TYPE_PARAMS_REGS | 0x23)
/*UOP_MOVSX - dest_reg = sign_extend(src_reg_a)*/
#define UOP_MOVSX (UOP_TYPE_PARAMS_REGS | 0x24)
/*UOP_MOV_DOUBLE_INT - dest_reg = (double)src_reg_a*/
#define UOP_MOV_DOUBLE_INT (UOP_TYPE_PARAMS_REGS | 0x25)
/*UOP_MOV_INT_DOUBLE - dest_reg = (int)src_reg_a. New rounding control in src_reg_b, old rounding control in src_reg_c*/
#define UOP_MOV_INT_DOUBLE (UOP_TYPE_PARAMS_REGS | 0x26)
/*UOP_MOV_INT_DOUBLE_64 - dest_reg = (int)src_reg_a. New rounding control in src_reg_b, old rounding control in src_reg_c*/
#define UOP_MOV_INT_DOUBLE_64 (UOP_TYPE_PARAMS_REGS | 0x27)
/*UOP_MOV_REG_PTR - dest_reg = *p*/
#define UOP_MOV_REG_PTR (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_POINTER | 0x28)
/*UOP_MOVZX_REG_PTR_8 - dest_reg = *(uint8_t *)p*/
#define UOP_MOVZX_REG_PTR_8 (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_POINTER | 0x29)
/*UOP_MOVZX_REG_PTR_16 - dest_reg = *(uint16_t *)p*/
#define UOP_MOVZX_REG_PTR_16 (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_POINTER | 0x2a)
/*UOP_ADD - dest_reg = src_reg_a + src_reg_b*/
#define UOP_ADD (UOP_TYPE_PARAMS_REGS | 0x30)
/*UOP_ADD_IMM - dest_reg = src_reg_a + immediate*/
#define UOP_ADD_IMM (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | 0x31)
/*UOP_AND - dest_reg = src_reg_a & src_reg_b*/
#define UOP_AND (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | 0x32)
/*UOP_AND_IMM - dest_reg = src_reg_a & immediate*/
#define UOP_AND_IMM (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | 0x33)
/*UOP_ADD_LSHIFT - dest_reg = src_reg_a + (src_reg_b << imm_data)
  Intended for EA calculations, imm_data must be between 0 and 3*/
#define UOP_ADD_LSHIFT (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | 0x34)
/*UOP_OR - dest_reg = src_reg_a | src_reg_b*/
#define UOP_OR (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | 0x35)
/*UOP_OR_IMM - dest_reg = src_reg_a | immediate*/
#define UOP_OR_IMM (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | 0x36)
/*UOP_SUB - dest_reg = src_reg_a - src_reg_b*/
#define UOP_SUB (UOP_TYPE_PARAMS_REGS | 0x37)
/*UOP_SUB_IMM - dest_reg = src_reg_a - immediate*/
#define UOP_SUB_IMM (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | 0x38)
/*UOP_XOR - dest_reg = src_reg_a ^ src_reg_b*/
#define UOP_XOR (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | 0x39)
/*UOP_XOR_IMM - dest_reg = src_reg_a ^ immediate*/
#define UOP_XOR_IMM (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | 0x3a)
/*UOP_ANDN - dest_reg = ~src_reg_a & src_reg_b*/
#define UOP_ANDN (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | 0x3b)
/*UOP_MEM_LOAD_ABS - dest_reg = src_reg_a:[immediate]*/
#define UOP_MEM_LOAD_ABS (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | 0x40 | UOP_TYPE_ORDER_BARRIER)
/*UOP_MEM_LOAD_REG - dest_reg = src_reg_a:[src_reg_b]*/
#define UOP_MEM_LOAD_REG (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | 0x41 | UOP_TYPE_ORDER_BARRIER)
/*UOP_MEM_STORE_ABS - src_reg_a:[immediate] = src_reg_b*/
#define UOP_MEM_STORE_ABS (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | 0x42 | UOP_TYPE_ORDER_BARRIER)
/*UOP_MEM_STORE_REG - src_reg_a:[src_reg_b] = src_reg_c*/
#define UOP_MEM_STORE_REG (UOP_TYPE_PARAMS_REGS | 0x43 | UOP_TYPE_ORDER_BARRIER)
/*UOP_MEM_STORE_IMM_8 - byte src_reg_a:[src_reg_b] = imm_data*/
#define UOP_MEM_STORE_IMM_8 (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | 0x44 | UOP_TYPE_ORDER_BARRIER)
/*UOP_MEM_STORE_IMM_16 - word src_reg_a:[src_reg_b] = imm_data*/
#define UOP_MEM_STORE_IMM_16 (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | 0x45 | UOP_TYPE_ORDER_BARRIER)
/*UOP_MEM_STORE_IMM_32 - long src_reg_a:[src_reg_b] = imm_data*/
#define UOP_MEM_STORE_IMM_32 (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | 0x46 | UOP_TYPE_ORDER_BARRIER)
/*UOP_MEM_LOAD_SINGLE - dest_reg = (float)src_reg_a:[src_reg_b]*/
#define UOP_MEM_LOAD_SINGLE (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | 0x47 | UOP_TYPE_ORDER_BARRIER)
/*UOP_CMP_IMM_JZ - if (src_reg_a == imm_data) then jump to ptr*/
#define UOP_CMP_IMM_JZ (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | UOP_TYPE_PARAMS_POINTER | 0x48 | UOP_TYPE_ORDER_BARRIER)
/*UOP_MEM_LOAD_DOUBLE - dest_reg = (double)src_reg_a:[src_reg_b]*/
#define UOP_MEM_LOAD_DOUBLE (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | 0x49 | UOP_TYPE_ORDER_BARRIER)
/*UOP_MEM_STORE_SINGLE - src_reg_a:[src_reg_b] = src_reg_c*/
#define UOP_MEM_STORE_SINGLE (UOP_TYPE_PARAMS_REGS | 0x4a | UOP_TYPE_ORDER_BARRIER)
/*UOP_MEM_STORE_DOUBLE - src_reg_a:[src_reg_b] = src_reg_c*/
#define UOP_MEM_STORE_DOUBLE (UOP_TYPE_PARAMS_REGS | 0x4b | UOP_TYPE_ORDER_BARRIER)
/*UOP_CMP_JB - if (src_reg_a < src_reg_b) then jump to ptr*/
#define UOP_CMP_JB (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_POINTER | 0x4c | UOP_TYPE_ORDER_BARRIER)
/*UOP_CMP_JNBE - if (src_reg_a > src_reg_b) then jump to ptr*/
#define UOP_CMP_JNBE (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_POINTER | 0x4d | UOP_TYPE_ORDER_BARRIER)
/*UOP_SAR - dest_reg = src_reg_a >> src_reg_b*/
#define UOP_SAR (UOP_TYPE_PARAMS_REGS | 0x50)
/*UOP_SAR_IMM - dest_reg = src_reg_a >> immediate*/
#define UOP_SAR_IMM (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | 0x51)
/*UOP_SHL - dest_reg = src_reg_a << src_reg_b*/
#define UOP_SHL (UOP_TYPE_PARAMS_REGS | 0x52)
/*UOP_SHL_IMM - dest_reg = src_reg_a << immediate*/
#define UOP_SHL_IMM (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | 0x53)
/*UOP_SHR - dest_reg = src_reg_a >> src_reg_b*/
#define UOP_SHR (UOP_TYPE_PARAMS_REGS | 0x54)
/*UOP_SHR_IMM - dest_reg = src_reg_a >> immediate*/
#define UOP_SHR_IMM (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | 0x55)
/*UOP_ROL - dest_reg = src_reg_a rotate<< src_reg_b*/
#define UOP_ROL (UOP_TYPE_PARAMS_REGS | 0x56)
/*UOP_ROL_IMM - dest_reg = src_reg_a rotate<< immediate*/
#define UOP_ROL_IMM (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | 0x57)
/*UOP_ROR - dest_reg = src_reg_a rotate>> src_reg_b*/
#define UOP_ROR (UOP_TYPE_PARAMS_REGS | 0x58)
/*UOP_ROR_IMM - dest_reg = src_reg_a rotate>> immediate*/
#define UOP_ROR_IMM (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | 0x59)
/*UOP_CMP_IMM_JZ_DEST - if (src_reg_a == imm_data) then jump to ptr*/
#define UOP_CMP_IMM_JZ_DEST (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | UOP_TYPE_PARAMS_POINTER | 0x60 | UOP_TYPE_ORDER_BARRIER | UOP_TYPE_JUMP)
/*UOP_CMP_IMM_JNZ_DEST - if (src_reg_a != imm_data) then jump to ptr*/
#define UOP_CMP_IMM_JNZ_DEST (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | UOP_TYPE_PARAMS_POINTER | 0x61 | UOP_TYPE_ORDER_BARRIER | UOP_TYPE_JUMP)
/*UOP_CMP_JB_DEST - if (src_reg_a < src_reg_b) then jump to ptr*/
#define UOP_CMP_JB_DEST (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | UOP_TYPE_PARAMS_POINTER | 0x62 | UOP_TYPE_ORDER_BARRIER | UOP_TYPE_JUMP)
/*UOP_CMP_JNB_DEST - if (src_reg_a >= src_reg_b) then jump to ptr*/
#define UOP_CMP_JNB_DEST (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | UOP_TYPE_PARAMS_POINTER | 0x63 | UOP_TYPE_ORDER_BARRIER | UOP_TYPE_JUMP)
/*UOP_CMP_JO_DEST - if (src_reg_a - src_reg_b) overflows (signed) then jump to ptr*/
#define UOP_CMP_JO_DEST (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | UOP_TYPE_PARAMS_POINTER | 0x64 | UOP_TYPE_ORDER_BARRIER | UOP_TYPE_JUMP)
/*UOP_CMP_JNO_DEST - if (src_reg_a - src_reg_b) does not overflow (signed) then jump to ptr*/
#define UOP_CMP_JNO_DEST (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | UOP_TYPE_PARAMS_POINTER | 0x65 | UOP_TYPE_ORDER_BARRIER | UOP_TYPE_JUMP)
/*UOP_CMP_JZ_DEST - if (src_reg_a == src_reg_b) then jump to ptr*/
#define UOP_CMP_JZ_DEST (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | UOP_TYPE_PARAMS_POINTER | 0x66 | UOP_TYPE_ORDER_BARRIER | UOP_TYPE_JUMP)
/*UOP_CMP_JNZ_DEST - if (src_reg_a != src_reg_b) then jump to ptr*/
#define UOP_CMP_JNZ_DEST (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | UOP_TYPE_PARAMS_POINTER | 0x67 | UOP_TYPE_ORDER_BARRIER | UOP_TYPE_JUMP)
/*UOP_CMP_JL_DEST - if (signed)(src_reg_a < src_reg_b) then jump to ptr*/
#define UOP_CMP_JL_DEST (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | UOP_TYPE_PARAMS_POINTER | 0x68 | UOP_TYPE_ORDER_BARRIER | UOP_TYPE_JUMP)
/*UOP_CMP_JNL_DEST - if (signed)(src_reg_a >= src_reg_b) then jump to ptr*/
#define UOP_CMP_JNL_DEST (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | UOP_TYPE_PARAMS_POINTER | 0x69 | UOP_TYPE_ORDER_BARRIER | UOP_TYPE_JUMP)
/*UOP_CMP_JBE_DEST - if (src_reg_a <= src_reg_b) then jump to ptr*/
#define UOP_CMP_JBE_DEST (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | UOP_TYPE_PARAMS_POINTER | 0x6a | UOP_TYPE_ORDER_BARRIER | UOP_TYPE_JUMP)
/*UOP_CMP_JNBE_DEST - if (src_reg_a > src_reg_b) then jump to ptr*/
#define UOP_CMP_JNBE_DEST (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | UOP_TYPE_PARAMS_POINTER | 0x6b | UOP_TYPE_ORDER_BARRIER | UOP_TYPE_JUMP)
/*UOP_CMP_JLE_DEST - if (signed)(src_reg_a <= src_reg_b) then jump to ptr*/
#define UOP_CMP_JLE_DEST (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | UOP_TYPE_PARAMS_POINTER | 0x6c | UOP_TYPE_ORDER_BARRIER | UOP_TYPE_JUMP)
/*UOP_CMP_JNLE_DEST - if (signed)(src_reg_a > src_reg_b) then jump to ptr*/
#define UOP_CMP_JNLE_DEST (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | UOP_TYPE_PARAMS_POINTER | 0x6d | UOP_TYPE_ORDER_BARRIER | UOP_TYPE_JUMP)
/*UOP_TEST_JNS_DEST - if (src_reg_a non-negative, i.e. sign bit clear) then jump to ptr*/
#define UOP_TEST_JNS_DEST (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | UOP_TYPE_PARAMS_POINTER | 0x70 | UOP_TYPE_ORDER_BARRIER | UOP_TYPE_JUMP)
/*UOP_TEST_JS_DEST - if (src_reg_a negative, i.e. sign bit set) then jump to ptr*/
#define UOP_TEST_JS_DEST (UOP_TYPE_PARAMS_REGS | UOP_TYPE_PARAMS_IMM | UOP_TYPE_PARAMS_POINTER | 0x71 | UOP_TYPE_ORDER_BARRIER | UOP_TYPE_JUMP)
/*UOP_FP_ENTER - must be called before any FPU register accessed*/
#define UOP_FP_ENTER (UOP_TYPE_PARAMS_IMM | 0x80 | UOP_TYPE_BARRIER)
/*UOP_FADD - (floating point) dest_reg = src_reg_a + src_reg_b*/
#define UOP_FADD (UOP_TYPE_PARAMS_REGS | 0x81)
/*UOP_FSUB - (floating point) dest_reg = src_reg_a - src_reg_b*/
#define UOP_FSUB (UOP_TYPE_PARAMS_REGS | 0x82)
/*UOP_FMUL - (floating point) dest_reg = src_reg_a * src_reg_b*/
#define UOP_FMUL (UOP_TYPE_PARAMS_REGS | 0x83)
/*UOP_FDIV - (floating point) dest_reg = src_reg_a / src_reg_b*/
#define UOP_FDIV (UOP_TYPE_PARAMS_REGS | 0x84)
/*UOP_FCOM - dest_reg = flags from compare(src_reg_a, src_reg_b)*/
#define UOP_FCOM (UOP_TYPE_PARAMS_REGS | 0x85)
/*UOP_FABS - dest_reg = fabs(src_reg_a)*/
#define UOP_FABS (UOP_TYPE_PARAMS_REGS | 0x86)
/*UOP_FCHS - dest_reg = -src_reg_a (change sign)*/
#define UOP_FCHS (UOP_TYPE_PARAMS_REGS | 0x87)
/*UOP_FTST - dest_reg = flags from compare(src_reg_a, 0)*/
#define UOP_FTST (UOP_TYPE_PARAMS_REGS | 0x88)
/*UOP_FSQRT - dest_reg = fsqrt(src_reg_a)*/
#define UOP_FSQRT (UOP_TYPE_PARAMS_REGS | 0x89)
/*UOP_MMX_ENTER - must be called before any MMX registers accessed*/
#define UOP_MMX_ENTER (UOP_TYPE_PARAMS_IMM | 0x90 | UOP_TYPE_BARRIER)
/*UOP_PADDB - (packed byte) dest_reg = src_reg_a + src_reg_b*/
#define UOP_PADDB (UOP_TYPE_PARAMS_REGS | 0x91)
/*UOP_PADDW - (packed word) dest_reg = src_reg_a + src_reg_b*/
#define UOP_PADDW (UOP_TYPE_PARAMS_REGS | 0x92)
/*UOP_PADDD - (packed long) dest_reg = src_reg_a + src_reg_b*/
#define UOP_PADDD (UOP_TYPE_PARAMS_REGS | 0x93)
/*UOP_PADDSB - (packed byte with signed saturation) dest_reg = src_reg_a + src_reg_b*/
#define UOP_PADDSB (UOP_TYPE_PARAMS_REGS | 0x94)
/*UOP_PADDSW - (packed word with signed saturation) dest_reg = src_reg_a + src_reg_b*/
#define UOP_PADDSW (UOP_TYPE_PARAMS_REGS | 0x95)
/*UOP_PADDUSB - (packed byte with unsigned saturation) dest_reg = src_reg_a + src_reg_b*/
#define UOP_PADDUSB (UOP_TYPE_PARAMS_REGS | 0x96)
/*UOP_PADDUSW - (packed word with unsigned saturation) dest_reg = src_reg_a + src_reg_b*/
#define UOP_PADDUSW (UOP_TYPE_PARAMS_REGS | 0x97)
/*UOP_PSUBB - (packed byte) dest_reg = src_reg_a - src_reg_b*/
#define UOP_PSUBB (UOP_TYPE_PARAMS_REGS | 0x98)
/*UOP_PSUBW - (packed word) dest_reg = src_reg_a - src_reg_b*/
#define UOP_PSUBW (UOP_TYPE_PARAMS_REGS | 0x99)
/*UOP_PSUBD - (packed long) dest_reg = src_reg_a - src_reg_b*/
#define UOP_PSUBD (UOP_TYPE_PARAMS_REGS | 0x9a)
/*UOP_PSUBSB - (packed byte with signed saturation) dest_reg = src_reg_a - src_reg_b*/
#define UOP_PSUBSB (UOP_TYPE_PARAMS_REGS | 0x9b)
/*UOP_PSUBSW - (packed word with signed saturation) dest_reg = src_reg_a - src_reg_b*/
#define UOP_PSUBSW (UOP_TYPE_PARAMS_REGS | 0x9c)
/*UOP_PSUBUSB - (packed byte with unsigned saturation) dest_reg = src_reg_a - src_reg_b*/
#define UOP_PSUBUSB (UOP_TYPE_PARAMS_REGS | 0x9d)
/*UOP_PSUBUSW - (packed word with unsigned saturation) dest_reg = src_reg_a - src_reg_b*/
#define UOP_PSUBUSW (UOP_TYPE_PARAMS_REGS | 0x9e)
/*UOP_PSLLW_IMM - (packed word) dest_reg = src_reg_a << immediate*/
#define UOP_PSLLW_IMM (UOP_TYPE_PARAMS_REGS | 0x9f)
/*UOP_PSLLD_IMM - (packed long) dest_reg = src_reg_a << immediate*/
#define UOP_PSLLD_IMM (UOP_TYPE_PARAMS_REGS | 0xa0)
/*UOP_PSLLQ_IMM - (packed quad) dest_reg = src_reg_a << immediate*/
#define UOP_PSLLQ_IMM (UOP_TYPE_PARAMS_REGS | 0xa1)
/*UOP_PSRAW_IMM - (packed word) dest_reg = src_reg_a >> immediate*/
#define UOP_PSRAW_IMM (UOP_TYPE_PARAMS_REGS | 0xa2)
/*UOP_PSRAD_IMM - (packed long) dest_reg = src_reg_a >> immediate*/
#define UOP_PSRAD_IMM (UOP_TYPE_PARAMS_REGS | 0xa3)
/*UOP_PSRAQ_IMM - (packed quad) dest_reg = src_reg_a >> immediate*/
#define UOP_PSRAQ_IMM (UOP_TYPE_PARAMS_REGS | 0xa4)
/*UOP_PSRLW_IMM - (packed word) dest_reg = src_reg_a >> immediate*/
#define UOP_PSRLW_IMM (UOP_TYPE_PARAMS_REGS | 0xa5)
/*UOP_PSRLD_IMM - (packed long) dest_reg = src_reg_a >> immediate*/
#define UOP_PSRLD_IMM (UOP_TYPE_PARAMS_REGS | 0xa6)
/*UOP_PSRLQ_IMM - (packed quad) dest_reg = src_reg_a >> immediate*/
#define UOP_PSRLQ_IMM (UOP_TYPE_PARAMS_REGS | 0xa7)
/*UOP_PCMPEQB - (packed byte) dest_reg = (src_reg_a == src_reg_b) ? ~0 : 0*/
#define UOP_PCMPEQB (UOP_TYPE_PARAMS_REGS | 0xa8)
/*UOP_PCMPEQW - (packed word) dest_reg = (src_reg_a == src_reg_b) ? ~0 : 0*/
#define UOP_PCMPEQW (UOP_TYPE_PARAMS_REGS | 0xa9)
/*UOP_PCMPEQD - (packed long) dest_reg = (src_reg_a == src_reg_b) ? ~0 : 0*/
#define UOP_PCMPEQD (UOP_TYPE_PARAMS_REGS | 0xaa)
/*UOP_PCMPGTB - (packed signed byte) dest_reg = (src_reg_a > src_reg_b) ? ~0 : 0*/
#define UOP_PCMPGTB (UOP_TYPE_PARAMS_REGS | 0xab)
/*UOP_PCMPGTW - (packed signed word) dest_reg = (src_reg_a > src_reg_b) ? ~0 : 0*/
#define UOP_PCMPGTW (UOP_TYPE_PARAMS_REGS | 0xac)
/*UOP_PCMPGTD - (packed signed long) dest_reg = (src_reg_a > src_reg_b) ? ~0 : 0*/
#define UOP_PCMPGTD (UOP_TYPE_PARAMS_REGS | 0xad)
/*UOP_PUNPCKLBW - (packed byte) dest_reg = interleave low src_reg_a/src_reg_b*/
#define UOP_PUNPCKLBW (UOP_TYPE_PARAMS_REGS | 0xae)
/*UOP_PUNPCKLWD - (packed word) dest_reg = interleave low src_reg_a/src_reg_b*/
#define UOP_PUNPCKLWD (UOP_TYPE_PARAMS_REGS | 0xaf)
/*UOP_PUNPCKLDQ - (packed long) dest_reg = interleave low src_reg_a/src_reg_b*/
#define UOP_PUNPCKLDQ (UOP_TYPE_PARAMS_REGS | 0xb0)
/*UOP_PUNPCKHBW - (packed byte) dest_reg = interleave high src_reg_a/src_reg_b*/
#define UOP_PUNPCKHBW (UOP_TYPE_PARAMS_REGS | 0xb1)
/*UOP_PUNPCKHWD - (packed word) dest_reg = interleave high src_reg_a/src_reg_b*/
#define UOP_PUNPCKHWD (UOP_TYPE_PARAMS_REGS | 0xb2)
/*UOP_PUNPCKHDQ - (packed long) dest_reg = interleave high src_reg_a/src_reg_b*/
#define UOP_PUNPCKHDQ (UOP_TYPE_PARAMS_REGS | 0xb3)
/*UOP_PACKSSWB - dest_reg = interleave src_reg_a/src_reg_b, converting words to bytes with signed saturation*/
#define UOP_PACKSSWB (UOP_TYPE_PARAMS_REGS | 0xb4)
/*UOP_PACKSSDW - dest_reg = interleave src_reg_a/src_reg_b, converting longs to words with signed saturation*/
#define UOP_PACKSSDW (UOP_TYPE_PARAMS_REGS | 0xb5)
/*UOP_PACKUSWB - dest_reg = interleave src_reg_a/src_reg_b, converting words to bytes with unsigned saturation*/
#define UOP_PACKUSWB (UOP_TYPE_PARAMS_REGS | 0xb6)
/*UOP_PMULLW - (packed word) dest_reg = (src_reg_a * src_reg_b) & 0xffff*/
#define UOP_PMULLW (UOP_TYPE_PARAMS_REGS | 0xb7)
/*UOP_PMULHW - (packed word) dest_reg = (src_reg_a * src_reg_b) >> 16*/
#define UOP_PMULHW (UOP_TYPE_PARAMS_REGS | 0xb8)
/*UOP_PMADDWD - (packed word) dest_reg = adjacent signed word products of src_reg_a * src_reg_b summed into packed longs*/
#define UOP_PMADDWD (UOP_TYPE_PARAMS_REGS | 0xb9)
/*UOP_PFADD - (packed float) dest_reg = src_reg_a + src_reg_b*/
#define UOP_PFADD (UOP_TYPE_PARAMS_REGS | 0xba)
/*UOP_PFSUB - (packed float) dest_reg = src_reg_a - src_reg_b*/
#define UOP_PFSUB (UOP_TYPE_PARAMS_REGS | 0xbb)
/*UOP_PFMUL - (packed float) dest_reg = src_reg_a * src_reg_b*/
#define UOP_PFMUL (UOP_TYPE_PARAMS_REGS | 0xbc)
/*UOP_PFMAX - (packed float) dest_reg = MAX(src_reg_a, src_reg_b)*/
#define UOP_PFMAX (UOP_TYPE_PARAMS_REGS | 0xbd)
/*UOP_PFMIN - (packed float) dest_reg = MIN(src_reg_a, src_reg_b)*/
#define UOP_PFMIN (UOP_TYPE_PARAMS_REGS | 0xbe)
/*UOP_PFCMPEQ - (packed float) dest_reg = (src_reg_a == src_reg_b) ? ~0 : 0*/
#define UOP_PFCMPEQ (UOP_TYPE_PARAMS_REGS | 0xbf)
/*UOP_PFCMPGE - (packed float) dest_reg = (src_reg_a >= src_reg_b) ? ~0 : 0*/
#define UOP_PFCMPGE (UOP_TYPE_PARAMS_REGS | 0xc0)
/*UOP_PFCMPGT - (packed float) dest_reg = (src_reg_a > src_reg_b) ? ~0 : 0*/
#define UOP_PFCMPGT (UOP_TYPE_PARAMS_REGS | 0xc1)
/*UOP_PF2ID - (packed long)dest_reg = (packed float)src_reg_a*/
#define UOP_PF2ID (UOP_TYPE_PARAMS_REGS | 0xc2)
/*UOP_PI2FD - (packed float)dest_reg = (packed long)src_reg_a*/
#define UOP_PI2FD (UOP_TYPE_PARAMS_REGS | 0xc3)
/*UOP_PFRCP - (packed float) dest_reg[0] = dest_reg[1] = 1.0 / src_reg[0]*/
#define UOP_PFRCP (UOP_TYPE_PARAMS_REGS | 0xc4)
/*UOP_PFRSQRT - (packed float) dest_reg[0] = dest_reg[1] = 1.0 / sqrt(src_reg[0])*/
#define UOP_PFRSQRT (UOP_TYPE_PARAMS_REGS | 0xc5)
#define UOP_MAX 0xc6
#define UOP_INVALID 0xff
#define UOP_MASK 0xffff
/*One micro-operation in the intermediate representation. Filled in by the
  uop_gen_* helpers below and later consumed by the backend code generator.*/
typedef struct uop_t {
    uint32_t type;       /*UOP_* opcode combined with UOP_TYPE_* flags*/
    ir_reg_t dest_reg_a; /*destination IR register (invalid_ir_reg if unused)*/
    ir_reg_t src_reg_a;  /*source IR registers (invalid_ir_reg if unused)*/
    ir_reg_t src_reg_b;
    ir_reg_t src_reg_c;
    uint32_t imm_data;   /*immediate operand, valid when UOP_TYPE_PARAMS_IMM set*/
    void *p;             /*pointer operand, valid when UOP_TYPE_PARAMS_POINTER set*/
    ir_host_reg_t dest_reg_a_real; /*host registers assigned by the register allocator*/
    ir_host_reg_t src_reg_a_real, src_reg_b_real, src_reg_c_real;
    int jump_dest_uop;   /*index of destination uOP for UOP_TYPE_JUMP, -1 if none*/
    int jump_list_next;  /*next uOP in the jump fix-up list, -1 terminates*/
    void *jump_dest;     /*host-code address of the branch offset to patch*/
    uint32_t pc;         /*emulated PC this uOP was generated from*/
} uop_t;
/*Maximum number of uOPs per translated block; uop_alloc() fatals if exceeded.*/
#define UOP_NR_MAX 4096
/*The IR for one code block: a flat, append-only array of uOPs.*/
typedef struct ir_data_t {
    uop_t uops[UOP_NR_MAX];
    int wr_pos;                /*next free slot in uops[]*/
    struct codeblock_t *block; /*code block this IR is being built for*/
} ir_data_t;
/*Allocate the next uOP slot in the IR stream and initialise it to a safe
  default state (all registers invalid, no jump links). Fatals if the block
  exceeds UOP_NR_MAX uOPs. The caller fills in type and operands.*/
static inline uop_t *
uop_alloc(ir_data_t *ir, uint32_t uop_type)
{
    uop_t *uop;
    if (ir->wr_pos >= UOP_NR_MAX)
        fatal("Exceeded uOP max\n");
    uop = &ir->uops[ir->wr_pos++];
    uop->dest_reg_a = invalid_ir_reg;
    uop->src_reg_a = invalid_ir_reg;
    uop->src_reg_b = invalid_ir_reg;
    uop->src_reg_c = invalid_ir_reg;
    /*Record the emulated PC of the instruction being translated — presumably
      used for fault/exception reporting; confirm against the backends.*/
    uop->pc = cpu_state.oldpc;
    uop->jump_dest_uop = -1;
    uop->jump_list_next = -1;
    /*Barrier uOPs require all live registers to be written back first,
      so flag them for the register allocator now.*/
    if (uop_type & (UOP_TYPE_BARRIER | UOP_TYPE_ORDER_BARRIER))
        codegen_reg_mark_as_required();
    return uop;
}
/*Point the jump uOP at index jump_uop to the NEXT uOP to be emitted
  (the current write position).*/
static inline void
uop_set_jump_dest(ir_data_t *ir, int jump_uop)
{
    uop_t *uop = &ir->uops[jump_uop];
    uop->jump_dest_uop = ir->wr_pos;
}
/*Emit a uOP with no operands; returns its index in the uOP stream.*/
static inline int
uop_gen(uint32_t uop_type, ir_data_t *ir)
{
    uop_t *uop = uop_alloc(ir, uop_type);
    uop->type = uop_type;
    return ir->wr_pos - 1;
}
/*Emit a uOP reading one source register; returns its index in the uOP stream.*/
static inline int
uop_gen_reg_src1(uint32_t uop_type, ir_data_t *ir, int src_reg_a)
{
    uop_t *uop = uop_alloc(ir, uop_type);
    uop->type = uop_type;
    uop->src_reg_a = codegen_reg_read(src_reg_a);
    return ir->wr_pos - 1;
}
/*Emit a uOP reading one source register for a function-argument load.
  NOTE(review): the 'arg' parameter is ignored — the argument number appears
  to be encoded in uop_type instead (see uop_LOAD_FUNC_ARG_REG, which uses
  UOP_LOAD_FUNC_ARG_0 + arg); confirm whether this helper is still needed.*/
static inline void
uop_gen_reg_src1_arg(uint32_t uop_type, ir_data_t *ir, int arg, int src_reg_a)
{
    uop_t *uop = uop_alloc(ir, uop_type);
    uop->type = uop_type;
    uop->src_reg_a = codegen_reg_read(src_reg_a);
}
/*Emit a uOP reading one source register plus an immediate; returns its index.*/
static inline int
uop_gen_reg_src1_imm(uint32_t uop_type, ir_data_t *ir, int src_reg, uint32_t imm)
{
    uop_t *uop = uop_alloc(ir, uop_type);
    uop->type = uop_type;
    uop->src_reg_a = codegen_reg_read(src_reg);
    uop->imm_data = imm;
    return ir->wr_pos - 1;
}
/*Emit a uOP writing dest_reg from an immediate.*/
static inline void
uop_gen_reg_dst_imm(uint32_t uop_type, ir_data_t *ir, int dest_reg, uint32_t imm)
{
    uop_t *uop = uop_alloc(ir, uop_type);
    uop->type = uop_type;
    uop->dest_reg_a = codegen_reg_write(dest_reg, ir->wr_pos - 1);
    uop->imm_data = imm;
}
/*Emit a uOP writing dest_reg, with a pointer operand.*/
static inline void
uop_gen_reg_dst_pointer(uint32_t uop_type, ir_data_t *ir, int dest_reg, void *p)
{
    uop_t *uop = uop_alloc(ir, uop_type);
    uop->type = uop_type;
    uop->dest_reg_a = codegen_reg_write(dest_reg, ir->wr_pos - 1);
    uop->p = p;
}
/*Emit a uOP writing dest_reg from one source register.*/
static inline void
uop_gen_reg_dst_src1(uint32_t uop_type, ir_data_t *ir, int dest_reg, int src_reg)
{
    uop_t *uop = uop_alloc(ir, uop_type);
    uop->type = uop_type;
    uop->src_reg_a = codegen_reg_read(src_reg);
    uop->dest_reg_a = codegen_reg_write(dest_reg, ir->wr_pos - 1);
}
/*Emit a uOP writing dest_reg from one source register and an immediate.*/
static inline void
uop_gen_reg_dst_src1_imm(uint32_t uop_type, ir_data_t *ir, int dest_reg, int src_reg_a, uint32_t imm)
{
    uop_t *uop = uop_alloc(ir, uop_type);
    uop->type = uop_type;
    uop->src_reg_a = codegen_reg_read(src_reg_a);
    uop->dest_reg_a = codegen_reg_write(dest_reg, ir->wr_pos - 1);
    uop->imm_data = imm;
}
/*Emit a uOP writing dest_reg from two source registers.*/
static inline void
uop_gen_reg_dst_src2(uint32_t uop_type, ir_data_t *ir, int dest_reg, int src_reg_a, int src_reg_b)
{
    uop_t *uop = uop_alloc(ir, uop_type);
    uop->type = uop_type;
    uop->src_reg_a = codegen_reg_read(src_reg_a);
    uop->src_reg_b = codegen_reg_read(src_reg_b);
    uop->dest_reg_a = codegen_reg_write(dest_reg, ir->wr_pos - 1);
}
/*Emit a uOP writing dest_reg from two source registers and an immediate.*/
static inline void
uop_gen_reg_dst_src2_imm(uint32_t uop_type, ir_data_t *ir, int dest_reg, int src_reg_a, int src_reg_b, uint32_t imm)
{
    uop_t *uop = uop_alloc(ir, uop_type);
    uop->type = uop_type;
    uop->src_reg_a = codegen_reg_read(src_reg_a);
    uop->src_reg_b = codegen_reg_read(src_reg_b);
    uop->dest_reg_a = codegen_reg_write(dest_reg, ir->wr_pos - 1);
    uop->imm_data = imm;
}
/*Emit a uOP writing dest_reg from three source registers.*/
static inline void
uop_gen_reg_dst_src3(uint32_t uop_type, ir_data_t *ir, int dest_reg, int src_reg_a, int src_reg_b, int src_reg_c)
{
    uop_t *uop = uop_alloc(ir, uop_type);
    uop->type = uop_type;
    uop->src_reg_a = codegen_reg_read(src_reg_a);
    uop->src_reg_b = codegen_reg_read(src_reg_b);
    uop->src_reg_c = codegen_reg_read(src_reg_c);
    uop->dest_reg_a = codegen_reg_write(dest_reg, ir->wr_pos - 1);
}
/*NOTE(review): byte-for-byte identical to uop_gen_reg_dst_src1_imm above —
  apparently kept for source compatibility; candidates for consolidation.*/
static inline void
uop_gen_reg_dst_src_imm(uint32_t uop_type, ir_data_t *ir, int dest_reg, int src_reg, uint32_t imm)
{
    uop_t *uop = uop_alloc(ir, uop_type);
    uop->type = uop_type;
    uop->src_reg_a = codegen_reg_read(src_reg);
    uop->dest_reg_a = codegen_reg_write(dest_reg, ir->wr_pos - 1);
    uop->imm_data = imm;
}
static inline int
uop_gen_reg_src2(uint32_t uop_type, ir_data_t *ir, int src_reg_a, int src_reg_b)
{
uop_t *uop = uop_alloc(ir, uop_type);
uop->type = uop_type;
uop->src_reg_a = codegen_reg_read(src_reg_a);
uop->src_reg_b = codegen_reg_read(src_reg_b);
return ir->wr_pos - 1;
}
static inline void
uop_gen_reg_src2_imm(uint32_t uop_type, ir_data_t *ir, int src_reg_a, int src_reg_b, uint32_t imm)
{
uop_t *uop = uop_alloc(ir, uop_type);
uop->type = uop_type;
uop->src_reg_a = codegen_reg_read(src_reg_a);
uop->src_reg_b = codegen_reg_read(src_reg_b);
uop->imm_data = imm;
}
static inline void
uop_gen_reg_src3(uint32_t uop_type, ir_data_t *ir, int src_reg_a, int src_reg_b, int src_reg_c)
{
uop_t *uop = uop_alloc(ir, uop_type);
uop->type = uop_type;
uop->src_reg_a = codegen_reg_read(src_reg_a);
uop->src_reg_b = codegen_reg_read(src_reg_b);
uop->src_reg_c = codegen_reg_read(src_reg_c);
}
/* Append a uop with three register sources plus an immediate; no destination
   (e.g. the MEM_STORE_REG forms, where the immediate is an address offset). */
static inline void
uop_gen_reg_src3_imm(uint32_t uop_type, ir_data_t *ir, int src_reg_a, int src_reg_b, int src_reg_c, uint32_t imm)
{
    uop_t *new_uop = uop_alloc(ir, uop_type);

    new_uop->type      = uop_type;
    new_uop->imm_data  = imm;
    new_uop->src_reg_a = codegen_reg_read(src_reg_a);
    new_uop->src_reg_b = codegen_reg_read(src_reg_b);
    new_uop->src_reg_c = codegen_reg_read(src_reg_c);
}
/* Append a uop that carries only an immediate operand. */
static inline void
uop_gen_imm(uint32_t uop_type, ir_data_t *ir, uint32_t imm)
{
    uop_t *new_uop = uop_alloc(ir, uop_type);

    new_uop->type     = uop_type;
    new_uop->imm_data = imm;
}
/* Append a uop that carries only a host pointer operand. */
static inline void
uop_gen_pointer(uint32_t uop_type, ir_data_t *ir, void *p)
{
    uop_t *new_uop = uop_alloc(ir, uop_type);

    new_uop->type = uop_type;
    new_uop->p    = p;
}
/* Append a uop carrying a host pointer and an immediate
   (e.g. the STORE_PTR_IMM forms). */
static inline void
uop_gen_pointer_imm(uint32_t uop_type, ir_data_t *ir, void *p, uint32_t imm)
{
    uop_t *new_uop = uop_alloc(ir, uop_type);

    new_uop->type     = uop_type;
    new_uop->p        = p;
    new_uop->imm_data = imm;
}
/* Append a uop with one register source and a host pointer
   (e.g. uop_LOAD_SEG). */
static inline void
uop_gen_reg_src_pointer(uint32_t uop_type, ir_data_t *ir, int src_reg_a, void *p)
{
    uop_t *new_uop = uop_alloc(ir, uop_type);

    new_uop->type      = uop_type;
    new_uop->p         = p;
    new_uop->src_reg_a = codegen_reg_read(src_reg_a);
}
/* Append a uop with one register source, a host pointer and an immediate
   (e.g. uop_CMP_IMM_JZ). */
static inline void
uop_gen_reg_src_pointer_imm(uint32_t uop_type, ir_data_t *ir, int src_reg_a, void *p, uint32_t imm)
{
    uop_t *new_uop = uop_alloc(ir, uop_type);

    new_uop->type      = uop_type;
    new_uop->p         = p;
    new_uop->imm_data  = imm;
    new_uop->src_reg_a = codegen_reg_read(src_reg_a);
}
/* Append a uop with two register sources and a host pointer
   (e.g. the uop_CMP_JB/uop_CMP_JNBE branch forms). */
static inline void
uop_gen_reg_src2_pointer(uint32_t uop_type, ir_data_t *ir, int src_reg_a, int src_reg_b, void *p)
{
    uop_t *new_uop = uop_alloc(ir, uop_type);

    new_uop->type      = uop_type;
    new_uop->p         = p;
    new_uop->src_reg_a = codegen_reg_read(src_reg_a);
    new_uop->src_reg_b = codegen_reg_read(src_reg_b);
}
/* uop emission macros: each uop_XXX wrapper expands to the uop_gen_* helper
   whose operand pattern (dst/src registers, immediates, pointers) matches
   the UOP_XXX micro-op being emitted. */
#define uop_LOAD_FUNC_ARG_REG(ir, arg, reg) uop_gen_reg_src1(UOP_LOAD_FUNC_ARG_0 + arg, ir, reg)
#define uop_LOAD_FUNC_ARG_IMM(ir, arg, imm) uop_gen_imm(UOP_LOAD_FUNC_ARG_0_IMM + arg, ir, imm)
/* Integer ALU operations; the *_IMM forms take an immediate second operand. */
#define uop_ADD(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_ADD, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_ADD_IMM(ir, dst_reg, src_reg, imm) uop_gen_reg_dst_src_imm(UOP_ADD_IMM, ir, dst_reg, src_reg, imm)
#define uop_ADD_LSHIFT(ir, dst_reg, src_reg_a, src_reg_b, shift) uop_gen_reg_dst_src2_imm(UOP_ADD_LSHIFT, ir, dst_reg, src_reg_a, src_reg_b, shift)
#define uop_AND(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_AND, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_AND_IMM(ir, dst_reg, src_reg, imm) uop_gen_reg_dst_src_imm(UOP_AND_IMM, ir, dst_reg, src_reg, imm)
#define uop_ANDN(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_ANDN, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_OR(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_OR, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_OR_IMM(ir, dst_reg, src_reg, imm) uop_gen_reg_dst_src_imm(UOP_OR_IMM, ir, dst_reg, src_reg, imm)
#define uop_SUB(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_SUB, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_SUB_IMM(ir, dst_reg, src_reg, imm) uop_gen_reg_dst_src_imm(UOP_SUB_IMM, ir, dst_reg, src_reg, imm)
#define uop_XOR(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_XOR, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_XOR_IMM(ir, dst_reg, src_reg, imm) uop_gen_reg_dst_src_imm(UOP_XOR_IMM, ir, dst_reg, src_reg, imm)
/* Shifts and rotates; non-IMM forms take the shift count in a register. */
#define uop_SAR(ir, dst_reg, src_reg, shift_reg) uop_gen_reg_dst_src2(UOP_SAR, ir, dst_reg, src_reg, shift_reg)
#define uop_SAR_IMM(ir, dst_reg, src_reg, imm) uop_gen_reg_dst_src_imm(UOP_SAR_IMM, ir, dst_reg, src_reg, imm)
#define uop_SHL(ir, dst_reg, src_reg, shift_reg) uop_gen_reg_dst_src2(UOP_SHL, ir, dst_reg, src_reg, shift_reg)
#define uop_SHL_IMM(ir, dst_reg, src_reg, imm) uop_gen_reg_dst_src_imm(UOP_SHL_IMM, ir, dst_reg, src_reg, imm)
#define uop_SHR(ir, dst_reg, src_reg, shift_reg) uop_gen_reg_dst_src2(UOP_SHR, ir, dst_reg, src_reg, shift_reg)
#define uop_SHR_IMM(ir, dst_reg, src_reg, imm) uop_gen_reg_dst_src_imm(UOP_SHR_IMM, ir, dst_reg, src_reg, imm)
#define uop_ROL(ir, dst_reg, src_reg, shift_reg) uop_gen_reg_dst_src2(UOP_ROL, ir, dst_reg, src_reg, shift_reg)
#define uop_ROL_IMM(ir, dst_reg, src_reg, imm) uop_gen_reg_dst_src_imm(UOP_ROL_IMM, ir, dst_reg, src_reg, imm)
#define uop_ROR(ir, dst_reg, src_reg, shift_reg) uop_gen_reg_dst_src2(UOP_ROR, ir, dst_reg, src_reg, shift_reg)
#define uop_ROR_IMM(ir, dst_reg, src_reg, imm) uop_gen_reg_dst_src_imm(UOP_ROR_IMM, ir, dst_reg, src_reg, imm)
/* Function calls and conditional branches.  The *_DEST variants emit a
   branch whose target is filled in later (presumably via
   codegen_set_jump_dest below -- confirm in the backend); the pointer
   variants branch to the known target p. */
#define uop_CALL_FUNC(ir, p) uop_gen_pointer(UOP_CALL_FUNC, ir, p)
#define uop_CALL_FUNC_RESULT(ir, dst_reg, p) uop_gen_reg_dst_pointer(UOP_CALL_FUNC_RESULT, ir, dst_reg, p)
#define uop_CALL_INSTRUCTION_FUNC(ir, p) uop_gen_pointer(UOP_CALL_INSTRUCTION_FUNC, ir, p)
/* Note the argument order swap: the macro takes (src, imm, p) but the
   helper expects (src, p, imm). */
#define uop_CMP_IMM_JZ(ir, src_reg, imm, p) uop_gen_reg_src_pointer_imm(UOP_CMP_IMM_JZ, ir, src_reg, p, imm)
#define uop_CMP_IMM_JNZ_DEST(ir, src_reg, imm) uop_gen_reg_src1_imm(UOP_CMP_IMM_JNZ_DEST, ir, src_reg, imm)
#define uop_CMP_IMM_JZ_DEST(ir, src_reg, imm) uop_gen_reg_src1_imm(UOP_CMP_IMM_JZ_DEST, ir, src_reg, imm)
#define uop_CMP_JB(ir, src_reg_a, src_reg_b, p) uop_gen_reg_src2_pointer(UOP_CMP_JB, ir, src_reg_a, src_reg_b, p)
#define uop_CMP_JNBE(ir, src_reg_a, src_reg_b, p) uop_gen_reg_src2_pointer(UOP_CMP_JNBE, ir, src_reg_a, src_reg_b, p)
#define uop_CMP_JNB_DEST(ir, src_reg_a, src_reg_b) uop_gen_reg_src2(UOP_CMP_JNB_DEST, ir, src_reg_a, src_reg_b)
#define uop_CMP_JNBE_DEST(ir, src_reg_a, src_reg_b) uop_gen_reg_src2(UOP_CMP_JNBE_DEST, ir, src_reg_a, src_reg_b)
#define uop_CMP_JNL_DEST(ir, src_reg_a, src_reg_b) uop_gen_reg_src2(UOP_CMP_JNL_DEST, ir, src_reg_a, src_reg_b)
#define uop_CMP_JNLE_DEST(ir, src_reg_a, src_reg_b) uop_gen_reg_src2(UOP_CMP_JNLE_DEST, ir, src_reg_a, src_reg_b)
#define uop_CMP_JNO_DEST(ir, src_reg_a, src_reg_b) uop_gen_reg_src2(UOP_CMP_JNO_DEST, ir, src_reg_a, src_reg_b)
#define uop_CMP_JNZ_DEST(ir, src_reg_a, src_reg_b) uop_gen_reg_src2(UOP_CMP_JNZ_DEST, ir, src_reg_a, src_reg_b)
#define uop_CMP_JB_DEST(ir, src_reg_a, src_reg_b) uop_gen_reg_src2(UOP_CMP_JB_DEST, ir, src_reg_a, src_reg_b)
#define uop_CMP_JBE_DEST(ir, src_reg_a, src_reg_b) uop_gen_reg_src2(UOP_CMP_JBE_DEST, ir, src_reg_a, src_reg_b)
#define uop_CMP_JL_DEST(ir, src_reg_a, src_reg_b) uop_gen_reg_src2(UOP_CMP_JL_DEST, ir, src_reg_a, src_reg_b)
#define uop_CMP_JLE_DEST(ir, src_reg_a, src_reg_b) uop_gen_reg_src2(UOP_CMP_JLE_DEST, ir, src_reg_a, src_reg_b)
#define uop_CMP_JO_DEST(ir, src_reg_a, src_reg_b) uop_gen_reg_src2(UOP_CMP_JO_DEST, ir, src_reg_a, src_reg_b)
#define uop_CMP_JZ_DEST(ir, src_reg_a, src_reg_b) uop_gen_reg_src2(UOP_CMP_JZ_DEST, ir, src_reg_a, src_reg_b)
/* Scalar floating-point arithmetic/compare ops. */
#define uop_FADD(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_FADD, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_FCOM(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_FCOM, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_FDIV(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_FDIV, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_FMUL(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_FMUL, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_FSUB(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_FSUB, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_FABS(ir, dst_reg, src_reg) uop_gen_reg_dst_src1(UOP_FABS, ir, dst_reg, src_reg)
#define uop_FCHS(ir, dst_reg, src_reg) uop_gen_reg_dst_src1(UOP_FCHS, ir, dst_reg, src_reg)
#define uop_FSQRT(ir, dst_reg, src_reg) uop_gen_reg_dst_src1(UOP_FSQRT, ir, dst_reg, src_reg)
#define uop_FTST(ir, dst_reg, src_reg) uop_gen_reg_dst_src1(UOP_FTST, ir, dst_reg, src_reg)
/* Emit UOP_FP_ENTER / UOP_MMX_ENTER at most once per translated block.
   codegen_fpu_entered / codegen_mmx_entered track whether the mode-entry
   uop has already been generated; entering one mode resets the other's
   flag so switching back later re-emits the entry uop.  cpu_state.oldpc
   is passed as the immediate. */
#define uop_FP_ENTER(ir)                                      \
    do {                                                      \
        if (!codegen_fpu_entered)                             \
            uop_gen_imm(UOP_FP_ENTER, ir, cpu_state.oldpc);   \
        codegen_fpu_entered = 1;                              \
        codegen_mmx_entered = 0;                              \
    } while (0)
#define uop_MMX_ENTER(ir)                                     \
    do {                                                      \
        if (!codegen_mmx_entered)                             \
            uop_gen_imm(UOP_MMX_ENTER, ir, cpu_state.oldpc);  \
        codegen_mmx_entered = 1;                              \
        codegen_fpu_entered = 0;                              \
    } while (0)
/* Unconditional jump to a known target, and the deferred-target variant. */
#define uop_JMP(ir, p) uop_gen_pointer(UOP_JMP, ir, p)
#define uop_JMP_DEST(ir) uop_gen(UOP_JMP_DEST, ir)
/* Segment register load; note the macro takes (p, src_reg) but the helper
   expects (src_reg, p). */
#define uop_LOAD_SEG(ir, p, src_reg) uop_gen_reg_src_pointer(UOP_LOAD_SEG, ir, src_reg, p)
/* Guest memory access: seg_reg supplies the segment base, addr_reg the
   offset; *_ABS forms use an immediate address, *_OFFSET forms add an
   immediate displacement to addr_reg. */
#define uop_MEM_LOAD_ABS(ir, dst_reg, seg_reg, imm) uop_gen_reg_dst_src_imm(UOP_MEM_LOAD_ABS, ir, dst_reg, seg_reg, imm)
#define uop_MEM_LOAD_REG(ir, dst_reg, seg_reg, addr_reg) uop_gen_reg_dst_src2_imm(UOP_MEM_LOAD_REG, ir, dst_reg, seg_reg, addr_reg, 0)
#define uop_MEM_LOAD_REG_OFFSET(ir, dst_reg, seg_reg, addr_reg, offset) uop_gen_reg_dst_src2_imm(UOP_MEM_LOAD_REG, ir, dst_reg, seg_reg, addr_reg, offset)
#define uop_MEM_LOAD_SINGLE(ir, dst_reg, seg_reg, addr_reg) uop_gen_reg_dst_src2_imm(UOP_MEM_LOAD_SINGLE, ir, dst_reg, seg_reg, addr_reg, 0)
#define uop_MEM_LOAD_DOUBLE(ir, dst_reg, seg_reg, addr_reg) uop_gen_reg_dst_src2_imm(UOP_MEM_LOAD_DOUBLE, ir, dst_reg, seg_reg, addr_reg, 0)
#define uop_MEM_STORE_ABS(ir, seg_reg, imm, src_reg) uop_gen_reg_src2_imm(UOP_MEM_STORE_ABS, ir, seg_reg, src_reg, imm)
#define uop_MEM_STORE_REG(ir, seg_reg, addr_reg, src_reg) uop_gen_reg_src3_imm(UOP_MEM_STORE_REG, ir, seg_reg, addr_reg, src_reg, 0)
#define uop_MEM_STORE_REG_OFFSET(ir, seg_reg, addr_reg, offset, src_reg) uop_gen_reg_src3_imm(UOP_MEM_STORE_REG, ir, seg_reg, addr_reg, src_reg, offset)
#define uop_MEM_STORE_IMM_8(ir, seg_reg, addr_reg, imm) uop_gen_reg_src2_imm(UOP_MEM_STORE_IMM_8, ir, seg_reg, addr_reg, imm)
#define uop_MEM_STORE_IMM_16(ir, seg_reg, addr_reg, imm) uop_gen_reg_src2_imm(UOP_MEM_STORE_IMM_16, ir, seg_reg, addr_reg, imm)
#define uop_MEM_STORE_IMM_32(ir, seg_reg, addr_reg, imm) uop_gen_reg_src2_imm(UOP_MEM_STORE_IMM_32, ir, seg_reg, addr_reg, imm)
#define uop_MEM_STORE_SINGLE(ir, seg_reg, addr_reg, src_reg) uop_gen_reg_src3_imm(UOP_MEM_STORE_SINGLE, ir, seg_reg, addr_reg, src_reg, 0)
#define uop_MEM_STORE_DOUBLE(ir, seg_reg, addr_reg, src_reg) uop_gen_reg_src3_imm(UOP_MEM_STORE_DOUBLE, ir, seg_reg, addr_reg, src_reg, 0)
/* Register moves, immediate/pointer loads and widening conversions. */
#define uop_MOV(ir, dst_reg, src_reg) uop_gen_reg_dst_src1(UOP_MOV, ir, dst_reg, src_reg)
#define uop_MOV_IMM(ir, reg, imm) uop_gen_reg_dst_imm(UOP_MOV_IMM, ir, reg, imm)
#define uop_MOV_PTR(ir, reg, p) uop_gen_reg_dst_pointer(UOP_MOV_PTR, ir, reg, p)
#define uop_MOV_REG_PTR(ir, reg, p) uop_gen_reg_dst_pointer(UOP_MOV_REG_PTR, ir, reg, p)
#define uop_MOVZX_REG_PTR_8(ir, reg, p) uop_gen_reg_dst_pointer(UOP_MOVZX_REG_PTR_8, ir, reg, p)
#define uop_MOVZX_REG_PTR_16(ir, reg, p) uop_gen_reg_dst_pointer(UOP_MOVZX_REG_PTR_16, ir, reg, p)
#define uop_MOVSX(ir, dst_reg, src_reg) uop_gen_reg_dst_src1(UOP_MOVSX, ir, dst_reg, src_reg)
#define uop_MOVZX(ir, dst_reg, src_reg) uop_gen_reg_dst_src1(UOP_MOVZX, ir, dst_reg, src_reg)
#define uop_MOV_DOUBLE_INT(ir, dst_reg, src_reg) uop_gen_reg_dst_src1(UOP_MOV_DOUBLE_INT, ir, dst_reg, src_reg)
#define uop_MOV_INT_DOUBLE(ir, dst_reg, src_reg /*, nrc, orc*/) uop_gen_reg_dst_src1(UOP_MOV_INT_DOUBLE, ir, dst_reg, src_reg /*, nrc, orc*/)
#define uop_MOV_INT_DOUBLE_64(ir, dst_reg, src_reg_d, src_reg_q, tag) uop_gen_reg_dst_src3(UOP_MOV_INT_DOUBLE_64, ir, dst_reg, src_reg_d, src_reg_q, tag)
#define uop_NOP_BARRIER(ir) uop_gen(UOP_NOP_BARRIER, ir)
#define uop_PACKSSWB(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PACKSSWB, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_PACKSSDW(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PACKSSDW, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_PACKUSWB(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PACKUSWB, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_PADDB(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PADDB, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_PADDW(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PADDW, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_PADDD(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PADDD, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_PADDSB(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PADDSB, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_PADDSW(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PADDSW, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_PADDUSB(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PADDUSB, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_PADDUSW(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PADDUSW, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_PCMPEQB(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PCMPEQB, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_PCMPEQW(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PCMPEQW, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_PCMPEQD(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PCMPEQD, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_PCMPGTB(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PCMPGTB, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_PCMPGTW(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PCMPGTW, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_PCMPGTD(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PCMPGTD, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_PF2ID(ir, dst_reg, src_reg) uop_gen_reg_dst_src1(UOP_PF2ID, ir, dst_reg, src_reg)
#define uop_PFADD(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PFADD, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_PFCMPEQ(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PFCMPEQ, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_PFCMPGE(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PFCMPGE, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_PFCMPGT(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PFCMPGT, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_PFMAX(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PFMAX, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_PFMIN(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PFMIN, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_PFMUL(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PFMUL, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_PFRCP(ir, dst_reg, src_reg) uop_gen_reg_dst_src1(UOP_PFRCP, ir, dst_reg, src_reg)
#define uop_PFRSQRT(ir, dst_reg, src_reg) uop_gen_reg_dst_src1(UOP_PFRSQRT, ir, dst_reg, src_reg)
#define uop_PFSUB(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PFSUB, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_PI2FD(ir, dst_reg, src_reg) uop_gen_reg_dst_src1(UOP_PI2FD, ir, dst_reg, src_reg)
#define uop_PMADDWD(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PMADDWD, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_PMULHW(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PMULHW, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_PMULLW(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PMULLW, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_PSLLW_IMM(ir, dst_reg, src_reg, imm) uop_gen_reg_dst_src_imm(UOP_PSLLW_IMM, ir, dst_reg, src_reg, imm)
#define uop_PSLLD_IMM(ir, dst_reg, src_reg, imm) uop_gen_reg_dst_src_imm(UOP_PSLLD_IMM, ir, dst_reg, src_reg, imm)
#define uop_PSLLQ_IMM(ir, dst_reg, src_reg, imm) uop_gen_reg_dst_src_imm(UOP_PSLLQ_IMM, ir, dst_reg, src_reg, imm)
#define uop_PSRAW_IMM(ir, dst_reg, src_reg, imm) uop_gen_reg_dst_src_imm(UOP_PSRAW_IMM, ir, dst_reg, src_reg, imm)
#define uop_PSRAD_IMM(ir, dst_reg, src_reg, imm) uop_gen_reg_dst_src_imm(UOP_PSRAD_IMM, ir, dst_reg, src_reg, imm)
#define uop_PSRAQ_IMM(ir, dst_reg, src_reg, imm) uop_gen_reg_dst_src_imm(UOP_PSRAQ_IMM, ir, dst_reg, src_reg, imm)
#define uop_PSRLW_IMM(ir, dst_reg, src_reg, imm) uop_gen_reg_dst_src_imm(UOP_PSRLW_IMM, ir, dst_reg, src_reg, imm)
#define uop_PSRLD_IMM(ir, dst_reg, src_reg, imm) uop_gen_reg_dst_src_imm(UOP_PSRLD_IMM, ir, dst_reg, src_reg, imm)
#define uop_PSRLQ_IMM(ir, dst_reg, src_reg, imm) uop_gen_reg_dst_src_imm(UOP_PSRLQ_IMM, ir, dst_reg, src_reg, imm)
#define uop_PSUBB(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PSUBB, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_PSUBW(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PSUBW, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_PSUBD(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PSUBD, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_PSUBSB(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PSUBSB, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_PSUBSW(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PSUBSW, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_PSUBUSB(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PSUBUSB, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_PSUBUSW(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PSUBUSW, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_PUNPCKHBW(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PUNPCKHBW, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_PUNPCKHWD(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PUNPCKHWD, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_PUNPCKHDQ(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PUNPCKHDQ, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_PUNPCKLBW(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PUNPCKLBW, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_PUNPCKLWD(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PUNPCKLWD, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_PUNPCKLDQ(ir, dst_reg, src_reg_a, src_reg_b) uop_gen_reg_dst_src2(UOP_PUNPCKLDQ, ir, dst_reg, src_reg_a, src_reg_b)
#define uop_STORE_PTR_IMM(ir, p, imm) uop_gen_pointer_imm(UOP_STORE_P_IMM, ir, p, imm)
#define uop_STORE_PTR_IMM_8(ir, p, imm) uop_gen_pointer_imm(UOP_STORE_P_IMM_8, ir, p, imm)
#define uop_STORE_PTR_IMM_16(ir, p, imm) uop_gen_pointer_imm(UOP_STORE_P_IMM_16, ir, p, imm)
#define uop_TEST_JNS_DEST(ir, src_reg) uop_gen_reg_src1(UOP_TEST_JNS_DEST, ir, src_reg)
#define uop_TEST_JS_DEST(ir, src_reg) uop_gen_reg_src1(UOP_TEST_JS_DEST, ir, src_reg)
#ifdef DEBUG_EXTRA
# define uop_LOG_INSTR(ir, imm) uop_gen_imm(UOP_LOG_INSTR, ir, imm)
#endif
/* Backend primitives, implemented per host architecture: move data between
   a host register and either a fixed host address (p), an x87 register-file
   slot (base + reg_idx), or a host stack slot (stack_offset). */
void codegen_direct_read_8(codeblock_t *block, int host_reg, void *p);
void codegen_direct_read_16(codeblock_t *block, int host_reg, void *p);
void codegen_direct_read_32(codeblock_t *block, int host_reg, void *p);
void codegen_direct_read_64(codeblock_t *block, int host_reg, void *p);
void codegen_direct_read_pointer(codeblock_t *block, int host_reg, void *p);
void codegen_direct_read_double(codeblock_t *block, int host_reg, void *p);
void codegen_direct_read_st_8(codeblock_t *block, int host_reg, void *base, int reg_idx);
void codegen_direct_read_st_64(codeblock_t *block, int host_reg, void *base, int reg_idx);
void codegen_direct_read_st_double(codeblock_t *block, int host_reg, void *base, int reg_idx);
void codegen_direct_write_8(codeblock_t *block, void *p, int host_reg);
void codegen_direct_write_16(codeblock_t *block, void *p, int host_reg);
void codegen_direct_write_32(codeblock_t *block, void *p, int host_reg);
void codegen_direct_write_64(codeblock_t *block, void *p, int host_reg);
void codegen_direct_write_pointer(codeblock_t *block, void *p, int host_reg);
/* NOTE(review): both a _pointer and a _ptr write variant are declared --
   confirm against the backends whether both are still required. */
void codegen_direct_write_ptr(codeblock_t *block, void *p, int host_reg);
void codegen_direct_write_double(codeblock_t *block, void *p, int host_reg);
void codegen_direct_write_st_8(codeblock_t *block, void *base, int reg_idx, int host_reg);
void codegen_direct_write_st_64(codeblock_t *block, void *base, int reg_idx, int host_reg);
void codegen_direct_write_st_double(codeblock_t *block, void *base, int reg_idx, int host_reg);
/* Stack-slot variants. */
void codegen_direct_read_16_stack(codeblock_t *block, int host_reg, int stack_offset);
void codegen_direct_read_32_stack(codeblock_t *block, int host_reg, int stack_offset);
void codegen_direct_read_64_stack(codeblock_t *block, int host_reg, int stack_offset);
void codegen_direct_read_pointer_stack(codeblock_t *block, int host_reg, int stack_offset);
void codegen_direct_read_double_stack(codeblock_t *block, int host_reg, int stack_offset);
void codegen_direct_write_32_stack(codeblock_t *block, int stack_offset, int host_reg);
void codegen_direct_write_64_stack(codeblock_t *block, int stack_offset, int host_reg);
void codegen_direct_write_pointer_stack(codeblock_t *block, int stack_offset, int host_reg);
void codegen_direct_write_double_stack(codeblock_t *block, int stack_offset, int host_reg);
/* Patch a previously emitted forward branch to land at p. */
void codegen_set_jump_dest(codeblock_t *block, void *p);
/* Immediate stores straight into host memory / a stack slot. */
void codegen_direct_write_8_imm(codeblock_t *block, void *p, uint8_t imm_data);
void codegen_direct_write_16_imm(codeblock_t *block, void *p, uint16_t imm_data);
void codegen_direct_write_32_imm(codeblock_t *block, void *p, uint32_t imm_data);
void codegen_direct_write_32_imm_stack(codeblock_t *block, int stack_offset, uint32_t imm_data);
#endif
``` | /content/code_sandbox/src/codegen_new/codegen_ir_defs.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 16,078 |
```objective-c
/* Host integer register numbers (R0-R12 plus SP/LR/PC -- presumably the
   AArch32 register file, given the 13/14/15 assignments). */
#define REG_R0 0
#define REG_R1 1
#define REG_R2 2
#define REG_R3 3
#define REG_R4 4
#define REG_R5 5
#define REG_R6 6
#define REG_R7 7
#define REG_R8 8
#define REG_R9 9
#define REG_R10 10
#define REG_R11 11
#define REG_R12 12
#define REG_HOST_SP 13
#define REG_LR 14
#define REG_PC 15
/* Roles assigned to specific host registers by this backend. */
#define REG_ARG0 REG_R0
#define REG_ARG1 REG_R1
#define REG_ARG2 REG_R2
#define REG_ARG3 REG_R3
#define REG_CPUSTATE REG_R10
#define REG_TEMP REG_R3
#define REG_TEMP2 REG_R2
/* Host floating-point (D) register numbers. */
#define REG_D0 0
#define REG_D1 1
#define REG_D2 2
#define REG_D3 3
#define REG_D4 4
#define REG_D5 5
#define REG_D6 6
#define REG_D7 7
#define REG_D8 8
#define REG_D9 9
#define REG_D10 10
#define REG_D11 11
#define REG_D12 12
#define REG_D13 13
#define REG_D14 14
#define REG_D15 15
#define REG_D_TEMP REG_D0
#define REG_Q_TEMP REG_D0
#define REG_Q_TEMP_2 REG_D2
/* One-bit masks over the integer register numbers. */
#define REG_MASK_R0 (1 << REG_R0)
#define REG_MASK_R1 (1 << REG_R1)
#define REG_MASK_R2 (1 << REG_R2)
#define REG_MASK_R3 (1 << REG_R3)
#define REG_MASK_R4 (1 << REG_R4)
#define REG_MASK_R5 (1 << REG_R5)
#define REG_MASK_R6 (1 << REG_R6)
#define REG_MASK_R7 (1 << REG_R7)
#define REG_MASK_R8 (1 << REG_R8)
#define REG_MASK_R9 (1 << REG_R9)
#define REG_MASK_R10 (1 << REG_R10)
#define REG_MASK_R11 (1 << REG_R11)
#define REG_MASK_R12 (1 << REG_R12)
#define REG_MASK_SP (1 << REG_HOST_SP)
#define REG_MASK_LR (1 << REG_LR)
#define REG_MASK_PC (1 << REG_PC)
/* R4-R11: presumably the callee-saved set this backend may hold live
   values in -- confirm against the backend's prologue/epilogue. */
#define REG_MASK_LOCAL (REG_MASK_R4 | REG_MASK_R5 | REG_MASK_R6 | REG_MASK_R7 | REG_MASK_R8 | REG_MASK_R9 | REG_MASK_R10 | REG_MASK_R11)
/* Number of host integer / FP registers exposed to the register allocator. */
#define CODEGEN_HOST_REGS 7
#define CODEGEN_HOST_FP_REGS 8
/* Entry points of helper routines; filled in by the backend at init time
   (the matching definitions live in the backend .c file). */
extern void *codegen_mem_load_byte;
extern void *codegen_mem_load_word;
extern void *codegen_mem_load_long;
extern void *codegen_mem_load_quad;
extern void *codegen_mem_load_single;
extern void *codegen_mem_load_double;
extern void *codegen_mem_store_byte;
extern void *codegen_mem_store_word;
extern void *codegen_mem_store_long;
extern void *codegen_mem_store_quad;
extern void *codegen_mem_store_single;
extern void *codegen_mem_store_double;
extern void *codegen_fp_round;
extern void *codegen_gpf_rout;
extern void *codegen_exit_rout;
``` | /content/code_sandbox/src/codegen_new/codegen_backend_arm_defs.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 733 |
```c
#include <stdint.h>
#include <86box/86box.h>
#include "cpu.h"
#include <86box/mem.h>
#include "codegen.h"
#include "codegen_ir.h"
#include "codegen_ops.h"
#include "codegen_ops_3dnow.h"
#include "codegen_ops_arith.h"
#include "codegen_ops_branch.h"
#include "codegen_ops_fpu_arith.h"
#include "codegen_ops_fpu_constant.h"
#include "codegen_ops_fpu_loadstore.h"
#include "codegen_ops_fpu_misc.h"
#include "codegen_ops_jump.h"
#include "codegen_ops_logic.h"
#include "codegen_ops_misc.h"
#include "codegen_ops_mmx_arith.h"
#include "codegen_ops_mmx_cmp.h"
#include "codegen_ops_mmx_loadstore.h"
#include "codegen_ops_mmx_logic.h"
#include "codegen_ops_mmx_pack.h"
#include "codegen_ops_mmx_shift.h"
#include "codegen_ops_mov.h"
#include "codegen_ops_shift.h"
#include "codegen_ops_stack.h"
/* Recompiler dispatch table for one-byte opcodes: entries 0x000-0x0ff are
   used with 16-bit operand size, 0x100-0x1ff with 32-bit.  A NULL entry
   means no recompiler routine exists for that opcode (presumably falling
   back to the interpreter -- the dispatch logic lives elsewhere). */
RecompOpFn recomp_opcodes[512] = {
    // clang-format off
    /*16-bit data*/
    /*      00              01              02              03              04              05              06              07              08              09              0a              0b              0c              0d              0e              0f*/
/*00*/  ropADD_b_rmw, ropADD_w_rmw, ropADD_b_rm, ropADD_w_rm, ropADD_AL_imm, ropADD_AX_imm, ropPUSH_ES_16, ropPOP_ES_16, ropOR_b_rmw, ropOR_w_rmw, ropOR_b_rm, ropOR_w_rm, ropOR_AL_imm, ropOR_AX_imm, ropPUSH_CS_16, NULL,
/*10*/  ropADC_b_rmw, ropADC_w_rmw, ropADC_b_rm, ropADC_w_rm, ropADC_AL_imm, ropADC_AX_imm, ropPUSH_SS_16, NULL, ropSBB_b_rmw, ropSBB_w_rmw, ropSBB_b_rm, ropSBB_w_rm, ropSBB_AL_imm, ropSBB_AX_imm, ropPUSH_DS_16, ropPOP_DS_16,
/*20*/  ropAND_b_rmw, ropAND_w_rmw, ropAND_b_rm, ropAND_w_rm, ropAND_AL_imm, ropAND_AX_imm, NULL, NULL, ropSUB_b_rmw, ropSUB_w_rmw, ropSUB_b_rm, ropSUB_w_rm, ropSUB_AL_imm, ropSUB_AX_imm, NULL, NULL,
/*30*/  ropXOR_b_rmw, ropXOR_w_rmw, ropXOR_b_rm, ropXOR_w_rm, ropXOR_AL_imm, ropXOR_AX_imm, NULL, NULL, ropCMP_b_rmw, ropCMP_w_rmw, ropCMP_b_rm, ropCMP_w_rm, ropCMP_AL_imm, ropCMP_AX_imm, NULL, NULL,
/*40*/  ropINC_r16, ropINC_r16, ropINC_r16, ropINC_r16, ropINC_r16, ropINC_r16, ropINC_r16, ropINC_r16, ropDEC_r16, ropDEC_r16, ropDEC_r16, ropDEC_r16, ropDEC_r16, ropDEC_r16, ropDEC_r16, ropDEC_r16,
/*50*/  ropPUSH_r16, ropPUSH_r16, ropPUSH_r16, ropPUSH_r16, ropPUSH_r16, ropPUSH_r16, ropPUSH_r16, ropPUSH_r16, ropPOP_r16, ropPOP_r16, ropPOP_r16, ropPOP_r16, ropPOP_r16, ropPOP_r16, ropPOP_r16, ropPOP_r16,
/*60*/  ropPUSHA_16, ropPOPA_16, NULL, NULL, NULL, NULL, NULL, NULL, ropPUSH_imm_16, NULL, ropPUSH_imm_16_8, NULL, NULL, NULL, NULL, NULL,
/*70*/  ropJO_8, ropJNO_8, ropJB_8, ropJNB_8, ropJE_8, ropJNE_8, ropJBE_8, ropJNBE_8, ropJS_8, ropJNS_8, ropJP_8, ropJNP_8, ropJL_8, ropJNL_8, ropJLE_8, ropJNLE_8,
/*80*/  rop80, rop81_w, rop80, rop83_w, ropTEST_b_rm, ropTEST_w_rm, ropXCHG_8, ropXCHG_16, ropMOV_b_r, ropMOV_w_r, ropMOV_r_b, ropMOV_r_w, ropMOV_w_seg, ropLEA_16, ropMOV_seg_w, ropPOP_W,
/*90*/  ropNOP, ropXCHG_AX, ropXCHG_AX, ropXCHG_AX, ropXCHG_AX, ropXCHG_AX, ropXCHG_AX, ropXCHG_AX, ropCBW, ropCWD, NULL, NULL, ropPUSHF, NULL, NULL, NULL,
/*a0*/  ropMOV_AL_abs, ropMOV_AX_abs, ropMOV_abs_AL, ropMOV_abs_AX, NULL, NULL, NULL, NULL, ropTEST_AL_imm, ropTEST_AX_imm, NULL, NULL, NULL, NULL, NULL, NULL,
/*b0*/  ropMOV_rb_imm, ropMOV_rb_imm, ropMOV_rb_imm, ropMOV_rb_imm, ropMOV_rb_imm, ropMOV_rb_imm, ropMOV_rb_imm, ropMOV_rb_imm, ropMOV_rw_imm, ropMOV_rw_imm, ropMOV_rw_imm, ropMOV_rw_imm, ropMOV_rw_imm, ropMOV_rw_imm, ropMOV_rw_imm, ropMOV_rw_imm,
/*c0*/  ropC0, ropC1_w, ropRET_imm_16, ropRET_16, ropLES_16, ropLDS_16, ropMOV_b_imm, ropMOV_w_imm, NULL, ropLEAVE_16, ropRETF_imm_16, ropRETF_16, NULL, NULL, NULL, NULL,
/*d0*/  ropD0, ropD1_w, ropD2, ropD3_w, NULL, NULL, NULL, ropXLAT, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*e0*/  ropLOOPNE, ropLOOPE, ropLOOP, ropJCXZ, NULL, NULL, NULL, NULL, ropCALL_r16, ropJMP_r16, ropJMP_far_16, ropJMP_r8, NULL, NULL, NULL, NULL,
/*f0*/  NULL, NULL, NULL, NULL, NULL, ropCMC, ropF6, ropF7_16, ropCLC, ropSTC, ropCLI, ropSTI, ropCLD, ropSTD, ropINCDEC, ropFF_16,
    /*32-bit data*/
    /*      00              01              02              03              04              05              06              07              08              09              0a              0b              0c              0d              0e              0f*/
/*00*/  ropADD_b_rmw, ropADD_l_rmw, ropADD_b_rm, ropADD_l_rm, ropADD_AL_imm, ropADD_EAX_imm, ropPUSH_ES_32, ropPOP_ES_32, ropOR_b_rmw, ropOR_l_rmw, ropOR_b_rm, ropOR_l_rm, ropOR_AL_imm, ropOR_EAX_imm, ropPUSH_CS_32, NULL,
/*10*/  ropADC_b_rmw, ropADC_l_rmw, ropADC_b_rm, ropADC_l_rm, ropADC_AL_imm, ropADC_EAX_imm, ropPUSH_SS_32, NULL, ropSBB_b_rmw, ropSBB_l_rmw, ropSBB_b_rm, ropSBB_l_rm, ropSBB_AL_imm, ropSBB_EAX_imm, ropPUSH_DS_32, ropPOP_DS_32,
/*20*/  ropAND_b_rmw, ropAND_l_rmw, ropAND_b_rm, ropAND_l_rm, ropAND_AL_imm, ropAND_EAX_imm, NULL, NULL, ropSUB_b_rmw, ropSUB_l_rmw, ropSUB_b_rm, ropSUB_l_rm, ropSUB_AL_imm, ropSUB_EAX_imm, NULL, NULL,
/*30*/  ropXOR_b_rmw, ropXOR_l_rmw, ropXOR_b_rm, ropXOR_l_rm, ropXOR_AL_imm, ropXOR_EAX_imm, NULL, NULL, ropCMP_b_rmw, ropCMP_l_rmw, ropCMP_b_rm, ropCMP_l_rm, ropCMP_AL_imm, ropCMP_EAX_imm, NULL, NULL,
/*40*/  ropINC_r32, ropINC_r32, ropINC_r32, ropINC_r32, ropINC_r32, ropINC_r32, ropINC_r32, ropINC_r32, ropDEC_r32, ropDEC_r32, ropDEC_r32, ropDEC_r32, ropDEC_r32, ropDEC_r32, ropDEC_r32, ropDEC_r32,
/*50*/  ropPUSH_r32, ropPUSH_r32, ropPUSH_r32, ropPUSH_r32, ropPUSH_r32, ropPUSH_r32, ropPUSH_r32, ropPUSH_r32, ropPOP_r32, ropPOP_r32, ropPOP_r32, ropPOP_r32, ropPOP_r32, ropPOP_r32, ropPOP_r32, ropPOP_r32,
/*60*/  ropPUSHA_32, ropPOPA_32, NULL, NULL, NULL, NULL, NULL, NULL, ropPUSH_imm_32, NULL, ropPUSH_imm_32_8, NULL, NULL, NULL, NULL, NULL,
/*70*/  ropJO_8, ropJNO_8, ropJB_8, ropJNB_8, ropJE_8, ropJNE_8, ropJBE_8, ropJNBE_8, ropJS_8, ropJNS_8, ropJP_8, ropJNP_8, ropJL_8, ropJNL_8, ropJLE_8, ropJNLE_8,
/*80*/  rop80, rop81_l, rop80, rop83_l, ropTEST_b_rm, ropTEST_l_rm, ropXCHG_8, ropXCHG_32, ropMOV_b_r, ropMOV_l_r, ropMOV_r_b, ropMOV_r_l, ropMOV_l_seg, ropLEA_32, ropMOV_seg_w, ropPOP_L,
/*90*/  ropNOP, ropXCHG_EAX, ropXCHG_EAX, ropXCHG_EAX, ropXCHG_EAX, ropXCHG_EAX, ropXCHG_EAX, ropXCHG_EAX, ropCWDE, ropCDQ, NULL, NULL, ropPUSHFD, NULL, NULL, NULL,
/*a0*/  ropMOV_AL_abs, ropMOV_EAX_abs, ropMOV_abs_AL, ropMOV_abs_EAX, NULL, NULL, NULL, NULL, ropTEST_AL_imm, ropTEST_EAX_imm, NULL, NULL, NULL, NULL, NULL, NULL,
/*b0*/  ropMOV_rb_imm, ropMOV_rb_imm, ropMOV_rb_imm, ropMOV_rb_imm, ropMOV_rb_imm, ropMOV_rb_imm, ropMOV_rb_imm, ropMOV_rb_imm, ropMOV_rl_imm, ropMOV_rl_imm, ropMOV_rl_imm, ropMOV_rl_imm, ropMOV_rl_imm, ropMOV_rl_imm, ropMOV_rl_imm, ropMOV_rl_imm,
/*c0*/  ropC0, ropC1_l, ropRET_imm_32, ropRET_32, ropLES_32, ropLDS_32, ropMOV_b_imm, ropMOV_l_imm, NULL, ropLEAVE_32, ropRETF_imm_32, ropRETF_32, NULL, NULL, NULL, NULL,
/*d0*/  ropD0, ropD1_l, ropD2, ropD3_l, NULL, NULL, NULL, ropXLAT, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*e0*/  ropLOOPNE, ropLOOPE, ropLOOP, ropJCXZ, NULL, NULL, NULL, NULL, ropCALL_r32, ropJMP_r32, ropJMP_far_32, ropJMP_r8, NULL, NULL, NULL, NULL,
/*f0*/  NULL, NULL, NULL, NULL, NULL, ropCMC, ropF6, ropF7_32, ropCLC, ropSTC, ropCLI, ropSTI, ropCLD, ropSTD, ropINCDEC, ropFF_32
    // clang-format on
};
RecompOpFn recomp_opcodes_0f[512] = {
// clang-format off
/*16-bit data*/
/* 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f*/
/*00*/ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*10*/ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*20*/ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*30*/ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*40*/ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*50*/ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
#if defined __ARM_EABI__ || defined _ARM_ || defined _M_ARM || defined __aarch64__ || defined _M_ARM64
/*60*/ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*70*/ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
#else
/*60*/ ropPUNPCKLBW, ropPUNPCKLWD, ropPUNPCKLDQ, ropPACKSSWB, ropPCMPGTB, ropPCMPGTW, ropPCMPGTD, ropPACKUSWB, ropPUNPCKHBW, ropPUNPCKHWD, ropPUNPCKHDQ, ropPACKSSDW, NULL, NULL, ropMOVD_r_d, ropMOVQ_r_q,
/*70*/ NULL, ropPSxxW_imm, ropPSxxD_imm, ropPSxxQ_imm, ropPCMPEQB, ropPCMPEQW, ropPCMPEQD, NULL, NULL, NULL, NULL, NULL, NULL, NULL, ropMOVD_d_r, ropMOVQ_q_r,
#endif
/*80*/ ropJO_16, ropJNO_16, ropJB_16, ropJNB_16, ropJE_16, ropJNE_16, ropJBE_16, ropJNBE_16, ropJS_16, ropJNS_16, ropJP_16, ropJNP_16, ropJL_16, ropJNL_16, ropJLE_16, ropJNLE_16,
/*90*/ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*a0*/ ropPUSH_FS_16, ropPOP_FS_16, NULL, NULL, ropSHLD_16_imm, NULL, NULL, NULL, ropPUSH_GS_16, ropPOP_GS_16, NULL, NULL, ropSHRD_16_imm, NULL, NULL, NULL,
/*b0*/ NULL, NULL, ropLSS_16, NULL, ropLFS_16, ropLGS_16, ropMOVZX_16_8, NULL, NULL, NULL, NULL, NULL, NULL, NULL, ropMOVSX_16_8, NULL,
/*c0*/ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
#if defined __ARM_EABI__ || defined _ARM_ || defined _M_ARM || defined __aarch64__ || defined _M_ARM64
/*d0*/ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*e0*/ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*f0*/ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
#else
/*d0*/ NULL, NULL, NULL, NULL, NULL, ropPMULLW, NULL, NULL, ropPSUBUSB, ropPSUBUSW, NULL, ropPAND, ropPADDUSB, ropPADDUSW, NULL, ropPANDN,
/*e0*/ NULL, NULL, NULL, NULL, NULL, ropPMULHW, NULL, NULL, ropPSUBSB, ropPSUBSW, NULL, ropPOR, ropPADDSB, ropPADDSW, NULL, ropPXOR,
/*f0*/ NULL, NULL, NULL, NULL, NULL, ropPMADDWD, NULL, NULL, ropPSUBB, ropPSUBW, ropPSUBD, NULL, ropPADDB, ropPADDW, ropPADDD, NULL,
#endif
/*32-bit data*/
/* 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f*/
/*00*/ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*10*/ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*20*/ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*30*/ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*40*/ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*50*/ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
#if defined __ARM_EABI__ || defined _ARM_ || defined _M_ARM || defined __aarch64__ || defined _M_ARM64
/*60*/ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*70*/ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
#else
/*60*/ ropPUNPCKLBW, ropPUNPCKLWD, ropPUNPCKLDQ, ropPACKSSWB, ropPCMPGTB, ropPCMPGTW, ropPCMPGTD, ropPACKUSWB, ropPUNPCKHBW, ropPUNPCKHWD, ropPUNPCKHDQ, ropPACKSSDW, NULL, NULL, ropMOVD_r_d, ropMOVQ_r_q,
/*70*/ NULL, ropPSxxW_imm, ropPSxxD_imm, ropPSxxQ_imm, ropPCMPEQB, ropPCMPEQW, ropPCMPEQD, NULL, NULL, NULL, NULL, NULL, NULL, NULL, ropMOVD_d_r, ropMOVQ_q_r,
#endif
/*80*/ ropJO_32, ropJNO_32, ropJB_32, ropJNB_32, ropJE_32, ropJNE_32, ropJBE_32, ropJNBE_32, ropJS_32, ropJNS_32, ropJP_32, ropJNP_32, ropJL_32, ropJNL_32, ropJLE_32, ropJNLE_32,
/*90*/ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*a0*/ ropPUSH_FS_32, ropPOP_FS_32, NULL, NULL, ropSHLD_32_imm, NULL, NULL, NULL, ropPUSH_GS_32, ropPOP_GS_32, NULL, NULL, ropSHRD_32_imm, NULL, NULL, NULL,
/*b0*/ NULL, NULL, ropLSS_32, NULL, ropLFS_32, ropLGS_32, ropMOVZX_32_8, ropMOVZX_32_16, NULL, NULL, NULL, NULL, NULL, NULL, ropMOVSX_32_8, ropMOVSX_32_16,
/*c0*/ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
#if defined __ARM_EABI__ || defined _ARM_ || defined _M_ARM || defined __aarch64__ || defined _M_ARM64
/*d0*/ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*e0*/ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*f0*/ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
#else
/*d0*/ NULL, NULL, NULL, NULL, NULL, ropPMULLW, NULL, NULL, ropPSUBUSB, ropPSUBUSW, NULL, ropPAND, ropPADDUSB, ropPADDUSW, NULL, ropPANDN,
/*e0*/ NULL, NULL, NULL, NULL, NULL, ropPMULHW, NULL, NULL, ropPSUBSB, ropPSUBSW, NULL, ropPOR, ropPADDSB, ropPADDSW, NULL, ropPXOR,
/*f0*/ NULL, NULL, NULL, NULL, NULL, ropPMADDWD, NULL, NULL, ropPSUBB, ropPSUBW, ropPSUBD, NULL, ropPADDB, ropPADDW, ropPADDD, NULL,
#endif
// clang-format on
};
/* Recompiler dispatch table for two-byte (0F xx) opcodes on CPUs without MMX.
 * 512 entries: [0..255] = handlers with 16-bit operand size, [256..511] = the
 * same opcodes with 32-bit operand size (see the "16-bit data" / "32-bit data"
 * section markers below).  The index within each half is the second opcode
 * byte; NULL means "no recompiled handler" (presumably falls back to the
 * interpreter -- confirm against the dispatch code that consults this table).
 * Compared to the MMX-capable table above, all MMX rows (0x60-0x7f and
 * 0xd0-0xff) are NULL here. */
RecompOpFn recomp_opcodes_0f_no_mmx[512] = {
    // clang-format off
/*16-bit data*/
/*      00              01              02              03              04              05              06              07              08              09              0a              0b              0c              0d              0e              0f*/
/*00*/  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,
/*10*/  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,
/*20*/  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,
/*30*/  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,
/*40*/  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,
/*50*/  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,
/*60*/  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,
/*70*/  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,
/*80*/  ropJO_16,       ropJNO_16,      ropJB_16,       ropJNB_16,      ropJE_16,       ropJNE_16,      ropJBE_16,      ropJNBE_16,     ropJS_16,       ropJNS_16,      ropJP_16,       ropJNP_16,      ropJL_16,       ropJNL_16,      ropJLE_16,      ropJNLE_16,
/*90*/  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,
/*a0*/  ropPUSH_FS_16,  ropPOP_FS_16,   NULL,           NULL,           ropSHLD_16_imm, NULL,           NULL,           NULL,           ropPUSH_GS_16,  ropPOP_GS_16,   NULL,           NULL,           ropSHRD_16_imm, NULL,           NULL,           NULL,
/*b0*/  NULL,           NULL,           ropLSS_16,      NULL,           ropLFS_16,      ropLGS_16,      ropMOVZX_16_8,  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           ropMOVSX_16_8,  NULL,
/*c0*/  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,
/*d0*/  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,
/*e0*/  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,
/*f0*/  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,
/*32-bit data*/
/*      00              01              02              03              04              05              06              07              08              09              0a              0b              0c              0d              0e              0f*/
/*00*/  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,
/*10*/  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,
/*20*/  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,
/*30*/  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,
/*40*/  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,
/*50*/  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,
/*60*/  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,
/*70*/  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,
/*80*/  ropJO_32,       ropJNO_32,      ropJB_32,       ropJNB_32,      ropJE_32,       ropJNE_32,      ropJBE_32,      ropJNBE_32,     ropJS_32,       ropJNS_32,      ropJP_32,       ropJNP_32,      ropJL_32,       ropJNL_32,      ropJLE_32,      ropJNLE_32,
/*90*/  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,
/*a0*/  ropPUSH_FS_32,  ropPOP_FS_32,   NULL,           NULL,           ropSHLD_32_imm, NULL,           NULL,           NULL,           ropPUSH_GS_32,  ropPOP_GS_32,   NULL,           NULL,           ropSHRD_32_imm, NULL,           NULL,           NULL,
/*b0*/  NULL,           NULL,           ropLSS_32,      NULL,           ropLFS_32,      ropLGS_32,      ropMOVZX_32_8,  ropMOVZX_32_16, NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           ropMOVSX_32_8,  ropMOVSX_32_16,
/*c0*/  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,
/*d0*/  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,
/*e0*/  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,
/*f0*/  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,
    // clang-format on
};
/* Recompiler dispatch table for 3DNow! instructions, indexed by the 3DNow!
 * suffix opcode byte (the immediate byte following 0F 0F /r).  256 entries,
 * no operand-size split.  On ARM targets the whole table is zero-initialized
 * ("0" below), disabling 3DNow! recompilation there -- presumably those
 * instructions fall back to the interpreter; confirm against the dispatcher.
 * NOTE(review): suffixes 0xa6 (PFRCPIT1) and 0xb6 (PFRCPIT2) both map to
 * ropPFRCPIT here -- verify that a single shared handler is intentional. */
RecompOpFn recomp_opcodes_3DNOW[256] = {
    // clang-format off
#if defined __ARM_EABI__ || defined _ARM_ || defined _M_ARM || defined __aarch64__ || defined _M_ARM64
    0
#else
/*      00              01              02              03              04              05              06              07              08              09              0a              0b              0c              0d              0e              0f*/
/*00*/  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           ropPI2FD,       NULL,           NULL,
/*10*/  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           ropPF2ID,       NULL,           NULL,
/*20*/  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,
/*30*/  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,
/*40*/  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,
/*50*/  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,
/*60*/  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,
/*70*/  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,
/*80*/  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,
/*90*/  ropPFCMPGE,     NULL,           NULL,           NULL,           ropPFMIN,       NULL,           ropPFRCP,       ropPFRSQRT,     NULL,           NULL,           ropPFSUB,       NULL,           NULL,           NULL,           ropPFADD,       NULL,
/*a0*/  ropPFCMPGT,     NULL,           NULL,           NULL,           ropPFMAX,       NULL,           ropPFRCPIT,     ropPFRSQIT1,    NULL,           NULL,           ropPFSUBR,      NULL,           NULL,           NULL,           NULL,           NULL,
/*b0*/  ropPFCMPEQ,     NULL,           NULL,           NULL,           ropPFMUL,       NULL,           ropPFRCPIT,     NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,
/*c0*/  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,
/*d0*/  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,
/*e0*/  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,
/*f0*/  NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,           NULL,
#endif
    // clang-format on
};
/* Recompiler dispatch table for the D8 FPU escape opcode.  512 entries:
 * [0..255] = 16-bit operand size, [256..511] = 32-bit (see section markers);
 * the index within each half is the ModR/M byte.  Rows 0x00-0xbf are the
 * memory forms (single-precision operands, "s" suffix), grouped by the reg
 * field: /0 FADD, /1 FMUL, /2 FCOM, /3 FCOMP, /4 FSUB, /5 FSUBR, /6 FDIV,
 * /7 FDIVR.  Rows 0xc0-0xff are the register (ST(i)) forms. */
RecompOpFn recomp_opcodes_d8[512] = {
    // clang-format off
/*16-bit data*/
/*      00          01          02          03          04          05          06          07          08          09          0a          0b          0c          0d          0e          0f*/
/*00*/  ropFADDs,   ropFADDs,   ropFADDs,   ropFADDs,   ropFADDs,   ropFADDs,   ropFADDs,   ropFADDs,   ropFMULs,   ropFMULs,   ropFMULs,   ropFMULs,   ropFMULs,   ropFMULs,   ropFMULs,   ropFMULs,
/*10*/  ropFCOMs,   ropFCOMs,   ropFCOMs,   ropFCOMs,   ropFCOMs,   ropFCOMs,   ropFCOMs,   ropFCOMs,   ropFCOMPs,  ropFCOMPs,  ropFCOMPs,  ropFCOMPs,  ropFCOMPs,  ropFCOMPs,  ropFCOMPs,  ropFCOMPs,
/*20*/  ropFSUBs,   ropFSUBs,   ropFSUBs,   ropFSUBs,   ropFSUBs,   ropFSUBs,   ropFSUBs,   ropFSUBs,   ropFSUBRs,  ropFSUBRs,  ropFSUBRs,  ropFSUBRs,  ropFSUBRs,  ropFSUBRs,  ropFSUBRs,  ropFSUBRs,
/*30*/  ropFDIVs,   ropFDIVs,   ropFDIVs,   ropFDIVs,   ropFDIVs,   ropFDIVs,   ropFDIVs,   ropFDIVs,   ropFDIVRs,  ropFDIVRs,  ropFDIVRs,  ropFDIVRs,  ropFDIVRs,  ropFDIVRs,  ropFDIVRs,  ropFDIVRs,
/*40*/  ropFADDs,   ropFADDs,   ropFADDs,   ropFADDs,   ropFADDs,   ropFADDs,   ropFADDs,   ropFADDs,   ropFMULs,   ropFMULs,   ropFMULs,   ropFMULs,   ropFMULs,   ropFMULs,   ropFMULs,   ropFMULs,
/*50*/  ropFCOMs,   ropFCOMs,   ropFCOMs,   ropFCOMs,   ropFCOMs,   ropFCOMs,   ropFCOMs,   ropFCOMs,   ropFCOMPs,  ropFCOMPs,  ropFCOMPs,  ropFCOMPs,  ropFCOMPs,  ropFCOMPs,  ropFCOMPs,  ropFCOMPs,
/*60*/  ropFSUBs,   ropFSUBs,   ropFSUBs,   ropFSUBs,   ropFSUBs,   ropFSUBs,   ropFSUBs,   ropFSUBs,   ropFSUBRs,  ropFSUBRs,  ropFSUBRs,  ropFSUBRs,  ropFSUBRs,  ropFSUBRs,  ropFSUBRs,  ropFSUBRs,
/*70*/  ropFDIVs,   ropFDIVs,   ropFDIVs,   ropFDIVs,   ropFDIVs,   ropFDIVs,   ropFDIVs,   ropFDIVs,   ropFDIVRs,  ropFDIVRs,  ropFDIVRs,  ropFDIVRs,  ropFDIVRs,  ropFDIVRs,  ropFDIVRs,  ropFDIVRs,
/*80*/  ropFADDs,   ropFADDs,   ropFADDs,   ropFADDs,   ropFADDs,   ropFADDs,   ropFADDs,   ropFADDs,   ropFMULs,   ropFMULs,   ropFMULs,   ropFMULs,   ropFMULs,   ropFMULs,   ropFMULs,   ropFMULs,
/*90*/  ropFCOMs,   ropFCOMs,   ropFCOMs,   ropFCOMs,   ropFCOMs,   ropFCOMs,   ropFCOMs,   ropFCOMs,   ropFCOMPs,  ropFCOMPs,  ropFCOMPs,  ropFCOMPs,  ropFCOMPs,  ropFCOMPs,  ropFCOMPs,  ropFCOMPs,
/*a0*/  ropFSUBs,   ropFSUBs,   ropFSUBs,   ropFSUBs,   ropFSUBs,   ropFSUBs,   ropFSUBs,   ropFSUBs,   ropFSUBRs,  ropFSUBRs,  ropFSUBRs,  ropFSUBRs,  ropFSUBRs,  ropFSUBRs,  ropFSUBRs,  ropFSUBRs,
/*b0*/  ropFDIVs,   ropFDIVs,   ropFDIVs,   ropFDIVs,   ropFDIVs,   ropFDIVs,   ropFDIVs,   ropFDIVs,   ropFDIVRs,  ropFDIVRs,  ropFDIVRs,  ropFDIVRs,  ropFDIVRs,  ropFDIVRs,  ropFDIVRs,  ropFDIVRs,
/*c0*/  ropFADD,    ropFADD,    ropFADD,    ropFADD,    ropFADD,    ropFADD,    ropFADD,    ropFADD,    ropFMUL,    ropFMUL,    ropFMUL,    ropFMUL,    ropFMUL,    ropFMUL,    ropFMUL,    ropFMUL,
/*d0*/  ropFCOM,    ropFCOM,    ropFCOM,    ropFCOM,    ropFCOM,    ropFCOM,    ropFCOM,    ropFCOM,    ropFCOMP,   ropFCOMP,   ropFCOMP,   ropFCOMP,   ropFCOMP,   ropFCOMP,   ropFCOMP,   ropFCOMP,
/*e0*/  ropFSUB,    ropFSUB,    ropFSUB,    ropFSUB,    ropFSUB,    ropFSUB,    ropFSUB,    ropFSUB,    ropFSUBR,   ropFSUBR,   ropFSUBR,   ropFSUBR,   ropFSUBR,   ropFSUBR,   ropFSUBR,   ropFSUBR,
/*f0*/  ropFDIV,    ropFDIV,    ropFDIV,    ropFDIV,    ropFDIV,    ropFDIV,    ropFDIV,    ropFDIV,    ropFDIVR,   ropFDIVR,   ropFDIVR,   ropFDIVR,   ropFDIVR,   ropFDIVR,   ropFDIVR,   ropFDIVR,
/*32-bit data*/
/*      00          01          02          03          04          05          06          07          08          09          0a          0b          0c          0d          0e          0f*/
/*00*/  ropFADDs,   ropFADDs,   ropFADDs,   ropFADDs,   ropFADDs,   ropFADDs,   ropFADDs,   ropFADDs,   ropFMULs,   ropFMULs,   ropFMULs,   ropFMULs,   ropFMULs,   ropFMULs,   ropFMULs,   ropFMULs,
/*10*/  ropFCOMs,   ropFCOMs,   ropFCOMs,   ropFCOMs,   ropFCOMs,   ropFCOMs,   ropFCOMs,   ropFCOMs,   ropFCOMPs,  ropFCOMPs,  ropFCOMPs,  ropFCOMPs,  ropFCOMPs,  ropFCOMPs,  ropFCOMPs,  ropFCOMPs,
/*20*/  ropFSUBs,   ropFSUBs,   ropFSUBs,   ropFSUBs,   ropFSUBs,   ropFSUBs,   ropFSUBs,   ropFSUBs,   ropFSUBRs,  ropFSUBRs,  ropFSUBRs,  ropFSUBRs,  ropFSUBRs,  ropFSUBRs,  ropFSUBRs,  ropFSUBRs,
/*30*/  ropFDIVs,   ropFDIVs,   ropFDIVs,   ropFDIVs,   ropFDIVs,   ropFDIVs,   ropFDIVs,   ropFDIVs,   ropFDIVRs,  ropFDIVRs,  ropFDIVRs,  ropFDIVRs,  ropFDIVRs,  ropFDIVRs,  ropFDIVRs,  ropFDIVRs,
/*40*/  ropFADDs,   ropFADDs,   ropFADDs,   ropFADDs,   ropFADDs,   ropFADDs,   ropFADDs,   ropFADDs,   ropFMULs,   ropFMULs,   ropFMULs,   ropFMULs,   ropFMULs,   ropFMULs,   ropFMULs,   ropFMULs,
/*50*/  ropFCOMs,   ropFCOMs,   ropFCOMs,   ropFCOMs,   ropFCOMs,   ropFCOMs,   ropFCOMs,   ropFCOMs,   ropFCOMPs,  ropFCOMPs,  ropFCOMPs,  ropFCOMPs,  ropFCOMPs,  ropFCOMPs,  ropFCOMPs,  ropFCOMPs,
/*60*/  ropFSUBs,   ropFSUBs,   ropFSUBs,   ropFSUBs,   ropFSUBs,   ropFSUBs,   ropFSUBs,   ropFSUBs,   ropFSUBRs,  ropFSUBRs,  ropFSUBRs,  ropFSUBRs,  ropFSUBRs,  ropFSUBRs,  ropFSUBRs,  ropFSUBRs,
/*70*/  ropFDIVs,   ropFDIVs,   ropFDIVs,   ropFDIVs,   ropFDIVs,   ropFDIVs,   ropFDIVs,   ropFDIVs,   ropFDIVRs,  ropFDIVRs,  ropFDIVRs,  ropFDIVRs,  ropFDIVRs,  ropFDIVRs,  ropFDIVRs,  ropFDIVRs,
/*80*/  ropFADDs,   ropFADDs,   ropFADDs,   ropFADDs,   ropFADDs,   ropFADDs,   ropFADDs,   ropFADDs,   ropFMULs,   ropFMULs,   ropFMULs,   ropFMULs,   ropFMULs,   ropFMULs,   ropFMULs,   ropFMULs,
/*90*/  ropFCOMs,   ropFCOMs,   ropFCOMs,   ropFCOMs,   ropFCOMs,   ropFCOMs,   ropFCOMs,   ropFCOMs,   ropFCOMPs,  ropFCOMPs,  ropFCOMPs,  ropFCOMPs,  ropFCOMPs,  ropFCOMPs,  ropFCOMPs,  ropFCOMPs,
/*a0*/  ropFSUBs,   ropFSUBs,   ropFSUBs,   ropFSUBs,   ropFSUBs,   ropFSUBs,   ropFSUBs,   ropFSUBs,   ropFSUBRs,  ropFSUBRs,  ropFSUBRs,  ropFSUBRs,  ropFSUBRs,  ropFSUBRs,  ropFSUBRs,  ropFSUBRs,
/*b0*/  ropFDIVs,   ropFDIVs,   ropFDIVs,   ropFDIVs,   ropFDIVs,   ropFDIVs,   ropFDIVs,   ropFDIVs,   ropFDIVRs,  ropFDIVRs,  ropFDIVRs,  ropFDIVRs,  ropFDIVRs,  ropFDIVRs,  ropFDIVRs,  ropFDIVRs,
/*c0*/  ropFADD,    ropFADD,    ropFADD,    ropFADD,    ropFADD,    ropFADD,    ropFADD,    ropFADD,    ropFMUL,    ropFMUL,    ropFMUL,    ropFMUL,    ropFMUL,    ropFMUL,    ropFMUL,    ropFMUL,
/*d0*/  ropFCOM,    ropFCOM,    ropFCOM,    ropFCOM,    ropFCOM,    ropFCOM,    ropFCOM,    ropFCOM,    ropFCOMP,   ropFCOMP,   ropFCOMP,   ropFCOMP,   ropFCOMP,   ropFCOMP,   ropFCOMP,   ropFCOMP,
/*e0*/  ropFSUB,    ropFSUB,    ropFSUB,    ropFSUB,    ropFSUB,    ropFSUB,    ropFSUB,    ropFSUB,    ropFSUBR,   ropFSUBR,   ropFSUBR,   ropFSUBR,   ropFSUBR,   ropFSUBR,   ropFSUBR,   ropFSUBR,
/*f0*/  ropFDIV,    ropFDIV,    ropFDIV,    ropFDIV,    ropFDIV,    ropFDIV,    ropFDIV,    ropFDIV,    ropFDIVR,   ropFDIVR,   ropFDIVR,   ropFDIVR,   ropFDIVR,   ropFDIVR,   ropFDIVR,   ropFDIVR,
    // clang-format on
};
/* Recompiler dispatch table for the D9 FPU escape opcode.  512 entries:
 * [0..255] = 16-bit operand size, [256..511] = 32-bit; indexed by ModR/M
 * byte.  Memory forms (0x00-0xbf): /0 FLD m32 (ropFLDs), /2 FST (ropFSTs),
 * /3 FSTP (ropFSTPs), /7 FNSTCW (ropFSTCW); others unimplemented (NULL).
 * Register forms (0xc0-0xff) cover FLD ST(i), FXCH, FSTP, and the constant/
 * arithmetic group at 0xe0+ (FCHS, FABS, FTST, FLD1, FLDZ, FSQRT). */
RecompOpFn recomp_opcodes_d9[512] = {
    // clang-format off
/*16-bit data*/
/*      00          01          02          03          04          05          06          07          08          09          0a          0b          0c          0d          0e          0f*/
/*00*/  ropFLDs,    ropFLDs,    ropFLDs,    ropFLDs,    ropFLDs,    ropFLDs,    ropFLDs,    ropFLDs,    NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*10*/  ropFSTs,    ropFSTs,    ropFSTs,    ropFSTs,    ropFSTs,    ropFSTs,    ropFSTs,    ropFSTs,    ropFSTPs,   ropFSTPs,   ropFSTPs,   ropFSTPs,   ropFSTPs,   ropFSTPs,   ropFSTPs,   ropFSTPs,
/*20*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*30*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       ropFSTCW,   ropFSTCW,   ropFSTCW,   ropFSTCW,   ropFSTCW,   ropFSTCW,   ropFSTCW,   ropFSTCW,
/*40*/  ropFLDs,    ropFLDs,    ropFLDs,    ropFLDs,    ropFLDs,    ropFLDs,    ropFLDs,    ropFLDs,    NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*50*/  ropFSTs,    ropFSTs,    ropFSTs,    ropFSTs,    ropFSTs,    ropFSTs,    ropFSTs,    ropFSTs,    ropFSTPs,   ropFSTPs,   ropFSTPs,   ropFSTPs,   ropFSTPs,   ropFSTPs,   ropFSTPs,   ropFSTPs,
/*60*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*70*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       ropFSTCW,   ropFSTCW,   ropFSTCW,   ropFSTCW,   ropFSTCW,   ropFSTCW,   ropFSTCW,   ropFSTCW,
/*80*/  ropFLDs,    ropFLDs,    ropFLDs,    ropFLDs,    ropFLDs,    ropFLDs,    ropFLDs,    ropFLDs,    NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*90*/  ropFSTs,    ropFSTs,    ropFSTs,    ropFSTs,    ropFSTs,    ropFSTs,    ropFSTs,    ropFSTs,    ropFSTPs,   ropFSTPs,   ropFSTPs,   ropFSTPs,   ropFSTPs,   ropFSTPs,   ropFSTPs,   ropFSTPs,
/*a0*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*b0*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       ropFSTCW,   ropFSTCW,   ropFSTCW,   ropFSTCW,   ropFSTCW,   ropFSTCW,   ropFSTCW,   ropFSTCW,
/*c0*/  ropFLD,     ropFLD,     ropFLD,     ropFLD,     ropFLD,     ropFLD,     ropFLD,     ropFLD,     ropFXCH,    ropFXCH,    ropFXCH,    ropFXCH,    ropFXCH,    ropFXCH,    ropFXCH,    ropFXCH,
/*d0*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       ropFSTP,    ropFSTP,    ropFSTP,    ropFSTP,    ropFSTP,    ropFSTP,    ropFSTP,    ropFSTP,
/*e0*/  ropFCHS,    ropFABS,    NULL,       NULL,       ropFTST,    NULL,       NULL,       NULL,       ropFLD1,    NULL,       NULL,       NULL,       NULL,       NULL,       ropFLDZ,    NULL,
/*f0*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       ropFSQRT,   NULL,       NULL,       NULL,       NULL,       NULL,
/*32-bit data*/
/*      00          01          02          03          04          05          06          07          08          09          0a          0b          0c          0d          0e          0f*/
/*00*/  ropFLDs,    ropFLDs,    ropFLDs,    ropFLDs,    ropFLDs,    ropFLDs,    ropFLDs,    ropFLDs,    NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*10*/  ropFSTs,    ropFSTs,    ropFSTs,    ropFSTs,    ropFSTs,    ropFSTs,    ropFSTs,    ropFSTs,    ropFSTPs,   ropFSTPs,   ropFSTPs,   ropFSTPs,   ropFSTPs,   ropFSTPs,   ropFSTPs,   ropFSTPs,
/*20*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*30*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       ropFSTCW,   ropFSTCW,   ropFSTCW,   ropFSTCW,   ropFSTCW,   ropFSTCW,   ropFSTCW,   ropFSTCW,
/*40*/  ropFLDs,    ropFLDs,    ropFLDs,    ropFLDs,    ropFLDs,    ropFLDs,    ropFLDs,    ropFLDs,    NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*50*/  ropFSTs,    ropFSTs,    ropFSTs,    ropFSTs,    ropFSTs,    ropFSTs,    ropFSTs,    ropFSTs,    ropFSTPs,   ropFSTPs,   ropFSTPs,   ropFSTPs,   ropFSTPs,   ropFSTPs,   ropFSTPs,   ropFSTPs,
/*60*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*70*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       ropFSTCW,   ropFSTCW,   ropFSTCW,   ropFSTCW,   ropFSTCW,   ropFSTCW,   ropFSTCW,   ropFSTCW,
/*80*/  ropFLDs,    ropFLDs,    ropFLDs,    ropFLDs,    ropFLDs,    ropFLDs,    ropFLDs,    ropFLDs,    NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*90*/  ropFSTs,    ropFSTs,    ropFSTs,    ropFSTs,    ropFSTs,    ropFSTs,    ropFSTs,    ropFSTs,    ropFSTPs,   ropFSTPs,   ropFSTPs,   ropFSTPs,   ropFSTPs,   ropFSTPs,   ropFSTPs,   ropFSTPs,
/*a0*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*b0*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       ropFSTCW,   ropFSTCW,   ropFSTCW,   ropFSTCW,   ropFSTCW,   ropFSTCW,   ropFSTCW,   ropFSTCW,
/*c0*/  ropFLD,     ropFLD,     ropFLD,     ropFLD,     ropFLD,     ropFLD,     ropFLD,     ropFLD,     ropFXCH,    ropFXCH,    ropFXCH,    ropFXCH,    ropFXCH,    ropFXCH,    ropFXCH,    ropFXCH,
/*d0*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       ropFSTP,    ropFSTP,    ropFSTP,    ropFSTP,    ropFSTP,    ropFSTP,    ropFSTP,    ropFSTP,
/*e0*/  ropFCHS,    ropFABS,    NULL,       NULL,       ropFTST,    NULL,       NULL,       NULL,       ropFLD1,    NULL,       NULL,       NULL,       NULL,       NULL,       ropFLDZ,    NULL,
/*f0*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       ropFSQRT,   NULL,       NULL,       NULL,       NULL,       NULL,
    // clang-format on
};
/* Recompiler dispatch table for the DA FPU escape opcode.  512 entries:
 * [0..255] = 16-bit operand size, [256..511] = 32-bit; indexed by ModR/M
 * byte.  Memory forms (0x00-0xbf) are the 32-bit-integer ("l" suffix)
 * arithmetic ops grouped by reg field: /0 FIADD, /1 FIMUL, /2 FICOM,
 * /3 FICOMP, /4 FISUB, /5 FISUBR, /6 FIDIV, /7 FIDIVR.  Of the register
 * forms only 0xd9 (FUCOMPP) is recompiled. */
RecompOpFn recomp_opcodes_da[512] = {
    // clang-format off
/*16-bit data*/
/*      00          01          02          03          04          05          06          07          08          09          0a          0b          0c          0d          0e          0f*/
/*00*/  ropFIADDl,  ropFIADDl,  ropFIADDl,  ropFIADDl,  ropFIADDl,  ropFIADDl,  ropFIADDl,  ropFIADDl,  ropFIMULl,  ropFIMULl,  ropFIMULl,  ropFIMULl,  ropFIMULl,  ropFIMULl,  ropFIMULl,  ropFIMULl,
/*10*/  ropFICOMl,  ropFICOMl,  ropFICOMl,  ropFICOMl,  ropFICOMl,  ropFICOMl,  ropFICOMl,  ropFICOMl,  ropFICOMPl, ropFICOMPl, ropFICOMPl, ropFICOMPl, ropFICOMPl, ropFICOMPl, ropFICOMPl, ropFICOMPl,
/*20*/  ropFISUBl,  ropFISUBl,  ropFISUBl,  ropFISUBl,  ropFISUBl,  ropFISUBl,  ropFISUBl,  ropFISUBl,  ropFISUBRl, ropFISUBRl, ropFISUBRl, ropFISUBRl, ropFISUBRl, ropFISUBRl, ropFISUBRl, ropFISUBRl,
/*30*/  ropFIDIVl,  ropFIDIVl,  ropFIDIVl,  ropFIDIVl,  ropFIDIVl,  ropFIDIVl,  ropFIDIVl,  ropFIDIVl,  ropFIDIVRl, ropFIDIVRl, ropFIDIVRl, ropFIDIVRl, ropFIDIVRl, ropFIDIVRl, ropFIDIVRl, ropFIDIVRl,
/*40*/  ropFIADDl,  ropFIADDl,  ropFIADDl,  ropFIADDl,  ropFIADDl,  ropFIADDl,  ropFIADDl,  ropFIADDl,  ropFIMULl,  ropFIMULl,  ropFIMULl,  ropFIMULl,  ropFIMULl,  ropFIMULl,  ropFIMULl,  ropFIMULl,
/*50*/  ropFICOMl,  ropFICOMl,  ropFICOMl,  ropFICOMl,  ropFICOMl,  ropFICOMl,  ropFICOMl,  ropFICOMl,  ropFICOMPl, ropFICOMPl, ropFICOMPl, ropFICOMPl, ropFICOMPl, ropFICOMPl, ropFICOMPl, ropFICOMPl,
/*60*/  ropFISUBl,  ropFISUBl,  ropFISUBl,  ropFISUBl,  ropFISUBl,  ropFISUBl,  ropFISUBl,  ropFISUBl,  ropFISUBRl, ropFISUBRl, ropFISUBRl, ropFISUBRl, ropFISUBRl, ropFISUBRl, ropFISUBRl, ropFISUBRl,
/*70*/  ropFIDIVl,  ropFIDIVl,  ropFIDIVl,  ropFIDIVl,  ropFIDIVl,  ropFIDIVl,  ropFIDIVl,  ropFIDIVl,  ropFIDIVRl, ropFIDIVRl, ropFIDIVRl, ropFIDIVRl, ropFIDIVRl, ropFIDIVRl, ropFIDIVRl, ropFIDIVRl,
/*80*/  ropFIADDl,  ropFIADDl,  ropFIADDl,  ropFIADDl,  ropFIADDl,  ropFIADDl,  ropFIADDl,  ropFIADDl,  ropFIMULl,  ropFIMULl,  ropFIMULl,  ropFIMULl,  ropFIMULl,  ropFIMULl,  ropFIMULl,  ropFIMULl,
/*90*/  ropFICOMl,  ropFICOMl,  ropFICOMl,  ropFICOMl,  ropFICOMl,  ropFICOMl,  ropFICOMl,  ropFICOMl,  ropFICOMPl, ropFICOMPl, ropFICOMPl, ropFICOMPl, ropFICOMPl, ropFICOMPl, ropFICOMPl, ropFICOMPl,
/*a0*/  ropFISUBl,  ropFISUBl,  ropFISUBl,  ropFISUBl,  ropFISUBl,  ropFISUBl,  ropFISUBl,  ropFISUBl,  ropFISUBRl, ropFISUBRl, ropFISUBRl, ropFISUBRl, ropFISUBRl, ropFISUBRl, ropFISUBRl, ropFISUBRl,
/*b0*/  ropFIDIVl,  ropFIDIVl,  ropFIDIVl,  ropFIDIVl,  ropFIDIVl,  ropFIDIVl,  ropFIDIVl,  ropFIDIVl,  ropFIDIVRl, ropFIDIVRl, ropFIDIVRl, ropFIDIVRl, ropFIDIVRl, ropFIDIVRl, ropFIDIVRl, ropFIDIVRl,
/*c0*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*d0*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       ropFUCOMPP, NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*e0*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*f0*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*32-bit data*/
/*      00          01          02          03          04          05          06          07          08          09          0a          0b          0c          0d          0e          0f*/
/*00*/  ropFIADDl,  ropFIADDl,  ropFIADDl,  ropFIADDl,  ropFIADDl,  ropFIADDl,  ropFIADDl,  ropFIADDl,  ropFIMULl,  ropFIMULl,  ropFIMULl,  ropFIMULl,  ropFIMULl,  ropFIMULl,  ropFIMULl,  ropFIMULl,
/*10*/  ropFICOMl,  ropFICOMl,  ropFICOMl,  ropFICOMl,  ropFICOMl,  ropFICOMl,  ropFICOMl,  ropFICOMl,  ropFICOMPl, ropFICOMPl, ropFICOMPl, ropFICOMPl, ropFICOMPl, ropFICOMPl, ropFICOMPl, ropFICOMPl,
/*20*/  ropFISUBl,  ropFISUBl,  ropFISUBl,  ropFISUBl,  ropFISUBl,  ropFISUBl,  ropFISUBl,  ropFISUBl,  ropFISUBRl, ropFISUBRl, ropFISUBRl, ropFISUBRl, ropFISUBRl, ropFISUBRl, ropFISUBRl, ropFISUBRl,
/*30*/  ropFIDIVl,  ropFIDIVl,  ropFIDIVl,  ropFIDIVl,  ropFIDIVl,  ropFIDIVl,  ropFIDIVl,  ropFIDIVl,  ropFIDIVRl, ropFIDIVRl, ropFIDIVRl, ropFIDIVRl, ropFIDIVRl, ropFIDIVRl, ropFIDIVRl, ropFIDIVRl,
/*40*/  ropFIADDl,  ropFIADDl,  ropFIADDl,  ropFIADDl,  ropFIADDl,  ropFIADDl,  ropFIADDl,  ropFIADDl,  ropFIMULl,  ropFIMULl,  ropFIMULl,  ropFIMULl,  ropFIMULl,  ropFIMULl,  ropFIMULl,  ropFIMULl,
/*50*/  ropFICOMl,  ropFICOMl,  ropFICOMl,  ropFICOMl,  ropFICOMl,  ropFICOMl,  ropFICOMl,  ropFICOMl,  ropFICOMPl, ropFICOMPl, ropFICOMPl, ropFICOMPl, ropFICOMPl, ropFICOMPl, ropFICOMPl, ropFICOMPl,
/*60*/  ropFISUBl,  ropFISUBl,  ropFISUBl,  ropFISUBl,  ropFISUBl,  ropFISUBl,  ropFISUBl,  ropFISUBl,  ropFISUBRl, ropFISUBRl, ropFISUBRl, ropFISUBRl, ropFISUBRl, ropFISUBRl, ropFISUBRl, ropFISUBRl,
/*70*/  ropFIDIVl,  ropFIDIVl,  ropFIDIVl,  ropFIDIVl,  ropFIDIVl,  ropFIDIVl,  ropFIDIVl,  ropFIDIVl,  ropFIDIVRl, ropFIDIVRl, ropFIDIVRl, ropFIDIVRl, ropFIDIVRl, ropFIDIVRl, ropFIDIVRl, ropFIDIVRl,
/*80*/  ropFIADDl,  ropFIADDl,  ropFIADDl,  ropFIADDl,  ropFIADDl,  ropFIADDl,  ropFIADDl,  ropFIADDl,  ropFIMULl,  ropFIMULl,  ropFIMULl,  ropFIMULl,  ropFIMULl,  ropFIMULl,  ropFIMULl,  ropFIMULl,
/*90*/  ropFICOMl,  ropFICOMl,  ropFICOMl,  ropFICOMl,  ropFICOMl,  ropFICOMl,  ropFICOMl,  ropFICOMl,  ropFICOMPl, ropFICOMPl, ropFICOMPl, ropFICOMPl, ropFICOMPl, ropFICOMPl, ropFICOMPl, ropFICOMPl,
/*a0*/  ropFISUBl,  ropFISUBl,  ropFISUBl,  ropFISUBl,  ropFISUBl,  ropFISUBl,  ropFISUBl,  ropFISUBl,  ropFISUBRl, ropFISUBRl, ropFISUBRl, ropFISUBRl, ropFISUBRl, ropFISUBRl, ropFISUBRl, ropFISUBRl,
/*b0*/  ropFIDIVl,  ropFIDIVl,  ropFIDIVl,  ropFIDIVl,  ropFIDIVl,  ropFIDIVl,  ropFIDIVl,  ropFIDIVl,  ropFIDIVRl, ropFIDIVRl, ropFIDIVRl, ropFIDIVRl, ropFIDIVRl, ropFIDIVRl, ropFIDIVRl, ropFIDIVRl,
/*c0*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*d0*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       ropFUCOMPP, NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*e0*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*f0*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
    // clang-format on
};
/* Recompiler dispatch table for the DB FPU escape opcode.  512 entries:
 * [0..255] = 16-bit operand size, [256..511] = 32-bit; indexed by ModR/M
 * byte.  Only the 32-bit-integer memory forms are recompiled:
 * /0 FILD m32 (ropFILDl), /2 FIST (ropFISTl), /3 FISTP (ropFISTPl).
 * All other encodings (including every register form, e.g. FNINIT/FNCLEX
 * at 0xe0+) are NULL and presumably handled by the interpreter -- confirm
 * against the dispatcher that consults this table. */
RecompOpFn recomp_opcodes_db[512] = {
    // clang-format off
/*16-bit data*/
/*      00          01          02          03          04          05          06          07          08          09          0a          0b          0c          0d          0e          0f*/
/*00*/  ropFILDl,   ropFILDl,   ropFILDl,   ropFILDl,   ropFILDl,   ropFILDl,   ropFILDl,   ropFILDl,   NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*10*/  ropFISTl,   ropFISTl,   ropFISTl,   ropFISTl,   ropFISTl,   ropFISTl,   ropFISTl,   ropFISTl,   ropFISTPl,  ropFISTPl,  ropFISTPl,  ropFISTPl,  ropFISTPl,  ropFISTPl,  ropFISTPl,  ropFISTPl,
/*20*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*30*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*40*/  ropFILDl,   ropFILDl,   ropFILDl,   ropFILDl,   ropFILDl,   ropFILDl,   ropFILDl,   ropFILDl,   NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*50*/  ropFISTl,   ropFISTl,   ropFISTl,   ropFISTl,   ropFISTl,   ropFISTl,   ropFISTl,   ropFISTl,   ropFISTPl,  ropFISTPl,  ropFISTPl,  ropFISTPl,  ropFISTPl,  ropFISTPl,  ropFISTPl,  ropFISTPl,
/*60*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*70*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*80*/  ropFILDl,   ropFILDl,   ropFILDl,   ropFILDl,   ropFILDl,   ropFILDl,   ropFILDl,   ropFILDl,   NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*90*/  ropFISTl,   ropFISTl,   ropFISTl,   ropFISTl,   ropFISTl,   ropFISTl,   ropFISTl,   ropFISTl,   ropFISTPl,  ropFISTPl,  ropFISTPl,  ropFISTPl,  ropFISTPl,  ropFISTPl,  ropFISTPl,  ropFISTPl,
/*a0*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*b0*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*c0*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*d0*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*e0*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*f0*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*32-bit data*/
/*      00          01          02          03          04          05          06          07          08          09          0a          0b          0c          0d          0e          0f*/
/*00*/  ropFILDl,   ropFILDl,   ropFILDl,   ropFILDl,   ropFILDl,   ropFILDl,   ropFILDl,   ropFILDl,   NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*10*/  ropFISTl,   ropFISTl,   ropFISTl,   ropFISTl,   ropFISTl,   ropFISTl,   ropFISTl,   ropFISTl,   ropFISTPl,  ropFISTPl,  ropFISTPl,  ropFISTPl,  ropFISTPl,  ropFISTPl,  ropFISTPl,  ropFISTPl,
/*20*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*30*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*40*/  ropFILDl,   ropFILDl,   ropFILDl,   ropFILDl,   ropFILDl,   ropFILDl,   ropFILDl,   ropFILDl,   NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*50*/  ropFISTl,   ropFISTl,   ropFISTl,   ropFISTl,   ropFISTl,   ropFISTl,   ropFISTl,   ropFISTl,   ropFISTPl,  ropFISTPl,  ropFISTPl,  ropFISTPl,  ropFISTPl,  ropFISTPl,  ropFISTPl,  ropFISTPl,
/*60*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*70*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*80*/  ropFILDl,   ropFILDl,   ropFILDl,   ropFILDl,   ropFILDl,   ropFILDl,   ropFILDl,   ropFILDl,   NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*90*/  ropFISTl,   ropFISTl,   ropFISTl,   ropFISTl,   ropFISTl,   ropFISTl,   ropFISTl,   ropFISTl,   ropFISTPl,  ropFISTPl,  ropFISTPl,  ropFISTPl,  ropFISTPl,  ropFISTPl,  ropFISTPl,  ropFISTPl,
/*a0*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*b0*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*c0*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*d0*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*e0*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*f0*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
    // clang-format on
};
/* Recompiler dispatch table for x87 escape opcode 0xDC (double-precision
   FADD/FMUL/FCOM(P)/FSUB(R)/FDIV(R) forms).  512 entries: the first 256 are
   used with 16-bit operand size, the second 256 with 32-bit operand size;
   both halves are identical here.  The low byte indexes on the second
   opcode byte - presumably rows 00-bf are the memory-operand ("d" suffixed)
   forms and c0-ff the register-register ("r" suffixed) forms; NULL entries
   are not recompiled (TODO confirm dispatcher fallback behaviour). */
RecompOpFn recomp_opcodes_dc[512] = {
    // clang-format off
/*16-bit data*/
/*      00          01          02          03          04          05          06          07          08          09          0a          0b          0c          0d          0e          0f*/
/*00*/  ropFADDd,   ropFADDd,   ropFADDd,   ropFADDd,   ropFADDd,   ropFADDd,   ropFADDd,   ropFADDd,   ropFMULd,   ropFMULd,   ropFMULd,   ropFMULd,   ropFMULd,   ropFMULd,   ropFMULd,   ropFMULd,
/*10*/  ropFCOMd,   ropFCOMd,   ropFCOMd,   ropFCOMd,   ropFCOMd,   ropFCOMd,   ropFCOMd,   ropFCOMd,   ropFCOMPd,  ropFCOMPd,  ropFCOMPd,  ropFCOMPd,  ropFCOMPd,  ropFCOMPd,  ropFCOMPd,  ropFCOMPd,
/*20*/  ropFSUBd,   ropFSUBd,   ropFSUBd,   ropFSUBd,   ropFSUBd,   ropFSUBd,   ropFSUBd,   ropFSUBd,   ropFSUBRd,  ropFSUBRd,  ropFSUBRd,  ropFSUBRd,  ropFSUBRd,  ropFSUBRd,  ropFSUBRd,  ropFSUBRd,
/*30*/  ropFDIVd,   ropFDIVd,   ropFDIVd,   ropFDIVd,   ropFDIVd,   ropFDIVd,   ropFDIVd,   ropFDIVd,   ropFDIVRd,  ropFDIVRd,  ropFDIVRd,  ropFDIVRd,  ropFDIVRd,  ropFDIVRd,  ropFDIVRd,  ropFDIVRd,
/*40*/  ropFADDd,   ropFADDd,   ropFADDd,   ropFADDd,   ropFADDd,   ropFADDd,   ropFADDd,   ropFADDd,   ropFMULd,   ropFMULd,   ropFMULd,   ropFMULd,   ropFMULd,   ropFMULd,   ropFMULd,   ropFMULd,
/*50*/  ropFCOMd,   ropFCOMd,   ropFCOMd,   ropFCOMd,   ropFCOMd,   ropFCOMd,   ropFCOMd,   ropFCOMd,   ropFCOMPd,  ropFCOMPd,  ropFCOMPd,  ropFCOMPd,  ropFCOMPd,  ropFCOMPd,  ropFCOMPd,  ropFCOMPd,
/*60*/  ropFSUBd,   ropFSUBd,   ropFSUBd,   ropFSUBd,   ropFSUBd,   ropFSUBd,   ropFSUBd,   ropFSUBd,   ropFSUBRd,  ropFSUBRd,  ropFSUBRd,  ropFSUBRd,  ropFSUBRd,  ropFSUBRd,  ropFSUBRd,  ropFSUBRd,
/*70*/  ropFDIVd,   ropFDIVd,   ropFDIVd,   ropFDIVd,   ropFDIVd,   ropFDIVd,   ropFDIVd,   ropFDIVd,   ropFDIVRd,  ropFDIVRd,  ropFDIVRd,  ropFDIVRd,  ropFDIVRd,  ropFDIVRd,  ropFDIVRd,  ropFDIVRd,
/*80*/  ropFADDd,   ropFADDd,   ropFADDd,   ropFADDd,   ropFADDd,   ropFADDd,   ropFADDd,   ropFADDd,   ropFMULd,   ropFMULd,   ropFMULd,   ropFMULd,   ropFMULd,   ropFMULd,   ropFMULd,   ropFMULd,
/*90*/  ropFCOMd,   ropFCOMd,   ropFCOMd,   ropFCOMd,   ropFCOMd,   ropFCOMd,   ropFCOMd,   ropFCOMd,   ropFCOMPd,  ropFCOMPd,  ropFCOMPd,  ropFCOMPd,  ropFCOMPd,  ropFCOMPd,  ropFCOMPd,  ropFCOMPd,
/*a0*/  ropFSUBd,   ropFSUBd,   ropFSUBd,   ropFSUBd,   ropFSUBd,   ropFSUBd,   ropFSUBd,   ropFSUBd,   ropFSUBRd,  ropFSUBRd,  ropFSUBRd,  ropFSUBRd,  ropFSUBRd,  ropFSUBRd,  ropFSUBRd,  ropFSUBRd,
/*b0*/  ropFDIVd,   ropFDIVd,   ropFDIVd,   ropFDIVd,   ropFDIVd,   ropFDIVd,   ropFDIVd,   ropFDIVd,   ropFDIVRd,  ropFDIVRd,  ropFDIVRd,  ropFDIVRd,  ropFDIVRd,  ropFDIVRd,  ropFDIVRd,  ropFDIVRd,
/*c0*/  ropFADDr,   ropFADDr,   ropFADDr,   ropFADDr,   ropFADDr,   ropFADDr,   ropFADDr,   ropFADDr,   ropFMULr,   ropFMULr,   ropFMULr,   ropFMULr,   ropFMULr,   ropFMULr,   ropFMULr,   ropFMULr,
/*d0*/  ropFCOM,    ropFCOM,    ropFCOM,    ropFCOM,    ropFCOM,    ropFCOM,    ropFCOM,    ropFCOM,    ropFCOMP,   ropFCOMP,   ropFCOMP,   ropFCOMP,   ropFCOMP,   ropFCOMP,   ropFCOMP,   ropFCOMP,
/*e0*/  ropFSUBRr,  ropFSUBRr,  ropFSUBRr,  ropFSUBRr,  ropFSUBRr,  ropFSUBRr,  ropFSUBRr,  ropFSUBRr,  ropFSUBr,   ropFSUBr,   ropFSUBr,   ropFSUBr,   ropFSUBr,   ropFSUBr,   ropFSUBr,   ropFSUBr,
/*f0*/  ropFDIVRr,  ropFDIVRr,  ropFDIVRr,  ropFDIVRr,  ropFDIVRr,  ropFDIVRr,  ropFDIVRr,  ropFDIVRr,  ropFDIVr,   ropFDIVr,   ropFDIVr,   ropFDIVr,   ropFDIVr,   ropFDIVr,   ropFDIVr,   ropFDIVr,
/*32-bit data*/
/*      00          01          02          03          04          05          06          07          08          09          0a          0b          0c          0d          0e          0f*/
/*00*/  ropFADDd,   ropFADDd,   ropFADDd,   ropFADDd,   ropFADDd,   ropFADDd,   ropFADDd,   ropFADDd,   ropFMULd,   ropFMULd,   ropFMULd,   ropFMULd,   ropFMULd,   ropFMULd,   ropFMULd,   ropFMULd,
/*10*/  ropFCOMd,   ropFCOMd,   ropFCOMd,   ropFCOMd,   ropFCOMd,   ropFCOMd,   ropFCOMd,   ropFCOMd,   ropFCOMPd,  ropFCOMPd,  ropFCOMPd,  ropFCOMPd,  ropFCOMPd,  ropFCOMPd,  ropFCOMPd,  ropFCOMPd,
/*20*/  ropFSUBd,   ropFSUBd,   ropFSUBd,   ropFSUBd,   ropFSUBd,   ropFSUBd,   ropFSUBd,   ropFSUBd,   ropFSUBRd,  ropFSUBRd,  ropFSUBRd,  ropFSUBRd,  ropFSUBRd,  ropFSUBRd,  ropFSUBRd,  ropFSUBRd,
/*30*/  ropFDIVd,   ropFDIVd,   ropFDIVd,   ropFDIVd,   ropFDIVd,   ropFDIVd,   ropFDIVd,   ropFDIVd,   ropFDIVRd,  ropFDIVRd,  ropFDIVRd,  ropFDIVRd,  ropFDIVRd,  ropFDIVRd,  ropFDIVRd,  ropFDIVRd,
/*40*/  ropFADDd,   ropFADDd,   ropFADDd,   ropFADDd,   ropFADDd,   ropFADDd,   ropFADDd,   ropFADDd,   ropFMULd,   ropFMULd,   ropFMULd,   ropFMULd,   ropFMULd,   ropFMULd,   ropFMULd,   ropFMULd,
/*50*/  ropFCOMd,   ropFCOMd,   ropFCOMd,   ropFCOMd,   ropFCOMd,   ropFCOMd,   ropFCOMd,   ropFCOMd,   ropFCOMPd,  ropFCOMPd,  ropFCOMPd,  ropFCOMPd,  ropFCOMPd,  ropFCOMPd,  ropFCOMPd,  ropFCOMPd,
/*60*/  ropFSUBd,   ropFSUBd,   ropFSUBd,   ropFSUBd,   ropFSUBd,   ropFSUBd,   ropFSUBd,   ropFSUBd,   ropFSUBRd,  ropFSUBRd,  ropFSUBRd,  ropFSUBRd,  ropFSUBRd,  ropFSUBRd,  ropFSUBRd,  ropFSUBRd,
/*70*/  ropFDIVd,   ropFDIVd,   ropFDIVd,   ropFDIVd,   ropFDIVd,   ropFDIVd,   ropFDIVd,   ropFDIVd,   ropFDIVRd,  ropFDIVRd,  ropFDIVRd,  ropFDIVRd,  ropFDIVRd,  ropFDIVRd,  ropFDIVRd,  ropFDIVRd,
/*80*/  ropFADDd,   ropFADDd,   ropFADDd,   ropFADDd,   ropFADDd,   ropFADDd,   ropFADDd,   ropFADDd,   ropFMULd,   ropFMULd,   ropFMULd,   ropFMULd,   ropFMULd,   ropFMULd,   ropFMULd,   ropFMULd,
/*90*/  ropFCOMd,   ropFCOMd,   ropFCOMd,   ropFCOMd,   ropFCOMd,   ropFCOMd,   ropFCOMd,   ropFCOMd,   ropFCOMPd,  ropFCOMPd,  ropFCOMPd,  ropFCOMPd,  ropFCOMPd,  ropFCOMPd,  ropFCOMPd,  ropFCOMPd,
/*a0*/  ropFSUBd,   ropFSUBd,   ropFSUBd,   ropFSUBd,   ropFSUBd,   ropFSUBd,   ropFSUBd,   ropFSUBd,   ropFSUBRd,  ropFSUBRd,  ropFSUBRd,  ropFSUBRd,  ropFSUBRd,  ropFSUBRd,  ropFSUBRd,  ropFSUBRd,
/*b0*/  ropFDIVd,   ropFDIVd,   ropFDIVd,   ropFDIVd,   ropFDIVd,   ropFDIVd,   ropFDIVd,   ropFDIVd,   ropFDIVRd,  ropFDIVRd,  ropFDIVRd,  ropFDIVRd,  ropFDIVRd,  ropFDIVRd,  ropFDIVRd,  ropFDIVRd,
/*c0*/  ropFADDr,   ropFADDr,   ropFADDr,   ropFADDr,   ropFADDr,   ropFADDr,   ropFADDr,   ropFADDr,   ropFMULr,   ropFMULr,   ropFMULr,   ropFMULr,   ropFMULr,   ropFMULr,   ropFMULr,   ropFMULr,
/*d0*/  ropFCOM,    ropFCOM,    ropFCOM,    ropFCOM,    ropFCOM,    ropFCOM,    ropFCOM,    ropFCOM,    ropFCOMP,   ropFCOMP,   ropFCOMP,   ropFCOMP,   ropFCOMP,   ropFCOMP,   ropFCOMP,   ropFCOMP,
/*e0*/  ropFSUBRr,  ropFSUBRr,  ropFSUBRr,  ropFSUBRr,  ropFSUBRr,  ropFSUBRr,  ropFSUBRr,  ropFSUBRr,  ropFSUBr,   ropFSUBr,   ropFSUBr,   ropFSUBr,   ropFSUBr,   ropFSUBr,   ropFSUBr,   ropFSUBr,
/*f0*/  ropFDIVRr,  ropFDIVRr,  ropFDIVRr,  ropFDIVRr,  ropFDIVRr,  ropFDIVRr,  ropFDIVRr,  ropFDIVRr,  ropFDIVr,   ropFDIVr,   ropFDIVr,   ropFDIVr,   ropFDIVr,   ropFDIVr,   ropFDIVr,   ropFDIVr,
    // clang-format on
};
/* Recompiler dispatch table for x87 escape opcode 0xDD (double-precision
   FLD/FST/FSTP, FSTSW to memory, FFREE, register FST/FSTP and
   FUCOM/FUCOMP).  Layout as with the other recomp_opcodes_* tables: 256
   entries per operand-size half, identical halves, NULL = not recompiled. */
RecompOpFn recomp_opcodes_dd[512] = {
    // clang-format off
/*16-bit data*/
/*      00          01          02          03          04          05          06          07          08          09          0a          0b          0c          0d          0e          0f*/
/*00*/  ropFLDd,    ropFLDd,    ropFLDd,    ropFLDd,    ropFLDd,    ropFLDd,    ropFLDd,    ropFLDd,    NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*10*/  ropFSTd,    ropFSTd,    ropFSTd,    ropFSTd,    ropFSTd,    ropFSTd,    ropFSTd,    ropFSTd,    ropFSTPd,   ropFSTPd,   ropFSTPd,   ropFSTPd,   ropFSTPd,   ropFSTPd,   ropFSTPd,   ropFSTPd,
/*20*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*30*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       ropFSTSW,   ropFSTSW,   ropFSTSW,   ropFSTSW,   ropFSTSW,   ropFSTSW,   ropFSTSW,   ropFSTSW,
/*40*/  ropFLDd,    ropFLDd,    ropFLDd,    ropFLDd,    ropFLDd,    ropFLDd,    ropFLDd,    ropFLDd,    NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*50*/  ropFSTd,    ropFSTd,    ropFSTd,    ropFSTd,    ropFSTd,    ropFSTd,    ropFSTd,    ropFSTd,    ropFSTPd,   ropFSTPd,   ropFSTPd,   ropFSTPd,   ropFSTPd,   ropFSTPd,   ropFSTPd,   ropFSTPd,
/*60*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*70*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       ropFSTSW,   ropFSTSW,   ropFSTSW,   ropFSTSW,   ropFSTSW,   ropFSTSW,   ropFSTSW,   ropFSTSW,
/*80*/  ropFLDd,    ropFLDd,    ropFLDd,    ropFLDd,    ropFLDd,    ropFLDd,    ropFLDd,    ropFLDd,    NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*90*/  ropFSTd,    ropFSTd,    ropFSTd,    ropFSTd,    ropFSTd,    ropFSTd,    ropFSTd,    ropFSTd,    ropFSTPd,   ropFSTPd,   ropFSTPd,   ropFSTPd,   ropFSTPd,   ropFSTPd,   ropFSTPd,   ropFSTPd,
/*a0*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*b0*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       ropFSTSW,   ropFSTSW,   ropFSTSW,   ropFSTSW,   ropFSTSW,   ropFSTSW,   ropFSTSW,   ropFSTSW,
/*c0*/  ropFFREE,   ropFFREE,   ropFFREE,   ropFFREE,   ropFFREE,   ropFFREE,   ropFFREE,   ropFFREE,   NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*d0*/  ropFST,     ropFST,     ropFST,     ropFST,     ropFST,     ropFST,     ropFST,     ropFST,     ropFSTP,    ropFSTP,    ropFSTP,    ropFSTP,    ropFSTP,    ropFSTP,    ropFSTP,    ropFSTP,
/*e0*/  ropFUCOM,   ropFUCOM,   ropFUCOM,   ropFUCOM,   ropFUCOM,   ropFUCOM,   ropFUCOM,   ropFUCOM,   ropFUCOMP,  ropFUCOMP,  ropFUCOMP,  ropFUCOMP,  ropFUCOMP,  ropFUCOMP,  ropFUCOMP,  ropFUCOMP,
/*f0*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*32-bit data*/
/*      00          01          02          03          04          05          06          07          08          09          0a          0b          0c          0d          0e          0f*/
/*00*/  ropFLDd,    ropFLDd,    ropFLDd,    ropFLDd,    ropFLDd,    ropFLDd,    ropFLDd,    ropFLDd,    NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*10*/  ropFSTd,    ropFSTd,    ropFSTd,    ropFSTd,    ropFSTd,    ropFSTd,    ropFSTd,    ropFSTd,    ropFSTPd,   ropFSTPd,   ropFSTPd,   ropFSTPd,   ropFSTPd,   ropFSTPd,   ropFSTPd,   ropFSTPd,
/*20*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*30*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       ropFSTSW,   ropFSTSW,   ropFSTSW,   ropFSTSW,   ropFSTSW,   ropFSTSW,   ropFSTSW,   ropFSTSW,
/*40*/  ropFLDd,    ropFLDd,    ropFLDd,    ropFLDd,    ropFLDd,    ropFLDd,    ropFLDd,    ropFLDd,    NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*50*/  ropFSTd,    ropFSTd,    ropFSTd,    ropFSTd,    ropFSTd,    ropFSTd,    ropFSTd,    ropFSTd,    ropFSTPd,   ropFSTPd,   ropFSTPd,   ropFSTPd,   ropFSTPd,   ropFSTPd,   ropFSTPd,   ropFSTPd,
/*60*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*70*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       ropFSTSW,   ropFSTSW,   ropFSTSW,   ropFSTSW,   ropFSTSW,   ropFSTSW,   ropFSTSW,   ropFSTSW,
/*80*/  ropFLDd,    ropFLDd,    ropFLDd,    ropFLDd,    ropFLDd,    ropFLDd,    ropFLDd,    ropFLDd,    NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*90*/  ropFSTd,    ropFSTd,    ropFSTd,    ropFSTd,    ropFSTd,    ropFSTd,    ropFSTd,    ropFSTd,    ropFSTPd,   ropFSTPd,   ropFSTPd,   ropFSTPd,   ropFSTPd,   ropFSTPd,   ropFSTPd,   ropFSTPd,
/*a0*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*b0*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       ropFSTSW,   ropFSTSW,   ropFSTSW,   ropFSTSW,   ropFSTSW,   ropFSTSW,   ropFSTSW,   ropFSTSW,
/*c0*/  ropFFREE,   ropFFREE,   ropFFREE,   ropFFREE,   ropFFREE,   ropFFREE,   ropFFREE,   ropFFREE,   NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*d0*/  ropFST,     ropFST,     ropFST,     ropFST,     ropFST,     ropFST,     ropFST,     ropFST,     ropFSTP,    ropFSTP,    ropFSTP,    ropFSTP,    ropFSTP,    ropFSTP,    ropFSTP,    ropFSTP,
/*e0*/  ropFUCOM,   ropFUCOM,   ropFUCOM,   ropFUCOM,   ropFUCOM,   ropFUCOM,   ropFUCOM,   ropFUCOM,   ropFUCOMP,  ropFUCOMP,  ropFUCOMP,  ropFUCOMP,  ropFUCOMP,  ropFUCOMP,  ropFUCOMP,  ropFUCOMP,
/*f0*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
    // clang-format on
};
/* Recompiler dispatch table for x87 escape opcode 0xDE (word-integer
   arithmetic FIADD/FIMUL/FICOM(P)/FISUB(R)/FIDIV(R) plus the register
   pop forms FADDP/FMULP/FSUB(R)P/FDIV(R)P and FCOMPP).  Layout as with
   the other recomp_opcodes_* tables; note the lone ropFCOMPP entry at
   row d0 column 09 matches the single-encoding FCOMPP instruction. */
RecompOpFn recomp_opcodes_de[512] = {
    // clang-format off
/*16-bit data*/
/*      00          01          02          03          04          05          06          07          08          09          0a          0b          0c          0d          0e          0f*/
/*00*/  ropFIADDw,  ropFIADDw,  ropFIADDw,  ropFIADDw,  ropFIADDw,  ropFIADDw,  ropFIADDw,  ropFIADDw,  ropFIMULw,  ropFIMULw,  ropFIMULw,  ropFIMULw,  ropFIMULw,  ropFIMULw,  ropFIMULw,  ropFIMULw,
/*10*/  ropFICOMw,  ropFICOMw,  ropFICOMw,  ropFICOMw,  ropFICOMw,  ropFICOMw,  ropFICOMw,  ropFICOMw,  ropFICOMPw, ropFICOMPw, ropFICOMPw, ropFICOMPw, ropFICOMPw, ropFICOMPw, ropFICOMPw, ropFICOMPw,
/*20*/  ropFISUBw,  ropFISUBw,  ropFISUBw,  ropFISUBw,  ropFISUBw,  ropFISUBw,  ropFISUBw,  ropFISUBw,  ropFISUBRw, ropFISUBRw, ropFISUBRw, ropFISUBRw, ropFISUBRw, ropFISUBRw, ropFISUBRw, ropFISUBRw,
/*30*/  ropFIDIVw,  ropFIDIVw,  ropFIDIVw,  ropFIDIVw,  ropFIDIVw,  ropFIDIVw,  ropFIDIVw,  ropFIDIVw,  ropFIDIVRw, ropFIDIVRw, ropFIDIVRw, ropFIDIVRw, ropFIDIVRw, ropFIDIVRw, ropFIDIVRw, ropFIDIVRw,
/*40*/  ropFIADDw,  ropFIADDw,  ropFIADDw,  ropFIADDw,  ropFIADDw,  ropFIADDw,  ropFIADDw,  ropFIADDw,  ropFIMULw,  ropFIMULw,  ropFIMULw,  ropFIMULw,  ropFIMULw,  ropFIMULw,  ropFIMULw,  ropFIMULw,
/*50*/  ropFICOMw,  ropFICOMw,  ropFICOMw,  ropFICOMw,  ropFICOMw,  ropFICOMw,  ropFICOMw,  ropFICOMw,  ropFICOMPw, ropFICOMPw, ropFICOMPw, ropFICOMPw, ropFICOMPw, ropFICOMPw, ropFICOMPw, ropFICOMPw,
/*60*/  ropFISUBw,  ropFISUBw,  ropFISUBw,  ropFISUBw,  ropFISUBw,  ropFISUBw,  ropFISUBw,  ropFISUBw,  ropFISUBRw, ropFISUBRw, ropFISUBRw, ropFISUBRw, ropFISUBRw, ropFISUBRw, ropFISUBRw, ropFISUBRw,
/*70*/  ropFIDIVw,  ropFIDIVw,  ropFIDIVw,  ropFIDIVw,  ropFIDIVw,  ropFIDIVw,  ropFIDIVw,  ropFIDIVw,  ropFIDIVRw, ropFIDIVRw, ropFIDIVRw, ropFIDIVRw, ropFIDIVRw, ropFIDIVRw, ropFIDIVRw, ropFIDIVRw,
/*80*/  ropFIADDw,  ropFIADDw,  ropFIADDw,  ropFIADDw,  ropFIADDw,  ropFIADDw,  ropFIADDw,  ropFIADDw,  ropFIMULw,  ropFIMULw,  ropFIMULw,  ropFIMULw,  ropFIMULw,  ropFIMULw,  ropFIMULw,  ropFIMULw,
/*90*/  ropFICOMw,  ropFICOMw,  ropFICOMw,  ropFICOMw,  ropFICOMw,  ropFICOMw,  ropFICOMw,  ropFICOMw,  ropFICOMPw, ropFICOMPw, ropFICOMPw, ropFICOMPw, ropFICOMPw, ropFICOMPw, ropFICOMPw, ropFICOMPw,
/*a0*/  ropFISUBw,  ropFISUBw,  ropFISUBw,  ropFISUBw,  ropFISUBw,  ropFISUBw,  ropFISUBw,  ropFISUBw,  ropFISUBRw, ropFISUBRw, ropFISUBRw, ropFISUBRw, ropFISUBRw, ropFISUBRw, ropFISUBRw, ropFISUBRw,
/*b0*/  ropFIDIVw,  ropFIDIVw,  ropFIDIVw,  ropFIDIVw,  ropFIDIVw,  ropFIDIVw,  ropFIDIVw,  ropFIDIVw,  ropFIDIVRw, ropFIDIVRw, ropFIDIVRw, ropFIDIVRw, ropFIDIVRw, ropFIDIVRw, ropFIDIVRw, ropFIDIVRw,
/*c0*/  ropFADDP,   ropFADDP,   ropFADDP,   ropFADDP,   ropFADDP,   ropFADDP,   ropFADDP,   ropFADDP,   ropFMULP,   ropFMULP,   ropFMULP,   ropFMULP,   ropFMULP,   ropFMULP,   ropFMULP,   ropFMULP,
/*d0*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       ropFCOMPP,  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*e0*/  ropFSUBRP,  ropFSUBRP,  ropFSUBRP,  ropFSUBRP,  ropFSUBRP,  ropFSUBRP,  ropFSUBRP,  ropFSUBRP,  ropFSUBP,   ropFSUBP,   ropFSUBP,   ropFSUBP,   ropFSUBP,   ropFSUBP,   ropFSUBP,   ropFSUBP,
/*f0*/  ropFDIVRP,  ropFDIVRP,  ropFDIVRP,  ropFDIVRP,  ropFDIVRP,  ropFDIVRP,  ropFDIVRP,  ropFDIVRP,  ropFDIVP,   ropFDIVP,   ropFDIVP,   ropFDIVP,   ropFDIVP,   ropFDIVP,   ropFDIVP,   ropFDIVP,
/*32-bit data*/
/*      00          01          02          03          04          05          06          07          08          09          0a          0b          0c          0d          0e          0f*/
/*00*/  ropFIADDw,  ropFIADDw,  ropFIADDw,  ropFIADDw,  ropFIADDw,  ropFIADDw,  ropFIADDw,  ropFIADDw,  ropFIMULw,  ropFIMULw,  ropFIMULw,  ropFIMULw,  ropFIMULw,  ropFIMULw,  ropFIMULw,  ropFIMULw,
/*10*/  ropFICOMw,  ropFICOMw,  ropFICOMw,  ropFICOMw,  ropFICOMw,  ropFICOMw,  ropFICOMw,  ropFICOMw,  ropFICOMPw, ropFICOMPw, ropFICOMPw, ropFICOMPw, ropFICOMPw, ropFICOMPw, ropFICOMPw, ropFICOMPw,
/*20*/  ropFISUBw,  ropFISUBw,  ropFISUBw,  ropFISUBw,  ropFISUBw,  ropFISUBw,  ropFISUBw,  ropFISUBw,  ropFISUBRw, ropFISUBRw, ropFISUBRw, ropFISUBRw, ropFISUBRw, ropFISUBRw, ropFISUBRw, ropFISUBRw,
/*30*/  ropFIDIVw,  ropFIDIVw,  ropFIDIVw,  ropFIDIVw,  ropFIDIVw,  ropFIDIVw,  ropFIDIVw,  ropFIDIVw,  ropFIDIVRw, ropFIDIVRw, ropFIDIVRw, ropFIDIVRw, ropFIDIVRw, ropFIDIVRw, ropFIDIVRw, ropFIDIVRw,
/*40*/  ropFIADDw,  ropFIADDw,  ropFIADDw,  ropFIADDw,  ropFIADDw,  ropFIADDw,  ropFIADDw,  ropFIADDw,  ropFIMULw,  ropFIMULw,  ropFIMULw,  ropFIMULw,  ropFIMULw,  ropFIMULw,  ropFIMULw,  ropFIMULw,
/*50*/  ropFICOMw,  ropFICOMw,  ropFICOMw,  ropFICOMw,  ropFICOMw,  ropFICOMw,  ropFICOMw,  ropFICOMw,  ropFICOMPw, ropFICOMPw, ropFICOMPw, ropFICOMPw, ropFICOMPw, ropFICOMPw, ropFICOMPw, ropFICOMPw,
/*60*/  ropFISUBw,  ropFISUBw,  ropFISUBw,  ropFISUBw,  ropFISUBw,  ropFISUBw,  ropFISUBw,  ropFISUBw,  ropFISUBRw, ropFISUBRw, ropFISUBRw, ropFISUBRw, ropFISUBRw, ropFISUBRw, ropFISUBRw, ropFISUBRw,
/*70*/  ropFIDIVw,  ropFIDIVw,  ropFIDIVw,  ropFIDIVw,  ropFIDIVw,  ropFIDIVw,  ropFIDIVw,  ropFIDIVw,  ropFIDIVRw, ropFIDIVRw, ropFIDIVRw, ropFIDIVRw, ropFIDIVRw, ropFIDIVRw, ropFIDIVRw, ropFIDIVRw,
/*80*/  ropFIADDw,  ropFIADDw,  ropFIADDw,  ropFIADDw,  ropFIADDw,  ropFIADDw,  ropFIADDw,  ropFIADDw,  ropFIMULw,  ropFIMULw,  ropFIMULw,  ropFIMULw,  ropFIMULw,  ropFIMULw,  ropFIMULw,  ropFIMULw,
/*90*/  ropFICOMw,  ropFICOMw,  ropFICOMw,  ropFICOMw,  ropFICOMw,  ropFICOMw,  ropFICOMw,  ropFICOMw,  ropFICOMPw, ropFICOMPw, ropFICOMPw, ropFICOMPw, ropFICOMPw, ropFICOMPw, ropFICOMPw, ropFICOMPw,
/*a0*/  ropFISUBw,  ropFISUBw,  ropFISUBw,  ropFISUBw,  ropFISUBw,  ropFISUBw,  ropFISUBw,  ropFISUBw,  ropFISUBRw, ropFISUBRw, ropFISUBRw, ropFISUBRw, ropFISUBRw, ropFISUBRw, ropFISUBRw, ropFISUBRw,
/*b0*/  ropFIDIVw,  ropFIDIVw,  ropFIDIVw,  ropFIDIVw,  ropFIDIVw,  ropFIDIVw,  ropFIDIVw,  ropFIDIVw,  ropFIDIVRw, ropFIDIVRw, ropFIDIVRw, ropFIDIVRw, ropFIDIVRw, ropFIDIVRw, ropFIDIVRw, ropFIDIVRw,
/*c0*/  ropFADDP,   ropFADDP,   ropFADDP,   ropFADDP,   ropFADDP,   ropFADDP,   ropFADDP,   ropFADDP,   ropFMULP,   ropFMULP,   ropFMULP,   ropFMULP,   ropFMULP,   ropFMULP,   ropFMULP,   ropFMULP,
/*d0*/  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       ropFCOMPP,  NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*e0*/  ropFSUBRP,  ropFSUBRP,  ropFSUBRP,  ropFSUBRP,  ropFSUBRP,  ropFSUBRP,  ropFSUBRP,  ropFSUBRP,  ropFSUBP,   ropFSUBP,   ropFSUBP,   ropFSUBP,   ropFSUBP,   ropFSUBP,   ropFSUBP,   ropFSUBP,
/*f0*/  ropFDIVRP,  ropFDIVRP,  ropFDIVRP,  ropFDIVRP,  ropFDIVRP,  ropFDIVRP,  ropFDIVRP,  ropFDIVRP,  ropFDIVP,   ropFDIVP,   ropFDIVP,   ropFDIVP,   ropFDIVP,   ropFDIVP,   ropFDIVP,   ropFDIVP,
    // clang-format on
};
/* Recompiler dispatch table for x87 escape opcode 0xDF (word-integer
   FILD/FIST/FISTP, quadword-integer FILDq/FISTPq, and FSTSW AX - the
   single entry at row e0 column 00).  Layout as with the other
   recomp_opcodes_* tables; all register-register rows except FSTSW AX
   are left to the interpreter. */
RecompOpFn recomp_opcodes_df[512] = {
    // clang-format off
/*16-bit data*/
/*      00           01          02          03          04          05          06          07          08          09          0a          0b          0c          0d          0e          0f*/
/*00*/  ropFILDw,    ropFILDw,   ropFILDw,   ropFILDw,   ropFILDw,   ropFILDw,   ropFILDw,   ropFILDw,   NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*10*/  ropFISTw,    ropFISTw,   ropFISTw,   ropFISTw,   ropFISTw,   ropFISTw,   ropFISTw,   ropFISTw,   ropFISTPw,  ropFISTPw,  ropFISTPw,  ropFISTPw,  ropFISTPw,  ropFISTPw,  ropFISTPw,  ropFISTPw,
/*20*/  NULL,        NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       ropFILDq,   ropFILDq,   ropFILDq,   ropFILDq,   ropFILDq,   ropFILDq,   ropFILDq,   ropFILDq,
/*30*/  NULL,        NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       ropFISTPq,  ropFISTPq,  ropFISTPq,  ropFISTPq,  ropFISTPq,  ropFISTPq,  ropFISTPq,  ropFISTPq,
/*40*/  ropFILDw,    ropFILDw,   ropFILDw,   ropFILDw,   ropFILDw,   ropFILDw,   ropFILDw,   ropFILDw,   NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*50*/  ropFISTw,    ropFISTw,   ropFISTw,   ropFISTw,   ropFISTw,   ropFISTw,   ropFISTw,   ropFISTw,   ropFISTPw,  ropFISTPw,  ropFISTPw,  ropFISTPw,  ropFISTPw,  ropFISTPw,  ropFISTPw,  ropFISTPw,
/*60*/  NULL,        NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       ropFILDq,   ropFILDq,   ropFILDq,   ropFILDq,   ropFILDq,   ropFILDq,   ropFILDq,   ropFILDq,
/*70*/  NULL,        NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       ropFISTPq,  ropFISTPq,  ropFISTPq,  ropFISTPq,  ropFISTPq,  ropFISTPq,  ropFISTPq,  ropFISTPq,
/*80*/  ropFILDw,    ropFILDw,   ropFILDw,   ropFILDw,   ropFILDw,   ropFILDw,   ropFILDw,   ropFILDw,   NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*90*/  ropFISTw,    ropFISTw,   ropFISTw,   ropFISTw,   ropFISTw,   ropFISTw,   ropFISTw,   ropFISTw,   ropFISTPw,  ropFISTPw,  ropFISTPw,  ropFISTPw,  ropFISTPw,  ropFISTPw,  ropFISTPw,  ropFISTPw,
/*a0*/  NULL,        NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       ropFILDq,   ropFILDq,   ropFILDq,   ropFILDq,   ropFILDq,   ropFILDq,   ropFILDq,   ropFILDq,
/*b0*/  NULL,        NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       ropFISTPq,  ropFISTPq,  ropFISTPq,  ropFISTPq,  ropFISTPq,  ropFISTPq,  ropFISTPq,  ropFISTPq,
/*c0*/  NULL,        NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*d0*/  NULL,        NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*e0*/  ropFSTSW_AX, NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*f0*/  NULL,        NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*32-bit data*/
/*      00           01          02          03          04          05          06          07          08          09          0a          0b          0c          0d          0e          0f*/
/*00*/  ropFILDw,    ropFILDw,   ropFILDw,   ropFILDw,   ropFILDw,   ropFILDw,   ropFILDw,   ropFILDw,   NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*10*/  ropFISTw,    ropFISTw,   ropFISTw,   ropFISTw,   ropFISTw,   ropFISTw,   ropFISTw,   ropFISTw,   ropFISTPw,  ropFISTPw,  ropFISTPw,  ropFISTPw,  ropFISTPw,  ropFISTPw,  ropFISTPw,  ropFISTPw,
/*20*/  NULL,        NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       ropFILDq,   ropFILDq,   ropFILDq,   ropFILDq,   ropFILDq,   ropFILDq,   ropFILDq,   ropFILDq,
/*30*/  NULL,        NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       ropFISTPq,  ropFISTPq,  ropFISTPq,  ropFISTPq,  ropFISTPq,  ropFISTPq,  ropFISTPq,  ropFISTPq,
/*40*/  ropFILDw,    ropFILDw,   ropFILDw,   ropFILDw,   ropFILDw,   ropFILDw,   ropFILDw,   ropFILDw,   NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*50*/  ropFISTw,    ropFISTw,   ropFISTw,   ropFISTw,   ropFISTw,   ropFISTw,   ropFISTw,   ropFISTw,   ropFISTPw,  ropFISTPw,  ropFISTPw,  ropFISTPw,  ropFISTPw,  ropFISTPw,  ropFISTPw,  ropFISTPw,
/*60*/  NULL,        NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       ropFILDq,   ropFILDq,   ropFILDq,   ropFILDq,   ropFILDq,   ropFILDq,   ropFILDq,   ropFILDq,
/*70*/  NULL,        NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       ropFISTPq,  ropFISTPq,  ropFISTPq,  ropFISTPq,  ropFISTPq,  ropFISTPq,  ropFISTPq,  ropFISTPq,
/*80*/  ropFILDw,    ropFILDw,   ropFILDw,   ropFILDw,   ropFILDw,   ropFILDw,   ropFILDw,   ropFILDw,   NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*90*/  ropFISTw,    ropFISTw,   ropFISTw,   ropFISTw,   ropFISTw,   ropFISTw,   ropFISTw,   ropFISTw,   ropFISTPw,  ropFISTPw,  ropFISTPw,  ropFISTPw,  ropFISTPw,  ropFISTPw,  ropFISTPw,  ropFISTPw,
/*a0*/  NULL,        NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       ropFILDq,   ropFILDq,   ropFILDq,   ropFILDq,   ropFILDq,   ropFILDq,   ropFILDq,   ropFILDq,
/*b0*/  NULL,        NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       ropFISTPq,  ropFISTPq,  ropFISTPq,  ropFISTPq,  ropFISTPq,  ropFISTPq,  ropFISTPq,  ropFISTPq,
/*c0*/  NULL,        NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*d0*/  NULL,        NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*e0*/  ropFSTSW_AX, NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
/*f0*/  NULL,        NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,       NULL,
    // clang-format on
};
``` | /content/code_sandbox/src/codegen_new/codegen_ops.c | c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 32,966 |
```objective-c
#ifndef _CODEGEN_ALLOCATOR_H_
#define _CODEGEN_ALLOCATOR_H_
/*The allocator handles all allocation of executable memory. Since the two-pass
  recompiler design makes applying hard limits to codeblock size difficult, the
  allocator allows memory to be provided as and when required.

  The allocator provides a block size of a little under 1 kB (slightly lower to
  limit cache aliasing). Each generated codeblock is allocated one block by default,
  and will allocate additional block(s) once the existing memory is sorted. Blocks
  are chained together by jump instructions.

  Due to the chaining, the total memory size is limited by the range of a jump
  instruction. ARMv7 is restricted to +/- 32 MB, ARMv8 to +/- 128 MB, x86 to
  +/- 2GB. As a result, total memory size is limited to 32 MB on ARMv7*/
/*Block count: 32768 * 0x3c0 = 30 MB on 32-bit ARM (within the +/- 32 MB branch
  range described above), 131072 * 0x3c0 = 120 MB elsewhere. Kept a power of two
  so MEM_BLOCK_MASK works as an index mask.*/
#if defined __ARM_EABI__ || defined _ARM_ || defined _M_ARM
#    define MEM_BLOCK_NR 32768
#else
#    define MEM_BLOCK_NR 131072
#endif
#define MEM_BLOCK_MASK (MEM_BLOCK_NR - 1)
/*0x3c0 = 960 bytes - the "little under 1 kB" block size mentioned above,
  chosen to limit cache aliasing.*/
#define MEM_BLOCK_SIZE 0x3c0

/*One-time initialisation of the allocator's block pool.*/
void codegen_allocator_init(void);

/*Allocate a mem_block_t, and the associated backing memory.
  If parent is non-NULL, then the new block will be added to the list in
  parent->next*/
struct mem_block_t *codegen_allocator_allocate(struct mem_block_t *parent, int code_block);
/*Free a mem_block_t, and any subsequent blocks in the list at block->next*/
void codegen_allocator_free(struct mem_block_t *block);
/*Get a pointer to the backing memory associated with block*/
uint8_t *codeblock_allocator_get_ptr(struct mem_block_t *block);
/*Cache clean memory block list*/
void codegen_allocator_clean_blocks(struct mem_block_t *block);

/*NOTE(review): appears to track how much of the pool is in use - confirm the
  units (blocks vs bytes) against codegen_allocator.c.*/
extern int codegen_allocator_usage;
#endif
``` | /content/code_sandbox/src/codegen_new/codegen_allocator.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 409 |
```objective-c
/* Recompiler front-end handlers for control-flow instructions: relative JMP
   (8/16/32-bit displacement), far JMP, near CALL, and near/far RET with and
   without an immediate stack adjustment.  All share the common rop handler
   signature (codeblock, IR under construction, opcode, fetched bytes,
   operand-size flag, post-opcode PC); the uint32_t return value follows the
   same convention as the other rop handlers - presumably the updated op_pc
   or a stop marker; confirm against the implementations. */
uint32_t ropJMP_r8(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJMP_r16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJMP_r32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJMP_far_16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJMP_far_32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropCALL_r16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropCALL_r32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropRET_16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropRET_32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropRET_imm_16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropRET_imm_32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropRETF_16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropRETF_32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropRETF_imm_16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropRETF_imm_32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
``` | /content/code_sandbox/src/codegen_new/codegen_ops_jump.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 661 |
```c
#if defined i386 || defined __i386 || defined __i386__ || defined _X86_ || defined _M_IX86
# include <stdint.h>
# include <86box/86box.h>
# include "cpu.h"
# include <86box/mem.h>
# include <86box/plat_unused.h>
# include "x86.h"
# include "x86_ops.h"
# include "x86seg_common.h"
# include "x86seg.h"
# include "x87_sf.h"
# include "386_common.h"
# include "codegen.h"
# include "codegen_allocator.h"
# include "codegen_backend.h"
# include "codegen_backend_x86_defs.h"
# include "codegen_backend_x86_ops.h"
# include "codegen_backend_x86_ops_fpu.h"
# include "codegen_backend_x86_ops_sse.h"
# include "codegen_ir_defs.h"
/* Map IR register descriptors onto host x86 register numbers.
   IREG_GET_SIZE extracts the operand width; IREG_SIZE_BH selects the high
   byte of a register pair (AH/CH/DH/BH), which x86 encodes as register
   numbers 4-7 - hence the "| 4" below. */
#    define HOST_REG_IS_L(reg) (IREG_GET_SIZE(reg) == IREG_SIZE_L)
#    define HOST_REG_IS_W(reg) (IREG_GET_SIZE(reg) == IREG_SIZE_W)
#    define HOST_REG_IS_B(reg) (IREG_GET_SIZE(reg) == IREG_SIZE_B && IREG_GET_REG(reg) < 4)
#    define HOST_REG_IS_BH(reg) (IREG_GET_SIZE(reg) == IREG_SIZE_BH && IREG_GET_REG(reg) < 4)
#    define HOST_REG_GET(reg) ((IREG_GET_SIZE(reg) == IREG_SIZE_BH) ? (IREG_GET_REG((reg) &3) | 4) : (IREG_GET_REG(reg) & 7))

/* Predicates on a size value as returned by IREG_GET_SIZE (used where the
   size has already been extracted, unlike HOST_REG_IS_* above). */
#    define REG_IS_L(size) (size == IREG_SIZE_L)
#    define REG_IS_W(size) (size == IREG_SIZE_W)
#    define REG_IS_B(size) (size == IREG_SIZE_B || size == IREG_SIZE_BH)
#    define REG_IS_BH(size) (size == IREG_SIZE_BH)
#    define REG_IS_D(size) (size == IREG_SIZE_D)
#    define REG_IS_Q(size) (size == IREG_SIZE_Q)
/* Emit host code for an ADD uop: dest = src_a + src_b.
   Supports 32/16/8-bit integer register operands; the operand width is
   taken from the IREG size encoded in each register descriptor. */
static int
codegen_ADD(codeblock_t *block, uop_t *uop)
{
    const int size_d = IREG_GET_SIZE(uop->dest_reg_a_real);
    const int size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    const int size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    const int dst    = HOST_REG_GET(uop->dest_reg_a_real);
    const int a      = HOST_REG_GET(uop->src_reg_a_real);
    const int b      = HOST_REG_GET(uop->src_reg_b_real);

    if (REG_IS_L(size_d) && REG_IS_L(size_a) && REG_IS_L(size_b)) {
        /* 32-bit: LEA adds into a different destination without a MOV. */
        if (uop->dest_reg_a_real == uop->src_reg_a_real)
            host_x86_ADD32_REG_REG(block, dst, b);
        else
            host_x86_LEA_REG_REG(block, dst, a, b);
    } else if (REG_IS_W(size_d) && REG_IS_W(size_a) && REG_IS_W(size_b)) {
        /* 16/8-bit: copy src_a into place first when needed, then add. */
        if (uop->dest_reg_a_real != uop->src_reg_a_real)
            host_x86_MOV16_REG_REG(block, dst, a);
        host_x86_ADD16_REG_REG(block, dst, b);
    } else if (REG_IS_B(size_d) && REG_IS_B(size_a) && REG_IS_B(size_b)) {
        if (uop->dest_reg_a_real != uop->src_reg_a_real)
            host_x86_MOV8_REG_REG(block, dst, a);
        host_x86_ADD8_REG_REG(block, dst, b);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("ADD %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
#    endif
    return 0;
}
/* Emit host code for an ADD-immediate uop: dest = src + imm_data.
   Supports 32/16/8-bit integer register operands. */
static int
codegen_ADD_IMM(codeblock_t *block, uop_t *uop)
{
    const int size_d   = IREG_GET_SIZE(uop->dest_reg_a_real);
    const int size_s   = IREG_GET_SIZE(uop->src_reg_a_real);
    const int dst      = HOST_REG_GET(uop->dest_reg_a_real);
    const int src      = HOST_REG_GET(uop->src_reg_a_real);
    const int in_place = (uop->dest_reg_a_real == uop->src_reg_a_real);

    if (REG_IS_L(size_d) && REG_IS_L(size_s)) {
        /* 32-bit: LEA folds the copy and the add into one instruction. */
        if (in_place)
            host_x86_ADD32_REG_IMM(block, dst, uop->imm_data);
        else
            host_x86_LEA_REG_IMM(block, dst, src, uop->imm_data);
    } else if (REG_IS_W(size_d) && REG_IS_W(size_s)) {
        if (!in_place)
            host_x86_MOV16_REG_REG(block, dst, src);
        host_x86_ADD16_REG_IMM(block, dst, uop->imm_data);
    } else if (REG_IS_B(size_d) && REG_IS_B(size_s)) {
        if (!in_place)
            host_x86_MOV8_REG_REG(block, dst, src);
        host_x86_ADD8_REG_IMM(block, dst, uop->imm_data);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("ADD_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
#    endif
    return 0;
}
/* Emit dest = src_a + (src_b << imm_data).  Shift counts 1-3 map directly
   onto x86 LEA scale factors; a zero shift degenerates to a plain add.
   NOTE(review): unlike most handlers in this file, the raw IR register
   descriptors are passed straight to the emitters without HOST_REG_GET -
   presumably these uops only ever carry plain 32-bit registers; confirm at
   the call sites. */
static int
codegen_ADD_LSHIFT(codeblock_t *block, uop_t *uop)
{
    if (uop->imm_data == 0) {
        /* No scaling: plain ADD when accumulating in place, LEA otherwise. */
        if (uop->dest_reg_a_real != uop->src_reg_a_real)
            host_x86_LEA_REG_REG(block, uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
        else
            host_x86_ADD32_REG_REG(block, uop->dest_reg_a_real, uop->src_reg_b_real);
    } else if (uop->imm_data < 4)
        host_x86_LEA_REG_REG_SHIFT(block, uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real, uop->imm_data);
#    ifdef RECOMPILER_DEBUG
    else
        fatal("codegen_ADD_LSHIFT - shift out of range %i\n", uop->imm_data);
#    endif
    return 0;
}
/* Emit host code for an AND uop: dest = src_a & src_b.
   Handles 64-bit vector operands held in XMM registers (only when the
   destination aliases src_a, since PAND works in place) as well as
   32/16/8-bit integer registers. */
static int
codegen_AND(codeblock_t *block, uop_t *uop)
{
    const int dst    = HOST_REG_GET(uop->dest_reg_a_real);
    const int a      = HOST_REG_GET(uop->src_reg_a_real);
    const int b      = HOST_REG_GET(uop->src_reg_b_real);
    const int size_d = IREG_GET_SIZE(uop->dest_reg_a_real);
    const int size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    const int size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    const int same   = (uop->dest_reg_a_real == uop->src_reg_a_real);

    if (REG_IS_Q(size_d) && REG_IS_Q(size_a) && REG_IS_Q(size_b) && same) {
        host_x86_PAND_XREG_XREG(block, dst, b);
    } else if (REG_IS_L(size_d) && REG_IS_L(size_a) && REG_IS_L(size_b)) {
        if (!same)
            host_x86_MOV32_REG_REG(block, dst, a);
        host_x86_AND32_REG_REG(block, dst, b);
    } else if (REG_IS_W(size_d) && REG_IS_W(size_a) && REG_IS_W(size_b)) {
        if (!same)
            host_x86_MOV16_REG_REG(block, dst, a);
        host_x86_AND16_REG_REG(block, dst, b);
    } else if (REG_IS_B(size_d) && REG_IS_B(size_a) && REG_IS_B(size_b)) {
        if (!same)
            host_x86_MOV8_REG_REG(block, dst, a);
        host_x86_AND8_REG_REG(block, dst, b);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("AND %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
#    endif
    return 0;
}
/* Emit host code for an AND-immediate uop: dest = src & imm_data.
   Supports 32/16/8-bit integer register operands. */
static int
codegen_AND_IMM(codeblock_t *block, uop_t *uop)
{
    const int dst      = HOST_REG_GET(uop->dest_reg_a_real);
    const int src      = HOST_REG_GET(uop->src_reg_a_real);
    const int size_d   = IREG_GET_SIZE(uop->dest_reg_a_real);
    const int size_s   = IREG_GET_SIZE(uop->src_reg_a_real);
    const int in_place = (uop->dest_reg_a_real == uop->src_reg_a_real);

    if (REG_IS_L(size_d) && REG_IS_L(size_s)) {
        if (!in_place)
            host_x86_MOV32_REG_REG(block, dst, src);
        host_x86_AND32_REG_IMM(block, dst, uop->imm_data);
    } else if (REG_IS_W(size_d) && REG_IS_W(size_s)) {
        if (!in_place)
            host_x86_MOV16_REG_REG(block, dst, src);
        host_x86_AND16_REG_IMM(block, dst, uop->imm_data);
    } else if (REG_IS_B(size_d) && REG_IS_B(size_s)) {
        if (!in_place)
            host_x86_MOV8_REG_REG(block, dst, src);
        host_x86_AND8_REG_IMM(block, dst, uop->imm_data);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("AND_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
#    endif
    return 0;
}
/* Emit host code for an ANDN uop on 64-bit vector operands.
   x86 PANDN computes dest = (~dest) & src; since this handler requires the
   destination to alias src_a, the effect is dest = ~src_a & src_b.
   Only the vector (Q-sized, in-place) form is supported. */
static int
codegen_ANDN(codeblock_t *block, uop_t *uop)
{
    const int dst    = HOST_REG_GET(uop->dest_reg_a_real);
    const int b      = HOST_REG_GET(uop->src_reg_b_real);
    const int size_d = IREG_GET_SIZE(uop->dest_reg_a_real);
    const int size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    const int size_b = IREG_GET_SIZE(uop->src_reg_b_real);

    if (REG_IS_Q(size_d) && REG_IS_Q(size_a) && REG_IS_Q(size_b) && (uop->dest_reg_a_real == uop->src_reg_a_real))
        host_x86_PANDN_XREG_XREG(block, dst, b);
#    ifdef RECOMPILER_DEBUG
    else
        fatal("ANDN %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
#    endif
    return 0;
}
/* CALL_FUNC: emit an unconditional call to the helper function in uop->p. */
static int
codegen_CALL_FUNC(codeblock_t *block, uop_t *uop)
{
    host_x86_CALL(block, uop->p);
    return 0;
}
/* CALL_FUNC_RESULT: call the helper in uop->p and copy its 32-bit return
   value (in EAX per the host calling convention) into the destination
   register. Destination must be a 32-bit IR register. */
static int
codegen_CALL_FUNC_RESULT(codeblock_t *block, uop_t *uop)
{
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
#ifdef RECOMPILER_DEBUG
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    if (!REG_IS_L(dest_size))
        fatal("CALL_FUNC_RESULT %02x\n", uop->dest_reg_a_real);
#endif
    host_x86_CALL(block, uop->p);
    host_x86_MOV32_REG_REG(block, dest_reg, REG_EAX);
    return 0;
}
/* CALL_INSTRUCTION_FUNC: call an instruction-emulation helper (uop->p);
   a non-zero return in EAX signals a fault/abort, in which case emitted
   code jumps to the common block-exit routine. */
static int
codegen_CALL_INSTRUCTION_FUNC(codeblock_t *block, uop_t *uop)
{
    host_x86_CALL(block, uop->p);
    host_x86_TEST32_REG(block, REG_EAX, REG_EAX);
    host_x86_JNZ(block, codegen_exit_rout);
#if 0
    host_x86_CALL(block, codegen_debug);
#endif
    return 0;
}
/* CMP_IMM_JZ: compare a 32-bit register against imm_data and emit a jump
   to the known target uop->p if equal. */
static int
codegen_CMP_IMM_JZ(codeblock_t *block, uop_t *uop)
{
    int src_reg  = HOST_REG_GET(uop->src_reg_a_real);
    int src_size = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_L(src_size)) {
        host_x86_CMP32_REG_IMM(block, src_reg, uop->imm_data);
    }
#ifdef RECOMPILER_DEBUG
    else
        fatal("CMP_IMM_JZ %02x\n", uop->src_reg_a_real);
#endif
    host_x86_JZ(block, uop->p);
    return 0;
}
/* CMP_IMM_JNZ_DEST: compare a 32/16-bit register against imm_data and emit
   a long-form JNZ whose target is unresolved; the patch location is stored
   back into uop->p for later fixup. */
static int
codegen_CMP_IMM_JNZ_DEST(codeblock_t *block, uop_t *uop)
{
    int src_reg  = HOST_REG_GET(uop->src_reg_a_real);
    int src_size = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_L(src_size)) {
        host_x86_CMP32_REG_IMM(block, src_reg, uop->imm_data);
    } else if (REG_IS_W(src_size)) {
        host_x86_CMP16_REG_IMM(block, src_reg, uop->imm_data);
    }
#ifdef RECOMPILER_DEBUG
    else
        fatal("CMP_IMM_JNZ_DEST %02x\n", uop->src_reg_a_real);
#endif
    uop->p = host_x86_JNZ_long(block);
    return 0;
}
/* CMP_IMM_JZ_DEST: as CMP_IMM_JNZ_DEST but emits a long-form JZ; the
   unresolved patch location is stored into uop->p. */
static int
codegen_CMP_IMM_JZ_DEST(codeblock_t *block, uop_t *uop)
{
    int src_reg  = HOST_REG_GET(uop->src_reg_a_real);
    int src_size = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_L(src_size)) {
        host_x86_CMP32_REG_IMM(block, src_reg, uop->imm_data);
    } else if (REG_IS_W(src_size)) {
        host_x86_CMP16_REG_IMM(block, src_reg, uop->imm_data);
    }
#ifdef RECOMPILER_DEBUG
    else
        fatal("CMP_IMM_JZ_DEST %02x\n", uop->src_reg_a_real);
#endif
    uop->p = host_x86_JZ_long(block);
    return 0;
}
/* CMP_JB: compare two 32-bit registers and emit a long JB to the known
   target uop->p, patching the 32-bit relative displacement immediately. */
static int
codegen_CMP_JB(codeblock_t *block, uop_t *uop)
{
    int src_reg_a  = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    uint32_t *jump_p;
    if (REG_IS_L(src_size_a) && REG_IS_L(src_size_b)) {
        host_x86_CMP32_REG_REG(block, src_reg_a, src_reg_b);
    }
#ifdef RECOMPILER_DEBUG
    else
        fatal("CMP_JB %02x\n", uop->src_reg_a_real);
#endif
    jump_p = host_x86_JB_long(block);
    /* rel32 is relative to the end of the displacement field, hence the +4. */
    *jump_p = (uintptr_t) uop->p - ((uintptr_t) jump_p + 4);
    return 0;
}
/* CMP_JNBE: compare two 32-bit registers and emit a long JNBE (above) to
   the known target uop->p, patching the rel32 displacement immediately. */
static int
codegen_CMP_JNBE(codeblock_t *block, uop_t *uop)
{
    int src_reg_a  = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    uint32_t *jump_p;
    if (REG_IS_L(src_size_a) && REG_IS_L(src_size_b)) {
        host_x86_CMP32_REG_REG(block, src_reg_a, src_reg_b);
    }
#ifdef RECOMPILER_DEBUG
    else
        fatal("CMP_JNBE %02x\n", uop->src_reg_a_real);
#endif
    jump_p = host_x86_JNBE_long(block);
    /* rel32 is relative to the end of the displacement field, hence the +4. */
    *jump_p = (uintptr_t) uop->p - ((uintptr_t) jump_p + 4);
    return 0;
}
/* CMP_JNB_DEST: compare two same-sized (32/16/8-bit) registers and emit a
   long JNB with an unresolved target; patch location stored in uop->p. */
static int
codegen_CMP_JNB_DEST(codeblock_t *block, uop_t *uop)
{
    int src_reg_a  = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_L(src_size_a) && REG_IS_L(src_size_b)) {
        host_x86_CMP32_REG_REG(block, src_reg_a, src_reg_b);
    } else if (REG_IS_W(src_size_a) && REG_IS_W(src_size_b)) {
        host_x86_CMP16_REG_REG(block, src_reg_a, src_reg_b);
    } else if (REG_IS_B(src_size_a) && REG_IS_B(src_size_b)) {
        host_x86_CMP8_REG_REG(block, src_reg_a, src_reg_b);
    }
#ifdef RECOMPILER_DEBUG
    else
        fatal("CMP_JNB_DEST %02x\n", uop->src_reg_a_real);
#endif
    uop->p = host_x86_JNB_long(block);
    return 0;
}
/* CMP_JNBE_DEST: compare two same-sized (32/16/8-bit) registers and emit a
   long JNBE with an unresolved target; patch location stored in uop->p. */
static int
codegen_CMP_JNBE_DEST(codeblock_t *block, uop_t *uop)
{
    int src_reg_a  = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_L(src_size_a) && REG_IS_L(src_size_b)) {
        host_x86_CMP32_REG_REG(block, src_reg_a, src_reg_b);
    } else if (REG_IS_W(src_size_a) && REG_IS_W(src_size_b)) {
        host_x86_CMP16_REG_REG(block, src_reg_a, src_reg_b);
    } else if (REG_IS_B(src_size_a) && REG_IS_B(src_size_b)) {
        host_x86_CMP8_REG_REG(block, src_reg_a, src_reg_b);
    }
#ifdef RECOMPILER_DEBUG
    else
        fatal("CMP_JNBE_DEST %02x\n", uop->src_reg_a_real);
#endif
    uop->p = host_x86_JNBE_long(block);
    return 0;
}
/* CMP_JNL_DEST: compare two same-sized (32/16/8-bit) registers and emit a
   long JNL with an unresolved target; patch location stored in uop->p. */
static int
codegen_CMP_JNL_DEST(codeblock_t *block, uop_t *uop)
{
    int src_reg_a  = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_L(src_size_a) && REG_IS_L(src_size_b)) {
        host_x86_CMP32_REG_REG(block, src_reg_a, src_reg_b);
    } else if (REG_IS_W(src_size_a) && REG_IS_W(src_size_b)) {
        host_x86_CMP16_REG_REG(block, src_reg_a, src_reg_b);
    } else if (REG_IS_B(src_size_a) && REG_IS_B(src_size_b)) {
        host_x86_CMP8_REG_REG(block, src_reg_a, src_reg_b);
    }
#ifdef RECOMPILER_DEBUG
    else
        fatal("CMP_JNL_DEST %02x\n", uop->src_reg_a_real);
#endif
    uop->p = host_x86_JNL_long(block);
    return 0;
}
/* CMP_JNLE_DEST: compare two same-sized (32/16/8-bit) registers and emit a
   long JNLE with an unresolved target; patch location stored in uop->p. */
static int
codegen_CMP_JNLE_DEST(codeblock_t *block, uop_t *uop)
{
    int src_reg_a  = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_L(src_size_a) && REG_IS_L(src_size_b)) {
        host_x86_CMP32_REG_REG(block, src_reg_a, src_reg_b);
    } else if (REG_IS_W(src_size_a) && REG_IS_W(src_size_b)) {
        host_x86_CMP16_REG_REG(block, src_reg_a, src_reg_b);
    } else if (REG_IS_B(src_size_a) && REG_IS_B(src_size_b)) {
        host_x86_CMP8_REG_REG(block, src_reg_a, src_reg_b);
    }
#ifdef RECOMPILER_DEBUG
    else
        fatal("CMP_JNLE_DEST %02x\n", uop->src_reg_a_real);
#endif
    uop->p = host_x86_JNLE_long(block);
    return 0;
}
/* CMP_JNO_DEST: compare two same-sized (32/16/8-bit) registers and emit a
   long JNO with an unresolved target; patch location stored in uop->p. */
static int
codegen_CMP_JNO_DEST(codeblock_t *block, uop_t *uop)
{
    int src_reg_a  = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_L(src_size_a) && REG_IS_L(src_size_b)) {
        host_x86_CMP32_REG_REG(block, src_reg_a, src_reg_b);
    } else if (REG_IS_W(src_size_a) && REG_IS_W(src_size_b)) {
        host_x86_CMP16_REG_REG(block, src_reg_a, src_reg_b);
    } else if (REG_IS_B(src_size_a) && REG_IS_B(src_size_b)) {
        host_x86_CMP8_REG_REG(block, src_reg_a, src_reg_b);
    }
#ifdef RECOMPILER_DEBUG
    else
        fatal("CMP_JNO_DEST %02x\n", uop->src_reg_a_real);
#endif
    uop->p = host_x86_JNO_long(block);
    return 0;
}
/* CMP_JNZ_DEST: compare two same-sized (32/16/8-bit) registers and emit a
   long JNZ with an unresolved target; patch location stored in uop->p. */
static int
codegen_CMP_JNZ_DEST(codeblock_t *block, uop_t *uop)
{
    int src_reg_a  = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_L(src_size_a) && REG_IS_L(src_size_b)) {
        host_x86_CMP32_REG_REG(block, src_reg_a, src_reg_b);
    } else if (REG_IS_W(src_size_a) && REG_IS_W(src_size_b)) {
        host_x86_CMP16_REG_REG(block, src_reg_a, src_reg_b);
    } else if (REG_IS_B(src_size_a) && REG_IS_B(src_size_b)) {
        host_x86_CMP8_REG_REG(block, src_reg_a, src_reg_b);
    }
#ifdef RECOMPILER_DEBUG
    else
        fatal("CMP_JNZ_DEST %02x\n", uop->src_reg_a_real);
#endif
    uop->p = host_x86_JNZ_long(block);
    return 0;
}
/* CMP_JB_DEST: compare two same-sized (32/16/8-bit) registers and emit a
   long JB with an unresolved target; patch location stored in uop->p. */
static int
codegen_CMP_JB_DEST(codeblock_t *block, uop_t *uop)
{
    int src_reg_a  = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_L(src_size_a) && REG_IS_L(src_size_b)) {
        host_x86_CMP32_REG_REG(block, src_reg_a, src_reg_b);
    } else if (REG_IS_W(src_size_a) && REG_IS_W(src_size_b)) {
        host_x86_CMP16_REG_REG(block, src_reg_a, src_reg_b);
    } else if (REG_IS_B(src_size_a) && REG_IS_B(src_size_b)) {
        host_x86_CMP8_REG_REG(block, src_reg_a, src_reg_b);
    }
#ifdef RECOMPILER_DEBUG
    else
        fatal("CMP_JB_DEST %02x\n", uop->src_reg_a_real);
#endif
    uop->p = host_x86_JB_long(block);
    return 0;
}
/* CMP_JBE_DEST: compare two same-sized (32/16/8-bit) registers and emit a
   long JBE with an unresolved target; patch location stored in uop->p. */
static int
codegen_CMP_JBE_DEST(codeblock_t *block, uop_t *uop)
{
    int src_reg_a  = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_L(src_size_a) && REG_IS_L(src_size_b)) {
        host_x86_CMP32_REG_REG(block, src_reg_a, src_reg_b);
    } else if (REG_IS_W(src_size_a) && REG_IS_W(src_size_b)) {
        host_x86_CMP16_REG_REG(block, src_reg_a, src_reg_b);
    } else if (REG_IS_B(src_size_a) && REG_IS_B(src_size_b)) {
        host_x86_CMP8_REG_REG(block, src_reg_a, src_reg_b);
    }
#ifdef RECOMPILER_DEBUG
    else
        fatal("CMP_JBE_DEST %02x\n", uop->src_reg_a_real);
#endif
    uop->p = host_x86_JBE_long(block);
    return 0;
}
/* CMP_JL_DEST: compare two same-sized (32/16/8-bit) registers and emit a
   long JL with an unresolved target; patch location stored in uop->p. */
static int
codegen_CMP_JL_DEST(codeblock_t *block, uop_t *uop)
{
    int src_reg_a  = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_L(src_size_a) && REG_IS_L(src_size_b)) {
        host_x86_CMP32_REG_REG(block, src_reg_a, src_reg_b);
    } else if (REG_IS_W(src_size_a) && REG_IS_W(src_size_b)) {
        host_x86_CMP16_REG_REG(block, src_reg_a, src_reg_b);
    } else if (REG_IS_B(src_size_a) && REG_IS_B(src_size_b)) {
        host_x86_CMP8_REG_REG(block, src_reg_a, src_reg_b);
    }
#ifdef RECOMPILER_DEBUG
    else
        fatal("CMP_JL_DEST %02x\n", uop->src_reg_a_real);
#endif
    uop->p = host_x86_JL_long(block);
    return 0;
}
/* CMP_JLE_DEST: compare two same-sized (32/16/8-bit) registers and emit a
   long JLE with an unresolved target; patch location stored in uop->p. */
static int
codegen_CMP_JLE_DEST(codeblock_t *block, uop_t *uop)
{
    int src_reg_a  = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_L(src_size_a) && REG_IS_L(src_size_b)) {
        host_x86_CMP32_REG_REG(block, src_reg_a, src_reg_b);
    } else if (REG_IS_W(src_size_a) && REG_IS_W(src_size_b)) {
        host_x86_CMP16_REG_REG(block, src_reg_a, src_reg_b);
    } else if (REG_IS_B(src_size_a) && REG_IS_B(src_size_b)) {
        host_x86_CMP8_REG_REG(block, src_reg_a, src_reg_b);
    }
#ifdef RECOMPILER_DEBUG
    else
        fatal("CMP_JLE_DEST %02x\n", uop->src_reg_a_real);
#endif
    uop->p = host_x86_JLE_long(block);
    return 0;
}
/* CMP_JO_DEST: compare two same-sized (32/16/8-bit) registers and emit a
   long JO with an unresolved target; patch location stored in uop->p. */
static int
codegen_CMP_JO_DEST(codeblock_t *block, uop_t *uop)
{
    int src_reg_a  = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_L(src_size_a) && REG_IS_L(src_size_b)) {
        host_x86_CMP32_REG_REG(block, src_reg_a, src_reg_b);
    } else if (REG_IS_W(src_size_a) && REG_IS_W(src_size_b)) {
        host_x86_CMP16_REG_REG(block, src_reg_a, src_reg_b);
    } else if (REG_IS_B(src_size_a) && REG_IS_B(src_size_b)) {
        host_x86_CMP8_REG_REG(block, src_reg_a, src_reg_b);
    }
#ifdef RECOMPILER_DEBUG
    else
        fatal("CMP_JO_DEST %02x\n", uop->src_reg_a_real);
#endif
    uop->p = host_x86_JO_long(block);
    return 0;
}
/* CMP_JZ_DEST: compare two same-sized (32/16/8-bit) registers and emit a
   long JZ with an unresolved target; patch location stored in uop->p. */
static int
codegen_CMP_JZ_DEST(codeblock_t *block, uop_t *uop)
{
    int src_reg_a  = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_L(src_size_a) && REG_IS_L(src_size_b)) {
        host_x86_CMP32_REG_REG(block, src_reg_a, src_reg_b);
    } else if (REG_IS_W(src_size_a) && REG_IS_W(src_size_b)) {
        host_x86_CMP16_REG_REG(block, src_reg_a, src_reg_b);
    } else if (REG_IS_B(src_size_a) && REG_IS_B(src_size_b)) {
        host_x86_CMP8_REG_REG(block, src_reg_a, src_reg_b);
    }
#ifdef RECOMPILER_DEBUG
    else
        fatal("CMP_JZ_DEST %02x\n", uop->src_reg_a_real);
#endif
    uop->p = host_x86_JZ_long(block);
    return 0;
}
/* FABS: dest = |src| for 64-bit double registers; requires dest == src.
   Computes 0.0 - dest into the XMM scratch register and takes MAXSD of the
   two, which selects the non-negative value (the absolute value). */
static int
codegen_FABS(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_a  = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_D(dest_size) && REG_IS_D(src_size_a) && dest_reg == src_reg_a) {
        host_x86_PXOR_XREG_XREG(block, REG_XMM_TEMP, REG_XMM_TEMP);
        host_x86_SUBSD_XREG_XREG(block, REG_XMM_TEMP, dest_reg);
        host_x86_MAXSD_XREG_XREG(block, dest_reg, REG_XMM_TEMP);
    }
#ifdef RECOMPILER_DEBUG
    else
        /* Fixed: format string had three %02x conversions but only two
           arguments, which is undefined behavior in a varargs call. */
        fatal("codegen_FABS %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
#endif
    return 0;
}
/* FCHS: dest = -src for 64-bit double registers. Copies src to the XMM
   scratch register, zeroes dest, then computes dest = 0.0 - src (the copy
   makes the sequence safe when dest and src share a host register). */
static int
codegen_FCHS(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_a  = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_D(dest_size) && REG_IS_D(src_size_a)) {
        host_x86_MOVQ_XREG_XREG(block, REG_XMM_TEMP, src_reg_a);
        host_x86_PXOR_XREG_XREG(block, dest_reg, dest_reg);
        host_x86_SUBSD_XREG_XREG(block, dest_reg, REG_XMM_TEMP);
    }
#ifdef RECOMPILER_DEBUG
    else
        /* Fixed: format string had three %02x conversions but only two
           arguments, which is undefined behavior in a varargs call. */
        fatal("codegen_FCHS %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
#endif
    return 0;
}
/* FSQRT: dest = sqrt(src) for 64-bit double registers via SQRTSD. */
static int
codegen_FSQRT(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_a  = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_D(dest_size) && REG_IS_D(src_size_a)) {
        host_x86_SQRTSD_XREG_XREG(block, dest_reg, src_reg_a);
    }
#ifdef RECOMPILER_DEBUG
    else
        /* Fixed: format string had three %02x conversions but only two
           arguments, which is undefined behavior in a varargs call. */
        fatal("codegen_FSQRT %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
#endif
    return 0;
}
/* FTST: compare a double source against 0.0 and produce the x87 condition
   bits (C0/C2/C3) in a 16-bit destination register. Uses COMISD + LAHF to
   capture host flags in AH, then masks to the FPU status-word bits; EAX is
   preserved via ECX when the destination is not EAX itself.
   NOTE(review): when dest_reg != REG_EAX, ECX is clobbered — presumably ECX
   is free at this point in register allocation; confirm against the
   allocator's reserved-register assumptions. */
static int
codegen_FTST(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_a  = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_W(dest_size) && REG_IS_D(src_size_a)) {
        host_x86_PXOR_XREG_XREG(block, REG_XMM_TEMP, REG_XMM_TEMP);
        if (dest_reg != REG_EAX)
            host_x86_MOV32_REG_REG(block, REG_ECX, REG_EAX);
        host_x86_XOR32_REG_REG(block, REG_EAX, REG_EAX);
        host_x86_COMISD_XREG_XREG(block, src_reg_a, REG_XMM_TEMP);
        host_x86_LAHF(block);
        host_x86_AND16_REG_IMM(block, REG_EAX, FPU_SW_C0 | FPU_SW_C2 | FPU_SW_C3);
        if (dest_reg != REG_EAX) {
            host_x86_MOV16_REG_REG(block, dest_reg, REG_EAX);
            host_x86_MOV32_REG_REG(block, REG_EAX, REG_ECX);
        }
    }
#ifdef RECOMPILER_DEBUG
    else
        fatal("codegen_FTST %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
#endif
    return 0;
}
/* FADD: dest = src_a + src_b for 64-bit double registers via ADDSD.
   Requires dest == src_a (two-operand x86 form). */
static int
codegen_FADD(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_a  = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_D(dest_size) && REG_IS_D(src_size_a) && REG_IS_D(src_size_b) && dest_reg == src_reg_a) {
        host_x86_ADDSD_XREG_XREG(block, dest_reg, src_reg_b);
    }
#ifdef RECOMPILER_DEBUG
    else
        fatal("codegen_FADD %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
#endif
    return 0;
}
/* FCOM: compare two double registers and produce the x87 condition bits
   (C0/C2/C3) in a 16-bit destination register, using COMISD + LAHF to
   capture host flags in AH. EAX is saved to/restored from ECX when the
   destination is not EAX.
   NOTE(review): as in codegen_FTST, ECX is clobbered when dest != EAX —
   confirm ECX is not live here per the register allocator. */
static int
codegen_FCOM(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_a  = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_W(dest_size) && REG_IS_D(src_size_a) && REG_IS_D(src_size_b)) {
        if (dest_reg != REG_EAX)
            host_x86_MOV32_REG_REG(block, REG_ECX, REG_EAX);
        host_x86_XOR32_REG_REG(block, REG_EAX, REG_EAX);
        host_x86_COMISD_XREG_XREG(block, src_reg_a, src_reg_b);
        host_x86_LAHF(block);
        host_x86_AND16_REG_IMM(block, REG_EAX, FPU_SW_C0 | FPU_SW_C2 | FPU_SW_C3);
        if (dest_reg != REG_EAX) {
            host_x86_MOV16_REG_REG(block, dest_reg, REG_EAX);
            host_x86_MOV32_REG_REG(block, REG_EAX, REG_ECX);
        }
    }
#ifdef RECOMPILER_DEBUG
    else
        fatal("codegen_FCOM %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
#endif
    return 0;
}
/* FDIV: dest = src_a / src_b for 64-bit double registers. When dest and
   src_a share a host register a direct DIVSD is used; otherwise the
   quotient is built in the XMM scratch register first so src_b is not
   corrupted when dest aliases it. */
static int
codegen_FDIV(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_a  = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_D(dest_size) && REG_IS_D(src_size_a) && REG_IS_D(src_size_b) && dest_reg == src_reg_a) {
        host_x86_DIVSD_XREG_XREG(block, dest_reg, src_reg_b);
    } else if (REG_IS_D(dest_size) && REG_IS_D(src_size_a) && REG_IS_D(src_size_b)) {
        host_x86_MOVQ_XREG_XREG(block, REG_XMM_TEMP, src_reg_a);
        host_x86_DIVSD_XREG_XREG(block, REG_XMM_TEMP, src_reg_b);
        host_x86_MOVQ_XREG_XREG(block, dest_reg, REG_XMM_TEMP);
    }
#ifdef RECOMPILER_DEBUG
    else
        fatal("codegen_FDIV %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
#endif
    return 0;
}
/* FMUL: dest = src_a * src_b for 64-bit double registers via MULSD.
   Requires dest == src_a (two-operand x86 form). */
static int
codegen_FMUL(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_a  = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_D(dest_size) && REG_IS_D(src_size_a) && REG_IS_D(src_size_b) && dest_reg == src_reg_a) {
        host_x86_MULSD_XREG_XREG(block, dest_reg, src_reg_b);
    }
#ifdef RECOMPILER_DEBUG
    else
        fatal("codegen_FMUL %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
#endif
    return 0;
}
/* FSUB: dest = src_a - src_b for 64-bit double registers. Direct SUBSD
   when dest == src_a; otherwise computed in the XMM scratch register so
   the result is correct even when dest aliases src_b. */
static int
codegen_FSUB(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_a  = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_D(dest_size) && REG_IS_D(src_size_a) && REG_IS_D(src_size_b) && dest_reg == src_reg_a) {
        host_x86_SUBSD_XREG_XREG(block, dest_reg, src_reg_b);
    } else if (REG_IS_D(dest_size) && REG_IS_D(src_size_a) && REG_IS_D(src_size_b)) {
        host_x86_MOVQ_XREG_XREG(block, REG_XMM_TEMP, src_reg_a);
        host_x86_SUBSD_XREG_XREG(block, REG_XMM_TEMP, src_reg_b);
        host_x86_MOVQ_XREG_XREG(block, dest_reg, REG_XMM_TEMP);
    }
#ifdef RECOMPILER_DEBUG
    else
        fatal("codegen_FSUB %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
#endif
    return 0;
}
/* FP_ENTER: guard emitted at the start of x87 code. Tests the EM/TS bits
   (0xc) in CR0; if either is set, records the faulting PC in oldpc and
   raises interrupt 7 (#NM, device-not-available) before leaving the block.
   The forward JZ skips the fault path and is patched to the current write
   position afterwards (offset relative to the end of the rel32 field). */
static int
codegen_FP_ENTER(codeblock_t *block, uop_t *uop)
{
    uint32_t *branch_offset;
    host_x86_MOV32_REG_ABS(block, REG_ECX, &cr0);
    host_x86_TEST32_REG_IMM(block, REG_ECX, 0xc);
    branch_offset = host_x86_JZ_long(block);
    host_x86_MOV32_ABS_IMM(block, &cpu_state.oldpc, uop->imm_data);
    host_x86_MOV32_STACK_IMM(block, STACK_ARG0, 7);
    host_x86_CALL(block, x86_int);
    host_x86_JMP(block, codegen_exit_rout);
    *branch_offset = (uint32_t) ((uintptr_t) &block_write_data[block_pos] - (uintptr_t) branch_offset) - 4;
    return 0;
}
/* MMX_ENTER: guard emitted at the start of MMX code. Same CR0 EM/TS check
   and #NM fault path as FP_ENTER; on the non-faulting path it additionally
   marks all eight x87 tag entries valid, resets TOP to 0 and sets the
   ismmx flag, mirroring how MMX instructions take over the x87 state. */
static int
codegen_MMX_ENTER(codeblock_t *block, uop_t *uop)
{
    uint32_t *branch_offset;
    host_x86_MOV32_REG_ABS(block, REG_ECX, &cr0);
    host_x86_TEST32_REG_IMM(block, REG_ECX, 0xc);
    branch_offset = host_x86_JZ_long(block);
    host_x86_MOV32_ABS_IMM(block, &cpu_state.oldpc, uop->imm_data);
    host_x86_MOV32_STACK_IMM(block, STACK_ARG0, 7);
    host_x86_CALL(block, x86_int);
    host_x86_JMP(block, codegen_exit_rout);
    *branch_offset = (uint32_t) ((uintptr_t) &block_write_data[block_pos] - (uintptr_t) branch_offset) - 4;
    host_x86_MOV32_ABS_IMM(block, &cpu_state.tag[0], 0x01010101);
    host_x86_MOV32_ABS_IMM(block, &cpu_state.tag[4], 0x01010101);
    host_x86_MOV32_ABS_IMM(block, &cpu_state.TOP, 0);
    host_x86_MOV8_ABS_IMM(block, &cpu_state.ismmx, 1);
    return 0;
}
/* JMP: emit an unconditional jump to the known target uop->p. */
static int
codegen_JMP(codeblock_t *block, uop_t *uop)
{
    host_x86_JMP(block, uop->p);
    return 0;
}
/* JMP_DEST: emit a long-form jump with an unresolved target; the patch
   location is stored into uop->p for later fixup. */
static int
codegen_JMP_DEST(codeblock_t *block, uop_t *uop)
{
    uop->p = host_x86_JMP_long(block);
    return 0;
}
/* LOAD_FUNC_ARG0: store a 16-bit register into the first stack argument
   slot for an upcoming helper call. Only word-sized sources supported. */
static int
codegen_LOAD_FUNC_ARG0(codeblock_t *block, uop_t *uop)
{
    int src_reg  = HOST_REG_GET(uop->src_reg_a_real);
    int src_size = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_W(src_size)) {
        host_x86_MOV16_STACK_REG(block, STACK_ARG0, src_reg);
    }
#ifdef RECOMPILER_DEBUG
    else
        fatal("codegen_LOAD_FUNC_ARG0 %02x\n", uop->src_reg_a_real);
#endif
    return 0;
}
/* LOAD_FUNC_ARG1: register-sourced argument 1 is not implemented on this
   backend; debug builds abort if the IR ever emits it. */
static int
codegen_LOAD_FUNC_ARG1(UNUSED(codeblock_t *block), UNUSED(uop_t *uop))
{
#ifdef RECOMPILER_DEBUG
    fatal("codegen_LOAD_FUNC_ARG1 %02x\n", uop->src_reg_a_real);
#endif
    return 0;
}
/* LOAD_FUNC_ARG2: register-sourced argument 2 is not implemented on this
   backend; debug builds abort if the IR ever emits it. */
static int
codegen_LOAD_FUNC_ARG2(UNUSED(codeblock_t *block), UNUSED(uop_t *uop))
{
#ifdef RECOMPILER_DEBUG
    fatal("codegen_LOAD_FUNC_ARG2 %02x\n", uop->src_reg_a_real);
#endif
    return 0;
}
/* LOAD_FUNC_ARG3: register-sourced argument 3 is not implemented on this
   backend; debug builds abort if the IR ever emits it. */
static int
codegen_LOAD_FUNC_ARG3(UNUSED(codeblock_t *block), UNUSED(uop_t *uop))
{
#ifdef RECOMPILER_DEBUG
    fatal("codegen_LOAD_FUNC_ARG3 %02x\n", uop->src_reg_a_real);
#endif
    return 0;
}
/* LOAD_FUNC_ARG0_IMM: store a 32-bit immediate into argument slot 0. */
static int
codegen_LOAD_FUNC_ARG0_IMM(codeblock_t *block, uop_t *uop)
{
    host_x86_MOV32_STACK_IMM(block, STACK_ARG0, uop->imm_data);
    return 0;
}
/* LOAD_FUNC_ARG1_IMM: store a 32-bit immediate into argument slot 1. */
static int
codegen_LOAD_FUNC_ARG1_IMM(codeblock_t *block, uop_t *uop)
{
    host_x86_MOV32_STACK_IMM(block, STACK_ARG1, uop->imm_data);
    return 0;
}
/* LOAD_FUNC_ARG2_IMM: store a 32-bit immediate into argument slot 2. */
static int
codegen_LOAD_FUNC_ARG2_IMM(codeblock_t *block, uop_t *uop)
{
    host_x86_MOV32_STACK_IMM(block, STACK_ARG2, uop->imm_data);
    return 0;
}
/* LOAD_FUNC_ARG3_IMM: store a 32-bit immediate into argument slot 3. */
static int
codegen_LOAD_FUNC_ARG3_IMM(codeblock_t *block, uop_t *uop)
{
    host_x86_MOV32_STACK_IMM(block, STACK_ARG3, uop->imm_data);
    return 0;
}
/* LOAD_SEG: call loadseg(selector, segment_ptr) with a 16-bit selector
   register and the x86seg pointer in uop->p. A non-zero return in EAX
   means the segment load faulted, and control goes to the block exit. */
static int
codegen_LOAD_SEG(codeblock_t *block, uop_t *uop)
{
    int src_reg = HOST_REG_GET(uop->src_reg_a_real);
#ifdef RECOMPILER_DEBUG
    int src_size = IREG_GET_SIZE(uop->src_reg_a_real);
    if (!REG_IS_W(src_size))
        fatal("LOAD_SEG %02x %p\n", uop->src_reg_a_real, uop->p);
#endif
    host_x86_MOV16_STACK_REG(block, STACK_ARG0, src_reg);
    host_x86_MOV32_STACK_IMM(block, STACK_ARG1, (uint32_t) uop->p);
    host_x86_CALL(block, loadseg);
    host_x86_TEST32_REG(block, REG_EAX, REG_EAX);
    host_x86_JNZ(block, codegen_exit_rout);
    return 0;
}
/* MEM_LOAD_ABS: load 8/16/32 bits from seg_base + imm_data. The effective
   address is built in ESI, the size-specific memory-load stub is called
   (result in ECX, fault indicator back in ESI), and a non-zero ESI aborts
   the block. On success the value is copied from ECX into dest_reg. */
static int
codegen_MEM_LOAD_ABS(codeblock_t *block, uop_t *uop)
{
    int dest_reg  = HOST_REG_GET(uop->dest_reg_a_real);
    int seg_reg   = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    host_x86_LEA_REG_IMM(block, REG_ESI, seg_reg, uop->imm_data);
    if (REG_IS_B(dest_size)) {
        host_x86_CALL(block, codegen_mem_load_byte);
    } else if (REG_IS_W(dest_size)) {
        host_x86_CALL(block, codegen_mem_load_word);
    } else if (REG_IS_L(dest_size)) {
        host_x86_CALL(block, codegen_mem_load_long);
    }
#ifdef RECOMPILER_DEBUG
    else
        fatal("MEM_LOAD_ABS - %02x\n", uop->dest_reg_a_real);
#endif
    host_x86_TEST32_REG(block, REG_ESI, REG_ESI);
    host_x86_JNZ(block, codegen_exit_rout);
    if (REG_IS_B(dest_size)) {
        host_x86_MOV8_REG_REG(block, dest_reg, REG_ECX);
    } else if (REG_IS_W(dest_size)) {
        host_x86_MOV16_REG_REG(block, dest_reg, REG_ECX);
    } else if (REG_IS_L(dest_size)) {
        host_x86_MOV32_REG_REG(block, dest_reg, REG_ECX);
    }
    return 0;
}
/* MEM_LOAD_REG: load 8/16/32/64 bits from seg_base + addr_reg
   (+ optional imm_data displacement). Same stub convention as
   MEM_LOAD_ABS: address in ESI, result in ECX (or the XMM scratch
   register for 64-bit loads), non-zero ESI after the call = fault. */
static int
codegen_MEM_LOAD_REG(codeblock_t *block, uop_t *uop)
{
    int dest_reg  = HOST_REG_GET(uop->dest_reg_a_real);
    int seg_reg   = HOST_REG_GET(uop->src_reg_a_real);
    int addr_reg  = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    host_x86_LEA_REG_REG(block, REG_ESI, seg_reg, addr_reg);
    if (uop->imm_data)
        host_x86_ADD32_REG_IMM(block, REG_ESI, uop->imm_data);
    if (REG_IS_B(dest_size)) {
        host_x86_CALL(block, codegen_mem_load_byte);
    } else if (REG_IS_W(dest_size)) {
        host_x86_CALL(block, codegen_mem_load_word);
    } else if (REG_IS_L(dest_size)) {
        host_x86_CALL(block, codegen_mem_load_long);
    } else if (REG_IS_Q(dest_size)) {
        host_x86_CALL(block, codegen_mem_load_quad);
    }
#ifdef RECOMPILER_DEBUG
    else
        fatal("MEM_LOAD_REG - %02x\n", uop->dest_reg_a_real);
#endif
    host_x86_TEST32_REG(block, REG_ESI, REG_ESI);
    host_x86_JNZ(block, codegen_exit_rout);
    if (REG_IS_B(dest_size)) {
        host_x86_MOV8_REG_REG(block, dest_reg, REG_ECX);
    } else if (REG_IS_W(dest_size)) {
        host_x86_MOV16_REG_REG(block, dest_reg, REG_ECX);
    } else if (REG_IS_L(dest_size)) {
        host_x86_MOV32_REG_REG(block, dest_reg, REG_ECX);
    } else if (REG_IS_Q(dest_size)) {
        host_x86_MOVQ_XREG_XREG(block, dest_reg, REG_XMM_TEMP);
    }
    return 0;
}
/* MEM_LOAD_SINGLE: load a 32-bit float from seg_base + addr_reg
   (+ optional displacement) into a double-typed destination register.
   The stub leaves the (converted) value in the XMM scratch register and
   a fault indicator in ESI. */
static int
codegen_MEM_LOAD_SINGLE(codeblock_t *block, uop_t *uop)
{
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int seg_reg  = HOST_REG_GET(uop->src_reg_a_real);
    int addr_reg = HOST_REG_GET(uop->src_reg_b_real);
#ifdef RECOMPILER_DEBUG
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    if (!REG_IS_D(dest_size))
        fatal("MEM_LOAD_SINGLE - %02x\n", uop->dest_reg_a_real);
#endif
    host_x86_LEA_REG_REG(block, REG_ESI, seg_reg, addr_reg);
    if (uop->imm_data)
        host_x86_ADD32_REG_IMM(block, REG_ESI, uop->imm_data);
    host_x86_CALL(block, codegen_mem_load_single);
    host_x86_TEST32_REG(block, REG_ESI, REG_ESI);
    host_x86_JNZ(block, codegen_exit_rout);
    host_x86_MOVQ_XREG_XREG(block, dest_reg, REG_XMM_TEMP);
    return 0;
}
/* MEM_LOAD_DOUBLE: load a 64-bit double from seg_base + addr_reg
   (+ optional displacement) into a double-typed destination register,
   via the memory-load stub (value in XMM scratch, fault flag in ESI). */
static int
codegen_MEM_LOAD_DOUBLE(codeblock_t *block, uop_t *uop)
{
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int seg_reg  = HOST_REG_GET(uop->src_reg_a_real);
    int addr_reg = HOST_REG_GET(uop->src_reg_b_real);
#ifdef RECOMPILER_DEBUG
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    if (!REG_IS_D(dest_size))
        fatal("MEM_LOAD_DOUBLE - %02x\n", uop->dest_reg_a_real);
#endif
    host_x86_LEA_REG_REG(block, REG_ESI, seg_reg, addr_reg);
    if (uop->imm_data)
        host_x86_ADD32_REG_IMM(block, REG_ESI, uop->imm_data);
    host_x86_CALL(block, codegen_mem_load_double);
    host_x86_TEST32_REG(block, REG_ESI, REG_ESI);
    host_x86_JNZ(block, codegen_exit_rout);
    host_x86_MOVQ_XREG_XREG(block, dest_reg, REG_XMM_TEMP);
    return 0;
}
/* MEM_STORE_ABS: store 8/16/32 bits to seg_base + imm_data. Address goes
   in ESI, the value in ECX, then the size-specific store stub is called;
   a non-zero ESI afterwards signals a fault and exits the block. */
static int
codegen_MEM_STORE_ABS(codeblock_t *block, uop_t *uop)
{
    int seg_reg  = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg  = HOST_REG_GET(uop->src_reg_b_real);
    int src_size = IREG_GET_SIZE(uop->src_reg_b_real);
    host_x86_LEA_REG_IMM(block, REG_ESI, seg_reg, uop->imm_data);
    if (REG_IS_B(src_size)) {
        host_x86_MOV8_REG_REG(block, REG_ECX, src_reg);
        host_x86_CALL(block, codegen_mem_store_byte);
    } else if (REG_IS_W(src_size)) {
        host_x86_MOV16_REG_REG(block, REG_ECX, src_reg);
        host_x86_CALL(block, codegen_mem_store_word);
    } else if (REG_IS_L(src_size)) {
        host_x86_MOV32_REG_REG(block, REG_ECX, src_reg);
        host_x86_CALL(block, codegen_mem_store_long);
    }
#ifdef RECOMPILER_DEBUG
    else
        fatal("MEM_STORE_ABS - %02x\n", uop->src_reg_b_real);
#endif
    host_x86_TEST32_REG(block, REG_ESI, REG_ESI);
    host_x86_JNZ(block, codegen_exit_rout);
    return 0;
}
/* MEM_STORE_REG: store 8/16/32/64 bits to seg_base + addr_reg (+ optional
   imm_data displacement). Address is built in ESI, the value placed in
   ECX (or the XMM scratch register for 64-bit stores), then the
   size-specific store stub is called; non-zero ESI afterwards = fault. */
static int
codegen_MEM_STORE_REG(codeblock_t *block, uop_t *uop)
{
    int seg_reg  = HOST_REG_GET(uop->src_reg_a_real);
    int addr_reg = HOST_REG_GET(uop->src_reg_b_real);
    int src_reg  = HOST_REG_GET(uop->src_reg_c_real);
    int src_size = IREG_GET_SIZE(uop->src_reg_c_real);
    host_x86_LEA_REG_REG(block, REG_ESI, seg_reg, addr_reg);
    if (uop->imm_data)
        host_x86_ADD32_REG_IMM(block, REG_ESI, uop->imm_data);
    if (REG_IS_B(src_size)) {
        host_x86_MOV8_REG_REG(block, REG_ECX, src_reg);
        host_x86_CALL(block, codegen_mem_store_byte);
    } else if (REG_IS_W(src_size)) {
        host_x86_MOV16_REG_REG(block, REG_ECX, src_reg);
        host_x86_CALL(block, codegen_mem_store_word);
    } else if (REG_IS_L(src_size)) {
        host_x86_MOV32_REG_REG(block, REG_ECX, src_reg);
        host_x86_CALL(block, codegen_mem_store_long);
    } else if (REG_IS_Q(src_size)) {
        host_x86_MOVQ_XREG_XREG(block, REG_XMM_TEMP, src_reg);
        host_x86_CALL(block, codegen_mem_store_quad);
    }
#ifdef RECOMPILER_DEBUG
    else
        /* Fixed: the failing size check is on src_reg_c_real (the data
           register), so report that instead of the address register. */
        fatal("MEM_STORE_REG - %02x\n", uop->src_reg_c_real);
#endif
    host_x86_TEST32_REG(block, REG_ESI, REG_ESI);
    host_x86_JNZ(block, codegen_exit_rout);
    return 0;
}
/* MEM_STORE_IMM_8: store the low 8 bits of imm_data to seg_base +
   addr_reg via the byte store stub (address in ESI, value in ECX). */
static int
codegen_MEM_STORE_IMM_8(codeblock_t *block, uop_t *uop)
{
    int seg_reg  = HOST_REG_GET(uop->src_reg_a_real);
    int addr_reg = HOST_REG_GET(uop->src_reg_b_real);
    host_x86_LEA_REG_REG(block, REG_ESI, seg_reg, addr_reg);
    host_x86_MOV8_REG_IMM(block, REG_ECX, uop->imm_data);
    host_x86_CALL(block, codegen_mem_store_byte);
    host_x86_TEST32_REG(block, REG_ESI, REG_ESI);
    host_x86_JNZ(block, codegen_exit_rout);
    return 0;
}
/* MEM_STORE_IMM_16: store the low 16 bits of imm_data to seg_base +
   addr_reg via the word store stub (address in ESI, value in ECX). */
static int
codegen_MEM_STORE_IMM_16(codeblock_t *block, uop_t *uop)
{
    int seg_reg  = HOST_REG_GET(uop->src_reg_a_real);
    int addr_reg = HOST_REG_GET(uop->src_reg_b_real);
    host_x86_LEA_REG_REG(block, REG_ESI, seg_reg, addr_reg);
    host_x86_MOV16_REG_IMM(block, REG_ECX, uop->imm_data);
    host_x86_CALL(block, codegen_mem_store_word);
    host_x86_TEST32_REG(block, REG_ESI, REG_ESI);
    host_x86_JNZ(block, codegen_exit_rout);
    return 0;
}
/* MEM_STORE_IMM_32: store the 32-bit imm_data to seg_base + addr_reg via
   the long store stub (address in ESI, value in ECX). */
static int
codegen_MEM_STORE_IMM_32(codeblock_t *block, uop_t *uop)
{
    int seg_reg  = HOST_REG_GET(uop->src_reg_a_real);
    int addr_reg = HOST_REG_GET(uop->src_reg_b_real);
    host_x86_LEA_REG_REG(block, REG_ESI, seg_reg, addr_reg);
    host_x86_MOV32_REG_IMM(block, REG_ECX, uop->imm_data);
    host_x86_CALL(block, codegen_mem_store_long);
    host_x86_TEST32_REG(block, REG_ESI, REG_ESI);
    host_x86_JNZ(block, codegen_exit_rout);
    return 0;
}
/* MEM_STORE_SINGLE: convert a double register to single precision
   (CVTSD2SS into the XMM scratch register) and store it to seg_base +
   addr_reg (+ optional displacement); non-zero ESI after the stub call
   signals a fault and exits the block. */
static int
codegen_MEM_STORE_SINGLE(codeblock_t *block, uop_t *uop)
{
    int seg_reg  = HOST_REG_GET(uop->src_reg_a_real);
    int addr_reg = HOST_REG_GET(uop->src_reg_b_real);
    int src_reg  = HOST_REG_GET(uop->src_reg_c_real);
#ifdef RECOMPILER_DEBUG
    int src_size = IREG_GET_SIZE(uop->src_reg_c_real);
    if (!REG_IS_D(src_size))
        /* Fixed: the size check is on src_reg_c_real (the data register),
           so report that instead of the address register. */
        fatal("MEM_STORE_SINGLE - %02x\n", uop->src_reg_c_real);
#endif
    host_x86_LEA_REG_REG(block, REG_ESI, seg_reg, addr_reg);
    if (uop->imm_data)
        host_x86_ADD32_REG_IMM(block, REG_ESI, uop->imm_data);
    host_x86_CVTSD2SS_XREG_XREG(block, REG_XMM_TEMP, src_reg);
    host_x86_CALL(block, codegen_mem_store_single);
    host_x86_TEST32_REG(block, REG_ESI, REG_ESI);
    host_x86_JNZ(block, codegen_exit_rout);
    return 0;
}
/* MEM_STORE_DOUBLE: copy a double register to the XMM scratch register and
   store it to seg_base + addr_reg (+ optional displacement); non-zero ESI
   after the stub call signals a fault and exits the block. */
static int
codegen_MEM_STORE_DOUBLE(codeblock_t *block, uop_t *uop)
{
    int seg_reg  = HOST_REG_GET(uop->src_reg_a_real);
    int addr_reg = HOST_REG_GET(uop->src_reg_b_real);
    int src_reg  = HOST_REG_GET(uop->src_reg_c_real);
#ifdef RECOMPILER_DEBUG
    int src_size = IREG_GET_SIZE(uop->src_reg_c_real);
    if (!REG_IS_D(src_size))
        /* Fixed: the size check is on src_reg_c_real (the data register),
           so report that instead of the address register. */
        fatal("MEM_STORE_DOUBLE - %02x\n", uop->src_reg_c_real);
#endif
    host_x86_LEA_REG_REG(block, REG_ESI, seg_reg, addr_reg);
    if (uop->imm_data)
        host_x86_ADD32_REG_IMM(block, REG_ESI, uop->imm_data);
    host_x86_MOVQ_XREG_XREG(block, REG_XMM_TEMP, src_reg);
    host_x86_CALL(block, codegen_mem_store_double);
    host_x86_TEST32_REG(block, REG_ESI, REG_ESI);
    host_x86_JNZ(block, codegen_exit_rout);
    return 0;
}
/* MOV: register-to-register copy for matching sizes: 32/16/8-bit integer
   moves, and MOVQ for both double (D) and 64-bit MMX (Q) registers. */
static int
codegen_MOV(codeblock_t *block, uop_t *uop)
{
    int dest_reg  = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg   = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size  = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_L(dest_size) && REG_IS_L(src_size)) {
        host_x86_MOV32_REG_REG(block, dest_reg, src_reg);
    } else if (REG_IS_W(dest_size) && REG_IS_W(src_size)) {
        host_x86_MOV16_REG_REG(block, dest_reg, src_reg);
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size)) {
        host_x86_MOV8_REG_REG(block, dest_reg, src_reg);
    } else if (REG_IS_D(dest_size) && REG_IS_D(src_size)) {
        host_x86_MOVQ_XREG_XREG(block, dest_reg, src_reg);
    } else if (REG_IS_Q(dest_size) && REG_IS_Q(src_size)) {
        host_x86_MOVQ_XREG_XREG(block, dest_reg, src_reg);
    }
#ifdef RECOMPILER_DEBUG
    else
        fatal("MOV %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
#endif
    return 0;
}
/* MOV_IMM: load imm_data into a 32/16/8-bit destination register. */
static int
codegen_MOV_IMM(codeblock_t *block, uop_t *uop)
{
    int dest_reg  = HOST_REG_GET(uop->dest_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    if (REG_IS_L(dest_size)) {
        host_x86_MOV32_REG_IMM(block, dest_reg, uop->imm_data);
    } else if (REG_IS_W(dest_size)) {
        host_x86_MOV16_REG_IMM(block, dest_reg, uop->imm_data);
    } else if (REG_IS_B(dest_size)) {
        host_x86_MOV8_REG_IMM(block, dest_reg, uop->imm_data);
    }
#ifdef RECOMPILER_DEBUG
    else
        fatal("MOV_IMM %02x\n", uop->dest_reg_a_real);
#endif
    return 0;
}
/* Load a host pointer value into a 32-bit register as an immediate.
   NOTE(review): the cast truncates uop->p to 32 bits — assumes a 32-bit
   host (or pointers within the low 4 GiB); confirm this backend is only
   built for such targets. */
static int
codegen_MOV_PTR(codeblock_t *block, uop_t *uop)
{
    host_x86_MOV32_REG_IMM(block, uop->dest_reg_a_real, (uint32_t) uop->p);
    return 0;
}
/* Load a 32-bit value from an absolute host address (uop->p) into a GPR.
   Only 32-bit destinations are supported.
   NOTE(review): unlike the surrounding handlers, this fatal() is not wrapped
   in RECOMPILER_DEBUG, so the size check is active in release builds too —
   confirm whether this asymmetry is intentional. */
static int
codegen_MOV_REG_PTR(codeblock_t *block, uop_t *uop)
{
    int dest_reg  = HOST_REG_GET(uop->dest_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    if (REG_IS_L(dest_size)) {
        host_x86_MOV32_REG_ABS(block, dest_reg, uop->p);
    } else
        fatal("MOV_REG_PTR %02x\n", uop->dest_reg_a_real);
    return 0;
}
/* Zero-extend an 8-bit value loaded from an absolute host address (uop->p)
   into a 32/16-bit register, or plain 8-bit move for a byte destination.
   The fatal() here is unconditional (not RECOMPILER_DEBUG-guarded), matching
   codegen_MOV_REG_PTR. */
static int
codegen_MOVZX_REG_PTR_8(codeblock_t *block, uop_t *uop)
{
    int dest_reg  = HOST_REG_GET(uop->dest_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    if (REG_IS_L(dest_size)) {
        host_x86_MOVZX_REG_ABS_32_8(block, dest_reg, uop->p);
    } else if (REG_IS_W(dest_size)) {
        host_x86_MOVZX_REG_ABS_16_8(block, dest_reg, uop->p);
    } else if (REG_IS_B(dest_size)) {
        /* Same-width: no extension needed. */
        host_x86_MOV8_REG_ABS(block, dest_reg, uop->p);
    } else
        fatal("MOVZX_REG_PTR_8 %02x\n", uop->dest_reg_a_real);
    return 0;
}
/* Zero-extend a 16-bit value loaded from an absolute host address (uop->p)
   into a 32-bit register, or plain 16-bit move for a word destination. */
static int
codegen_MOVZX_REG_PTR_16(codeblock_t *block, uop_t *uop)
{
    int dest_reg  = HOST_REG_GET(uop->dest_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    if (REG_IS_L(dest_size)) {
        host_x86_MOVZX_REG_ABS_32_16(block, dest_reg, uop->p);
    } else if (REG_IS_W(dest_size)) {
        /* Same-width: no extension needed. */
        host_x86_MOV16_REG_ABS(block, dest_reg, uop->p);
    } else
        fatal("MOVZX_REG_PTR_16 %02x\n", uop->dest_reg_a_real);
    return 0;
}
/* Emit a sign-extending register move: 16->32, 8->32 or 8->16 bits,
   selected by the source/destination size pair. */
static int
codegen_MOVSX(codeblock_t *block, uop_t *uop)
{
    int dest_reg  = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg   = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size  = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_L(dest_size) && REG_IS_W(src_size)) {
        host_x86_MOVSX_REG_32_16(block, dest_reg, src_reg);
    } else if (REG_IS_L(dest_size) && REG_IS_B(src_size)) {
        host_x86_MOVSX_REG_32_8(block, dest_reg, src_reg);
    } else if (REG_IS_W(dest_size) && REG_IS_B(src_size)) {
        host_x86_MOVSX_REG_16_8(block, dest_reg, src_reg);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("MOVSX %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
#    endif
    return 0;
}
/* Emit a zero-extending register move: 16->32, 8->32 or 8->16 bits for GPRs.
   Also handles GPR<->XMM transfers: a 32-bit value into a 64-bit (Q) XMM
   register via MOVD (upper bits cleared), and the low 32 bits of an XMM
   register back into a GPR. */
static int
codegen_MOVZX(codeblock_t *block, uop_t *uop)
{
    int dest_reg  = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg   = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size  = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_Q(dest_size) && REG_IS_L(src_size)) {
        host_x86_MOVD_XREG_REG(block, dest_reg, src_reg);
    } else if (REG_IS_L(dest_size) && REG_IS_Q(src_size)) {
        host_x86_MOVD_REG_XREG(block, dest_reg, src_reg);
    } else if (REG_IS_L(dest_size) && REG_IS_W(src_size)) {
        host_x86_MOVZX_REG_32_16(block, dest_reg, src_reg);
    } else if (REG_IS_L(dest_size) && REG_IS_B(src_size)) {
        host_x86_MOVZX_REG_32_8(block, dest_reg, src_reg);
    } else if (REG_IS_W(dest_size) && REG_IS_B(src_size)) {
        host_x86_MOVZX_REG_16_8(block, dest_reg, src_reg);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("MOVZX %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
#    endif
    return 0;
}
/* Convert an integer to a double-precision float in an XMM register.
   32-bit sources use CVTSI2SD directly; 16-bit sources are first
   sign-extended into ECX; 64-bit sources are bounced through the x87 FPU
   via the stack (FILDq/FSTPd), since SSE2 has no 64-bit-int conversion in
   32-bit mode. Clobbers ECX in the 16-bit path and the stack slot at
   [ESP] in the 64-bit path. */
static int
codegen_MOV_DOUBLE_INT(codeblock_t *block, uop_t *uop)
{
    int dest_reg  = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg   = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size  = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_D(dest_size) && REG_IS_L(src_size)) {
        host_x86_CVTSI2SD_XREG_REG(block, dest_reg, src_reg);
    } else if (REG_IS_D(dest_size) && REG_IS_W(src_size)) {
        host_x86_MOVSX_REG_32_16(block, REG_ECX, src_reg);
        host_x86_CVTSI2SD_XREG_REG(block, dest_reg, REG_ECX);
    } else if (REG_IS_D(dest_size) && REG_IS_Q(src_size)) {
        /*There is no SSE instruction to convert a 64-bit integer to a floating point value.
          Instead we have to bounce the integer through memory via x87.*/
        host_x86_MOVQ_BASE_OFFSET_XREG(block, REG_ESP, 0, src_reg);
        host_x87_FILDq_BASE(block, REG_ESP);
        host_x87_FSTPd_BASE(block, REG_ESP);
        host_x86_MOVQ_XREG_BASE_OFFSET(block, dest_reg, REG_ESP, 0);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("MOV_DOUBLE_INT %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
#    endif
    return 0;
}
/* Convert a double in an XMM register to a 32- or 16-bit integer with
   CVTSD2SI. The guest's rounding mode is applied by loading
   cpu_state.new_fp_control into MXCSR around the conversion, then the host
   control (old_fp_control) is restored. The 16-bit path converts into ECX
   first and moves the low word out (clobbers ECX). */
static int
codegen_MOV_INT_DOUBLE(codeblock_t *block, uop_t *uop)
{
    int dest_reg  = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg   = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size  = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_L(dest_size) && REG_IS_D(src_size)) {
        host_x86_LDMXCSR(block, &cpu_state.new_fp_control);
        host_x86_CVTSD2SI_REG_XREG(block, dest_reg, src_reg);
        host_x86_LDMXCSR(block, &cpu_state.old_fp_control);
    } else if (REG_IS_W(dest_size) && REG_IS_D(src_size)) {
        host_x86_LDMXCSR(block, &cpu_state.new_fp_control);
        host_x86_CVTSD2SI_REG_XREG(block, REG_ECX, src_reg);
        host_x86_MOV16_REG_REG(block, dest_reg, REG_ECX);
        host_x86_LDMXCSR(block, &cpu_state.old_fp_control);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("MOV_INT_DOUBLE %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
#    endif
    return 0;
}
/* Convert a double to a 64-bit integer, honouring the MMX/x87 register
   aliasing: if the tag register's sign bit is set (TAG_UINT64), the raw
   64-bit MM value (src_64_reg) is used as-is and the conversion is skipped
   via a forward branch; otherwise the double in src_reg is bounced through
   the x87 FPU (FLDd/FISTPq) on the stack, under the guest rounding control
   word (new_fp_control2 / old_fp_control2). The JS branch displacement is
   back-patched once the fall-through code has been emitted. */
static int
codegen_MOV_INT_DOUBLE_64(codeblock_t *block, uop_t *uop)
{
    int dest_reg    = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg     = HOST_REG_GET(uop->src_reg_a_real);
    int src_64_reg  = HOST_REG_GET(uop->src_reg_b_real);
    int tag_reg     = HOST_REG_GET(uop->src_reg_c_real);
    int dest_size   = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size    = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_64_size = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_D(src_size) && REG_IS_Q(src_64_size)) {
        uint32_t *branch_offset;
        /*If TAG_UINT64 is set then the source is MM[]. Otherwise it is a double in ST()*/
        host_x86_MOVQ_XREG_XREG(block, dest_reg, src_64_reg);
        host_x86_TEST8_REG(block, tag_reg, tag_reg);
        branch_offset = host_x86_JS_long(block);
        /*There is no SSE instruction to convert a floating point value to a 64-bit integer.
          Instead we have to bounce through memory via x87.*/
        host_x87_FLDCW(block, &cpu_state.new_fp_control2);
        host_x86_MOVQ_BASE_OFFSET_XREG(block, REG_ESP, 0, src_reg);
        host_x87_FLDd_BASE(block, REG_ESP);
        host_x87_FISTPq_BASE(block, REG_ESP);
        host_x86_MOVQ_XREG_BASE_OFFSET(block, dest_reg, REG_ESP, 0);
        host_x87_FLDCW(block, &cpu_state.old_fp_control2);
        /* Patch the JS rel32 to skip the conversion path just emitted. */
        *branch_offset = (uint32_t) ((uintptr_t) &block_write_data[block_pos] - (uintptr_t) branch_offset) - 4;
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("MOV_INT_DOUBLE_64 %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
#    endif
    return 0;
}
/* No-operation uop: emits nothing. */
static int
codegen_NOP(UNUSED(codeblock_t *block), UNUSED(uop_t *uop))
{
    return 0;
}
/* Emit a bitwise OR of two registers into the destination. 64-bit (Q)
   operands use POR on XMM registers and require dest == src_a (x86
   two-operand form); 32/16/8-bit GPR paths copy src_a into dest first
   when they differ, then OR in src_b. */
static int
codegen_OR(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_a  = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_a) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_POR_XREG_XREG(block, dest_reg, src_reg_b);
    } else if (REG_IS_L(dest_size) && REG_IS_L(src_size_a) && REG_IS_L(src_size_b)) {
        if (uop->dest_reg_a_real != uop->src_reg_a_real)
            host_x86_MOV32_REG_REG(block, dest_reg, src_reg_a);
        host_x86_OR32_REG_REG(block, dest_reg, src_reg_b);
    } else if (REG_IS_W(dest_size) && REG_IS_W(src_size_a) && REG_IS_W(src_size_b)) {
        if (uop->dest_reg_a_real != uop->src_reg_a_real)
            host_x86_MOV16_REG_REG(block, dest_reg, src_reg_a);
        host_x86_OR16_REG_REG(block, dest_reg, src_reg_b);
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size_a) && REG_IS_B(src_size_b)) {
        if (uop->dest_reg_a_real != uop->src_reg_a_real)
            host_x86_MOV8_REG_REG(block, dest_reg, src_reg_a);
        host_x86_OR8_REG_REG(block, dest_reg, src_reg_b);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("OR %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
#    endif
    return 0;
}
/* Emit a bitwise OR of an immediate into a 32/16/8-bit GPR.
   Requires dest and source to be the same host register (in-place form). */
static int
codegen_OR_IMM(codeblock_t *block, uop_t *uop)
{
    int dest_reg  = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg   = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    if (REG_IS_L(dest_size) && dest_reg == src_reg) {
        host_x86_OR32_REG_IMM(block, dest_reg, uop->imm_data);
    } else if (REG_IS_W(dest_size) && dest_reg == src_reg) {
        host_x86_OR16_REG_IMM(block, dest_reg, uop->imm_data);
    } else if (REG_IS_B(dest_size) && dest_reg == src_reg) {
        host_x86_OR8_REG_IMM(block, dest_reg, uop->imm_data);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("OR_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
#    endif
    return 0;
}
/* MMX PACKSSWB uop: pack signed words to bytes with saturation via the host
   PACKSSWB instruction; x86's two-operand form requires dest == src_a. */
static int
codegen_PACKSSWB(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_PACKSSWB_XREG_XREG(block, dest_reg, src_reg_b);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PACKSSWB %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
#    endif
    return 0;
}
/* MMX PACKSSDW uop: pack signed dwords to words with saturation via the host
   PACKSSDW instruction; requires dest == src_a (two-operand form). */
static int
codegen_PACKSSDW(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_PACKSSDW_XREG_XREG(block, dest_reg, src_reg_b);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PACKSSDW %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
#    endif
    return 0;
}
/* MMX PACKUSWB uop: pack signed words to unsigned bytes with saturation via
   the host PACKUSWB instruction; requires dest == src_a (two-operand form). */
static int
codegen_PACKUSWB(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_PACKUSWB_XREG_XREG(block, dest_reg, src_reg_b);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PACKUSWB %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
#    endif
    return 0;
}
/* MMX PADDB uop: packed 8-bit add via the host PADDB instruction;
   requires dest == src_a (two-operand form). */
static int
codegen_PADDB(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_PADDB_XREG_XREG(block, dest_reg, src_reg_b);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PADDB %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
#    endif
    return 0;
}
/* MMX PADDW uop: packed 16-bit add via the host PADDW instruction;
   requires dest == src_a (two-operand form). */
static int
codegen_PADDW(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_PADDW_XREG_XREG(block, dest_reg, src_reg_b);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PADDW %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
#    endif
    return 0;
}
/* MMX PADDD uop: packed 32-bit add via the host PADDD instruction;
   requires dest == src_a (two-operand form). */
static int
codegen_PADDD(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_PADDD_XREG_XREG(block, dest_reg, src_reg_b);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PADDD %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
#    endif
    return 0;
}
/* MMX PADDSB uop: packed signed-saturating 8-bit add via the host PADDSB
   instruction; requires dest == src_a (two-operand form). */
static int
codegen_PADDSB(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_PADDSB_XREG_XREG(block, dest_reg, src_reg_b);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PADDSB %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
#    endif
    return 0;
}
/* MMX PADDSW uop: packed signed-saturating 16-bit add via the host PADDSW
   instruction; requires dest == src_a (two-operand form). */
static int
codegen_PADDSW(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_PADDSW_XREG_XREG(block, dest_reg, src_reg_b);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PADDSW %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
#    endif
    return 0;
}
/* MMX PADDUSB uop: packed unsigned-saturating 8-bit add via the host PADDUSB
   instruction; requires dest == src_a (two-operand form). */
static int
codegen_PADDUSB(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_PADDUSB_XREG_XREG(block, dest_reg, src_reg_b);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PADDUSB %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
#    endif
    return 0;
}
/* MMX PADDUSW uop: packed unsigned-saturating 16-bit add via the host PADDUSW
   instruction; requires dest == src_a (two-operand form). */
static int
codegen_PADDUSW(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_PADDUSW_XREG_XREG(block, dest_reg, src_reg_b);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PADDUSW %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
#    endif
    return 0;
}
/* MMX PCMPEQB uop: packed 8-bit equality compare via the host PCMPEQB
   instruction; requires dest == src_a (two-operand form). */
static int
codegen_PCMPEQB(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_PCMPEQB_XREG_XREG(block, dest_reg, src_reg_b);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PCMPEQB %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
#    endif
    return 0;
}
/* MMX PCMPEQW uop: packed 16-bit equality compare via the host PCMPEQW
   instruction; requires dest == src_a (two-operand form). */
static int
codegen_PCMPEQW(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_PCMPEQW_XREG_XREG(block, dest_reg, src_reg_b);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PCMPEQW %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
#    endif
    return 0;
}
/* MMX PCMPEQD uop: packed 32-bit equality compare via the host PCMPEQD
   instruction; requires dest == src_a (two-operand form). */
static int
codegen_PCMPEQD(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_PCMPEQD_XREG_XREG(block, dest_reg, src_reg_b);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PCMPEQD %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
#    endif
    return 0;
}
/* MMX PCMPGTB uop: packed signed 8-bit greater-than compare via the host
   PCMPGTB instruction; requires dest == src_a (two-operand form). */
static int
codegen_PCMPGTB(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_PCMPGTB_XREG_XREG(block, dest_reg, src_reg_b);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PCMPGTB %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
#    endif
    return 0;
}
/* MMX PCMPGTW uop: packed signed 16-bit greater-than compare via the host
   PCMPGTW instruction; requires dest == src_a (two-operand form). */
static int
codegen_PCMPGTW(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_PCMPGTW_XREG_XREG(block, dest_reg, src_reg_b);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PCMPGTW %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
#    endif
    return 0;
}
/* MMX PCMPGTD uop: packed signed 32-bit greater-than compare via the host
   PCMPGTD instruction; requires dest == src_a (two-operand form). */
static int
codegen_PCMPGTD(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_PCMPGTD_XREG_XREG(block, dest_reg, src_reg_b);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PCMPGTD %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
#    endif
    return 0;
}
/* 3DNow! PF2ID uop: convert packed single-precision floats to packed 32-bit
   integers via CVTPS2DQ, with MXCSR temporarily set to the truncation
   rounding control (cpu_state.trunc_fp_control) to match PF2ID's
   truncate-toward-zero semantics, then restored (old_fp_control).
   Bug fix: the fatal() format string had two %02x conversions but only one
   argument (undefined behavior per C11 7.21.6.1); the missing
   src_reg_a_real argument is now supplied. */
static int
codegen_PF2ID(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_a  = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_a)) {
        host_x86_LDMXCSR(block, &cpu_state.trunc_fp_control);
        host_x86_CVTPS2DQ_XREG_XREG(block, dest_reg, src_reg_a);
        host_x86_LDMXCSR(block, &cpu_state.old_fp_control);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PF2ID %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
#    endif
    return 0;
}
/* 3DNow! PFADD uop: packed single-precision add via the host ADDPS
   instruction; requires dest == src_a (two-operand form). */
static int
codegen_PFADD(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_ADDPS_XREG_XREG(block, dest_reg, src_reg_b);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PFADD %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
#    endif
    return 0;
}
/* 3DNow! PFCMPEQ uop: packed single-precision equality compare via host
   CMPPS with the EQ predicate; requires dest == src_a (two-operand form). */
static int
codegen_PFCMPEQ(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_CMPPS_XREG_XREG(block, dest_reg, src_reg_b, CMPPS_EQ);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PFCMPEQ %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
#    endif
    return 0;
}
/* 3DNow! PFCMPGE uop: packed single-precision >= compare, implemented as
   CMPPS with the NLT (not-less-than) predicate; requires dest == src_a. */
static int
codegen_PFCMPGE(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_CMPPS_XREG_XREG(block, dest_reg, src_reg_b, CMPPS_NLT);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PFCMPGE %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
#    endif
    return 0;
}
/* 3DNow! PFCMPGT uop: packed single-precision > compare, implemented as
   CMPPS with the NLE (not-less-or-equal) predicate; requires dest == src_a. */
static int
codegen_PFCMPGT(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_CMPPS_XREG_XREG(block, dest_reg, src_reg_b, CMPPS_NLE);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PFCMPGT %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
#    endif
    return 0;
}
/* 3DNow! PFMAX uop: packed single-precision maximum via the host MAXPS
   instruction; requires dest == src_a (two-operand form). */
static int
codegen_PFMAX(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_MAXPS_XREG_XREG(block, dest_reg, src_reg_b);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PFMAX %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
#    endif
    return 0;
}
/* 3DNow! PFMIN uop: packed single-precision minimum via the host MINPS
   instruction; requires dest == src_a (two-operand form). */
static int
codegen_PFMIN(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_MINPS_XREG_XREG(block, dest_reg, src_reg_b);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PFMIN %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
#    endif
    return 0;
}
/* 3DNow! PFMUL uop: packed single-precision multiply via the host MULPS
   instruction; requires dest == src_a (two-operand form). */
static int
codegen_PFMUL(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_MULPS_XREG_XREG(block, dest_reg, src_reg_b);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PFMUL %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
#    endif
    return 0;
}
/* 3DNow! PFRCP uop: scalar reciprocal approximation, computed exactly as
   1.0f / src via CVTSI2SS + DIVSS (clobbers ECX and REG_XMM_TEMP), then
   broadcast to both lanes with UNPCKLPS.
   Bug fix: the fatal() format string had two %02x conversions but only one
   argument (undefined behavior per C11 7.21.6.1); the missing
   src_reg_a_real argument is now supplied. */
static int
codegen_PFRCP(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_a  = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_a)) {
        /*TODO: This could be improved (use RCPSS + iteration)*/
        host_x86_MOV32_REG_IMM(block, REG_ECX, 1);
        host_x86_MOVQ_XREG_XREG(block, REG_XMM_TEMP, src_reg_a);
        host_x86_CVTSI2SS_XREG_REG(block, dest_reg, REG_ECX);
        host_x86_DIVSS_XREG_XREG(block, dest_reg, REG_XMM_TEMP);
        host_x86_UNPCKLPS_XREG_XREG(block, dest_reg, dest_reg);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PFRCP %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
#    endif
    return 0;
}
/* 3DNow! PFRSQRT uop: scalar reciprocal-square-root approximation, computed
   exactly as 1.0f / sqrt(src) via SQRTSS + CVTSI2SS + DIVSS (clobbers ECX
   and REG_XMM_TEMP), then broadcast to both lanes with UNPCKLPS.
   Bug fix: the fatal() format string had two %02x conversions but only one
   argument (undefined behavior per C11 7.21.6.1); the missing
   src_reg_a_real argument is now supplied. */
static int
codegen_PFRSQRT(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_a  = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_a)) {
        /*TODO: This could be improved (use RSQRTSS + iteration)*/
        host_x86_SQRTSS_XREG_XREG(block, REG_XMM_TEMP, src_reg_a);
        host_x86_MOV32_REG_IMM(block, REG_ECX, 1);
        host_x86_CVTSI2SS_XREG_REG(block, dest_reg, REG_ECX);
        host_x86_DIVSS_XREG_XREG(block, dest_reg, REG_XMM_TEMP);
        host_x86_UNPCKLPS_XREG_XREG(block, dest_reg, dest_reg);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PFRSQRT %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
#    endif
    return 0;
}
/* 3DNow! PFSUB uop: packed single-precision subtract via the host SUBPS
   instruction. When dest == src_a the two-operand form is used directly;
   otherwise the subtraction is staged in REG_XMM_TEMP so that src_a is not
   clobbered (handles dest aliasing src_b safely). */
static int
codegen_PFSUB(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_a  = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_SUBPS_XREG_XREG(block, dest_reg, src_reg_b);
    } else if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_a) && REG_IS_Q(src_size_b)) {
        host_x86_MOVQ_XREG_XREG(block, REG_XMM_TEMP, src_reg_a);
        host_x86_SUBPS_XREG_XREG(block, REG_XMM_TEMP, src_reg_b);
        host_x86_MOVQ_XREG_XREG(block, dest_reg, REG_XMM_TEMP);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PFSUB %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
#    endif
    return 0;
}
/* 3DNow! PI2FD uop: convert packed 32-bit integers to packed
   single-precision floats via the host CVTDQ2PS instruction.
   Bug fix: the fatal() format string had two %02x conversions but only one
   argument (undefined behavior per C11 7.21.6.1); the missing
   src_reg_a_real argument is now supplied. */
static int
codegen_PI2FD(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_a  = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_a)) {
        host_x86_CVTDQ2PS_XREG_XREG(block, dest_reg, src_reg_a);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PI2FD %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
#    endif
    return 0;
}
/* MMX PMADDWD uop: packed multiply-and-add of signed words via the host
   PMADDWD instruction; requires dest == src_a (two-operand form).
   Bug fix: the fatal() diagnostic said "PMULHW" (copy-paste from the
   neighbouring handler) — corrected to name this uop. */
static int
codegen_PMADDWD(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_PMADDWD_XREG_XREG(block, dest_reg, src_reg_b);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PMADDWD %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
#    endif
    return 0;
}
/* MMX PMULHW uop: packed signed 16-bit multiply, high halves, via the host
   PMULHW instruction; requires dest == src_a (two-operand form). */
static int
codegen_PMULHW(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_PMULHW_XREG_XREG(block, dest_reg, src_reg_b);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PMULHW %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
#    endif
    return 0;
}
/* MMX PMULLW uop: packed 16-bit multiply, low halves, via the host PMULLW
   instruction; requires dest == src_a (two-operand form). */
static int
codegen_PMULLW(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_PMULLW_XREG_XREG(block, dest_reg, src_reg_b);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PMULLW %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
#    endif
    return 0;
}
/* MMX PSLLW-by-immediate uop: packed 16-bit logical left shift; shift count
   comes from imm_data and the operation is in-place (dest must equal src_a). */
static int
codegen_PSLLW_IMM(codeblock_t *block, uop_t *uop)
{
    int dest_reg  = HOST_REG_GET(uop->dest_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    if (REG_IS_Q(dest_size) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_PSLLW_XREG_IMM(block, dest_reg, uop->imm_data);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PSLLW_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
#    endif
    return 0;
}
/* MMX PSLLD-by-immediate uop: packed 32-bit logical left shift; in-place
   (dest must equal src_a). */
static int
codegen_PSLLD_IMM(codeblock_t *block, uop_t *uop)
{
    int dest_reg  = HOST_REG_GET(uop->dest_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    if (REG_IS_Q(dest_size) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_PSLLD_XREG_IMM(block, dest_reg, uop->imm_data);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PSLLD_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
#    endif
    return 0;
}
/* MMX PSLLQ-by-immediate uop: 64-bit logical left shift; in-place
   (dest must equal src_a). */
static int
codegen_PSLLQ_IMM(codeblock_t *block, uop_t *uop)
{
    int dest_reg  = HOST_REG_GET(uop->dest_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    if (REG_IS_Q(dest_size) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_PSLLQ_XREG_IMM(block, dest_reg, uop->imm_data);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PSLLQ_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
#    endif
    return 0;
}
/* MMX PSRAW-by-immediate uop: packed 16-bit arithmetic right shift; in-place
   (dest must equal src_a). */
static int
codegen_PSRAW_IMM(codeblock_t *block, uop_t *uop)
{
    int dest_reg  = HOST_REG_GET(uop->dest_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    if (REG_IS_Q(dest_size) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_PSRAW_XREG_IMM(block, dest_reg, uop->imm_data);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PSRAW_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
#    endif
    return 0;
}
/* MMX PSRAD-by-immediate uop: packed 32-bit arithmetic right shift; in-place
   (dest must equal src_a). */
static int
codegen_PSRAD_IMM(codeblock_t *block, uop_t *uop)
{
    int dest_reg  = HOST_REG_GET(uop->dest_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    if (REG_IS_Q(dest_size) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_PSRAD_XREG_IMM(block, dest_reg, uop->imm_data);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PSRAD_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
#    endif
    return 0;
}
/* PSRAQ-by-immediate uop: 64-bit arithmetic right shift; in-place (dest must
   equal src_a). NOTE(review): x86 has no native PSRAQ — the host_x86_PSRAQ
   helper presumably synthesises it; confirm in the ops backend. */
static int
codegen_PSRAQ_IMM(codeblock_t *block, uop_t *uop)
{
    int dest_reg  = HOST_REG_GET(uop->dest_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    if (REG_IS_Q(dest_size) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_PSRAQ_XREG_IMM(block, dest_reg, uop->imm_data);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PSRAQ_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
#    endif
    return 0;
}
/* MMX PSRLW-by-immediate uop: packed 16-bit logical right shift; in-place
   (dest must equal src_a). */
static int
codegen_PSRLW_IMM(codeblock_t *block, uop_t *uop)
{
    int dest_reg  = HOST_REG_GET(uop->dest_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    if (REG_IS_Q(dest_size) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_PSRLW_XREG_IMM(block, dest_reg, uop->imm_data);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PSRLW_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
#    endif
    return 0;
}
/* MMX PSRLD-by-immediate uop: packed 32-bit logical right shift; in-place
   (dest must equal src_a). */
static int
codegen_PSRLD_IMM(codeblock_t *block, uop_t *uop)
{
    int dest_reg  = HOST_REG_GET(uop->dest_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    if (REG_IS_Q(dest_size) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_PSRLD_XREG_IMM(block, dest_reg, uop->imm_data);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PSRLD_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
#    endif
    return 0;
}
/* MMX PSRLQ-by-immediate uop: 64-bit logical right shift; in-place
   (dest must equal src_a). */
static int
codegen_PSRLQ_IMM(codeblock_t *block, uop_t *uop)
{
    int dest_reg  = HOST_REG_GET(uop->dest_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    if (REG_IS_Q(dest_size) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_PSRLQ_XREG_IMM(block, dest_reg, uop->imm_data);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PSRLQ_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
#    endif
    return 0;
}
/* MMX PSUBB uop: packed 8-bit subtract via the host PSUBB instruction;
   requires dest == src_a (two-operand form). */
static int
codegen_PSUBB(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_PSUBB_XREG_XREG(block, dest_reg, src_reg_b);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PSUBB %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
#    endif
    return 0;
}
/* MMX PSUBW uop: packed 16-bit subtract via the host PSUBW instruction;
   requires dest == src_a (two-operand form). */
static int
codegen_PSUBW(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_PSUBW_XREG_XREG(block, dest_reg, src_reg_b);
    }
#    ifdef RECOMPILER_DEBUG
    else
        fatal("PSUBW %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
#    endif
    return 0;
}
static int
codegen_PSUBD(codeblock_t *block, uop_t *uop)
{
int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
int src_reg_b = HOST_REG_GET(uop->src_reg_b_real);
int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
host_x86_PSUBD_XREG_XREG(block, dest_reg, src_reg_b);
}
# ifdef RECOMPILER_DEBUG
else
fatal("PSUBD %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
# endif
return 0;
}
static int
codegen_PSUBSB(codeblock_t *block, uop_t *uop)
{
int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
int src_reg_b = HOST_REG_GET(uop->src_reg_b_real);
int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
host_x86_PSUBSB_XREG_XREG(block, dest_reg, src_reg_b);
}
# ifdef RECOMPILER_DEBUG
else
fatal("PSUBSB %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
# endif
return 0;
}
static int
codegen_PSUBSW(codeblock_t *block, uop_t *uop)
{
int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
int src_reg_b = HOST_REG_GET(uop->src_reg_b_real);
int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
host_x86_PSUBSW_XREG_XREG(block, dest_reg, src_reg_b);
}
# ifdef RECOMPILER_DEBUG
else
fatal("PSUBSW %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
# endif
return 0;
}
static int
codegen_PSUBUSB(codeblock_t *block, uop_t *uop)
{
int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
int src_reg_b = HOST_REG_GET(uop->src_reg_b_real);
int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
host_x86_PSUBUSB_XREG_XREG(block, dest_reg, src_reg_b);
}
# ifdef RECOMPILER_DEBUG
else
fatal("PSUBUSB %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
# endif
return 0;
}
static int
codegen_PSUBUSW(codeblock_t *block, uop_t *uop)
{
int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
int src_reg_b = HOST_REG_GET(uop->src_reg_b_real);
int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
host_x86_PSUBUSW_XREG_XREG(block, dest_reg, src_reg_b);
}
# ifdef RECOMPILER_DEBUG
else
fatal("PSUBUSW %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
# endif
return 0;
}
/* MMX high-unpack family (PUNPCKHBW/WD/DQ).  The 64-bit MMX value lives in the
   low 64 bits of an XMM register, so the SSE2 PUNPCKH* forms (which unpack the
   high halves of a 128-bit register) cannot be used directly.  Instead these
   emit the LOW unpack of both operands — which produces the desired high-half
   interleave in bits 127:64 of the XMM register — then PSHUFD with selector
   0xee to move those top 64 bits down into the low 64 bits.
   All three require dest == source A (in-place) and 64-bit operands. */

/* High unpack/interleave of bytes (PUNPCKHBW). */
static int
codegen_PUNPCKHBW(codeblock_t *block, uop_t *uop)
{
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_b = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_PUNPCKLBW_XREG_XREG(block, dest_reg, src_reg_b);
        host_x86_PSHUFD_XREG_XREG_IMM(block, dest_reg, dest_reg, 0xee); /*0xee = move top 64-bits to low 64-bits*/
    }
# ifdef RECOMPILER_DEBUG
    else
        fatal("PUNPCKHBW %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
# endif
    return 0;
}
/* High unpack/interleave of words (PUNPCKHWD). */
static int
codegen_PUNPCKHWD(codeblock_t *block, uop_t *uop)
{
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_b = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_PUNPCKLWD_XREG_XREG(block, dest_reg, src_reg_b);
        host_x86_PSHUFD_XREG_XREG_IMM(block, dest_reg, dest_reg, 0xee); /*0xee = move top 64-bits to low 64-bits*/
    }
# ifdef RECOMPILER_DEBUG
    else
        fatal("PUNPCKHWD %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
# endif
    return 0;
}
/* High unpack/interleave of doublewords (PUNPCKHDQ). */
static int
codegen_PUNPCKHDQ(codeblock_t *block, uop_t *uop)
{
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_b = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_PUNPCKLDQ_XREG_XREG(block, dest_reg, src_reg_b);
        host_x86_PSHUFD_XREG_XREG_IMM(block, dest_reg, dest_reg, 0xee); /*0xee = move top 64-bits to low 64-bits*/
    }
# ifdef RECOMPILER_DEBUG
    else
        fatal("PUNPCKHDQ %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
# endif
    return 0;
}
/* MMX low-unpack family (PUNPCKLBW/WD/DQ).  Since MMX values sit in the low
   64 bits of an XMM register, the SSE2 low-unpack instructions map directly:
   the interesting result lands in the low 64 bits (the high 64 bits of the
   XMM register are don't-care for MMX emulation).
   All three require dest == source A (in-place) and 64-bit operands. */

/* Low unpack/interleave of bytes (PUNPCKLBW). */
static int
codegen_PUNPCKLBW(codeblock_t *block, uop_t *uop)
{
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_b = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_PUNPCKLBW_XREG_XREG(block, dest_reg, src_reg_b);
    }
# ifdef RECOMPILER_DEBUG
    else
        fatal("PUNPCKLBW %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
# endif
    return 0;
}
/* Low unpack/interleave of words (PUNPCKLWD). */
static int
codegen_PUNPCKLWD(codeblock_t *block, uop_t *uop)
{
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_b = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_PUNPCKLWD_XREG_XREG(block, dest_reg, src_reg_b);
    }
# ifdef RECOMPILER_DEBUG
    else
        fatal("PUNPCKLWD %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
# endif
    return 0;
}
/* Low unpack/interleave of doublewords (PUNPCKLDQ). */
static int
codegen_PUNPCKLDQ(codeblock_t *block, uop_t *uop)
{
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_b = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_PUNPCKLDQ_XREG_XREG(block, dest_reg, src_reg_b);
    }
# ifdef RECOMPILER_DEBUG
    else
        fatal("PUNPCKLDQ %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
# endif
    return 0;
}
/* Rotate family.  Variable-count forms first copy the count into ECX because
   the x86 rotate-by-register encodings only take the count in CL; this
   clobbers host ECX (presumably reserved as a scratch register by the
   register allocator — NOTE(review): confirm against the backend's register
   assignment).  If dest and source differ, the source value is copied into
   the destination first, then rotated in place.  Supported at 32/16/8-bit
   operand sizes; any other size combination is a recompiler bug. */

/* Rotate left by a register-held count (ROL reg, CL). */
static int
codegen_ROL(codeblock_t *block, uop_t *uop)
{
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg = HOST_REG_GET(uop->src_reg_a_real);
    int shift_reg = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size = IREG_GET_SIZE(uop->src_reg_a_real);
    host_x86_MOV32_REG_REG(block, REG_ECX, shift_reg);
    if (REG_IS_L(dest_size) && REG_IS_L(src_size)) {
        if (uop->dest_reg_a_real != uop->src_reg_a_real)
            host_x86_MOV32_REG_REG(block, dest_reg, src_reg);
        host_x86_ROL32_CL(block, dest_reg);
    } else if (REG_IS_W(dest_size) && REG_IS_W(src_size)) {
        if (uop->dest_reg_a_real != uop->src_reg_a_real)
            host_x86_MOV16_REG_REG(block, dest_reg, src_reg);
        host_x86_ROL16_CL(block, dest_reg);
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size)) {
        if (uop->dest_reg_a_real != uop->src_reg_a_real)
            host_x86_MOV8_REG_REG(block, dest_reg, src_reg);
        host_x86_ROL8_CL(block, dest_reg);
    }
# ifdef RECOMPILER_DEBUG
    else
        fatal("ROL %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
# endif
    return 0;
}
/* Rotate left by an immediate count (ROL reg, imm). */
static int
codegen_ROL_IMM(codeblock_t *block, uop_t *uop)
{
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_L(dest_size) && REG_IS_L(src_size)) {
        if (uop->dest_reg_a_real != uop->src_reg_a_real)
            host_x86_MOV32_REG_REG(block, dest_reg, src_reg);
        host_x86_ROL32_IMM(block, dest_reg, uop->imm_data);
    } else if (REG_IS_W(dest_size) && REG_IS_W(src_size)) {
        if (uop->dest_reg_a_real != uop->src_reg_a_real)
            host_x86_MOV16_REG_REG(block, dest_reg, src_reg);
        host_x86_ROL16_IMM(block, dest_reg, uop->imm_data);
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size)) {
        if (uop->dest_reg_a_real != uop->src_reg_a_real)
            host_x86_MOV8_REG_REG(block, dest_reg, src_reg);
        host_x86_ROL8_IMM(block, dest_reg, uop->imm_data);
    }
# ifdef RECOMPILER_DEBUG
    else
        fatal("ROL_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
# endif
    return 0;
}
/* Rotate right by a register-held count (ROR reg, CL); clobbers ECX. */
static int
codegen_ROR(codeblock_t *block, uop_t *uop)
{
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg = HOST_REG_GET(uop->src_reg_a_real);
    int shift_reg = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size = IREG_GET_SIZE(uop->src_reg_a_real);
    host_x86_MOV32_REG_REG(block, REG_ECX, shift_reg);
    if (REG_IS_L(dest_size) && REG_IS_L(src_size)) {
        if (uop->dest_reg_a_real != uop->src_reg_a_real)
            host_x86_MOV32_REG_REG(block, dest_reg, src_reg);
        host_x86_ROR32_CL(block, dest_reg);
    } else if (REG_IS_W(dest_size) && REG_IS_W(src_size)) {
        if (uop->dest_reg_a_real != uop->src_reg_a_real)
            host_x86_MOV16_REG_REG(block, dest_reg, src_reg);
        host_x86_ROR16_CL(block, dest_reg);
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size)) {
        if (uop->dest_reg_a_real != uop->src_reg_a_real)
            host_x86_MOV8_REG_REG(block, dest_reg, src_reg);
        host_x86_ROR8_CL(block, dest_reg);
    }
# ifdef RECOMPILER_DEBUG
    else
        fatal("ROR %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
# endif
    return 0;
}
/* Rotate right by an immediate count (ROR reg, imm). */
static int
codegen_ROR_IMM(codeblock_t *block, uop_t *uop)
{
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_L(dest_size) && REG_IS_L(src_size)) {
        if (uop->dest_reg_a_real != uop->src_reg_a_real)
            host_x86_MOV32_REG_REG(block, dest_reg, src_reg);
        host_x86_ROR32_IMM(block, dest_reg, uop->imm_data);
    } else if (REG_IS_W(dest_size) && REG_IS_W(src_size)) {
        if (uop->dest_reg_a_real != uop->src_reg_a_real)
            host_x86_MOV16_REG_REG(block, dest_reg, src_reg);
        host_x86_ROR16_IMM(block, dest_reg, uop->imm_data);
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size)) {
        if (uop->dest_reg_a_real != uop->src_reg_a_real)
            host_x86_MOV8_REG_REG(block, dest_reg, src_reg);
        host_x86_ROR8_IMM(block, dest_reg, uop->imm_data);
    }
# ifdef RECOMPILER_DEBUG
    else
        fatal("ROR_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
# endif
    return 0;
}
/* Shift family (arithmetic right, logical left, logical right).  Same shape
   as the rotate handlers above: variable-count forms copy the count into ECX
   (the x86 shift-by-register encodings take the count in CL, so host ECX is
   clobbered), non-in-place forms copy source to dest first, and 32/16/8-bit
   operand sizes are supported — anything else is a recompiler bug. */

/* Arithmetic shift right by a register-held count (SAR reg, CL). */
static int
codegen_SAR(codeblock_t *block, uop_t *uop)
{
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg = HOST_REG_GET(uop->src_reg_a_real);
    int shift_reg = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size = IREG_GET_SIZE(uop->src_reg_a_real);
    host_x86_MOV32_REG_REG(block, REG_ECX, shift_reg);
    if (REG_IS_L(dest_size) && REG_IS_L(src_size)) {
        if (uop->dest_reg_a_real != uop->src_reg_a_real)
            host_x86_MOV32_REG_REG(block, dest_reg, src_reg);
        host_x86_SAR32_CL(block, dest_reg);
    } else if (REG_IS_W(dest_size) && REG_IS_W(src_size)) {
        if (uop->dest_reg_a_real != uop->src_reg_a_real)
            host_x86_MOV16_REG_REG(block, dest_reg, src_reg);
        host_x86_SAR16_CL(block, dest_reg);
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size)) {
        if (uop->dest_reg_a_real != uop->src_reg_a_real)
            host_x86_MOV8_REG_REG(block, dest_reg, src_reg);
        host_x86_SAR8_CL(block, dest_reg);
    }
# ifdef RECOMPILER_DEBUG
    else
        fatal("SAR %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
# endif
    return 0;
}
/* Arithmetic shift right by an immediate count (SAR reg, imm). */
static int
codegen_SAR_IMM(codeblock_t *block, uop_t *uop)
{
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_L(dest_size) && REG_IS_L(src_size)) {
        if (uop->dest_reg_a_real != uop->src_reg_a_real)
            host_x86_MOV32_REG_REG(block, dest_reg, src_reg);
        host_x86_SAR32_IMM(block, dest_reg, uop->imm_data);
    } else if (REG_IS_W(dest_size) && REG_IS_W(src_size)) {
        if (uop->dest_reg_a_real != uop->src_reg_a_real)
            host_x86_MOV16_REG_REG(block, dest_reg, src_reg);
        host_x86_SAR16_IMM(block, dest_reg, uop->imm_data);
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size)) {
        if (uop->dest_reg_a_real != uop->src_reg_a_real)
            host_x86_MOV8_REG_REG(block, dest_reg, src_reg);
        host_x86_SAR8_IMM(block, dest_reg, uop->imm_data);
    }
# ifdef RECOMPILER_DEBUG
    else
        fatal("SAR_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
# endif
    return 0;
}
/* Logical shift left by a register-held count (SHL reg, CL); clobbers ECX. */
static int
codegen_SHL(codeblock_t *block, uop_t *uop)
{
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg = HOST_REG_GET(uop->src_reg_a_real);
    int shift_reg = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size = IREG_GET_SIZE(uop->src_reg_a_real);
    host_x86_MOV32_REG_REG(block, REG_ECX, shift_reg);
    if (REG_IS_L(dest_size) && REG_IS_L(src_size)) {
        if (uop->dest_reg_a_real != uop->src_reg_a_real)
            host_x86_MOV32_REG_REG(block, dest_reg, src_reg);
        host_x86_SHL32_CL(block, dest_reg);
    } else if (REG_IS_W(dest_size) && REG_IS_W(src_size)) {
        if (uop->dest_reg_a_real != uop->src_reg_a_real)
            host_x86_MOV16_REG_REG(block, dest_reg, src_reg);
        host_x86_SHL16_CL(block, dest_reg);
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size)) {
        if (uop->dest_reg_a_real != uop->src_reg_a_real)
            host_x86_MOV8_REG_REG(block, dest_reg, src_reg);
        host_x86_SHL8_CL(block, dest_reg);
    }
# ifdef RECOMPILER_DEBUG
    else
        fatal("SHL %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
# endif
    return 0;
}
/* Logical shift left by an immediate count (SHL reg, imm). */
static int
codegen_SHL_IMM(codeblock_t *block, uop_t *uop)
{
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_L(dest_size) && REG_IS_L(src_size)) {
        if (uop->dest_reg_a_real != uop->src_reg_a_real)
            host_x86_MOV32_REG_REG(block, dest_reg, src_reg);
        host_x86_SHL32_IMM(block, dest_reg, uop->imm_data);
    } else if (REG_IS_W(dest_size) && REG_IS_W(src_size)) {
        if (uop->dest_reg_a_real != uop->src_reg_a_real)
            host_x86_MOV16_REG_REG(block, dest_reg, src_reg);
        host_x86_SHL16_IMM(block, dest_reg, uop->imm_data);
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size)) {
        if (uop->dest_reg_a_real != uop->src_reg_a_real)
            host_x86_MOV8_REG_REG(block, dest_reg, src_reg);
        host_x86_SHL8_IMM(block, dest_reg, uop->imm_data);
    }
# ifdef RECOMPILER_DEBUG
    else
        fatal("SHL_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
# endif
    return 0;
}
/* Logical shift right by a register-held count (SHR reg, CL); clobbers ECX. */
static int
codegen_SHR(codeblock_t *block, uop_t *uop)
{
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg = HOST_REG_GET(uop->src_reg_a_real);
    int shift_reg = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size = IREG_GET_SIZE(uop->src_reg_a_real);
    host_x86_MOV32_REG_REG(block, REG_ECX, shift_reg);
    if (REG_IS_L(dest_size) && REG_IS_L(src_size)) {
        if (uop->dest_reg_a_real != uop->src_reg_a_real)
            host_x86_MOV32_REG_REG(block, dest_reg, src_reg);
        host_x86_SHR32_CL(block, dest_reg);
    } else if (REG_IS_W(dest_size) && REG_IS_W(src_size)) {
        if (uop->dest_reg_a_real != uop->src_reg_a_real)
            host_x86_MOV16_REG_REG(block, dest_reg, src_reg);
        host_x86_SHR16_CL(block, dest_reg);
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size)) {
        if (uop->dest_reg_a_real != uop->src_reg_a_real)
            host_x86_MOV8_REG_REG(block, dest_reg, src_reg);
        host_x86_SHR8_CL(block, dest_reg);
    }
# ifdef RECOMPILER_DEBUG
    else
        fatal("SHR %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
# endif
    return 0;
}
/* Logical shift right by an immediate count (SHR reg, imm). */
static int
codegen_SHR_IMM(codeblock_t *block, uop_t *uop)
{
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_L(dest_size) && REG_IS_L(src_size)) {
        if (uop->dest_reg_a_real != uop->src_reg_a_real)
            host_x86_MOV32_REG_REG(block, dest_reg, src_reg);
        host_x86_SHR32_IMM(block, dest_reg, uop->imm_data);
    } else if (REG_IS_W(dest_size) && REG_IS_W(src_size)) {
        if (uop->dest_reg_a_real != uop->src_reg_a_real)
            host_x86_MOV16_REG_REG(block, dest_reg, src_reg);
        host_x86_SHR16_IMM(block, dest_reg, uop->imm_data);
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size)) {
        if (uop->dest_reg_a_real != uop->src_reg_a_real)
            host_x86_MOV8_REG_REG(block, dest_reg, src_reg);
        host_x86_SHR8_IMM(block, dest_reg, uop->imm_data);
    }
# ifdef RECOMPILER_DEBUG
    else
        fatal("SHR_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
# endif
    return 0;
}
/* Store the uop's immediate to an absolute memory address (uop->p) as a
   32-bit value. */
static int
codegen_STORE_PTR_IMM(codeblock_t *block, uop_t *uop)
{
    host_x86_MOV32_ABS_IMM(block, uop->p, uop->imm_data);
    return 0;
}
/* As above, but an 8-bit store. */
static int
codegen_STORE_PTR_IMM_8(codeblock_t *block, uop_t *uop)
{
    host_x86_MOV8_ABS_IMM(block, uop->p, uop->imm_data);
    return 0;
}
/* As above, but a 16-bit store. */
static int
codegen_STORE_PTR_IMM_16(codeblock_t *block, uop_t *uop)
{
    host_x86_MOV16_ABS_IMM(block, uop->p, uop->imm_data);
    return 0;
}
/* Integer subtract: dest = src_a - src_b.  If dest and src_a differ, src_a is
   copied into dest first, then src_b is subtracted in place.  32/16/8-bit
   operand sizes supported; any other combination is a recompiler bug. */
static int
codegen_SUB(codeblock_t *block, uop_t *uop)
{
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_a = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_L(dest_size) && REG_IS_L(src_size_a) && REG_IS_L(src_size_b)) {
        if (uop->dest_reg_a_real != uop->src_reg_a_real)
            host_x86_MOV32_REG_REG(block, dest_reg, src_reg_a);
        host_x86_SUB32_REG_REG(block, dest_reg, src_reg_b);
    } else if (REG_IS_W(dest_size) && REG_IS_W(src_size_a) && REG_IS_W(src_size_b)) {
        if (uop->dest_reg_a_real != uop->src_reg_a_real)
            host_x86_MOV16_REG_REG(block, dest_reg, src_reg_a);
        host_x86_SUB16_REG_REG(block, dest_reg, src_reg_b);
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size_a) && REG_IS_B(src_size_b)) {
        if (uop->dest_reg_a_real != uop->src_reg_a_real)
            host_x86_MOV8_REG_REG(block, dest_reg, src_reg_a);
        host_x86_SUB8_REG_REG(block, dest_reg, src_reg_b);
    }
# ifdef RECOMPILER_DEBUG
    else
        fatal("SUB %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
# endif
    return 0;
}
/* Integer subtract of an immediate: dest = src - imm.
   NOTE(review): the copy-elision test here compares HOST registers
   (dest_reg != src_reg) whereas the sibling handlers compare the real
   (virtual) registers — equivalent only if distinct real registers never
   share a host register; confirm against the register allocator. */
static int
codegen_SUB_IMM(codeblock_t *block, uop_t *uop)
{
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_L(dest_size) && REG_IS_L(src_size)) {
        if (dest_reg != src_reg)
            host_x86_MOV32_REG_REG(block, dest_reg, src_reg);
        host_x86_SUB32_REG_IMM(block, dest_reg, uop->imm_data);
    } else if (REG_IS_W(dest_size) && REG_IS_W(src_size)) {
        if (dest_reg != src_reg)
            host_x86_MOV16_REG_REG(block, dest_reg, src_reg);
        host_x86_SUB16_REG_IMM(block, dest_reg, uop->imm_data);
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size)) {
        if (dest_reg != src_reg)
            host_x86_MOV8_REG_REG(block, dest_reg, src_reg);
        host_x86_SUB8_REG_IMM(block, dest_reg, uop->imm_data);
    }
# ifdef RECOMPILER_DEBUG
    else
        fatal("SUB_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
# endif
    return 0;
}
/* Test a register against itself (sets SF from its sign bit) and emit a
   long-form JNS (jump if not sign).  The unresolved branch offset is stashed
   in uop->p for later patching by the branch-fixup pass. */
static int
codegen_TEST_JNS_DEST(codeblock_t *block, uop_t *uop)
{
    int src_reg = HOST_REG_GET(uop->src_reg_a_real);
    int src_size = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_L(src_size)) {
        host_x86_TEST32_REG(block, src_reg, src_reg);
    } else if (REG_IS_W(src_size)) {
        host_x86_TEST16_REG(block, src_reg, src_reg);
    } else if (REG_IS_B(src_size)) {
        host_x86_TEST8_REG(block, src_reg, src_reg);
    }
# ifdef RECOMPILER_DEBUG
    else
        fatal("TEST_JNS_DEST %02x\n", uop->src_reg_a_real);
# endif
    uop->p = host_x86_JNS_long(block);
    return 0;
}
/* As above, but emits JS (jump if sign set). */
static int
codegen_TEST_JS_DEST(codeblock_t *block, uop_t *uop)
{
    int src_reg = HOST_REG_GET(uop->src_reg_a_real);
    int src_size = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_L(src_size)) {
        host_x86_TEST32_REG(block, src_reg, src_reg);
    } else if (REG_IS_W(src_size)) {
        host_x86_TEST16_REG(block, src_reg, src_reg);
    } else if (REG_IS_B(src_size)) {
        host_x86_TEST8_REG(block, src_reg, src_reg);
    }
# ifdef RECOMPILER_DEBUG
    else
        fatal("TEST_JS_DEST %02x\n", uop->src_reg_a_real);
# endif
    uop->p = host_x86_JS_long(block);
    return 0;
}
/* Bitwise XOR: dest ^= src_b.  In-place only (dest must equal source A).
   Handles the 64-bit MMX case via PXOR on the XMM register, and 32/16/8-bit
   integer cases via the plain XOR forms. */
static int
codegen_XOR(codeblock_t *block, uop_t *uop)
{
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_b = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_a) && REG_IS_Q(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_PXOR_XREG_XREG(block, dest_reg, src_reg_b);
    } else if (REG_IS_L(dest_size) && REG_IS_L(src_size_a) && REG_IS_L(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_XOR32_REG_REG(block, dest_reg, src_reg_b);
    } else if (REG_IS_W(dest_size) && REG_IS_W(src_size_a) && REG_IS_W(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_XOR16_REG_REG(block, dest_reg, src_reg_b);
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size_a) && REG_IS_B(src_size_b) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_XOR8_REG_REG(block, dest_reg, src_reg_b);
    }
# ifdef RECOMPILER_DEBUG
    else
        fatal("XOR %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
# endif
    return 0;
}
/* Bitwise XOR with an immediate: dest ^= imm.  In-place only; 32/16/8-bit. */
static int
codegen_XOR_IMM(codeblock_t *block, uop_t *uop)
{
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_L(dest_size) && REG_IS_L(src_size) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_XOR32_REG_IMM(block, dest_reg, uop->imm_data);
    } else if (REG_IS_W(dest_size) && REG_IS_W(src_size) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_XOR16_REG_IMM(block, dest_reg, uop->imm_data);
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size) && uop->dest_reg_a_real == uop->src_reg_a_real) {
        host_x86_XOR8_REG_IMM(block, dest_reg, uop->imm_data);
    }
# ifdef RECOMPILER_DEBUG
    else
        fatal("XOR_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
# endif
    return 0;
}
# ifdef DEBUG_EXTRA
/* Emit an increment of the instruction-frequency counter for opcode
   `imm_data` (statistics gathering for DEBUG_EXTRA builds).  imm_data
   indexes instr_counts[], which holds 256*256 slots, so valid indices are
   0..(256*256 - 1).
   Fix: the bounds check previously used `>`, which let imm_data == 256*256
   through and emitted an increment of one element past the end of
   instr_counts[]; `>=` rejects it. */
static int
codegen_LOG_INSTR(codeblock_t *block, uop_t *uop)
{
    if (uop->imm_data >= 256 * 256)
        fatal("LOG_INSTR %08x\n", uop->imm_data);
    host_x86_INC32_ABS(block, &instr_counts[uop->imm_data]);
    return 0;
}
# endif
/* Dispatch table mapping each uop opcode (masked with UOP_MASK) to its x86
   code-generation handler.  Entries not listed are zero-initialized (NULL)
   and must never be dispatched. */
const uOpFn uop_handlers[UOP_MAX] = {
    [UOP_CALL_FUNC & UOP_MASK] = codegen_CALL_FUNC,
    [UOP_CALL_FUNC_RESULT & UOP_MASK] = codegen_CALL_FUNC_RESULT,
    [UOP_CALL_INSTRUCTION_FUNC & UOP_MASK] = codegen_CALL_INSTRUCTION_FUNC,
    [UOP_JMP & UOP_MASK] = codegen_JMP,
    [UOP_JMP_DEST & UOP_MASK] = codegen_JMP_DEST,
    [UOP_LOAD_SEG & UOP_MASK] = codegen_LOAD_SEG,
    [UOP_LOAD_FUNC_ARG_0 & UOP_MASK] = codegen_LOAD_FUNC_ARG0,
    [UOP_LOAD_FUNC_ARG_1 & UOP_MASK] = codegen_LOAD_FUNC_ARG1,
    [UOP_LOAD_FUNC_ARG_2 & UOP_MASK] = codegen_LOAD_FUNC_ARG2,
    [UOP_LOAD_FUNC_ARG_3 & UOP_MASK] = codegen_LOAD_FUNC_ARG3,
    [UOP_LOAD_FUNC_ARG_0_IMM & UOP_MASK] = codegen_LOAD_FUNC_ARG0_IMM,
    [UOP_LOAD_FUNC_ARG_1_IMM & UOP_MASK] = codegen_LOAD_FUNC_ARG1_IMM,
    [UOP_LOAD_FUNC_ARG_2_IMM & UOP_MASK] = codegen_LOAD_FUNC_ARG2_IMM,
    [UOP_LOAD_FUNC_ARG_3_IMM & UOP_MASK] = codegen_LOAD_FUNC_ARG3_IMM,
    [UOP_STORE_P_IMM & UOP_MASK] = codegen_STORE_PTR_IMM,
    [UOP_STORE_P_IMM_8 & UOP_MASK] = codegen_STORE_PTR_IMM_8,
    [UOP_STORE_P_IMM_16 & UOP_MASK] = codegen_STORE_PTR_IMM_16,
    [UOP_MEM_LOAD_ABS & UOP_MASK] = codegen_MEM_LOAD_ABS,
    [UOP_MEM_LOAD_REG & UOP_MASK] = codegen_MEM_LOAD_REG,
    [UOP_MEM_LOAD_SINGLE & UOP_MASK] = codegen_MEM_LOAD_SINGLE,
    [UOP_MEM_LOAD_DOUBLE & UOP_MASK] = codegen_MEM_LOAD_DOUBLE,
    [UOP_MEM_STORE_ABS & UOP_MASK] = codegen_MEM_STORE_ABS,
    [UOP_MEM_STORE_REG & UOP_MASK] = codegen_MEM_STORE_REG,
    [UOP_MEM_STORE_IMM_8 & UOP_MASK] = codegen_MEM_STORE_IMM_8,
    [UOP_MEM_STORE_IMM_16 & UOP_MASK] = codegen_MEM_STORE_IMM_16,
    [UOP_MEM_STORE_IMM_32 & UOP_MASK] = codegen_MEM_STORE_IMM_32,
    [UOP_MEM_STORE_SINGLE & UOP_MASK] = codegen_MEM_STORE_SINGLE,
    [UOP_MEM_STORE_DOUBLE & UOP_MASK] = codegen_MEM_STORE_DOUBLE,
    [UOP_MOV & UOP_MASK] = codegen_MOV,
    [UOP_MOV_PTR & UOP_MASK] = codegen_MOV_PTR,
    [UOP_MOV_IMM & UOP_MASK] = codegen_MOV_IMM,
    [UOP_MOVSX & UOP_MASK] = codegen_MOVSX,
    [UOP_MOVZX & UOP_MASK] = codegen_MOVZX,
    [UOP_MOV_DOUBLE_INT & UOP_MASK] = codegen_MOV_DOUBLE_INT,
    [UOP_MOV_INT_DOUBLE & UOP_MASK] = codegen_MOV_INT_DOUBLE,
    [UOP_MOV_INT_DOUBLE_64 & UOP_MASK] = codegen_MOV_INT_DOUBLE_64,
    [UOP_MOV_REG_PTR & UOP_MASK] = codegen_MOV_REG_PTR,
    [UOP_MOVZX_REG_PTR_8 & UOP_MASK] = codegen_MOVZX_REG_PTR_8,
    [UOP_MOVZX_REG_PTR_16 & UOP_MASK] = codegen_MOVZX_REG_PTR_16,
    [UOP_ADD & UOP_MASK] = codegen_ADD,
    [UOP_ADD_IMM & UOP_MASK] = codegen_ADD_IMM,
    [UOP_ADD_LSHIFT & UOP_MASK] = codegen_ADD_LSHIFT,
    [UOP_AND & UOP_MASK] = codegen_AND,
    [UOP_AND_IMM & UOP_MASK] = codegen_AND_IMM,
    [UOP_ANDN & UOP_MASK] = codegen_ANDN,
    [UOP_OR & UOP_MASK] = codegen_OR,
    [UOP_OR_IMM & UOP_MASK] = codegen_OR_IMM,
    [UOP_SUB & UOP_MASK] = codegen_SUB,
    [UOP_SUB_IMM & UOP_MASK] = codegen_SUB_IMM,
    [UOP_XOR & UOP_MASK] = codegen_XOR,
    [UOP_XOR_IMM & UOP_MASK] = codegen_XOR_IMM,
    [UOP_SAR & UOP_MASK] = codegen_SAR,
    [UOP_SAR_IMM & UOP_MASK] = codegen_SAR_IMM,
    [UOP_SHL & UOP_MASK] = codegen_SHL,
    [UOP_SHL_IMM & UOP_MASK] = codegen_SHL_IMM,
    [UOP_SHR & UOP_MASK] = codegen_SHR,
    [UOP_SHR_IMM & UOP_MASK] = codegen_SHR_IMM,
    [UOP_ROL & UOP_MASK] = codegen_ROL,
    [UOP_ROL_IMM & UOP_MASK] = codegen_ROL_IMM,
    [UOP_ROR & UOP_MASK] = codegen_ROR,
    [UOP_ROR_IMM & UOP_MASK] = codegen_ROR_IMM,
    [UOP_CMP_IMM_JZ & UOP_MASK] = codegen_CMP_IMM_JZ,
    [UOP_CMP_JB & UOP_MASK] = codegen_CMP_JB,
    [UOP_CMP_JNBE & UOP_MASK] = codegen_CMP_JNBE,
    [UOP_CMP_JNB_DEST & UOP_MASK] = codegen_CMP_JNB_DEST,
    [UOP_CMP_JNBE_DEST & UOP_MASK] = codegen_CMP_JNBE_DEST,
    [UOP_CMP_JNL_DEST & UOP_MASK] = codegen_CMP_JNL_DEST,
    [UOP_CMP_JNLE_DEST & UOP_MASK] = codegen_CMP_JNLE_DEST,
    [UOP_CMP_JNO_DEST & UOP_MASK] = codegen_CMP_JNO_DEST,
    [UOP_CMP_JNZ_DEST & UOP_MASK] = codegen_CMP_JNZ_DEST,
    [UOP_CMP_JB_DEST & UOP_MASK] = codegen_CMP_JB_DEST,
    [UOP_CMP_JBE_DEST & UOP_MASK] = codegen_CMP_JBE_DEST,
    [UOP_CMP_JL_DEST & UOP_MASK] = codegen_CMP_JL_DEST,
    [UOP_CMP_JLE_DEST & UOP_MASK] = codegen_CMP_JLE_DEST,
    [UOP_CMP_JO_DEST & UOP_MASK] = codegen_CMP_JO_DEST,
    [UOP_CMP_JZ_DEST & UOP_MASK] = codegen_CMP_JZ_DEST,
    [UOP_CMP_IMM_JNZ_DEST & UOP_MASK] = codegen_CMP_IMM_JNZ_DEST,
    [UOP_CMP_IMM_JZ_DEST & UOP_MASK] = codegen_CMP_IMM_JZ_DEST,
    [UOP_TEST_JNS_DEST & UOP_MASK] = codegen_TEST_JNS_DEST,
    [UOP_TEST_JS_DEST & UOP_MASK] = codegen_TEST_JS_DEST,
    [UOP_FP_ENTER & UOP_MASK] = codegen_FP_ENTER,
    [UOP_MMX_ENTER & UOP_MASK] = codegen_MMX_ENTER,
    [UOP_FADD & UOP_MASK] = codegen_FADD,
    [UOP_FDIV & UOP_MASK] = codegen_FDIV,
    [UOP_FMUL & UOP_MASK] = codegen_FMUL,
    [UOP_FSUB & UOP_MASK] = codegen_FSUB,
    [UOP_FCOM & UOP_MASK] = codegen_FCOM,
    [UOP_FABS & UOP_MASK] = codegen_FABS,
    [UOP_FCHS & UOP_MASK] = codegen_FCHS,
    [UOP_FSQRT & UOP_MASK] = codegen_FSQRT,
    [UOP_FTST & UOP_MASK] = codegen_FTST,
    [UOP_PACKSSWB & UOP_MASK] = codegen_PACKSSWB,
    [UOP_PACKSSDW & UOP_MASK] = codegen_PACKSSDW,
    [UOP_PACKUSWB & UOP_MASK] = codegen_PACKUSWB,
    [UOP_PADDB & UOP_MASK] = codegen_PADDB,
    [UOP_PADDW & UOP_MASK] = codegen_PADDW,
    [UOP_PADDD & UOP_MASK] = codegen_PADDD,
    [UOP_PADDSB & UOP_MASK] = codegen_PADDSB,
    [UOP_PADDSW & UOP_MASK] = codegen_PADDSW,
    [UOP_PADDUSB & UOP_MASK] = codegen_PADDUSB,
    [UOP_PADDUSW & UOP_MASK] = codegen_PADDUSW,
    [UOP_PCMPEQB & UOP_MASK] = codegen_PCMPEQB,
    [UOP_PCMPEQW & UOP_MASK] = codegen_PCMPEQW,
    [UOP_PCMPEQD & UOP_MASK] = codegen_PCMPEQD,
    [UOP_PCMPGTB & UOP_MASK] = codegen_PCMPGTB,
    [UOP_PCMPGTW & UOP_MASK] = codegen_PCMPGTW,
    [UOP_PCMPGTD & UOP_MASK] = codegen_PCMPGTD,
    [UOP_PF2ID & UOP_MASK] = codegen_PF2ID,
    [UOP_PFADD & UOP_MASK] = codegen_PFADD,
    [UOP_PFCMPEQ & UOP_MASK] = codegen_PFCMPEQ,
    [UOP_PFCMPGE & UOP_MASK] = codegen_PFCMPGE,
    [UOP_PFCMPGT & UOP_MASK] = codegen_PFCMPGT,
    [UOP_PFMAX & UOP_MASK] = codegen_PFMAX,
    [UOP_PFMIN & UOP_MASK] = codegen_PFMIN,
    [UOP_PFMUL & UOP_MASK] = codegen_PFMUL,
    [UOP_PFRCP & UOP_MASK] = codegen_PFRCP,
    [UOP_PFRSQRT & UOP_MASK] = codegen_PFRSQRT,
    [UOP_PFSUB & UOP_MASK] = codegen_PFSUB,
    [UOP_PI2FD & UOP_MASK] = codegen_PI2FD,
    [UOP_PMADDWD & UOP_MASK] = codegen_PMADDWD,
    [UOP_PMULHW & UOP_MASK] = codegen_PMULHW,
    [UOP_PMULLW & UOP_MASK] = codegen_PMULLW,
    [UOP_PSLLW_IMM & UOP_MASK] = codegen_PSLLW_IMM,
    [UOP_PSLLD_IMM & UOP_MASK] = codegen_PSLLD_IMM,
    [UOP_PSLLQ_IMM & UOP_MASK] = codegen_PSLLQ_IMM,
    [UOP_PSRAW_IMM & UOP_MASK] = codegen_PSRAW_IMM,
    [UOP_PSRAD_IMM & UOP_MASK] = codegen_PSRAD_IMM,
    [UOP_PSRAQ_IMM & UOP_MASK] = codegen_PSRAQ_IMM,
    [UOP_PSRLW_IMM & UOP_MASK] = codegen_PSRLW_IMM,
    [UOP_PSRLD_IMM & UOP_MASK] = codegen_PSRLD_IMM,
    [UOP_PSRLQ_IMM & UOP_MASK] = codegen_PSRLQ_IMM,
    [UOP_PSUBB & UOP_MASK] = codegen_PSUBB,
    [UOP_PSUBW & UOP_MASK] = codegen_PSUBW,
    [UOP_PSUBD & UOP_MASK] = codegen_PSUBD,
    [UOP_PSUBSB & UOP_MASK] = codegen_PSUBSB,
    [UOP_PSUBSW & UOP_MASK] = codegen_PSUBSW,
    [UOP_PSUBUSB & UOP_MASK] = codegen_PSUBUSB,
    [UOP_PSUBUSW & UOP_MASK] = codegen_PSUBUSW,
    [UOP_PUNPCKHBW & UOP_MASK] = codegen_PUNPCKHBW,
    [UOP_PUNPCKHWD & UOP_MASK] = codegen_PUNPCKHWD,
    [UOP_PUNPCKHDQ & UOP_MASK] = codegen_PUNPCKHDQ,
    [UOP_PUNPCKLBW & UOP_MASK] = codegen_PUNPCKLBW,
    [UOP_PUNPCKLWD & UOP_MASK] = codegen_PUNPCKLWD,
    [UOP_PUNPCKLDQ & UOP_MASK] = codegen_PUNPCKLDQ,
    [UOP_NOP_BARRIER & UOP_MASK] = codegen_NOP,
# ifdef DEBUG_EXTRA
    [UOP_LOG_INSTR & UOP_MASK] = codegen_LOG_INSTR
# endif
};
/* Direct-read helpers: emit a load of a host register from an absolute
   address (typically a cpu_state field).  Width is in the function name. */

/* Load an 8-bit value from *p into host_reg. */
void
codegen_direct_read_8(codeblock_t *block, int host_reg, void *p)
{
    host_x86_MOV8_REG_ABS(block, host_reg, p);
}
/* Load a 16-bit value from *p into host_reg. */
void
codegen_direct_read_16(codeblock_t *block, int host_reg, void *p)
{
    host_x86_MOV16_REG_ABS(block, host_reg, p);
}
/* Load a 32-bit value from *p into host_reg. */
void
codegen_direct_read_32(codeblock_t *block, int host_reg, void *p)
{
    host_x86_MOV32_REG_ABS(block, host_reg, p);
}
/* Load a host pointer from *p: on this 32-bit x86 backend a pointer is a
   32-bit load. */
void
codegen_direct_read_pointer(codeblock_t *block, int host_reg, void *p)
{
    codegen_direct_read_32(block, host_reg, p);
}
/* Load a 64-bit value from *p into XMM register host_reg. */
void
codegen_direct_read_64(codeblock_t *block, int host_reg, void *p)
{
    host_x86_MOVQ_XREG_ABS(block, host_reg, p);
}
/* Load a 64-bit double from *p into XMM register host_reg (same MOVQ as the
   integer form; the bit pattern is moved untouched). */
void
codegen_direct_read_double(codeblock_t *block, int host_reg, void *p)
{
    host_x86_MOVQ_XREG_ABS(block, host_reg, p);
}
/* FPU-stack-relative reads.  These emit: load the TOP adjustment from the
   stack frame into ECX, add the requested ST(reg_idx) index, mask to 0..7,
   then load from base[EBP + ECX*scale].  The offset is computed relative to
   &cpu_state + 128 — presumably host EBP holds &cpu_state + 128 at run time
   (the +128 bias keeps displacements in signed 8-bit range); NOTE(review):
   confirm against the backend prologue.  Clobbers host ECX. */

/* Read the 8-bit tag/flag byte for ST(reg_idx) from base[]. */
void
codegen_direct_read_st_8(codeblock_t *block, int host_reg, void *base, int reg_idx)
{
    int offset = (uintptr_t) base - (((uintptr_t) &cpu_state) + 128);
    host_x86_MOV32_REG_BASE_OFFSET(block, REG_ECX, REG_ESP, IREG_TOP_diff_stack_offset);
    host_x86_ADD32_REG_IMM(block, REG_ECX, reg_idx);
    host_x86_AND32_REG_IMM(block, REG_ECX, 7);
    host_x86_MOV8_REG_ABS_REG_REG_SHIFT(block, host_reg, offset, REG_EBP, REG_ECX, 0);
}
/* Read the 64-bit value for ST(reg_idx) (scale 3 = index * 8 bytes). */
void
codegen_direct_read_st_64(codeblock_t *block, int host_reg, void *base, int reg_idx)
{
    int offset = (uintptr_t) base - (((uintptr_t) &cpu_state) + 128);
    host_x86_MOV32_REG_BASE_OFFSET(block, REG_ECX, REG_ESP, IREG_TOP_diff_stack_offset);
    host_x86_ADD32_REG_IMM(block, REG_ECX, reg_idx);
    host_x86_AND32_REG_IMM(block, REG_ECX, 7);
    host_x86_MOVQ_XREG_ABS_REG_REG_SHIFT(block, host_reg, offset, REG_EBP, REG_ECX, 3);
}
/* Read the double for ST(reg_idx) — identical emission to the 64-bit form. */
void
codegen_direct_read_st_double(codeblock_t *block, int host_reg, void *base, int reg_idx)
{
    int offset = (uintptr_t) base - (((uintptr_t) &cpu_state) + 128);
    host_x86_MOV32_REG_BASE_OFFSET(block, REG_ECX, REG_ESP, IREG_TOP_diff_stack_offset);
    host_x86_ADD32_REG_IMM(block, REG_ECX, reg_idx);
    host_x86_AND32_REG_IMM(block, REG_ECX, 7);
    host_x86_MOVQ_XREG_ABS_REG_REG_SHIFT(block, host_reg, offset, REG_EBP, REG_ECX, 3);
}
/* Emit a store of host_reg's low byte to absolute address p. */
void
codegen_direct_write_8(codeblock_t *block, void *p, int host_reg)
{
    host_x86_MOV8_ABS_REG(block, p, host_reg);
}
/* Emit a 16-bit store of host_reg to absolute address p. */
void
codegen_direct_write_16(codeblock_t *block, void *p, int host_reg)
{
    host_x86_MOV16_ABS_REG(block, p, host_reg);
}
/* Emit a 32-bit store of host_reg to absolute address p. */
void
codegen_direct_write_32(codeblock_t *block, void *p, int host_reg)
{
    host_x86_MOV32_ABS_REG(block, p, host_reg);
}
/* Emit a 64-bit store of media register (XREG) host_reg to address p. */
void
codegen_direct_write_64(codeblock_t *block, void *p, int host_reg)
{
    host_x86_MOVQ_ABS_XREG(block, p, host_reg);
}
/* Doubles are moved as raw 64-bit data: same emission as the 64-bit case. */
void
codegen_direct_write_double(codeblock_t *block, void *p, int host_reg)
{
    host_x86_MOVQ_ABS_XREG(block, p, host_reg);
}
/* Emit a byte store into x87 stack register ST(reg_idx); mirrors
   codegen_direct_read_st_8 (TOP delta + reg_idx, wrapped mod 8, indexing
   the table at `base` relative to cpu_state + 128 through REG_EBP). */
void
codegen_direct_write_st_8(codeblock_t *block, void *base, int reg_idx, int host_reg)
{
    int offset = (uintptr_t) base - (((uintptr_t) &cpu_state) + 128);
    host_x86_MOV32_REG_BASE_OFFSET(block, REG_ECX, REG_ESP, IREG_TOP_diff_stack_offset);
    host_x86_ADD32_REG_IMM(block, REG_ECX, reg_idx);
    host_x86_AND32_REG_IMM(block, REG_ECX, 7);
    host_x86_MOV8_ABS_REG_REG_SHIFT_REG(block, offset, REG_EBP, REG_ECX, 0, host_reg);
}
/* As codegen_direct_write_st_8 but stores a 64-bit value (index scaled by 8). */
void
codegen_direct_write_st_64(codeblock_t *block, void *base, int reg_idx, int host_reg)
{
    int offset = (uintptr_t) base - (((uintptr_t) &cpu_state) + 128);
    host_x86_MOV32_REG_BASE_OFFSET(block, REG_ECX, REG_ESP, IREG_TOP_diff_stack_offset);
    host_x86_ADD32_REG_IMM(block, REG_ECX, reg_idx);
    host_x86_AND32_REG_IMM(block, REG_ECX, 7);
    host_x86_MOVQ_ABS_REG_REG_SHIFT_XREG(block, offset, REG_EBP, REG_ECX, 3, host_reg);
}
/* Identical emission to codegen_direct_write_st_64 (raw 64-bit move). */
void
codegen_direct_write_st_double(codeblock_t *block, void *base, int reg_idx, int host_reg)
{
    int offset = (uintptr_t) base - (((uintptr_t) &cpu_state) + 128);
    host_x86_MOV32_REG_BASE_OFFSET(block, REG_ECX, REG_ESP, IREG_TOP_diff_stack_offset);
    host_x86_ADD32_REG_IMM(block, REG_ECX, reg_idx);
    host_x86_AND32_REG_IMM(block, REG_ECX, 7);
    host_x86_MOVQ_ABS_REG_REG_SHIFT_XREG(block, offset, REG_EBP, REG_ECX, 3, host_reg);
}
/* Pointer-sized store: emitted as a 32-bit store on this backend. */
void
codegen_direct_write_ptr(codeblock_t *block, void *p, int host_reg)
{
    host_x86_MOV32_ABS_REG(block, p, host_reg);
}
/* Emit a 16-bit load from the host stack frame at stack_offset into host_reg. */
void
codegen_direct_read_16_stack(codeblock_t *block, int host_reg, int stack_offset)
{
    host_x86_MOV16_REG_BASE_OFFSET(block, host_reg, REG_ESP, stack_offset);
}
/* Emit a 32-bit load from the host stack frame at stack_offset into host_reg. */
void
codegen_direct_read_32_stack(codeblock_t *block, int host_reg, int stack_offset)
{
    host_x86_MOV32_REG_BASE_OFFSET(block, host_reg, REG_ESP, stack_offset);
}
/* Pointer-sized stack load: a 32-bit load on this backend. */
void
codegen_direct_read_pointer_stack(codeblock_t *block, int host_reg, int stack_offset)
{
    codegen_direct_read_32_stack(block, host_reg, stack_offset);
}
/* Emit a 64-bit stack load into media register (XREG) host_reg. */
void
codegen_direct_read_64_stack(codeblock_t *block, int host_reg, int stack_offset)
{
    host_x86_MOVQ_XREG_BASE_OFFSET(block, host_reg, REG_ESP, stack_offset);
}
/* Doubles are moved as raw 64-bit data: same emission as the 64-bit case. */
void
codegen_direct_read_double_stack(codeblock_t *block, int host_reg, int stack_offset)
{
    host_x86_MOVQ_XREG_BASE_OFFSET(block, host_reg, REG_ESP, stack_offset);
}
/* Emit a 32-bit store of host_reg to the host stack frame at stack_offset. */
void
codegen_direct_write_32_stack(codeblock_t *block, int stack_offset, int host_reg)
{
    host_x86_MOV32_BASE_OFFSET_REG(block, REG_ESP, stack_offset, host_reg);
}
/* Emit a 64-bit store of media register host_reg to the stack frame. */
void
codegen_direct_write_64_stack(codeblock_t *block, int stack_offset, int host_reg)
{
    host_x86_MOVQ_BASE_OFFSET_XREG(block, REG_ESP, stack_offset, host_reg);
}
/* Doubles are moved as raw 64-bit data: same emission as the 64-bit case. */
void
codegen_direct_write_double_stack(codeblock_t *block, int stack_offset, int host_reg)
{
    host_x86_MOVQ_BASE_OFFSET_XREG(block, REG_ESP, stack_offset, host_reg);
}
/* Back-patch a previously emitted jump: p points at the rel32 displacement
   field, which is rewritten so the jump targets the current emission
   position (&block_write_data[block_pos]).  The "+ 4" accounts for the
   displacement being relative to the end of the 4-byte field. */
void
codegen_set_jump_dest(UNUSED(codeblock_t *block), void *p)
{
    *(uint32_t *) p = (uintptr_t) &block_write_data[block_pos] - ((uintptr_t) p + 4);
}
/* Emit a store of an 8-bit immediate to absolute address p. */
void
codegen_direct_write_8_imm(codeblock_t *block, void *p, uint8_t imm_data)
{
    host_x86_MOV8_ABS_IMM(block, p, imm_data);
}
/* Emit a store of a 16-bit immediate to absolute address p. */
void
codegen_direct_write_16_imm(codeblock_t *block, void *p, uint16_t imm_data)
{
    host_x86_MOV16_ABS_IMM(block, p, imm_data);
}
/* Emit a store of a 32-bit immediate to absolute address p. */
void
codegen_direct_write_32_imm(codeblock_t *block, void *p, uint32_t imm_data)
{
    host_x86_MOV32_ABS_IMM(block, p, imm_data);
}
/* Emit a store of a 32-bit immediate to the host stack frame. */
void
codegen_direct_write_32_imm_stack(codeblock_t *block, int stack_offset, uint32_t imm_data)
{
    host_x86_MOV32_BASE_OFFSET_IMM(block, REG_ESP, stack_offset, imm_data);
}
#endif
``` | /content/code_sandbox/src/codegen_new/codegen_backend_x86_uops.c | c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 39,069 |
```objective-c
/* Recompiler op handlers for the 3DNow! instruction set.  Each takes the
   current code block, the IR under construction, the opcode byte, the
   prefetched bytes after the opcode (fetchdat), the operand/address size
   flags (op_32) and the current instruction pointer (op_pc); each returns
   the updated op_pc, or 0 to fall back to the interpreter. */
uint32_t ropPF2ID(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPFADD(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPFCMPEQ(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPFCMPGE(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPFCMPGT(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPFMAX(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPFMIN(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPFMUL(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPFRCP(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPFRCPIT(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPFRSQRT(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPFRSQIT1(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPFSUB(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPFSUBR(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPI2FD(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
``` | /content/code_sandbox/src/codegen_new/codegen_ops_3dnow.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 652 |
```c
#include <stdint.h>
#include <86box/86box.h>
#include "cpu.h"
#include <86box/mem.h>
#include <86box/plat_unused.h>
#include "x86.h"
#include "x86_flags.h"
#include "x86seg_common.h"
#include "x86seg.h"
#include "386_common.h"
#include "codegen.h"
#include "codegen_ir.h"
#include "codegen_ops.h"
#include "codegen_ops_helpers.h"
#include "codegen_ops_misc.h"
/* Recompile LEA r16, m: store the low 16 bits of the effective address
   into the destination register without touching memory. */
uint32_t
ropLEA_16(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc)
{
    const int reg = (fetchdat >> 3) & 7;

    /* A register operand is an invalid LEA encoding; punt to the interpreter. */
    if ((fetchdat & 0xc0) == 0xc0)
        return 0;

    codegen_mark_code_present(block, cs + op_pc, 1);

    /* Resolve the effective address into IREG_eaaddr, then copy its low
       word into the destination register. */
    codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0);
    uop_MOV(ir, IREG_16(reg), IREG_eaaddr_W);

    return op_pc + 1;
}
/* Recompile LEA r32, m: store the full 32-bit effective address into the
   destination register without touching memory. */
uint32_t
ropLEA_32(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc)
{
    const int reg = (fetchdat >> 3) & 7;

    /* A register operand is an invalid LEA encoding; punt to the interpreter. */
    if ((fetchdat & 0xc0) == 0xc0)
        return 0;

    codegen_mark_code_present(block, cs + op_pc, 1);

    /* Resolve the effective address into IREG_eaaddr and move it into the
       destination register. */
    codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0);
    uop_MOV(ir, IREG_32(reg), IREG_eaaddr);

    return op_pc + 1;
}
/* Recompile the F6 group (8-bit): TEST r/m8,imm8 (/0,/1), NOT r/m8 (/2)
   and NEG r/m8 (/3).  MUL/IMUL/DIV/IDIV (reg-field bit 0x20 set) are not
   handled here and fall back to the interpreter by returning 0. */
uint32_t
ropF6(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc)
{
    x86seg *target_seg = NULL;
    uint8_t imm_data;
    int reg;
    if (fetchdat & 0x20)
        return 0;
    codegen_mark_code_present(block, cs + op_pc, 1);
    if ((fetchdat & 0xc0) == 0xc0)
        reg = IREG_8(fetchdat & 7);
    else {
        /* Memory operand: resolve the EA, validate the segment (writable
           for NOT/NEG which store back, readable otherwise) and load the
           byte into a temporary. */
        uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc);
        target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0);
        if ((fetchdat & 0x30) == 0x10) /*NEG/NOT*/
            codegen_check_seg_write(block, ir, target_seg);
        else
            codegen_check_seg_read(block, ir, target_seg);
        uop_MEM_LOAD_REG(ir, IREG_temp0_B, ireg_seg_base(target_seg), IREG_eaaddr);
        reg = IREG_temp0_B;
    }
    switch (fetchdat & 0x38) {
        case 0x00:
        case 0x08: /*TEST*/
            /* AND with the immediate, keep only the 8-bit result for lazy
               Z/N flag evaluation; the operand itself is not modified. */
            imm_data = fastreadb(cs + op_pc + 1);
            uop_AND_IMM(ir, IREG_flags_res_B, reg, imm_data);
            uop_MOVZX(ir, IREG_flags_res, IREG_flags_res_B);
            uop_MOV_IMM(ir, IREG_flags_op, FLAGS_ZN8);
            codegen_flags_changed = 1;
            codegen_mark_code_present(block, cs + op_pc + 1, 1);
            return op_pc + 2;
        case 0x10: /*NOT*/
            /* One's complement; NOT does not affect flags, but the lazy
               flag state is marked dirty conservatively. */
            uop_XOR_IMM(ir, reg, reg, 0xff);
            if ((fetchdat & 0xc0) != 0xc0)
                uop_MEM_STORE_REG(ir, ireg_seg_base(target_seg), IREG_eaaddr, reg);
            codegen_flags_changed = 1;
            return op_pc + 1;
        case 0x18: /*NEG*/
            /* Compute 0 - operand and record op1/op2/result so the lazy
               flag evaluator can reconstruct flags as an 8-bit SUB. */
            uop_MOV_IMM(ir, IREG_temp1_B, 0);
            if ((fetchdat & 0xc0) == 0xc0) {
                uop_MOVZX(ir, IREG_flags_op2, reg);
                uop_SUB(ir, IREG_temp1_B, IREG_temp1_B, reg);
                uop_MOVZX(ir, IREG_flags_res, IREG_temp1_B);
                uop_MOV(ir, reg, IREG_temp1_B);
            } else {
                uop_SUB(ir, IREG_temp1_B, IREG_temp1_B, reg);
                uop_MEM_STORE_REG(ir, ireg_seg_base(target_seg), IREG_eaaddr, IREG_temp1_B);
                uop_MOVZX(ir, IREG_flags_op2, IREG_temp0_B);
                uop_MOVZX(ir, IREG_flags_res, IREG_temp1_B);
            }
            uop_MOV_IMM(ir, IREG_flags_op, FLAGS_SUB8);
            uop_MOV_IMM(ir, IREG_flags_op1, 0);
            codegen_flags_changed = 1;
            return op_pc + 1;
        default:
            break;
    }
    return 0;
}
/* Recompile the F7 group with 16-bit operands: TEST r/m16,imm16 (/0,/1),
   NOT r/m16 (/2) and NEG r/m16 (/3).  MUL/IMUL/DIV/IDIV fall back to the
   interpreter (return 0).  Mirrors ropF6 but on word-sized registers. */
uint32_t
ropF7_16(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc)
{
    x86seg *target_seg = NULL;
    uint16_t imm_data;
    int reg;
    if (fetchdat & 0x20)
        return 0;
    codegen_mark_code_present(block, cs + op_pc, 1);
    if ((fetchdat & 0xc0) == 0xc0)
        reg = IREG_16(fetchdat & 7);
    else {
        /* Memory operand: resolve EA, check segment (write access for the
           read-modify-write NOT/NEG cases) and load into a temporary. */
        uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc);
        target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0);
        if ((fetchdat & 0x30) == 0x10) /*NEG/NOT*/
            codegen_check_seg_write(block, ir, target_seg);
        else
            codegen_check_seg_read(block, ir, target_seg);
        uop_MEM_LOAD_REG(ir, IREG_temp0_W, ireg_seg_base(target_seg), IREG_eaaddr);
        reg = IREG_temp0_W;
    }
    switch (fetchdat & 0x38) {
        case 0x00:
        case 0x08: /*TEST*/
            imm_data = fastreadw(cs + op_pc + 1);
            uop_AND_IMM(ir, IREG_flags_res_W, reg, imm_data);
            uop_MOVZX(ir, IREG_flags_res, IREG_flags_res_W);
            uop_MOV_IMM(ir, IREG_flags_op, FLAGS_ZN16);
            codegen_flags_changed = 1;
            codegen_mark_code_present(block, cs + op_pc + 1, 2);
            return op_pc + 3;
        case 0x10: /*NOT*/
            uop_XOR_IMM(ir, reg, reg, 0xffff);
            if ((fetchdat & 0xc0) != 0xc0)
                uop_MEM_STORE_REG(ir, ireg_seg_base(target_seg), IREG_eaaddr, reg);
            codegen_flags_changed = 1;
            return op_pc + 1;
        case 0x18: /*NEG*/
            /* 0 - operand; record op1/op2/result for lazy 16-bit SUB flags. */
            uop_MOV_IMM(ir, IREG_temp1_W, 0);
            if ((fetchdat & 0xc0) == 0xc0) {
                uop_MOVZX(ir, IREG_flags_op2, reg);
                uop_SUB(ir, IREG_temp1_W, IREG_temp1_W, reg);
                uop_MOVZX(ir, IREG_flags_res, IREG_temp1_W);
                uop_MOV(ir, reg, IREG_temp1_W);
            } else {
                uop_SUB(ir, IREG_temp1_W, IREG_temp1_W, reg);
                uop_MEM_STORE_REG(ir, ireg_seg_base(target_seg), IREG_eaaddr, IREG_temp1_W);
                uop_MOVZX(ir, IREG_flags_op2, IREG_temp0_W);
                uop_MOVZX(ir, IREG_flags_res, IREG_temp1_W);
            }
            uop_MOV_IMM(ir, IREG_flags_op, FLAGS_SUB16);
            uop_MOV_IMM(ir, IREG_flags_op1, 0);
            codegen_flags_changed = 1;
            return op_pc + 1;
        default:
            break;
    }
    return 0;
}
/* Recompile the F7 group with 32-bit operands: TEST r/m32,imm32 (/0,/1),
   NOT r/m32 (/2) and NEG r/m32 (/3).  MUL/IMUL/DIV/IDIV fall back to the
   interpreter (return 0).  Mirrors ropF7_16 on dword registers (no MOVZX
   needed since the flag registers are already 32 bits wide). */
uint32_t
ropF7_32(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc)
{
    x86seg *target_seg = NULL;
    uint32_t imm_data;
    int reg;
    if (fetchdat & 0x20)
        return 0;
    codegen_mark_code_present(block, cs + op_pc, 1);
    if ((fetchdat & 0xc0) == 0xc0)
        reg = IREG_32(fetchdat & 7);
    else {
        uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc);
        target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0);
        if ((fetchdat & 0x30) == 0x10) /*NEG/NOT*/
            codegen_check_seg_write(block, ir, target_seg);
        else
            codegen_check_seg_read(block, ir, target_seg);
        uop_MEM_LOAD_REG(ir, IREG_temp0, ireg_seg_base(target_seg), IREG_eaaddr);
        reg = IREG_temp0;
    }
    switch (fetchdat & 0x38) {
        case 0x00:
        case 0x08: /*TEST*/
            imm_data = fastreadl(cs + op_pc + 1);
            uop_AND_IMM(ir, IREG_flags_res, reg, imm_data);
            uop_MOV_IMM(ir, IREG_flags_op, FLAGS_ZN32);
            codegen_flags_changed = 1;
            codegen_mark_code_present(block, cs + op_pc + 1, 4);
            return op_pc + 5;
        case 0x10: /*NOT*/
            uop_XOR_IMM(ir, reg, reg, 0xffffffff);
            if ((fetchdat & 0xc0) != 0xc0)
                uop_MEM_STORE_REG(ir, ireg_seg_base(target_seg), IREG_eaaddr, reg);
            codegen_flags_changed = 1;
            return op_pc + 1;
        case 0x18: /*NEG*/
            /* 0 - operand; record op1/op2/result for lazy 32-bit SUB flags. */
            uop_MOV_IMM(ir, IREG_temp1, 0);
            if ((fetchdat & 0xc0) == 0xc0) {
                uop_MOV(ir, IREG_flags_op2, reg);
                uop_SUB(ir, IREG_temp1, IREG_temp1, reg);
                uop_MOV(ir, IREG_flags_res, IREG_temp1);
                uop_MOV(ir, reg, IREG_temp1);
            } else {
                uop_SUB(ir, IREG_temp1, IREG_temp1, reg);
                uop_MEM_STORE_REG(ir, ireg_seg_base(target_seg), IREG_eaaddr, IREG_temp1);
                uop_MOV(ir, IREG_flags_op2, IREG_temp0);
                uop_MOV(ir, IREG_flags_res, IREG_temp1);
            }
            uop_MOV_IMM(ir, IREG_flags_op, FLAGS_SUB32);
            uop_MOV_IMM(ir, IREG_flags_op1, 0);
            codegen_flags_changed = 1;
            return op_pc + 1;
        default:
            break;
    }
    return 0;
}
/* Emit a carry-flag rebuild uop unless the pending lazy-flags state was
   produced by INC/DEC, which never modify carry — in that case the cached
   C flag is still valid and no rebuild is required. */
static void
rebuild_c(ir_data_t *ir)
{
    if (codegen_flags_changed) {
        switch (cpu_state.flags_op) {
            case FLAGS_INC8:
            case FLAGS_INC16:
            case FLAGS_INC32:
            case FLAGS_DEC8:
            case FLAGS_DEC16:
            case FLAGS_DEC32:
                /* Carry untouched by INC/DEC: nothing to rebuild. */
                return;
            default:
                break;
        }
    }
    uop_CALL_FUNC(ir, flags_rebuild_c);
}
/* Recompile the FF group with 16-bit operands: INC (/0), DEC (/1),
   near CALL (/2), near JMP (/4), far JMP (/5, memory only) and PUSH (/6).
   Far CALL (/3) and /7 are left to the interpreter (return 0). */
uint32_t
ropFF_16(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc)
{
    x86seg *target_seg = NULL;
    int src_reg;
    int sp_reg;
    if ((fetchdat & 0x38) != 0x00 && (fetchdat & 0x38) != 0x08 && (fetchdat & 0x38) != 0x10 && (fetchdat & 0x38) != 0x20 && (fetchdat & 0x38) != 0x28 && (fetchdat & 0x38) != 0x30)
        return 0;
    codegen_mark_code_present(block, cs + op_pc, 1);
    if ((fetchdat & 0xc0) == 0xc0) {
        /* JMP far needs a memory operand (segment:offset); bail on reg form. */
        if ((fetchdat & 0x38) == 0x28)
            return 0;
        src_reg = IREG_16(fetchdat & 7);
    } else {
        /* Memory operand: resolve EA, check segment access (INC/DEC are
           read-modify-write) and load the word into a temporary. */
        uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc);
        target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0);
        if (!(fetchdat & 0x30)) /*INC/DEC*/
            codegen_check_seg_write(block, ir, target_seg);
        else
            codegen_check_seg_read(block, ir, target_seg);
        uop_MEM_LOAD_REG(ir, IREG_temp0_W, ireg_seg_base(target_seg), IREG_eaaddr);
        src_reg = IREG_temp0_W;
    }
    switch (fetchdat & 0x38) {
        case 0x00: /*INC*/
            /* INC preserves carry: rebuild C first, then record op1/op2/
               result so the lazy evaluator can derive the other flags. */
            rebuild_c(ir);
            codegen_flags_changed = 1;
            if ((fetchdat & 0xc0) == 0xc0) {
                uop_MOVZX(ir, IREG_flags_op1, src_reg);
                uop_ADD_IMM(ir, src_reg, src_reg, 1);
                uop_MOVZX(ir, IREG_flags_res, src_reg);
                uop_MOV_IMM(ir, IREG_flags_op2, 1);
                uop_MOV_IMM(ir, IREG_flags_op, FLAGS_INC16);
            } else {
                uop_ADD_IMM(ir, IREG_temp1_W, src_reg, 1);
                uop_MEM_STORE_REG(ir, ireg_seg_base(target_seg), IREG_eaaddr, IREG_temp1_W);
                uop_MOVZX(ir, IREG_flags_op1, src_reg);
                uop_MOVZX(ir, IREG_flags_res, IREG_temp1_W);
                uop_MOV_IMM(ir, IREG_flags_op2, 1);
                uop_MOV_IMM(ir, IREG_flags_op, FLAGS_INC16);
            }
            return op_pc + 1;
        case 0x08: /*DEC*/
            rebuild_c(ir);
            codegen_flags_changed = 1;
            if ((fetchdat & 0xc0) == 0xc0) {
                uop_MOVZX(ir, IREG_flags_op1, src_reg);
                uop_SUB_IMM(ir, src_reg, src_reg, 1);
                uop_MOVZX(ir, IREG_flags_res, src_reg);
                uop_MOV_IMM(ir, IREG_flags_op2, 1);
                uop_MOV_IMM(ir, IREG_flags_op, FLAGS_DEC16);
            } else {
                uop_SUB_IMM(ir, IREG_temp1_W, src_reg, 1);
                uop_MEM_STORE_REG(ir, ireg_seg_base(target_seg), IREG_eaaddr, IREG_temp1_W);
                uop_MOVZX(ir, IREG_flags_op1, src_reg);
                uop_MOVZX(ir, IREG_flags_res, IREG_temp1_W);
                uop_MOV_IMM(ir, IREG_flags_op2, 1);
                uop_MOV_IMM(ir, IREG_flags_op, FLAGS_DEC16);
            }
            return op_pc + 1;
        case 0x10: /*CALL*/
            /* Push the return address, load the new IP; -1 ends the block. */
            if ((fetchdat & 0xc0) == 0xc0)
                uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc);
            sp_reg = LOAD_SP_WITH_OFFSET(ir, -2);
            uop_MEM_STORE_IMM_16(ir, IREG_SS_base, sp_reg, op_pc + 1);
            SUB_SP(ir, 2);
            uop_MOVZX(ir, IREG_pc, src_reg);
            return -1;
        case 0x20: /*JMP*/
            uop_MOVZX(ir, IREG_pc, src_reg);
            return -1;
        case 0x28: /*JMP far*/
            /* Load new IP from [ea], new CS selector from [ea+2], and let
               loadcsjmp perform the segment load / privilege checks. */
            uop_MOVZX(ir, IREG_pc, src_reg);
            uop_MEM_LOAD_REG_OFFSET(ir, IREG_temp1_W, ireg_seg_base(target_seg), IREG_eaaddr, 2);
            uop_LOAD_FUNC_ARG_REG(ir, 0, IREG_temp1_W);
            uop_LOAD_FUNC_ARG_IMM(ir, 1, op_pc + 1);
            uop_CALL_FUNC(ir, loadcsjmp);
            return -1;
        case 0x30: /*PUSH*/
            if ((fetchdat & 0xc0) == 0xc0)
                uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc);
            sp_reg = LOAD_SP_WITH_OFFSET(ir, -2);
            uop_MEM_STORE_REG(ir, IREG_SS_base, sp_reg, src_reg);
            SUB_SP(ir, 2);
            return op_pc + 1;
        default:
            break;
    }
    return 0;
}
/* Recompile the FF group with 32-bit operands: INC (/0), DEC (/1),
   near CALL (/2), near JMP (/4), far JMP (/5, memory only) and PUSH (/6).
   Far CALL (/3) and /7 are left to the interpreter.  Mirrors ropFF_16 on
   dword registers and a 4-byte stack slot. */
uint32_t
ropFF_32(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc)
{
    x86seg *target_seg = NULL;
    int src_reg;
    int sp_reg;
    if ((fetchdat & 0x38) != 0x00 && (fetchdat & 0x38) != 0x08 && (fetchdat & 0x38) != 0x10 && (fetchdat & 0x38) != 0x20 && (fetchdat & 0x38) != 0x28 && (fetchdat & 0x38) != 0x30)
        return 0;
    codegen_mark_code_present(block, cs + op_pc, 1);
    if ((fetchdat & 0xc0) == 0xc0) {
        /* JMP far needs a memory operand (segment:offset); bail on reg form. */
        if ((fetchdat & 0x38) == 0x28)
            return 0;
        src_reg = IREG_32(fetchdat & 7);
    } else {
        uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc);
        target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0);
        if (!(fetchdat & 0x30)) /*INC/DEC*/
            codegen_check_seg_write(block, ir, target_seg);
        else
            codegen_check_seg_read(block, ir, target_seg);
        uop_MEM_LOAD_REG(ir, IREG_temp0, ireg_seg_base(target_seg), IREG_eaaddr);
        src_reg = IREG_temp0;
    }
    switch (fetchdat & 0x38) {
        case 0x00: /*INC*/
            /* INC preserves carry: rebuild C first, then record the lazy
               flag operands. */
            rebuild_c(ir);
            codegen_flags_changed = 1;
            if ((fetchdat & 0xc0) == 0xc0) {
                uop_MOV(ir, IREG_flags_op1, src_reg);
                uop_ADD_IMM(ir, src_reg, src_reg, 1);
                uop_MOV(ir, IREG_flags_res, src_reg);
                uop_MOV_IMM(ir, IREG_flags_op2, 1);
                uop_MOV_IMM(ir, IREG_flags_op, FLAGS_INC32);
            } else {
                uop_ADD_IMM(ir, IREG_temp1, src_reg, 1);
                uop_MEM_STORE_REG(ir, ireg_seg_base(target_seg), IREG_eaaddr, IREG_temp1);
                uop_MOV(ir, IREG_flags_op1, src_reg);
                uop_MOV(ir, IREG_flags_res, IREG_temp1);
                uop_MOV_IMM(ir, IREG_flags_op2, 1);
                uop_MOV_IMM(ir, IREG_flags_op, FLAGS_INC32);
            }
            return op_pc + 1;
        case 0x08: /*DEC*/
            rebuild_c(ir);
            codegen_flags_changed = 1;
            if ((fetchdat & 0xc0) == 0xc0) {
                uop_MOV(ir, IREG_flags_op1, src_reg);
                uop_SUB_IMM(ir, src_reg, src_reg, 1);
                uop_MOV(ir, IREG_flags_res, src_reg);
                uop_MOV_IMM(ir, IREG_flags_op2, 1);
                uop_MOV_IMM(ir, IREG_flags_op, FLAGS_DEC32);
            } else {
                uop_SUB_IMM(ir, IREG_temp1, src_reg, 1);
                uop_MEM_STORE_REG(ir, ireg_seg_base(target_seg), IREG_eaaddr, IREG_temp1);
                uop_MOV(ir, IREG_flags_op1, src_reg);
                uop_MOV(ir, IREG_flags_res, IREG_temp1);
                uop_MOV_IMM(ir, IREG_flags_op2, 1);
                uop_MOV_IMM(ir, IREG_flags_op, FLAGS_DEC32);
            }
            return op_pc + 1;
        case 0x10: /*CALL*/
            /* Push the return address, load the new EIP; -1 ends the block. */
            if ((fetchdat & 0xc0) == 0xc0)
                uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc);
            sp_reg = LOAD_SP_WITH_OFFSET(ir, -4);
            uop_MEM_STORE_IMM_32(ir, IREG_SS_base, sp_reg, op_pc + 1);
            SUB_SP(ir, 4);
            uop_MOV(ir, IREG_pc, src_reg);
            return -1;
        case 0x20: /*JMP*/
            uop_MOV(ir, IREG_pc, src_reg);
            return -1;
        case 0x28: /*JMP far*/
            /* Load new EIP from [ea], new CS selector from [ea+4], and let
               loadcsjmp perform the segment load / privilege checks. */
            uop_MOV(ir, IREG_pc, src_reg);
            uop_MEM_LOAD_REG_OFFSET(ir, IREG_temp1_W, ireg_seg_base(target_seg), IREG_eaaddr, 4);
            uop_LOAD_FUNC_ARG_REG(ir, 0, IREG_temp1_W);
            uop_LOAD_FUNC_ARG_IMM(ir, 1, op_pc + 1);
            uop_CALL_FUNC(ir, loadcsjmp);
            return -1;
        case 0x30: /*PUSH*/
            if ((fetchdat & 0xc0) == 0xc0)
                uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc);
            sp_reg = LOAD_SP_WITH_OFFSET(ir, -4);
            uop_MEM_STORE_REG(ir, IREG_SS_base, sp_reg, src_reg);
            SUB_SP(ir, 4);
            return op_pc + 1;
        default:
            break;
    }
    return 0;
}
/* NOP (0x90): emits nothing; the recompiled stream simply advances.
   Note: fetchdat is now marked UNUSED for consistency with the other
   unused parameters in this file. */
uint32_t
ropNOP(UNUSED(codeblock_t *block), UNUSED(ir_data_t *ir), UNUSED(uint8_t opcode), UNUSED(uint32_t fetchdat), UNUSED(uint32_t op_32), uint32_t op_pc)
{
    return op_pc;
}
/* CBW: sign-extend AL into AX. */
uint32_t
ropCBW(UNUSED(codeblock_t *block), ir_data_t *ir, UNUSED(uint8_t opcode), UNUSED(uint32_t fetchdat), UNUSED(uint32_t op_32), uint32_t op_pc)
{
    uop_MOVSX(ir, IREG_AX, IREG_AL);
    return op_pc;
}
/* CDQ: fill EDX with the sign of EAX (arithmetic shift right by 31). */
uint32_t
ropCDQ(UNUSED(codeblock_t *block), ir_data_t *ir, UNUSED(uint8_t opcode), UNUSED(uint32_t fetchdat), UNUSED(uint32_t op_32), uint32_t op_pc)
{
    uop_SAR_IMM(ir, IREG_EDX, IREG_EAX, 31);
    return op_pc;
}
/* CWD: fill DX with the sign of AX (arithmetic shift right by 15). */
uint32_t
ropCWD(UNUSED(codeblock_t *block), ir_data_t *ir, UNUSED(uint8_t opcode), UNUSED(uint32_t fetchdat), UNUSED(uint32_t op_32), uint32_t op_pc)
{
    uop_SAR_IMM(ir, IREG_DX, IREG_AX, 15);
    return op_pc;
}
/* CWDE: sign-extend AX into EAX. */
uint32_t
ropCWDE(UNUSED(codeblock_t *block), ir_data_t *ir, UNUSED(uint8_t opcode), UNUSED(uint32_t fetchdat), UNUSED(uint32_t op_32), uint32_t op_pc)
{
    uop_MOVSX(ir, IREG_EAX, IREG_AX);
    return op_pc;
}
/* Generate the LDS/LES/LFS/LGS/LSS recompiler handlers.  Each loads the
   offset from [ea] and the selector from [ea + operand size], loads the
   selector into the target segment register (which may raise a fault),
   then writes the offset to the destination register.  Loading SS ends
   the block, matching the interpreter's one-instruction interrupt
   shadow after an SS load. */
#define ropLxS(name, seg)                                                                                                      \
    uint32_t rop##name##_16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
    {                                                                                                                          \
        x86seg *target_seg = NULL;                                                                                             \
        int dest_reg = (fetchdat >> 3) & 7;                                                                                    \
                                                                                                                               \
        if ((fetchdat & 0xc0) == 0xc0)                                                                                         \
            return 0;                                                                                                          \
                                                                                                                               \
        codegen_mark_code_present(block, cs + op_pc, 1);                                                                       \
        uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc);                                                                          \
        target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0);                                 \
        codegen_check_seg_read(block, ir, target_seg);                                                                         \
        uop_MEM_LOAD_REG(ir, IREG_temp0_W, ireg_seg_base(target_seg), IREG_eaaddr);                                            \
        uop_MEM_LOAD_REG_OFFSET(ir, IREG_temp1_W, ireg_seg_base(target_seg), IREG_eaaddr, 2);                                  \
        uop_LOAD_SEG(ir, seg, IREG_temp1_W);                                                                                   \
        uop_MOV(ir, IREG_16(dest_reg), IREG_temp0_W);                                                                          \
                                                                                                                               \
        if (seg == &cpu_state.seg_ss)                                                                                          \
            CPU_BLOCK_END();                                                                                                   \
                                                                                                                               \
        return op_pc + 1;                                                                                                      \
    }                                                                                                                          \
    uint32_t rop##name##_32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc) \
    {                                                                                                                          \
        x86seg *target_seg = NULL;                                                                                             \
        int dest_reg = (fetchdat >> 3) & 7;                                                                                    \
                                                                                                                               \
        if ((fetchdat & 0xc0) == 0xc0)                                                                                         \
            return 0;                                                                                                          \
                                                                                                                               \
        codegen_mark_code_present(block, cs + op_pc, 1);                                                                       \
        uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc);                                                                          \
        target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0);                                 \
        codegen_check_seg_read(block, ir, target_seg);                                                                         \
        uop_MEM_LOAD_REG(ir, IREG_temp0, ireg_seg_base(target_seg), IREG_eaaddr);                                              \
        uop_MEM_LOAD_REG_OFFSET(ir, IREG_temp1_W, ireg_seg_base(target_seg), IREG_eaaddr, 4);                                  \
        uop_LOAD_SEG(ir, seg, IREG_temp1_W);                                                                                   \
        uop_MOV(ir, IREG_32(dest_reg), IREG_temp0);                                                                            \
                                                                                                                               \
        if (seg == &cpu_state.seg_ss)                                                                                          \
            CPU_BLOCK_END();                                                                                                   \
                                                                                                                               \
        return op_pc + 1;                                                                                                      \
    }
/* Instantiate the handlers for each segment register. */
ropLxS(LDS, &cpu_state.seg_ds)
ropLxS(LES, &cpu_state.seg_es)
ropLxS(LFS, &cpu_state.seg_fs)
ropLxS(LGS, &cpu_state.seg_gs)
ropLxS(LSS, &cpu_state.seg_ss)
/* CLC: materialize the lazy flags, then clear the carry flag. */
uint32_t
ropCLC(UNUSED(codeblock_t *block), ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, UNUSED(uint32_t op_32), uint32_t op_pc)
{
    uop_CALL_FUNC(ir, flags_rebuild);
    uop_AND_IMM(ir, IREG_flags, IREG_flags, ~C_FLAG);
    return op_pc;
}
/* CMC: materialize the lazy flags, then toggle the carry flag. */
uint32_t
ropCMC(UNUSED(codeblock_t *block), ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, UNUSED(uint32_t op_32), uint32_t op_pc)
{
    uop_CALL_FUNC(ir, flags_rebuild);
    uop_XOR_IMM(ir, IREG_flags, IREG_flags, C_FLAG);
    return op_pc;
}
/* STC: materialize the lazy flags, then set the carry flag. */
uint32_t
ropSTC(UNUSED(codeblock_t *block), ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, UNUSED(uint32_t op_32), uint32_t op_pc)
{
    uop_CALL_FUNC(ir, flags_rebuild);
    uop_OR_IMM(ir, IREG_flags, IREG_flags, C_FLAG);
    return op_pc;
}
/* CLD: clear the direction flag (D is not lazily evaluated, no rebuild). */
uint32_t
ropCLD(UNUSED(codeblock_t *block), ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, UNUSED(uint32_t op_32), uint32_t op_pc)
{
    uop_AND_IMM(ir, IREG_flags, IREG_flags, ~D_FLAG);
    return op_pc;
}
/* STD: set the direction flag. */
uint32_t
ropSTD(UNUSED(codeblock_t *block), ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, UNUSED(uint32_t op_32), uint32_t op_pc)
{
    uop_OR_IMM(ir, IREG_flags, IREG_flags, D_FLAG);
    return op_pc;
}
/* CLI: clear the interrupt flag.  When VME/PVI is enabled and IOPL does
   not permit it, the virtual-interrupt semantics are left to the
   interpreter (return 0). */
uint32_t
ropCLI(UNUSED(codeblock_t *block), ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, UNUSED(uint32_t op_32), uint32_t op_pc)
{
    if (!IOPLp && (cr4 & (CR4_VME | CR4_PVI)))
        return 0;
    uop_AND_IMM(ir, IREG_flags, IREG_flags, ~I_FLAG);
    return op_pc;
}
/* STI: set the interrupt flag; same VME/PVI fallback as CLI. */
uint32_t
ropSTI(UNUSED(codeblock_t *block), ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, UNUSED(uint32_t op_32), uint32_t op_pc)
{
    if (!IOPLp && (cr4 & (CR4_VME | CR4_PVI)))
        return 0;
    uop_OR_IMM(ir, IREG_flags, IREG_flags, I_FLAG);
    return op_pc;
}
``` | /content/code_sandbox/src/codegen_new/codegen_ops_misc.c | c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 6,940 |
```c
#include <stdint.h>
#include <86box/86box.h>
#include "cpu.h"
#include <86box/mem.h>
#include <86box/plat_unused.h>
#include "x86_ops.h"
#include "codegen.h"
#include "x86.h"
#include "x86seg_common.h"
#include "x86seg.h"
#include "386_common.h"
#include "codegen_accumulate.h"
#include "codegen_allocator.h"
#include "codegen_backend.h"
#include "codegen_ir.h"
#include "codegen_ops.h"
#include "codegen_ops_helpers.h"
/* Maximum number of guest instructions tracked per recompiled block. */
#define MAX_INSTRUCTION_COUNT 50
/* Per-instruction decoder snapshot, recorded while building a block so a
   later backwards jump into the block can restore the decoder state. */
static struct
{
    uint32_t pc;        /* guest PC of this instruction */
    int op_ssegs;       /* segment-override state at this point */
    x86seg *op_ea_seg;  /* effective-address segment in use */
    uint32_t op_32;     /* operand/address size flags */
    int first_uop;      /* index of the instruction's first uop in the IR */
    int TOP;            /* x87 top-of-stack at this point */
} codegen_instructions[MAX_INSTRUCTION_COUNT];
/* Look up the uop index recorded for guest address pc within the current
   block.  On a hit, also returns the instruction index and its x87 TOP
   snapshot.  On a miss, *first_instruction is set to block->ins, *TOP is
   left untouched, and -1 is returned. */
int
codegen_get_instruction_uop(codeblock_t *block, uint32_t pc, int *first_instruction, int *TOP)
{
    for (uint8_t c = 0; c <= block->ins; c++) {
        if (codegen_instructions[c].pc == pc) {
            *first_instruction = c;
            *TOP = codegen_instructions[c].TOP;
            return codegen_instructions[c].first_uop;
        }
    }
    *first_instruction = block->ins;
    return -1;
}
/* Emit uops restoring the decoder state (operand-size flags, EA segment,
   segment-override state) recorded at first_instruction, so an
   intra-block loop can safely jump back to that point. */
void
codegen_set_loop_start(ir_data_t *ir, int first_instruction)
{
    uop_MOV_IMM(ir, IREG_op32, codegen_instructions[first_instruction].op_32);
    uop_MOV_PTR(ir, IREG_ea_seg, (void *) codegen_instructions[first_instruction].op_ea_seg);
    uop_MOV_IMM(ir, IREG_ssegs, codegen_instructions[first_instruction].op_ssegs);
}
int has_ea;
codeblock_t *codeblock;
uint16_t *codeblock_hash;
/* CPU-model-specific cycle-counting callbacks, installed via
   codegen_timing_set() below. */
void (*codegen_timing_start)(void);
void (*codegen_timing_prefix)(uint8_t prefix, uint32_t fetchdat);
void (*codegen_timing_opcode)(uint8_t opcode, uint32_t fetchdat, int op_32, uint32_t op_pc);
void (*codegen_timing_block_start)(void);
void (*codegen_timing_block_end)(void);
int (*codegen_timing_jump_cycles)(void);
/* Install the timing model for the emulated CPU by copying its callback
   table into the global function pointers above. */
void
codegen_timing_set(codegen_timing_t *timing)
{
    codegen_timing_start = timing->start;
    codegen_timing_prefix = timing->prefix;
    codegen_timing_opcode = timing->opcode;
    codegen_timing_block_start = timing->block_start;
    codegen_timing_block_end = timing->block_end;
    codegen_timing_jump_cycles = timing->jump_cycles;
}
int codegen_in_recompile;
/* Decoder state cached from the previously recompiled instruction, used
   to detect state changes between instructions in a block. */
static int last_op_ssegs;
static x86seg *last_op_ea_seg;
static uint32_t last_op_32;
/* Reset the cached decoder state to "unknown" before starting a new block. */
void
codegen_generate_reset(void)
{
    last_op_ssegs = -1;
    last_op_ea_seg = NULL;
    last_op_32 = -1;
    has_ea = 0;
}
/* Emit a run-time check that the segment is valid for reading: if its
   cached base is the "unloaded" sentinel (-1), jump to the GPF routine.
   The check is skipped when it cannot fail (real/V86 mode, CS/SS, flat
   DS) and memoized per segment via seg->checked so it is only emitted
   once per block. */
void
codegen_check_seg_read(UNUSED(codeblock_t *block), ir_data_t *ir, x86seg *seg)
{
    /*Segments always valid in real/V86 mode*/
    if (!(cr0 & 1) || (cpu_state.eflags & VM_FLAG))
        return;
    /*CS and SS must always be valid*/
    if (seg == &cpu_state.seg_cs || seg == &cpu_state.seg_ss)
        return;
    if (seg->checked)
        return;
    if (seg == &cpu_state.seg_ds && codegen_flat_ds && !(cpu_cur_status & CPU_STATUS_NOTFLATDS))
        return;
    uop_CMP_IMM_JZ(ir, ireg_seg_base(seg), (uint32_t) -1, codegen_gpf_rout);
    seg->checked = 1;
}
/* Write-access counterpart of codegen_check_seg_read.  NOTE(review): the
   body is currently identical to the read variant — both only test the
   "segment unloaded" sentinel; no separate write-permission check is
   emitted here. */
void
codegen_check_seg_write(UNUSED(codeblock_t *block), ir_data_t *ir, x86seg *seg)
{
    /*Segments always valid in real/V86 mode*/
    if (!(cr0 & 1) || (cpu_state.eflags & VM_FLAG))
        return;
    /*CS and SS must always be valid*/
    if (seg == &cpu_state.seg_cs || seg == &cpu_state.seg_ss)
        return;
    if (seg->checked)
        return;
    if (seg == &cpu_state.seg_ds && codegen_flat_ds && !(cpu_cur_status & CPU_STATUS_NOTFLATDS))
        return;
    uop_CMP_IMM_JZ(ir, ireg_seg_base(seg), (uint32_t) -1, codegen_gpf_rout);
    seg->checked = 1;
}
/* Emit uops computing a 16-bit ModR/M effective address into IREG_eaaddr.
   Advances *op_pc past any displacement bytes, marks those bytes as code,
   and returns the segment the access defaults to (SS for BP-based forms
   without an override, per the mod1seg table). */
static x86seg *
codegen_generate_ea_16_long(ir_data_t *ir, x86seg *op_ea_seg, uint32_t fetchdat, int op_ssegs, uint32_t *op_pc)
{
    uint32_t old_pc = (*op_pc) + 1;
    if (!cpu_mod && cpu_rm == 6) {
        /* mod=00 rm=110: plain 16-bit displacement, no registers. */
        uint16_t addr = (fetchdat >> 8) & 0xffff;
        uop_MOV_IMM(ir, IREG_eaaddr, addr);
        (*op_pc) += 2;
    } else {
        int base_reg;
        int index_reg;
        int offset;
        /* Base register per the 16-bit addressing table:
           BX+SI, BX+DI, BP+SI, BP+DI, SI, DI, BP, BX. */
        switch (cpu_rm & 7) {
            default:
            case 0:
            case 1:
            case 7:
                base_reg = IREG_EBX;
                break;
            case 2:
            case 3:
            case 6:
                base_reg = IREG_EBP;
                break;
            case 4:
                base_reg = IREG_ESI;
                break;
            case 5:
                base_reg = IREG_EDI;
                break;
        }
        uop_MOV(ir, IREG_eaaddr, base_reg);
        /* rm 0-3 additionally add SI (even) or DI (odd). */
        if (!(cpu_rm & 4)) {
            if (!(cpu_rm & 1))
                index_reg = IREG_ESI;
            else
                index_reg = IREG_EDI;
            uop_ADD(ir, IREG_eaaddr, IREG_eaaddr, index_reg);
        }
        /* mod=01: sign-extended disp8; mod=10: disp16. */
        switch (cpu_mod) {
            case 1:
                offset = (int) (int8_t) ((fetchdat >> 8) & 0xff);
                uop_ADD_IMM(ir, IREG_eaaddr, IREG_eaaddr, offset);
                (*op_pc)++;
                break;
            case 2:
                offset = (fetchdat >> 8) & 0xffff;
                uop_ADD_IMM(ir, IREG_eaaddr, IREG_eaaddr, offset);
                (*op_pc) += 2;
                break;
            default:
                break;
        }
        /* 16-bit address arithmetic wraps at 64K. */
        uop_AND_IMM(ir, IREG_eaaddr, IREG_eaaddr, 0xffff);
        /* BP-based forms default to SS unless a segment override is active. */
        if (mod1seg[cpu_rm] == &ss && !op_ssegs) {
            op_ea_seg = &cpu_state.seg_ss;
        }
    }
    codegen_mark_code_present(ir->block, cs + old_pc, ((*op_pc) + 1) - old_pc);
    return op_ea_seg;
}
/* Emit uops computing a 32-bit ModR/M (+ optional SIB) effective address
   into IREG_eaaddr.  Advances *op_pc past SIB/displacement bytes, marks
   the consumed bytes as code, and returns the default segment (SS for
   EBP/ESP-based forms without an override).  stack_offset is added for
   ESP-based accesses to compensate for values the recompiler has pushed.
   Under CODEBLOCK_NO_IMMEDIATES, displacements are loaded from guest RAM
   at run time instead of being baked into the block. */
static x86seg *
codegen_generate_ea_32_long(ir_data_t *ir, x86seg *op_ea_seg, uint32_t fetchdat, int op_ssegs, uint32_t *op_pc, int stack_offset)
{
    codeblock_t *block = ir->block;
    uint32_t old_pc = (*op_pc) + 1;
    uint32_t new_eaaddr;
    int extra_bytes = 0;
    if (cpu_rm == 4) {
        /* SIB byte follows the ModR/M byte. */
        uint8_t sib = fetchdat >> 8;
        (*op_pc)++;
        switch (cpu_mod) {
            case 0:
                if ((sib & 7) == 5) {
                    /* mod=00 base=101: disp32, no base register. */
                    if (block->flags & CODEBLOCK_NO_IMMEDIATES) {
                        LOAD_IMMEDIATE_FROM_RAM_32(block, ir, IREG_eaaddr, cs + (*op_pc) + 1);
                        extra_bytes = 1;
                    } else {
                        new_eaaddr = fastreadl(cs + (*op_pc) + 1);
                        uop_MOV_IMM(ir, IREG_eaaddr, new_eaaddr);
                        extra_bytes = 5;
                    }
                    (*op_pc) += 4;
                } else {
                    uop_MOV(ir, IREG_eaaddr, sib & 7);
                    extra_bytes = 1;
                }
                break;
            case 1:
                /* base + sign-extended disp8. */
                new_eaaddr = (uint32_t) (int8_t) ((fetchdat >> 16) & 0xff);
                uop_MOV_IMM(ir, IREG_eaaddr, new_eaaddr);
                uop_ADD(ir, IREG_eaaddr, IREG_eaaddr, sib & 7);
                (*op_pc)++;
                extra_bytes = 2;
                break;
            case 2:
                /* base + disp32. */
                if (block->flags & CODEBLOCK_NO_IMMEDIATES) {
                    LOAD_IMMEDIATE_FROM_RAM_32(block, ir, IREG_eaaddr, cs + (*op_pc) + 1);
                    extra_bytes = 1;
                } else {
                    new_eaaddr = fastreadl(cs + (*op_pc) + 1);
                    uop_MOV_IMM(ir, IREG_eaaddr, new_eaaddr);
                    extra_bytes = 5;
                }
                (*op_pc) += 4;
                uop_ADD(ir, IREG_eaaddr, IREG_eaaddr, sib & 7);
                break;
            default:
                break;
        }
        /* ESP-based access: adjust for the recompiler's own stack usage. */
        if (stack_offset && (sib & 7) == 4 && (cpu_mod || (sib & 7) != 5)) { /*ESP*/
            uop_ADD_IMM(ir, IREG_eaaddr, IREG_eaaddr, stack_offset);
#if 0
                addbyte(0x05);
                addlong(stack_offset);
#endif
        }
        /* ESP or EBP base defaults to the SS segment. */
        if (((sib & 7) == 4 || (cpu_mod && (sib & 7) == 5)) && !op_ssegs)
            op_ea_seg = &cpu_state.seg_ss;
        /* Add the scaled index register (index 100 means "none"). */
        if (((sib >> 3) & 7) != 4) {
            switch (sib >> 6) {
                case 0:
                    uop_ADD(ir, IREG_eaaddr, IREG_eaaddr, (sib >> 3) & 7);
                    break;
                case 1:
                    uop_ADD_LSHIFT(ir, IREG_eaaddr, IREG_eaaddr, (sib >> 3) & 7, 1);
                    break;
                case 2:
                    uop_ADD_LSHIFT(ir, IREG_eaaddr, IREG_eaaddr, (sib >> 3) & 7, 2);
                    break;
                case 3:
                    uop_ADD_LSHIFT(ir, IREG_eaaddr, IREG_eaaddr, (sib >> 3) & 7, 3);
                    break;
                default:
                    break;
            }
        }
    } else {
        if (!cpu_mod && cpu_rm == 5) {
            /* mod=00 rm=101: plain disp32. */
            if (block->flags & CODEBLOCK_NO_IMMEDIATES) {
                LOAD_IMMEDIATE_FROM_RAM_32(block, ir, IREG_eaaddr, cs + (*op_pc) + 1);
            } else {
                new_eaaddr = fastreadl(cs + (*op_pc) + 1);
                uop_MOV_IMM(ir, IREG_eaaddr, new_eaaddr);
                extra_bytes = 4;
            }
            (*op_pc) += 4;
        } else {
            /* Single base register, optionally plus disp8/disp32. */
            uop_MOV(ir, IREG_eaaddr, cpu_rm);
            if (cpu_mod) {
                /* EBP base defaults to the SS segment. */
                if (cpu_rm == 5 && !op_ssegs)
                    op_ea_seg = &cpu_state.seg_ss;
                if (cpu_mod == 1) {
                    uop_ADD_IMM(ir, IREG_eaaddr, IREG_eaaddr, (uint32_t) (int8_t) (fetchdat >> 8));
                    (*op_pc)++;
                    extra_bytes = 1;
                } else {
                    if (block->flags & CODEBLOCK_NO_IMMEDIATES) {
                        LOAD_IMMEDIATE_FROM_RAM_32(block, ir, IREG_temp0, cs + (*op_pc) + 1);
                        uop_ADD(ir, IREG_eaaddr, IREG_eaaddr, IREG_temp0);
                    } else {
                        new_eaaddr = fastreadl(cs + (*op_pc) + 1);
                        uop_ADD_IMM(ir, IREG_eaaddr, IREG_eaaddr, new_eaaddr);
                        extra_bytes = 4;
                    }
                    (*op_pc) += 4;
                }
            }
        }
    }
    if (extra_bytes)
        codegen_mark_code_present(ir->block, cs + old_pc, extra_bytes);
    return op_ea_seg;
}
/* Decode the ModR/M byte into the global decoder fields and dispatch to
   the 16- or 32-bit effective-address generator.  Returns the default
   segment for the access, or NULL for a register operand. */
x86seg *
codegen_generate_ea(ir_data_t *ir, x86seg *op_ea_seg, uint32_t fetchdat, int op_ssegs, uint32_t *op_pc, uint32_t op_32, int stack_offset)
{
    cpu_mod = (fetchdat >> 6) & 3;
    cpu_reg = (fetchdat >> 3) & 7;
    cpu_rm = fetchdat & 7;

    /* mod == 3 encodes a register operand: no effective address exists. */
    if (cpu_mod == 3)
        return NULL;

    /* Bit 9 of op_32 selects 32-bit addressing. */
    return (op_32 & 0x200)
               ? codegen_generate_ea_32_long(ir, op_ea_seg, fetchdat, op_ssegs, op_pc, stack_offset)
               : codegen_generate_ea_16_long(ir, op_ea_seg, fetchdat, op_ssegs, op_pc);
}
// clang-format off
/*For each one-byte opcode: non-zero if the instruction carries a ModR/M
  byte, meaning the effective-address generator must run before the handler
  is called.  Indexed directly by the opcode value.*/
static uint8_t opcode_modrm[256] = {
    1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, /*00*/
    1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, /*10*/
    1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, /*20*/
    1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, /*30*/
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /*40*/
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /*50*/
    0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, /*60*/
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /*70*/
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /*80*/
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /*90*/
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /*a0*/
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /*b0*/
    1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, /*c0*/
    1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, /*d0*/
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /*e0*/
    0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, /*f0*/
};
/*Same as opcode_modrm, but for the 0F-prefixed (two-byte) opcode space:
  non-zero entries mark opcodes that are followed by a ModR/M byte.*/
static uint8_t opcode_0f_modrm[256] = {
    1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, /*00*/
    0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, /*10*/
    1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /*20*/
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, /*30*/
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /*40*/
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /*50*/
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, /*60*/
    0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, /*70*/
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /*80*/
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /*90*/
    0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, /*a0*/
    1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, /*b0*/
    1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, /*c0*/
    0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, /*d0*/
    0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, /*e0*/
    0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0  /*f0*/
};
// clang-format on
/*Recompile one x86 instruction into the current code block.

  'opcode' is the first opcode byte (already fetched), 'op' the interpreter
  handler for it, 'fetchdat' the bytes that follow, 'new_pc' the address of
  the byte after the opcode and 'old_pc' the address of the instruction
  itself.  The loop below consumes prefix/escape bytes, switching opcode
  and recompiler tables as it goes; the instruction is then either
  recompiled natively via recomp_op_table or emitted as a call into the
  interpreter routine 'op'.

  Fix vs. previous revision: the result of codegen_timing_jump_cycles()
  was discarded, leaving jump_cycles stuck at 0 so the branch-cycle
  bracketing around codegen_accumulate_flush() never ran.*/
void
codegen_generate_call(uint8_t opcode, OpFn op, uint32_t fetchdat, uint32_t new_pc, uint32_t old_pc)
{
    codeblock_t *block = &codeblock[block_current];
    ir_data_t *ir = codegen_get_ir_data();
    uint32_t op_pc = new_pc;
    const OpFn *op_table = x86_dynarec_opcodes;
    RecompOpFn *recomp_op_table = recomp_opcodes;
    int opcode_shift = 0;           /*index shift for the FPU d8/dc short tables*/
    int opcode_mask = 0x3ff;
    uint32_t recomp_opcode_mask = 0x1ff;
    uint32_t op_32 = use32;         /*bit 8 = operand size, bit 9 = address size*/
    int over = 0;                   /*set once a final opcode table is selected*/
    int test_modrm = 1;
    int pc_off = 0;                 /*-1 for FPU escapes: ModR/M byte is part of the opcode index*/
    uint32_t next_pc = 0;           /*end of instruction for 3DNow! (suffix opcode)*/
#ifdef DEBUG_EXTRA
    uint8_t last_prefix = 0;
#endif
    op_ea_seg = &cpu_state.seg_ds;
    op_ssegs = 0;
    codegen_timing_start();

    /*Prefix/escape loop: exits via 'over' once a two-byte/FPU table is
      selected, or via the default case for a plain opcode.*/
    while (!over) {
        switch (opcode) {
            case 0x0f:
#ifdef DEBUG_EXTRA
                last_prefix = 0x0f;
#endif
                op_table = x86_dynarec_opcodes_0f;
                recomp_op_table = fpu_softfloat ? recomp_opcodes_0f_no_mmx : recomp_opcodes_0f;
                over = 1;
                break;
            case 0x26: /*ES:*/
                op_ea_seg = &cpu_state.seg_es;
                op_ssegs = 1;
                break;
            case 0x2e: /*CS:*/
                op_ea_seg = &cpu_state.seg_cs;
                op_ssegs = 1;
                break;
            case 0x36: /*SS:*/
                op_ea_seg = &cpu_state.seg_ss;
                op_ssegs = 1;
                break;
            case 0x3e: /*DS:*/
                op_ea_seg = &cpu_state.seg_ds;
                op_ssegs = 1;
                break;
            case 0x64: /*FS:*/
                op_ea_seg = &cpu_state.seg_fs;
                op_ssegs = 1;
                break;
            case 0x65: /*GS:*/
                op_ea_seg = &cpu_state.seg_gs;
                op_ssegs = 1;
                break;
            case 0x66: /*Data size select*/
                op_32 = ((use32 & 0x100) ^ 0x100) | (op_32 & 0x200);
                break;
            case 0x67: /*Address size select*/
                op_32 = ((use32 & 0x200) ^ 0x200) | (op_32 & 0x100);
                break;
            case 0xd8:
#ifdef DEBUG_EXTRA
                last_prefix = 0xd8;
#endif
                op_table = (op_32 & 0x200) ? x86_dynarec_opcodes_d8_a32 : x86_dynarec_opcodes_d8_a16;
                recomp_op_table = fpu_softfloat ? NULL : recomp_opcodes_d8;
                opcode_shift = 3;
                opcode_mask = 0x1f;
                over = 1;
                pc_off = -1;
                test_modrm = 0;
                block->flags |= CODEBLOCK_HAS_FPU;
                break;
            case 0xd9:
#ifdef DEBUG_EXTRA
                last_prefix = 0xd9;
#endif
                op_table = (op_32 & 0x200) ? x86_dynarec_opcodes_d9_a32 : x86_dynarec_opcodes_d9_a16;
                recomp_op_table = fpu_softfloat ? NULL : recomp_opcodes_d9;
                opcode_mask = 0xff;
                over = 1;
                pc_off = -1;
                test_modrm = 0;
                block->flags |= CODEBLOCK_HAS_FPU;
                break;
            case 0xda:
#ifdef DEBUG_EXTRA
                last_prefix = 0xda;
#endif
                op_table = (op_32 & 0x200) ? x86_dynarec_opcodes_da_a32 : x86_dynarec_opcodes_da_a16;
                recomp_op_table = fpu_softfloat ? NULL : recomp_opcodes_da;
                opcode_mask = 0xff;
                over = 1;
                pc_off = -1;
                test_modrm = 0;
                block->flags |= CODEBLOCK_HAS_FPU;
                break;
            case 0xdb:
#ifdef DEBUG_EXTRA
                last_prefix = 0xdb;
#endif
                op_table = (op_32 & 0x200) ? x86_dynarec_opcodes_db_a32 : x86_dynarec_opcodes_db_a16;
                recomp_op_table = fpu_softfloat ? NULL : recomp_opcodes_db;
                opcode_mask = 0xff;
                over = 1;
                pc_off = -1;
                test_modrm = 0;
                block->flags |= CODEBLOCK_HAS_FPU;
                break;
            case 0xdc:
#ifdef DEBUG_EXTRA
                last_prefix = 0xdc;
#endif
                op_table = (op_32 & 0x200) ? x86_dynarec_opcodes_dc_a32 : x86_dynarec_opcodes_dc_a16;
                recomp_op_table = fpu_softfloat ? NULL : recomp_opcodes_dc;
                opcode_shift = 3;
                opcode_mask = 0x1f;
                over = 1;
                pc_off = -1;
                test_modrm = 0;
                block->flags |= CODEBLOCK_HAS_FPU;
                break;
            case 0xdd:
#ifdef DEBUG_EXTRA
                last_prefix = 0xdd;
#endif
                op_table = (op_32 & 0x200) ? x86_dynarec_opcodes_dd_a32 : x86_dynarec_opcodes_dd_a16;
                recomp_op_table = fpu_softfloat ? NULL : recomp_opcodes_dd;
                opcode_mask = 0xff;
                over = 1;
                pc_off = -1;
                test_modrm = 0;
                block->flags |= CODEBLOCK_HAS_FPU;
                break;
            case 0xde:
#ifdef DEBUG_EXTRA
                last_prefix = 0xde;
#endif
                op_table = (op_32 & 0x200) ? x86_dynarec_opcodes_de_a32 : x86_dynarec_opcodes_de_a16;
                recomp_op_table = fpu_softfloat ? NULL : recomp_opcodes_de;
                opcode_mask = 0xff;
                over = 1;
                pc_off = -1;
                test_modrm = 0;
                block->flags |= CODEBLOCK_HAS_FPU;
                break;
            case 0xdf:
#ifdef DEBUG_EXTRA
                last_prefix = 0xdf;
#endif
                op_table = (op_32 & 0x200) ? x86_dynarec_opcodes_df_a32 : x86_dynarec_opcodes_df_a16;
                recomp_op_table = fpu_softfloat ? NULL : recomp_opcodes_df;
                opcode_mask = 0xff;
                over = 1;
                pc_off = -1;
                test_modrm = 0;
                block->flags |= CODEBLOCK_HAS_FPU;
                break;
            case 0xf0: /*LOCK*/
                break;
            case 0xf2: /*REPNE*/
#ifdef DEBUG_EXTRA
                last_prefix = 0xf2;
#endif
                op_table = x86_dynarec_opcodes_REPNE;
                recomp_op_table = NULL; // recomp_opcodes_REPNE;
                break;
            case 0xf3: /*REPE*/
#ifdef DEBUG_EXTRA
                last_prefix = 0xf3;
#endif
                op_table = x86_dynarec_opcodes_REPE;
                recomp_op_table = NULL; // recomp_opcodes_REPE;
                break;
            default:
                goto generate_call;
        }
        /*Fetch the next opcode byte (and the bytes after it) and account
          for the prefix in the timing model.*/
        fetchdat = fastreadl(cs + op_pc);
        codegen_timing_prefix(opcode, fetchdat);
        if (cpu_state.abrt)
            return;
        opcode = fetchdat & 0xff;
        if (!pc_off) /*FPU escapes keep the ModR/M byte in the low byte*/
            fetchdat >>= 8;
        op_pc++;
    }
generate_call:
    /*Record per-instruction metadata used when unwinding/patching blocks.*/
    codegen_instructions[block->ins].pc = cpu_state.oldpc;
    codegen_instructions[block->ins].op_ssegs = last_op_ssegs;
    codegen_instructions[block->ins].op_ea_seg = last_op_ea_seg;
    codegen_instructions[block->ins].op_32 = last_op_32;
    codegen_instructions[block->ins].TOP = cpu_state.TOP;
    codegen_instructions[block->ins].first_uop = ir->wr_pos;
    codegen_timing_opcode(opcode, fetchdat, op_32, op_pc);
    codegen_accumulate(ir, ACCREG_cycles, -codegen_block_cycles);
    codegen_block_cycles = 0;
    if ((op_table == x86_dynarec_opcodes && ((opcode & 0xf0) == 0x70 || (opcode & 0xfc) == 0xe0 || opcode == 0xc2 || (opcode & 0xfe) == 0xca || (opcode & 0xfc) == 0xcc || (opcode & 0xfc) == 0xe8 || (opcode == 0xff && ((fetchdat & 0x38) >= 0x10 && (fetchdat & 0x38) < 0x30)))) || (op_table == x86_dynarec_opcodes_0f && ((opcode & 0xf0) == 0x80))) {
        /*On some CPUs (eg K6), a jump/branch instruction may be able to pair with
          subsequent instructions, so no cycles may have been deducted for it yet.
          To prevent having zero cycle blocks (eg with a jump instruction pointing
          to itself), apply the cycles that would be taken if this jump is taken,
          then reverse it for subsequent instructions if the jump is not taken*/
        int jump_cycles = 0;

        if (codegen_timing_jump_cycles)
            jump_cycles = codegen_timing_jump_cycles(); /*result was previously discarded*/
        if (jump_cycles)
            codegen_accumulate(ir, ACCREG_cycles, -jump_cycles);
        codegen_accumulate_flush(ir);
        if (jump_cycles)
            codegen_accumulate(ir, ACCREG_cycles, jump_cycles);
    }
    if (op_table == x86_dynarec_opcodes_0f && opcode == 0x0f) {
        /*3DNow opcodes are stored after ModR/M, SIB and any offset*/
        uint8_t modrm = fetchdat & 0xff;
        uint8_t sib = (fetchdat >> 8) & 0xff;
        uint32_t opcode_pc = op_pc + 1;
        uint8_t opcode_3dnow;
        if ((modrm & 0xc0) != 0xc0) {
            if (op_32 & 0x200) {
                if ((modrm & 7) == 4) {
                    /* Has SIB*/
                    opcode_pc++;
                    if ((modrm & 0xc0) == 0x40)
                        opcode_pc++;
                    else if ((modrm & 0xc0) == 0x80)
                        opcode_pc += 4;
                    else if ((sib & 0x07) == 0x05)
                        opcode_pc += 4;
                } else {
                    if ((modrm & 0xc0) == 0x40)
                        opcode_pc++;
                    else if ((modrm & 0xc0) == 0x80)
                        opcode_pc += 4;
                    else if ((modrm & 0xc7) == 0x05)
                        opcode_pc += 4;
                }
            } else {
                if ((modrm & 0xc0) == 0x40)
                    opcode_pc++;
                else if ((modrm & 0xc0) == 0x80)
                    opcode_pc += 2;
                else if ((modrm & 0xc7) == 0x06)
                    opcode_pc += 2;
            }
        }
        opcode_3dnow = fastreadb(cs + opcode_pc);
        if (!fpu_softfloat && recomp_opcodes_3DNOW[opcode_3dnow]) {
            next_pc = opcode_pc + 1;
            op_table = x86_dynarec_opcodes_3DNOW;
            recomp_op_table = fpu_softfloat ? NULL : recomp_opcodes_3DNOW;
            opcode = opcode_3dnow;
            recomp_opcode_mask = 0xff;
            opcode_mask = 0xff;
        }
    }
    codegen_mark_code_present(block, cs + old_pc, (op_pc - old_pc) - pc_off);
    /* It is apparently a prefixed instruction. */
#if 0
    if ((recomp_op_table == recomp_opcodes) && (opcode == 0x48))
        goto codegen_skip;
#endif
    if (recomp_op_table && recomp_op_table[(opcode | op_32) & recomp_opcode_mask]) {
        /*ret_pc: 0 = recompiler declined (fall through to the interpreter
          call below), -1 = handled with IREG_pc already written, otherwise
          the pc of the next instruction.  (Renamed from 'new_pc' so it no
          longer shadows the parameter of the same name.)*/
        uint32_t ret_pc = recomp_op_table[(opcode | op_32) & recomp_opcode_mask](block, ir, opcode, fetchdat, op_32, op_pc);
        if (ret_pc) {
            if (ret_pc != (uint32_t) -1)
                uop_MOV_IMM(ir, IREG_pc, ret_pc);
            codegen_endpc = (cs + cpu_state.pc) + 8;
            block->ins++;
            if (block->ins >= MAX_INSTRUCTION_COUNT)
                CPU_BLOCK_END();
            return;
        }
    }
    //    codegen_skip:
    if ((op_table == x86_dynarec_opcodes_REPNE || op_table == x86_dynarec_opcodes_REPE) && !op_table[opcode | op_32]) {
        /*No REP-specific form of this opcode - fall back to the plain tables.*/
        op_table = x86_dynarec_opcodes;
        recomp_op_table = recomp_opcodes;
    }
    op = op_table[((opcode >> opcode_shift) | op_32) & opcode_mask];
    if (!test_modrm || (op_table == x86_dynarec_opcodes && opcode_modrm[opcode]) || (op_table == x86_dynarec_opcodes_0f && opcode_0f_modrm[opcode]) || (op_table == x86_dynarec_opcodes_3DNOW)) {
        int stack_offset = 0;
        if (op_table == x86_dynarec_opcodes && opcode == 0x8f) /*POP*/
            stack_offset = (op_32 & 0x100) ? 4 : 2;
        cpu_mod = (fetchdat >> 6) & 3;
        cpu_reg = (fetchdat >> 3) & 7;
        cpu_rm = fetchdat & 7;
        uop_MOV_IMM(ir, IREG_rm_mod_reg, cpu_rm | (cpu_mod << 8) | (cpu_reg << 16));
        op_pc += pc_off;
        if (cpu_mod != 3 && !(op_32 & 0x200)) {
            op_ea_seg = codegen_generate_ea_16_long(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc);
        }
        if (cpu_mod != 3 && (op_32 & 0x200)) {
            op_ea_seg = codegen_generate_ea_32_long(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, stack_offset);
        }
        op_pc -= pc_off;
    }
#ifdef DEBUG_EXTRA
    uop_LOG_INSTR(ir, opcode | (last_prefix << 8));
#endif
    codegen_accumulate_flush(ir);
    /*Synchronise the interpreter-visible state the handler depends on,
      only rewriting fields that changed since the previous instruction.*/
    if (op_table == x86_dynarec_opcodes_3DNOW)
        uop_MOV_IMM(ir, IREG_pc, next_pc);
    else
        uop_MOV_IMM(ir, IREG_pc, op_pc + pc_off);
    uop_MOV_IMM(ir, IREG_oldpc, old_pc);
    if (op_32 != last_op_32)
        uop_MOV_IMM(ir, IREG_op32, op_32);
    if (op_ea_seg != last_op_ea_seg)
        uop_MOV_PTR(ir, IREG_ea_seg, (void *) op_ea_seg);
    if (op_ssegs != last_op_ssegs)
        uop_MOV_IMM(ir, IREG_ssegs, op_ssegs);
    uop_LOAD_FUNC_ARG_IMM(ir, 0, fetchdat);
    uop_CALL_INSTRUCTION_FUNC(ir, op);
    codegen_flags_changed = 0;
    codegen_mark_code_present(block, cs + cpu_state.pc, 8);
    last_op_32 = op_32;
    last_op_ea_seg = op_ea_seg;
    last_op_ssegs = op_ssegs;
#if 0
    codegen_block_ins++;
#endif
    block->ins++;
    if (block->ins >= MAX_INSTRUCTION_COUNT)
        CPU_BLOCK_END();
    codegen_endpc = (cs + cpu_state.pc) + 8;
#if 0
    if (has_ea)
        fatal("Has EA\n");
#endif
}
``` | /content/code_sandbox/src/codegen_new/codegen.c | c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 8,642 |
```c
#include <stdint.h>
#include <86box/86box.h>
#include "cpu.h"
#include <86box/mem.h>
#include <86box/plat_unused.h>
#include "codegen.h"
#include "codegen_accumulate.h"
#include "codegen_ir.h"
/*Deferred-accumulator state: 'count' is the delta accumulated since the
  last flush and 'dest_reg' is the IR register the total is flushed into.
  Only the cycle counter uses this mechanism at present.*/
static struct
{
    int count;    /* pending delta not yet emitted as an IR op */
    int dest_reg; /* IR register that receives the flushed total */
} acc_regs[] = {
    [ACCREG_cycles] = {0, IREG_cycles}
};
/*Defer an adjustment of 'delta' to accumulator 'acc_reg'.  Nothing is
  emitted into the IR until codegen_accumulate_flush() is called (except
  for the USE_ACYCS mirror below).*/
void
codegen_accumulate(UNUSED(ir_data_t *ir), int acc_reg, int delta)
{
#ifdef USE_ACYCS
    /*Mirror non-zero cycle deltas into the acycs counter immediately.*/
    if (delta && (acc_reg == ACCREG_cycles))
        uop_ADD_IMM(ir, IREG_acycs, IREG_acycs, -delta);
#endif
    acc_regs[acc_reg].count += delta;
}
/*Emit a single IR add for whatever delta has accumulated, then clear it.
  No uop is generated when the pending delta is zero.*/
void
codegen_accumulate_flush(ir_data_t *ir)
{
    int pending = acc_regs[0].count;

    acc_regs[0].count = 0;
    if (pending)
        uop_ADD_IMM(ir, acc_regs[0].dest_reg, acc_regs[0].dest_reg, pending);
}
/*Discard any delta still pending from the previous code block without
  emitting it.*/
void
codegen_accumulate_reset(void)
{
    acc_regs[0].count = 0;
}
``` | /content/code_sandbox/src/codegen_new/codegen_accumulate.c | c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 262 |
```c
uint32_t ropJB_8(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJB_16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJB_32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJNB_8(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJNB_16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJNB_32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJBE_8(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJBE_16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJBE_32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJNBE_8(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJNBE_16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJNBE_32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJE_8(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJE_16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJE_32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJNE_8(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJNE_16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJNE_32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJL_8(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJL_16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJL_32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJNL_8(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJNL_16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJNL_32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJLE_8(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJLE_16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJLE_32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJNLE_8(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJNLE_16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJNLE_32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJO_8(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJO_16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJO_32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJNO_8(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJNO_16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJNO_32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJP_8(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJP_16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJP_32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJNP_8(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJNP_16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJNP_32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJS_8(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJS_16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJS_32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJNS_8(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJNS_16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJNS_32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropJCXZ(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropLOOP(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropLOOPE(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropLOOPNE(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
``` | /content/code_sandbox/src/codegen_new/codegen_ops_branch.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 2,273 |
```c
#include <stdint.h>
#include <86box/86box.h>
#include "cpu.h"
#include <86box/mem.h>
#include <86box/plat_unused.h>
#include "x86.h"
#include "x86_flags.h"
#include "x86seg_common.h"
#include "x86seg.h"
#include "386_common.h"
#include "x87_sf.h"
#include "x87.h"
#include "codegen.h"
#include "codegen_accumulate.h"
#include "codegen_ir.h"
#include "codegen_ops.h"
#include "codegen_ops_fpu_arith.h"
#include "codegen_ops_helpers.h"
/*FLD m32fp: load a single-precision float from memory and push it onto the
  FPU register stack.*/
uint32_t
ropFLDs(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc)
{
    x86seg *target_seg;
    uop_FP_ENTER(ir); /*standard FPU opcode prologue*/
    uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc);
    op_pc--; /*step back so the EA decoder starts at the ModR/M byte*/
    target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0);
    codegen_check_seg_read(block, ir, target_seg);
    uop_MEM_LOAD_SINGLE(ir, IREG_ST(-1), ireg_seg_base(target_seg), IREG_eaaddr);
    uop_MOV_IMM(ir, IREG_tag(-1), TAG_VALID); /*slot below TOP becomes the new ST(0)*/
    fpu_PUSH(block, ir);
    return op_pc + 1;
}
/*FLD m64fp: load a double-precision float from memory and push it onto the
  FPU register stack.*/
uint32_t
ropFLDd(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc)
{
    x86seg *target_seg;
    uop_FP_ENTER(ir);
    uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc);
    op_pc--; /*step back so the EA decoder starts at the ModR/M byte*/
    target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0);
    codegen_check_seg_read(block, ir, target_seg);
    uop_MEM_LOAD_DOUBLE(ir, IREG_ST(-1), ireg_seg_base(target_seg), IREG_eaaddr);
    uop_MOV_IMM(ir, IREG_tag(-1), TAG_VALID);
    fpu_PUSH(block, ir);
    return op_pc + 1;
}
/*FST m32fp: store ST(0) to memory as single precision; stack unchanged.*/
uint32_t
ropFSTs(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc)
{
    x86seg *target_seg;
    uop_FP_ENTER(ir);
    uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc);
    op_pc--; /*step back so the EA decoder starts at the ModR/M byte*/
    target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0);
    codegen_check_seg_write(block, ir, target_seg);
    uop_MEM_STORE_SINGLE(ir, ireg_seg_base(target_seg), IREG_eaaddr, IREG_ST(0));
    return op_pc + 1;
}
/*FSTP m32fp: store ST(0) to memory as single precision, then pop the
  FPU stack (slot is marked empty before the pop).*/
uint32_t
ropFSTPs(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc)
{
    x86seg *target_seg;
    uop_FP_ENTER(ir);
    uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc);
    op_pc--; /*step back so the EA decoder starts at the ModR/M byte*/
    target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0);
    codegen_check_seg_write(block, ir, target_seg);
    uop_MEM_STORE_SINGLE(ir, ireg_seg_base(target_seg), IREG_eaaddr, IREG_ST(0));
    uop_MOV_IMM(ir, IREG_tag(0), TAG_EMPTY);
    fpu_POP(block, ir);
    return op_pc + 1;
}
/*FST m64fp: store ST(0) to memory as double precision; stack unchanged.*/
uint32_t
ropFSTd(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc)
{
    x86seg *target_seg;
    uop_FP_ENTER(ir);
    uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc);
    op_pc--; /*step back so the EA decoder starts at the ModR/M byte*/
    target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0);
    codegen_check_seg_write(block, ir, target_seg);
    CHECK_SEG_LIMITS(block, ir, target_seg, IREG_eaaddr, 7); /*bounds-check all 8 bytes of the store*/
    uop_MEM_STORE_DOUBLE(ir, ireg_seg_base(target_seg), IREG_eaaddr, IREG_ST(0));
    return op_pc + 1;
}
/*FSTP m64fp: store ST(0) to memory as double precision, then pop the
  FPU stack.*/
uint32_t
ropFSTPd(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc)
{
    x86seg *target_seg;
    uop_FP_ENTER(ir);
    uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc);
    op_pc--; /*step back so the EA decoder starts at the ModR/M byte*/
    target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0);
    codegen_check_seg_write(block, ir, target_seg);
    CHECK_SEG_LIMITS(block, ir, target_seg, IREG_eaaddr, 7); /*bounds-check all 8 bytes of the store*/
    uop_MEM_STORE_DOUBLE(ir, ireg_seg_base(target_seg), IREG_eaaddr, IREG_ST(0));
    uop_MOV_IMM(ir, IREG_tag(0), TAG_EMPTY);
    fpu_POP(block, ir);
    return op_pc + 1;
}
/*FILD m16int: load a 16-bit integer from memory, convert it and push it
  onto the FPU register stack.*/
uint32_t
ropFILDw(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc)
{
    x86seg *target_seg;
    uop_FP_ENTER(ir);
    uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc);
    op_pc--; /*step back so the EA decoder starts at the ModR/M byte*/
    target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0);
    codegen_check_seg_read(block, ir, target_seg);
    uop_MEM_LOAD_REG(ir, IREG_temp0_W, ireg_seg_base(target_seg), IREG_eaaddr);
    uop_MOV_DOUBLE_INT(ir, IREG_ST(-1), IREG_temp0_W);
    uop_MOV_IMM(ir, IREG_tag(-1), TAG_VALID);
    fpu_PUSH(block, ir);
    return op_pc + 1;
}
/*FILD m32int: load a 32-bit integer from memory, convert it and push it
  onto the FPU register stack.*/
uint32_t
ropFILDl(codeblock_t *block, ir_data_t *ir, uint8_t UNUSED(opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc)
{
    x86seg *target_seg;
    uop_FP_ENTER(ir);
    uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc);
    op_pc--; /*step back so the EA decoder starts at the ModR/M byte*/
    target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0);
    codegen_check_seg_read(block, ir, target_seg);
    uop_MEM_LOAD_REG(ir, IREG_temp0, ireg_seg_base(target_seg), IREG_eaaddr);
    uop_MOV_DOUBLE_INT(ir, IREG_ST(-1), IREG_temp0);
    uop_MOV_IMM(ir, IREG_tag(-1), TAG_VALID);
    fpu_PUSH(block, ir);
    return op_pc + 1;
}
/*FILD m64int: load a 64-bit integer from memory and push it.  The raw
  integer is kept alongside the converted double and the slot is tagged
  TAG_UINT64 so the exact value can be recovered later.*/
uint32_t
ropFILDq(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc)
{
    x86seg *target_seg;
    uop_FP_ENTER(ir);
    uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc);
    op_pc--; /*step back so the EA decoder starts at the ModR/M byte*/
    target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0);
    codegen_check_seg_read(block, ir, target_seg);
    uop_MEM_LOAD_REG(ir, IREG_ST_i64(-1), ireg_seg_base(target_seg), IREG_eaaddr);
    uop_MOV_DOUBLE_INT(ir, IREG_ST(-1), IREG_ST_i64(-1));
    uop_MOV_IMM(ir, IREG_tag(-1), TAG_VALID | TAG_UINT64);
    fpu_PUSH(block, ir);
    return op_pc + 1;
}
/*FIST m16int: convert ST(0) to a 16-bit integer and store it to memory;
  the stack is not popped.*/
uint32_t
ropFISTw(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc)
{
    x86seg *target_seg;
    uop_FP_ENTER(ir);
    uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc);
    op_pc--; /*step back so the EA decoder starts at the ModR/M byte*/
    target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0);
    codegen_check_seg_write(block, ir, target_seg);
    uop_MOV_INT_DOUBLE(ir, IREG_temp0_W, IREG_ST(0));
    uop_MEM_STORE_REG(ir, ireg_seg_base(target_seg), IREG_eaaddr, IREG_temp0_W);
    /*NOTE(review): FIST does not pop, yet ST(0)'s tag is set to TAG_EMPTY
      here just like in the popping FISTP variant - this looks like it may
      incorrectly mark ST(0) empty; confirm against the interpreter's FIST.*/
    uop_MOV_IMM(ir, IREG_tag(0), TAG_EMPTY);
    return op_pc + 1;
}
/*FISTP m16int: convert ST(0) to a 16-bit integer, store it to memory and
  pop the FPU stack.*/
uint32_t
ropFISTPw(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc)
{
    x86seg *target_seg;
    uop_FP_ENTER(ir);
    uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc);
    op_pc--; /*step back so the EA decoder starts at the ModR/M byte*/
    target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0);
    codegen_check_seg_write(block, ir, target_seg);
    uop_MOV_INT_DOUBLE(ir, IREG_temp0_W, IREG_ST(0));
    uop_MEM_STORE_REG(ir, ireg_seg_base(target_seg), IREG_eaaddr, IREG_temp0_W);
    uop_MOV_IMM(ir, IREG_tag(0), TAG_EMPTY);
    fpu_POP(block, ir);
    return op_pc + 1;
}
/*FIST m32int: convert ST(0) to a 32-bit integer and store it to memory;
  the stack is not popped.*/
uint32_t
ropFISTl(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc)
{
    x86seg *target_seg;
    uop_FP_ENTER(ir);
    uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc);
    op_pc--; /*step back so the EA decoder starts at the ModR/M byte*/
    target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0);
    codegen_check_seg_write(block, ir, target_seg);
    uop_MOV_INT_DOUBLE(ir, IREG_temp0, IREG_ST(0));
    uop_MEM_STORE_REG(ir, ireg_seg_base(target_seg), IREG_eaaddr, IREG_temp0);
    /*NOTE(review): same as ropFISTw - TAG_EMPTY on a non-popping FIST
      looks suspicious; confirm against the interpreter's FIST.*/
    uop_MOV_IMM(ir, IREG_tag(0), TAG_EMPTY);
    return op_pc + 1;
}
/*FISTP m32int: convert ST(0) to a 32-bit integer, store it to memory and
  pop the FPU stack.*/
uint32_t
ropFISTPl(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc)
{
    x86seg *target_seg;
    uop_FP_ENTER(ir);
    uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc);
    op_pc--; /*step back so the EA decoder starts at the ModR/M byte*/
    target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0);
    codegen_check_seg_write(block, ir, target_seg);
    uop_MOV_INT_DOUBLE(ir, IREG_temp0, IREG_ST(0));
    uop_MEM_STORE_REG(ir, ireg_seg_base(target_seg), IREG_eaaddr, IREG_temp0);
    uop_MOV_IMM(ir, IREG_tag(0), TAG_EMPTY);
    fpu_POP(block, ir);
    return op_pc + 1;
}
/*FISTP m64int: convert ST(0) to a 64-bit integer (the uop also receives
  the slot's cached 64-bit integer and tag - presumably to store the exact
  value on the TAG_UINT64 path; confirm in the backend), store it to
  memory and pop the FPU stack.*/
uint32_t
ropFISTPq(codeblock_t *block, ir_data_t *ir, UNUSED(uint8_t opcode), uint32_t fetchdat, uint32_t op_32, uint32_t op_pc)
{
    x86seg *target_seg;
    uop_FP_ENTER(ir);
    uop_MOV_IMM(ir, IREG_oldpc, cpu_state.oldpc);
    op_pc--; /*step back so the EA decoder starts at the ModR/M byte*/
    target_seg = codegen_generate_ea(ir, op_ea_seg, fetchdat, op_ssegs, &op_pc, op_32, 0);
    codegen_check_seg_write(block, ir, target_seg);
    uop_MOV_INT_DOUBLE_64(ir, IREG_temp0_Q, IREG_ST(0), IREG_ST_i64(0), IREG_tag(0));
    uop_MEM_STORE_REG(ir, ireg_seg_base(target_seg), IREG_eaaddr, IREG_temp0_Q);
    uop_MOV_IMM(ir, IREG_tag(0), TAG_EMPTY);
    fpu_POP(block, ir);
    return op_pc + 1;
}
``` | /content/code_sandbox/src/codegen_new/codegen_ops_fpu_loadstore.c | c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 2,943 |
```c
#include <stdint.h>
#include <86box/86box.h>
#include "cpu.h"
#include <86box/mem.h>
#include <86box/plat_unused.h>
#include "codegen.h"
#include "codegen_backend.h"
#include "codegen_ir_defs.h"
#include "codegen_reg.h"
/*Register-allocator state: versions of each IR (guest) register and their
  mapping onto host integer/FP registers.*/
int max_version_refcount;
/*Presumably a bitmask of registers whose current version is dead (no
  remaining readers) - TODO confirm against the users of reg_dead_list.*/
uint16_t reg_dead_list = 0;
uint8_t reg_last_version[IREG_COUNT];       /*latest version number per IR register*/
reg_version_t reg_version[IREG_COUNT][256]; /*per-register version records*/
ir_reg_t invalid_ir_reg = { IREG_INVALID };
/*Current IR contents and dirty state of the host integer registers...*/
ir_reg_t _host_regs[CODEGEN_HOST_REGS];
static uint8_t _host_reg_dirty[CODEGEN_HOST_REGS];
/*...and of the host floating-point registers.*/
ir_reg_t host_fp_regs[CODEGEN_HOST_FP_REGS];
static uint8_t host_fp_reg_dirty[CODEGEN_HOST_FP_REGS];
/*One class of host registers (integer or FP) managed as a unit.*/
typedef struct host_reg_set_t {
    ir_reg_t *regs;           /*what each host register currently holds*/
    uint8_t *dirty;           /*non-zero: host register must be written back*/
    host_reg_def_t *reg_list; /*backend-provided list of usable host registers*/
    uint16_t locked;          /*bitmask of locked registers - confirm exact locking scope*/
    int nr_regs;
} host_reg_set_t;
static host_reg_set_t host_reg_set;
static host_reg_set_t host_fp_reg_set;
/*Native storage size/layout of an IR register.*/
enum {
    REG_BYTE,
    REG_WORD,
    REG_DWORD,
    REG_QWORD,
    REG_POINTER,
    REG_DOUBLE,
    REG_FPU_ST_BYTE,
    REG_FPU_ST_DOUBLE,
    REG_FPU_ST_QWORD
};
/*Register class: allocated from the integer or the FP host set.*/
enum {
    REG_INTEGER,
    REG_FP
};
enum {
    /*Register may be accessed outside of code block, and must be written
      back before any control transfers*/
    REG_PERMANENT = 0,
    /*Register will not be accessed outside of code block, and does not need
      to be written back if there are no readers remaining*/
    REG_VOLATILE = 1
};
/* Per-guest-register descriptor table, indexed by IREG_* number. */
struct
{
    int   native_size; /* one of the REG_* size classes above */
    void *p;           /* address of the backing store, or (for temporaries)
                          a stack offset — values < 256 are treated as
                          offsets into the on-stack temp area, see
                          codegen_reg_load/codegen_reg_writeback */
    int   type;        /* REG_INTEGER or REG_FP */
    int   is_volatile; /* REG_PERMANENT or REG_VOLATILE */
} ireg_data[IREG_COUNT] = {
    [IREG_EAX] = {REG_DWORD, &EAX, REG_INTEGER, REG_PERMANENT},
    [IREG_ECX] = { REG_DWORD, &ECX, REG_INTEGER, REG_PERMANENT},
    [IREG_EDX] = { REG_DWORD, &EDX, REG_INTEGER, REG_PERMANENT},
    [IREG_EBX] = { REG_DWORD, &EBX, REG_INTEGER, REG_PERMANENT},
    [IREG_ESP] = { REG_DWORD, &ESP, REG_INTEGER, REG_PERMANENT},
    [IREG_EBP] = { REG_DWORD, &EBP, REG_INTEGER, REG_PERMANENT},
    [IREG_ESI] = { REG_DWORD, &ESI, REG_INTEGER, REG_PERMANENT},
    [IREG_EDI] = { REG_DWORD, &EDI, REG_INTEGER, REG_PERMANENT},
    [IREG_flags_op] = { REG_DWORD, &cpu_state.flags_op, REG_INTEGER, REG_PERMANENT},
    [IREG_flags_res] = { REG_DWORD, &cpu_state.flags_res, REG_INTEGER, REG_PERMANENT},
    [IREG_flags_op1] = { REG_DWORD, &cpu_state.flags_op1, REG_INTEGER, REG_PERMANENT},
    [IREG_flags_op2] = { REG_DWORD, &cpu_state.flags_op2, REG_INTEGER, REG_PERMANENT},
    [IREG_pc] = { REG_DWORD, &cpu_state.pc, REG_INTEGER, REG_PERMANENT},
    [IREG_oldpc] = { REG_DWORD, &cpu_state.oldpc, REG_INTEGER, REG_PERMANENT},
    [IREG_eaaddr] = { REG_DWORD, &cpu_state.eaaddr, REG_INTEGER, REG_PERMANENT},
    [IREG_ea_seg] = { REG_POINTER, &cpu_state.ea_seg, REG_INTEGER, REG_PERMANENT},
    [IREG_op32] = { REG_DWORD, &cpu_state.op32, REG_INTEGER, REG_PERMANENT},
    [IREG_ssegsx] = { REG_BYTE, &cpu_state.ssegs, REG_INTEGER, REG_PERMANENT},
    [IREG_rm_mod_reg] = { REG_DWORD, &cpu_state.rm_data.rm_mod_reg_data, REG_INTEGER, REG_PERMANENT},
#ifdef USE_ACYCS
    [IREG_acycs] = { REG_DWORD, &acycs, REG_INTEGER, REG_PERMANENT},
#endif
    [IREG_cycles] = { REG_DWORD, &cpu_state._cycles, REG_INTEGER, REG_PERMANENT},
    [IREG_CS_base] = { REG_DWORD, &cpu_state.seg_cs.base, REG_INTEGER, REG_PERMANENT},
    [IREG_DS_base] = { REG_DWORD, &cpu_state.seg_ds.base, REG_INTEGER, REG_PERMANENT},
    [IREG_ES_base] = { REG_DWORD, &cpu_state.seg_es.base, REG_INTEGER, REG_PERMANENT},
    [IREG_FS_base] = { REG_DWORD, &cpu_state.seg_fs.base, REG_INTEGER, REG_PERMANENT},
    [IREG_GS_base] = { REG_DWORD, &cpu_state.seg_gs.base, REG_INTEGER, REG_PERMANENT},
    [IREG_SS_base] = { REG_DWORD, &cpu_state.seg_ss.base, REG_INTEGER, REG_PERMANENT},
    [IREG_CS_seg] = { REG_WORD, &cpu_state.seg_cs.seg, REG_INTEGER, REG_PERMANENT},
    [IREG_DS_seg] = { REG_WORD, &cpu_state.seg_ds.seg, REG_INTEGER, REG_PERMANENT},
    [IREG_ES_seg] = { REG_WORD, &cpu_state.seg_es.seg, REG_INTEGER, REG_PERMANENT},
    [IREG_FS_seg] = { REG_WORD, &cpu_state.seg_fs.seg, REG_INTEGER, REG_PERMANENT},
    [IREG_GS_seg] = { REG_WORD, &cpu_state.seg_gs.seg, REG_INTEGER, REG_PERMANENT},
    [IREG_SS_seg] = { REG_WORD, &cpu_state.seg_ss.seg, REG_INTEGER, REG_PERMANENT},
    [IREG_FPU_TOP] = { REG_DWORD, &cpu_state.TOP, REG_INTEGER, REG_PERMANENT},
    /* NOTE(review): all ST/tag/MM-as-ST entries record the element-0 address;
       the actual slot is selected at load/store time from (reg & 7) in
       codegen_reg_load/codegen_reg_writeback, so p here is only a base. */
    [IREG_ST0] = { REG_FPU_ST_DOUBLE, &cpu_state.ST[0], REG_FP, REG_PERMANENT},
    [IREG_ST1] = { REG_FPU_ST_DOUBLE, &cpu_state.ST[0], REG_FP, REG_PERMANENT},
    [IREG_ST2] = { REG_FPU_ST_DOUBLE, &cpu_state.ST[0], REG_FP, REG_PERMANENT},
    [IREG_ST3] = { REG_FPU_ST_DOUBLE, &cpu_state.ST[0], REG_FP, REG_PERMANENT},
    [IREG_ST4] = { REG_FPU_ST_DOUBLE, &cpu_state.ST[0], REG_FP, REG_PERMANENT},
    [IREG_ST5] = { REG_FPU_ST_DOUBLE, &cpu_state.ST[0], REG_FP, REG_PERMANENT},
    [IREG_ST6] = { REG_FPU_ST_DOUBLE, &cpu_state.ST[0], REG_FP, REG_PERMANENT},
    [IREG_ST7] = { REG_FPU_ST_DOUBLE, &cpu_state.ST[0], REG_FP, REG_PERMANENT},
    [IREG_tag0] = { REG_FPU_ST_BYTE, &cpu_state.tag[0], REG_INTEGER, REG_PERMANENT},
    [IREG_tag1] = { REG_FPU_ST_BYTE, &cpu_state.tag[0], REG_INTEGER, REG_PERMANENT},
    [IREG_tag2] = { REG_FPU_ST_BYTE, &cpu_state.tag[0], REG_INTEGER, REG_PERMANENT},
    [IREG_tag3] = { REG_FPU_ST_BYTE, &cpu_state.tag[0], REG_INTEGER, REG_PERMANENT},
    [IREG_tag4] = { REG_FPU_ST_BYTE, &cpu_state.tag[0], REG_INTEGER, REG_PERMANENT},
    [IREG_tag5] = { REG_FPU_ST_BYTE, &cpu_state.tag[0], REG_INTEGER, REG_PERMANENT},
    [IREG_tag6] = { REG_FPU_ST_BYTE, &cpu_state.tag[0], REG_INTEGER, REG_PERMANENT},
    [IREG_tag7] = { REG_FPU_ST_BYTE, &cpu_state.tag[0], REG_INTEGER, REG_PERMANENT},
    [IREG_ST0_i64] = { REG_FPU_ST_QWORD, &cpu_state.MM[0], REG_FP, REG_PERMANENT},
    [IREG_ST1_i64] = { REG_FPU_ST_QWORD, &cpu_state.MM[0], REG_FP, REG_PERMANENT},
    [IREG_ST2_i64] = { REG_FPU_ST_QWORD, &cpu_state.MM[0], REG_FP, REG_PERMANENT},
    [IREG_ST3_i64] = { REG_FPU_ST_QWORD, &cpu_state.MM[0], REG_FP, REG_PERMANENT},
    [IREG_ST4_i64] = { REG_FPU_ST_QWORD, &cpu_state.MM[0], REG_FP, REG_PERMANENT},
    [IREG_ST5_i64] = { REG_FPU_ST_QWORD, &cpu_state.MM[0], REG_FP, REG_PERMANENT},
    [IREG_ST6_i64] = { REG_FPU_ST_QWORD, &cpu_state.MM[0], REG_FP, REG_PERMANENT},
    [IREG_ST7_i64] = { REG_FPU_ST_QWORD, &cpu_state.MM[0], REG_FP, REG_PERMANENT},
    /* MMX registers are addressed directly (not TOP-relative). */
    [IREG_MM0x] = { REG_QWORD, &cpu_state.MM[0], REG_FP, REG_PERMANENT},
    [IREG_MM1x] = { REG_QWORD, &cpu_state.MM[1], REG_FP, REG_PERMANENT},
    [IREG_MM2x] = { REG_QWORD, &cpu_state.MM[2], REG_FP, REG_PERMANENT},
    [IREG_MM3x] = { REG_QWORD, &cpu_state.MM[3], REG_FP, REG_PERMANENT},
    [IREG_MM4x] = { REG_QWORD, &cpu_state.MM[4], REG_FP, REG_PERMANENT},
    [IREG_MM5x] = { REG_QWORD, &cpu_state.MM[5], REG_FP, REG_PERMANENT},
    [IREG_MM6x] = { REG_QWORD, &cpu_state.MM[6], REG_FP, REG_PERMANENT},
    [IREG_MM7x] = { REG_QWORD, &cpu_state.MM[7], REG_FP, REG_PERMANENT},
    [IREG_NPXCx] = { REG_WORD, &cpu_state.npxc, REG_INTEGER, REG_PERMANENT},
    [IREG_NPXSx] = { REG_WORD, &cpu_state.npxs, REG_INTEGER, REG_PERMANENT},
    [IREG_flagsx] = { REG_WORD, &cpu_state.flags, REG_INTEGER, REG_PERMANENT},
    [IREG_eflagsx] = { REG_WORD, &cpu_state.eflags, REG_INTEGER, REG_PERMANENT},
    [IREG_CS_limit_low] = { REG_DWORD, &cpu_state.seg_cs.limit_low, REG_INTEGER, REG_PERMANENT},
    [IREG_DS_limit_low] = { REG_DWORD, &cpu_state.seg_ds.limit_low, REG_INTEGER, REG_PERMANENT},
    [IREG_ES_limit_low] = { REG_DWORD, &cpu_state.seg_es.limit_low, REG_INTEGER, REG_PERMANENT},
    [IREG_FS_limit_low] = { REG_DWORD, &cpu_state.seg_fs.limit_low, REG_INTEGER, REG_PERMANENT},
    [IREG_GS_limit_low] = { REG_DWORD, &cpu_state.seg_gs.limit_low, REG_INTEGER, REG_PERMANENT},
    [IREG_SS_limit_low] = { REG_DWORD, &cpu_state.seg_ss.limit_low, REG_INTEGER, REG_PERMANENT},
    [IREG_CS_limit_high] = { REG_DWORD, &cpu_state.seg_cs.limit_high, REG_INTEGER, REG_PERMANENT},
    [IREG_DS_limit_high] = { REG_DWORD, &cpu_state.seg_ds.limit_high, REG_INTEGER, REG_PERMANENT},
    [IREG_ES_limit_high] = { REG_DWORD, &cpu_state.seg_es.limit_high, REG_INTEGER, REG_PERMANENT},
    [IREG_FS_limit_high] = { REG_DWORD, &cpu_state.seg_fs.limit_high, REG_INTEGER, REG_PERMANENT},
    [IREG_GS_limit_high] = { REG_DWORD, &cpu_state.seg_gs.limit_high, REG_INTEGER, REG_PERMANENT},
    [IREG_SS_limit_high] = { REG_DWORD, &cpu_state.seg_ss.limit_high, REG_INTEGER, REG_PERMANENT},
    /*Temporary registers are stored on the stack, and are not guaranteed to
      be preserved across uOPs. They will not be written back if they will
      not be read again.*/
    [IREG_temp0] = { REG_DWORD, (void *) 16, REG_INTEGER, REG_VOLATILE },
    [IREG_temp1] = { REG_DWORD, (void *) 20, REG_INTEGER, REG_VOLATILE },
    [IREG_temp2] = { REG_DWORD, (void *) 24, REG_INTEGER, REG_VOLATILE },
    [IREG_temp3] = { REG_DWORD, (void *) 28, REG_INTEGER, REG_VOLATILE },
    [IREG_temp0d] = { REG_DOUBLE, (void *) 40, REG_FP, REG_VOLATILE },
    [IREG_temp1d] = { REG_DOUBLE, (void *) 48, REG_FP, REG_VOLATILE },
};
/* Mark the final version of every permanent guest register as required, so
   it is always written back before the block exits, even if nothing within
   the block reads it again. */
void
codegen_reg_mark_as_required(void)
{
    /* Use int, not uint8_t, for the counter: a uint8_t would wrap to 0 and
       loop forever if IREG_COUNT ever exceeded 255. */
    for (int reg = 0; reg < IREG_COUNT; reg++) {
        int last_version = reg_last_version[reg];

        if (last_version > 0 && ireg_data[reg].is_volatile == REG_PERMANENT)
            reg_version[reg][last_version].flags |= REG_FLAGS_REQUIRED;
    }
}
/* Return non-zero if the access size encoded in ir_reg matches the native
   storage size of the underlying guest register. */
int
reg_is_native_size(ir_reg_t ir_reg)
{
    const int size_class = ireg_data[IREG_GET_REG(ir_reg.reg)].native_size;
    const int wanted     = IREG_GET_SIZE(ir_reg.reg);

    switch (size_class) {
        case REG_BYTE:
        case REG_FPU_ST_BYTE:
            return wanted == IREG_SIZE_B;

        case REG_WORD:
            return wanted == IREG_SIZE_W;

        case REG_DWORD:
            return wanted == IREG_SIZE_L;

        case REG_QWORD:
        case REG_FPU_ST_QWORD:
        case REG_DOUBLE:
        case REG_FPU_ST_DOUBLE:
            return (wanted == IREG_SIZE_D) || (wanted == IREG_SIZE_Q);

        case REG_POINTER:
            /* Pointer-sized: matches L on 32-bit hosts, Q on 64-bit hosts. */
            return (sizeof(void *) == 4) ? (wanted == IREG_SIZE_L) : (wanted == IREG_SIZE_Q);

        default:
            fatal("get_reg_is_native_size: unknown native size %i\n", size_class);
    }

    return 0;
}
/* Reset the register allocator: rebind both host register sets to their
   backing arrays, clear all version tracking and invalidate every cached
   host register. */
void
codegen_reg_reset(void)
{
    int i;

    host_reg_set.regs     = _host_regs;
    host_reg_set.dirty    = _host_reg_dirty;
    host_reg_set.reg_list = codegen_host_reg_list;
    host_reg_set.locked   = 0;
    host_reg_set.nr_regs  = CODEGEN_HOST_REGS;

    host_fp_reg_set.regs     = host_fp_regs;
    host_fp_reg_set.dirty    = host_fp_reg_dirty;
    host_fp_reg_set.reg_list = codegen_host_fp_reg_list;
    host_fp_reg_set.locked   = 0;
    host_fp_reg_set.nr_regs  = CODEGEN_HOST_FP_REGS;

    /* No guest register has any versions yet. */
    for (i = 0; i < IREG_COUNT; i++) {
        reg_last_version[i]        = 0;
        reg_version[i][0].refcount = 0;
    }

    /* Nothing is cached in any host register. */
    for (i = 0; i < CODEGEN_HOST_REGS; i++) {
        host_reg_set.regs[i]  = invalid_ir_reg;
        host_reg_set.dirty[i] = 0;
    }
    for (i = 0; i < CODEGEN_HOST_FP_REGS; i++) {
        host_fp_reg_set.regs[i]  = invalid_ir_reg;
        host_fp_reg_set.dirty[i] = 0;
    }

    reg_dead_list        = 0;
    max_version_refcount = 0;
}
/* Number of outstanding readers of this exact register version. */
static inline int
ir_get_refcount(ir_reg_t ir_reg)
{
    const int reg = IREG_GET_REG(ir_reg.reg);

    return reg_version[reg][ir_reg.version].refcount;
}
/* Select the host register file for a guest register: integer registers go
   to host_reg_set, everything else (FP/MMX/double temporaries) to
   host_fp_reg_set. */
static inline host_reg_set_t *
get_reg_set(ir_reg_t ir_reg)
{
    return (ireg_data[IREG_GET_REG(ir_reg.reg)].type == REG_INTEGER)
               ? &host_reg_set
               : &host_fp_reg_set;
}
/* Emit host code to load guest register 'ir_reg' into slot 'c' of
   'reg_set', then record the slot -> guest-register mapping.
   A backing pointer below 256 is not a real address but an offset into the
   on-stack temporary area (see the IREG_temp* entries of ireg_data), so the
   *_stack load variants are used in that case.
   NOTE(review): there is no REG_BYTE case here, only in the writeback path;
   presumably byte-native registers are never loaded through this function —
   confirm against the uOP generators. */
static void
codegen_reg_load(host_reg_set_t *reg_set, codeblock_t *block, int c, ir_reg_t ir_reg)
{
    switch (ireg_data[IREG_GET_REG(ir_reg.reg)].native_size) {
        case REG_WORD:
#ifndef RELEASE_BUILD
            if (ireg_data[IREG_GET_REG(ir_reg.reg)].type != REG_INTEGER)
                fatal("codegen_reg_load - REG_WORD !REG_INTEGER\n");
#endif
            if ((uintptr_t) ireg_data[IREG_GET_REG(ir_reg.reg)].p < 256)
                codegen_direct_read_16_stack(block, reg_set->reg_list[c].reg, (intptr_t) ireg_data[IREG_GET_REG(ir_reg.reg)].p);
            else
                codegen_direct_read_16(block, reg_set->reg_list[c].reg, ireg_data[IREG_GET_REG(ir_reg.reg)].p);
            break;

        case REG_DWORD:
#ifndef RELEASE_BUILD
            if (ireg_data[IREG_GET_REG(ir_reg.reg)].type != REG_INTEGER)
                fatal("codegen_reg_load - REG_DWORD !REG_INTEGER\n");
#endif
            if ((uintptr_t) ireg_data[IREG_GET_REG(ir_reg.reg)].p < 256)
                codegen_direct_read_32_stack(block, reg_set->reg_list[c].reg, (intptr_t) ireg_data[IREG_GET_REG(ir_reg.reg)].p);
            else
                codegen_direct_read_32(block, reg_set->reg_list[c].reg, ireg_data[IREG_GET_REG(ir_reg.reg)].p);
            break;

        case REG_QWORD:
#ifndef RELEASE_BUILD
            if (ireg_data[IREG_GET_REG(ir_reg.reg)].type != REG_FP)
                fatal("codegen_reg_load - REG_QWORD !REG_FP\n");
#endif
            if ((uintptr_t) ireg_data[IREG_GET_REG(ir_reg.reg)].p < 256)
                codegen_direct_read_64_stack(block, reg_set->reg_list[c].reg, (intptr_t) ireg_data[IREG_GET_REG(ir_reg.reg)].p);
            else
                codegen_direct_read_64(block, reg_set->reg_list[c].reg, ireg_data[IREG_GET_REG(ir_reg.reg)].p);
            break;

        case REG_POINTER:
#ifndef RELEASE_BUILD
            if (ireg_data[IREG_GET_REG(ir_reg.reg)].type != REG_INTEGER)
                fatal("codegen_reg_load - REG_POINTER !REG_INTEGER\n");
#endif
            if ((uintptr_t) ireg_data[IREG_GET_REG(ir_reg.reg)].p < 256)
                codegen_direct_read_pointer_stack(block, reg_set->reg_list[c].reg, (intptr_t) ireg_data[IREG_GET_REG(ir_reg.reg)].p);
            else
                codegen_direct_read_pointer(block, reg_set->reg_list[c].reg, ireg_data[IREG_GET_REG(ir_reg.reg)].p);
            break;

        case REG_DOUBLE:
#ifndef RELEASE_BUILD
            if (ireg_data[IREG_GET_REG(ir_reg.reg)].type != REG_FP)
                fatal("codegen_reg_load - REG_DOUBLE !REG_FP\n");
#endif
            if ((uintptr_t) ireg_data[IREG_GET_REG(ir_reg.reg)].p < 256)
                codegen_direct_read_double_stack(block, reg_set->reg_list[c].reg, (intptr_t) ireg_data[IREG_GET_REG(ir_reg.reg)].p);
            else
                codegen_direct_read_double(block, reg_set->reg_list[c].reg, ireg_data[IREG_GET_REG(ir_reg.reg)].p);
            break;

        /* FPU stack entries: with a statically-known TOP the element can be
           addressed directly from (reg & 7); otherwise the backend's
           ST-relative read is used. */
        case REG_FPU_ST_BYTE:
#ifndef RELEASE_BUILD
            if (ireg_data[IREG_GET_REG(ir_reg.reg)].type != REG_INTEGER)
                fatal("codegen_reg_load - REG_FPU_ST_BYTE !REG_INTEGER\n");
#endif
            if (block->flags & CODEBLOCK_STATIC_TOP)
                codegen_direct_read_8(block, reg_set->reg_list[c].reg, &cpu_state.tag[ir_reg.reg & 7]);
            else
                codegen_direct_read_st_8(block, reg_set->reg_list[c].reg, &cpu_state.tag[0], ir_reg.reg & 7);
            break;

        case REG_FPU_ST_QWORD:
#ifndef RELEASE_BUILD
            if (ireg_data[IREG_GET_REG(ir_reg.reg)].type != REG_FP)
                fatal("codegen_reg_load - REG_FPU_ST_QWORD !REG_FP\n");
#endif
            if (block->flags & CODEBLOCK_STATIC_TOP)
                codegen_direct_read_64(block, reg_set->reg_list[c].reg, &cpu_state.MM[ir_reg.reg & 7]);
            else
                codegen_direct_read_st_64(block, reg_set->reg_list[c].reg, &cpu_state.MM[0], ir_reg.reg & 7);
            break;

        case REG_FPU_ST_DOUBLE:
#ifndef RELEASE_BUILD
            if (ireg_data[IREG_GET_REG(ir_reg.reg)].type != REG_FP)
                fatal("codegen_reg_load - REG_FPU_ST_DOUBLE !REG_FP\n");
#endif
            if (block->flags & CODEBLOCK_STATIC_TOP)
                codegen_direct_read_double(block, reg_set->reg_list[c].reg, &cpu_state.ST[ir_reg.reg & 7]);
            else
                codegen_direct_read_st_double(block, reg_set->reg_list[c].reg, &cpu_state.ST[0], ir_reg.reg & 7);
            break;

        default:
            fatal("codegen_reg_load - native_size=%i reg=%i\n", ireg_data[IREG_GET_REG(ir_reg.reg)].native_size, IREG_GET_REG(ir_reg.reg));
    }
    /* Record which guest register (and version) this host slot now holds. */
    reg_set->regs[c] = ir_reg;
}
/* Emit host code to store host register slot 'c' of 'reg_set' back to its
   guest backing store.  Skipped entirely for volatile registers whose
   version has no readers left.  If 'invalidate' is set the slot mapping is
   cleared afterwards; the dirty flag is always cleared. */
static void
codegen_reg_writeback(host_reg_set_t *reg_set, codeblock_t *block, int c, int invalidate)
{
    int ir_reg = IREG_GET_REG(reg_set->regs[c].reg);
    void *p = ireg_data[ir_reg].p;

    /* A dead volatile temporary never needs to reach memory. */
    if (!reg_version[ir_reg][reg_set->regs[c].version].refcount && ireg_data[ir_reg].is_volatile)
        return;

    switch (ireg_data[ir_reg].native_size) {
        case REG_BYTE:
#ifndef RELEASE_BUILD
            if (ireg_data[ir_reg].type != REG_INTEGER)
                fatal("codegen_reg_writeback - REG_BYTE !REG_INTEGER\n");
            if ((uintptr_t) p < 256)
                fatal("codegen_reg_writeback - REG_BYTE %p\n", p);
#endif
            codegen_direct_write_8(block, p, reg_set->reg_list[c].reg);
            break;

        case REG_WORD:
#ifndef RELEASE_BUILD
            if (ireg_data[ir_reg].type != REG_INTEGER)
                fatal("codegen_reg_writeback - REG_WORD !REG_INTEGER\n");
            if ((uintptr_t) p < 256)
                fatal("codegen_reg_writeback - REG_WORD %p\n", p);
#endif
            codegen_direct_write_16(block, p, reg_set->reg_list[c].reg);
            break;

        /* p < 256 means an on-stack temporary slot, not a real address -
           see the IREG_temp* entries of ireg_data. */
        case REG_DWORD:
#ifndef RELEASE_BUILD
            if (ireg_data[ir_reg].type != REG_INTEGER)
                fatal("codegen_reg_writeback - REG_DWORD !REG_INTEGER\n");
#endif
            if ((uintptr_t) p < 256)
                codegen_direct_write_32_stack(block, (intptr_t) p, reg_set->reg_list[c].reg);
            else
                codegen_direct_write_32(block, p, reg_set->reg_list[c].reg);
            break;

        case REG_QWORD:
#ifndef RELEASE_BUILD
            if (ireg_data[ir_reg].type != REG_FP)
                fatal("codegen_reg_writeback - REG_QWORD !REG_FP\n");
#endif
            if ((uintptr_t) p < 256)
                codegen_direct_write_64_stack(block, (intptr_t) p, reg_set->reg_list[c].reg);
            else
                codegen_direct_write_64(block, p, reg_set->reg_list[c].reg);
            break;

        case REG_POINTER:
#ifndef RELEASE_BUILD
            if (ireg_data[ir_reg].type != REG_INTEGER)
                fatal("codegen_reg_writeback - REG_POINTER !REG_INTEGER\n");
            if ((uintptr_t) p < 256)
                fatal("codegen_reg_writeback - REG_POINTER %p\n", p);
#endif
            codegen_direct_write_ptr(block, p, reg_set->reg_list[c].reg);
            break;

        case REG_DOUBLE:
#ifndef RELEASE_BUILD
            if (ireg_data[ir_reg].type != REG_FP)
                fatal("codegen_reg_writeback - REG_DOUBLE !REG_FP\n");
#endif
            if ((uintptr_t) p < 256)
                codegen_direct_write_double_stack(block, (intptr_t) p, reg_set->reg_list[c].reg);
            else
                codegen_direct_write_double(block, p, reg_set->reg_list[c].reg);
            break;

        /* FPU stack entries: direct element store when TOP is statically
           known, ST-relative store otherwise. */
        case REG_FPU_ST_BYTE:
#ifndef RELEASE_BUILD
            if (ireg_data[ir_reg].type != REG_INTEGER)
                fatal("codegen_reg_writeback - REG_FPU_ST_BYTE !REG_INTEGER\n");
#endif
            if (block->flags & CODEBLOCK_STATIC_TOP)
                codegen_direct_write_8(block, &cpu_state.tag[reg_set->regs[c].reg & 7], reg_set->reg_list[c].reg);
            else
                codegen_direct_write_st_8(block, &cpu_state.tag[0], reg_set->regs[c].reg & 7, reg_set->reg_list[c].reg);
            break;

        case REG_FPU_ST_QWORD:
#ifndef RELEASE_BUILD
            if (ireg_data[ir_reg].type != REG_FP)
                fatal("codegen_reg_writeback - REG_FPU_ST_QWORD !REG_FP\n");
#endif
            if (block->flags & CODEBLOCK_STATIC_TOP)
                codegen_direct_write_64(block, &cpu_state.MM[reg_set->regs[c].reg & 7], reg_set->reg_list[c].reg);
            else
                codegen_direct_write_st_64(block, &cpu_state.MM[0], reg_set->regs[c].reg & 7, reg_set->reg_list[c].reg);
            break;

        case REG_FPU_ST_DOUBLE:
#ifndef RELEASE_BUILD
            if (ireg_data[ir_reg].type != REG_FP)
                fatal("codegen_reg_writeback - REG_FPU_ST_DOUBLE !REG_FP\n");
#endif
            if (block->flags & CODEBLOCK_STATIC_TOP)
                codegen_direct_write_double(block, &cpu_state.ST[reg_set->regs[c].reg & 7], reg_set->reg_list[c].reg);
            else
                codegen_direct_write_st_double(block, &cpu_state.ST[0], reg_set->regs[c].reg & 7, reg_set->reg_list[c].reg);
            break;

        default:
            fatal("codegen_reg_flush - native_size=%i\n", ireg_data[ir_reg].native_size);
    }

    if (invalidate)
        reg_set->regs[c] = invalid_ir_reg;
    reg_set->dirty[c] = 0;
}
#ifdef CODEGEN_BACKEND_HAS_MOV_IMM
/* Store a 32-bit immediate directly to a guest register's backing store,
   bypassing host register allocation entirely.  Only byte/word/dword
   integer registers are supported; any other size class is fatal().
   Only compiled when the backend advertises mov-immediate support. */
void
codegen_reg_write_imm(codeblock_t *block, ir_reg_t ir_reg, uint32_t imm_data)
{
    int reg_idx = IREG_GET_REG(ir_reg.reg);
    void *p = ireg_data[reg_idx].p;

    switch (ireg_data[reg_idx].native_size) {
        case REG_BYTE:
# ifndef RELEASE_BUILD
            if ((uintptr_t) p < 256)
                fatal("codegen_reg_write_imm - REG_BYTE %p\n", p);
# endif
            codegen_direct_write_8_imm(block, p, imm_data);
            break;

        case REG_WORD:
# ifndef RELEASE_BUILD
            if ((uintptr_t) p < 256)
                fatal("codegen_reg_write_imm - REG_WORD %p\n", p);
# endif
            codegen_direct_write_16_imm(block, p, imm_data);
            break;

        /* p < 256 is an on-stack temporary slot, not a real address. */
        case REG_DWORD:
            if ((uintptr_t) p < 256)
                codegen_direct_write_32_imm_stack(block, (int) ((uintptr_t) p), imm_data);
            else
                codegen_direct_write_32_imm(block, p, imm_data);
            break;

        case REG_POINTER:
        case REG_QWORD:
        case REG_DOUBLE:
        case REG_FPU_ST_BYTE:
        case REG_FPU_ST_QWORD:
        case REG_FPU_ST_DOUBLE:
        default:
            fatal("codegen_reg_write_imm - native_size=%i\n", ireg_data[reg_idx].native_size);
    }
}
#endif
/* Lock the host register slot (if any) currently caching ir_reg, so it
   cannot be evicted while the current uOP's operands are being allocated. */
static void
alloc_reg(ir_reg_t ir_reg)
{
    host_reg_set_t *set = get_reg_set(ir_reg);
    const int count = (set == &host_reg_set) ? CODEGEN_HOST_REGS : CODEGEN_HOST_FP_REGS;

    for (int idx = 0; idx < count; idx++) {
        if (IREG_GET_REG(set->regs[idx].reg) != IREG_GET_REG(ir_reg.reg))
            continue;
#ifndef RELEASE_BUILD
        if (set->regs[idx].version != ir_reg.version)
            fatal("alloc_reg - host_regs[c].version != ir_reg.version %i %p %p %i %i\n", idx, set, &host_reg_set, set->regs[idx].reg, ir_reg.reg);
#endif
        set->locked |= (1 << idx);
        return;
    }
}
/* Lock the host register slot that will receive the destination register.
   The cached version is normally one of: the exact destination version, or
   an earlier version whose remaining refcount equals the number of source
   operands that alias the destination (dest_reference) - intermediate
   versions may have been optimised out, so search backwards.
   Fix: the original text had '&reg_version' mangled to the mojibake
   '(R)_version' (HTML '&reg;' entity corruption), which does not compile. */
static void
alloc_dest_reg(ir_reg_t ir_reg, int dest_reference)
{
    host_reg_set_t *reg_set = get_reg_set(ir_reg);
    int nr_regs = (reg_set == &host_reg_set) ? CODEGEN_HOST_REGS : CODEGEN_HOST_FP_REGS;

    for (int c = 0; c < nr_regs; c++) {
        if (IREG_GET_REG(reg_set->regs[c].reg) == IREG_GET_REG(ir_reg.reg)) {
            if (reg_set->regs[c].version == ir_reg.version) {
                reg_set->locked |= (1 << c);
            } else {
                /*The immediate prior version may have been
                  optimised out, so search backwards to find the
                  last valid version*/
                int prev_version = ir_reg.version - 1;

                while (prev_version >= 0) {
                    const reg_version_t *regv = &reg_version[IREG_GET_REG(reg_set->regs[c].reg)][prev_version];

                    if (!(regv->flags & REG_FLAGS_DEAD) && regv->refcount == dest_reference) {
                        reg_set->locked |= (1 << c);
                        return;
                    }
                    prev_version--;
                }
                fatal("codegen_reg_alloc_register - host_regs[c].version != dest_reg_a.version %i,%i %i\n", reg_set->regs[c].version, ir_reg.version, dest_reference);
            }
            return;
        }
    }
}
/* Lock the host registers backing a uOP's source operands and (if present)
   its destination.  dest_reference counts how many sources are the
   immediately-preceding version of the destination; alloc_dest_reg uses it
   to validate reuse of a stale cached version. */
void
codegen_reg_alloc_register(ir_reg_t dest_reg_a, ir_reg_t src_reg_a, ir_reg_t src_reg_b, ir_reg_t src_reg_c)
{
    ir_reg_t srcs[3];
    int dest_reference = 0;
    int i;

    srcs[0] = src_reg_a;
    srcs[1] = src_reg_b;
    srcs[2] = src_reg_c;

    host_reg_set.locked    = 0;
    host_fp_reg_set.locked = 0;

    if (!ir_reg_is_invalid(dest_reg_a)) {
        for (i = 0; i < 3; i++) {
            if (!ir_reg_is_invalid(srcs[i]) && IREG_GET_REG(srcs[i].reg) == IREG_GET_REG(dest_reg_a.reg) && srcs[i].version == dest_reg_a.version - 1)
                dest_reference++;
        }
    }

    for (i = 0; i < 3; i++) {
        if (!ir_reg_is_invalid(srcs[i]))
            alloc_reg(srcs[i]);
    }

    if (!ir_reg_is_invalid(dest_reg_a))
        alloc_dest_reg(dest_reg_a, dest_reference);
}
/* Allocate a host register holding guest register 'ir_reg' for reading,
   loading it from memory if necessary, and consume one read reference.
   Selection order: exact cached version; an older cached version (its
   refcount is bumped here so the decrement below balances); otherwise an
   unlocked slot with no pending reads; otherwise any unlocked slot (its
   dirty contents are written back first).  Optionally returns the chosen
   slot index via host_reg_idx.  The returned value combines the host
   register number with the requested access size. */
ir_host_reg_t
codegen_reg_alloc_read_reg(codeblock_t *block, ir_reg_t ir_reg, int *host_reg_idx)
{
    host_reg_set_t *reg_set = get_reg_set(ir_reg);
    int c;

    /*Search for required register*/
    for (c = 0; c < reg_set->nr_regs; c++) {
        if (!ir_reg_is_invalid(reg_set->regs[c]) && IREG_GET_REG(reg_set->regs[c].reg) == IREG_GET_REG(ir_reg.reg) && reg_set->regs[c].version == ir_reg.version)
            break;
        /* An older cached version can also satisfy the read; take an extra
           reference on it so the unconditional decrement below is balanced. */
        if (!ir_reg_is_invalid(reg_set->regs[c]) && IREG_GET_REG(reg_set->regs[c].reg) == IREG_GET_REG(ir_reg.reg) && reg_set->regs[c].version <= ir_reg.version) {
            reg_version[IREG_GET_REG(reg_set->regs[c].reg)][reg_set->regs[c].version].refcount++;
            break;
        }
#ifndef RELEASE_BUILD
        if (!ir_reg_is_invalid(reg_set->regs[c]) && IREG_GET_REG(reg_set->regs[c].reg) == IREG_GET_REG(ir_reg.reg) && reg_version[IREG_GET_REG(reg_set->regs[c].reg)][reg_set->regs[c].version].refcount)
            fatal("codegen_reg_alloc_read_reg - version mismatch!\n");
#endif
    }
    if (c == reg_set->nr_regs) {
        /*No unused registers. Search for an unlocked register with no pending reads*/
        for (c = 0; c < reg_set->nr_regs; c++) {
            if (!(reg_set->locked & (1 << c)) && IREG_GET_REG(reg_set->regs[c].reg) != IREG_INVALID && !ir_get_refcount(reg_set->regs[c]))
                break;
        }
        if (c == reg_set->nr_regs) {
            /*Search for any unlocked register*/
            for (c = 0; c < reg_set->nr_regs; c++) {
                if (!(reg_set->locked & (1 << c)))
                    break;
            }
#ifndef RELEASE_BUILD
            if (c == reg_set->nr_regs)
                fatal("codegen_reg_alloc_read_reg - out of registers\n");
#endif
        }
        /* Evicting a dirty value: spill it to memory before reuse. */
        if (reg_set->dirty[c])
            codegen_reg_writeback(reg_set, block, c, 1);
        codegen_reg_load(reg_set, block, c, ir_reg);
        reg_set->locked |= (1 << c);
        reg_set->dirty[c] = 0;
    }

    /* One read of this version has now been satisfied. */
    reg_version[IREG_GET_REG(reg_set->regs[c].reg)][reg_set->regs[c].version].refcount--;
#ifndef RELEASE_BUILD
    if (reg_version[IREG_GET_REG(reg_set->regs[c].reg)][reg_set->regs[c].version].refcount == (uint8_t) -1)
        fatal("codegen_reg_alloc_read_reg - refcount < 0\n");
#endif

    if (host_reg_idx)
        *host_reg_idx = c;
    return reg_set->reg_list[c].reg | IREG_GET_SIZE(ir_reg.reg);
}
/* Allocate a host register to receive a new version of guest register
   'ir_reg' and mark it dirty.  For sub-native-size writes the parent
   register is first loaded so the untouched bits are preserved.  Otherwise
   the slot holding a previous version is reused if present; failing that an
   unused slot, then any unlocked slot (spilling its dirty contents).
   Returns the host register number combined with the requested size. */
ir_host_reg_t
codegen_reg_alloc_write_reg(codeblock_t *block, ir_reg_t ir_reg)
{
    host_reg_set_t *reg_set = get_reg_set(ir_reg);
    int c;

    if (!reg_is_native_size(ir_reg)) {
        /*Read in parent register so we can do partial accesses to it*/
        ir_reg_t parent_reg;

        parent_reg.reg = IREG_GET_REG(ir_reg.reg) | IREG_SIZE_L;
        parent_reg.version = ir_reg.version - 1;
        /* Extra reference so the read below does not retire the version. */
        reg_version[IREG_GET_REG(ir_reg.reg)][ir_reg.version - 1].refcount++;
        codegen_reg_alloc_read_reg(block, parent_reg, &c);
#ifndef RELEASE_BUILD
        if (IREG_GET_REG(reg_set->regs[c].reg) != IREG_GET_REG(ir_reg.reg) || reg_set->regs[c].version > ir_reg.version - 1)
            fatal("codegen_reg_alloc_write_reg sub_reg - doesn't match %i %02x.%i %02x.%i\n", c,
                  reg_set->regs[c].reg, reg_set->regs[c].version,
                  ir_reg.reg, ir_reg.version);
#endif
        reg_set->regs[c].reg = ir_reg.reg;
        reg_set->regs[c].version = ir_reg.version;
        reg_set->dirty[c] = 1;
        return reg_set->reg_list[c].reg | IREG_GET_SIZE(ir_reg.reg);
    }

    /*Search for previous version in host register*/
    for (c = 0; c < reg_set->nr_regs; c++) {
        if (!ir_reg_is_invalid(reg_set->regs[c]) && IREG_GET_REG(reg_set->regs[c].reg) == IREG_GET_REG(ir_reg.reg)) {
            if (reg_set->regs[c].version <= ir_reg.version - 1) {
#ifndef RELEASE_BUILD
                if (reg_version[IREG_GET_REG(reg_set->regs[c].reg)][reg_set->regs[c].version].refcount != 0)
                    fatal("codegen_reg_alloc_write_reg - previous version refcount != 0\n");
#endif
                break;
            }
        }
    }
    if (c == reg_set->nr_regs) {
        /*Search for unused registers*/
        for (c = 0; c < reg_set->nr_regs; c++) {
            if (ir_reg_is_invalid(reg_set->regs[c]))
                break;
        }
        if (c == reg_set->nr_regs) {
            /*No unused registers. Search for an unlocked register*/
            for (c = 0; c < reg_set->nr_regs; c++) {
                if (!(reg_set->locked & (1 << c)))
                    break;
            }
#ifndef RELEASE_BUILD
            if (c == reg_set->nr_regs)
                fatal("codegen_reg_alloc_write_reg - out of registers\n");
#endif
            /* Evicting a dirty value: spill it to memory before reuse. */
            if (reg_set->dirty[c])
                codegen_reg_writeback(reg_set, block, c, 1);
        }
    }

    reg_set->regs[c].reg = ir_reg.reg;
    reg_set->regs[c].version = ir_reg.version;
    reg_set->dirty[c] = 1;
    return reg_set->reg_list[c].reg | IREG_GET_SIZE(ir_reg.reg);
}
#ifdef CODEGEN_BACKEND_HAS_MOV_IMM
/* Return non-zero if some earlier version of guest register 'ir_reg' is
   currently cached in a host register (used to decide whether an immediate
   store can bypass allocation).
   Fix: the fatal() diagnostic named codegen_reg_alloc_write_reg (copy-paste
   from the function above); it now reports this function's own name. */
int
codegen_reg_is_loaded(ir_reg_t ir_reg)
{
    const host_reg_set_t *reg_set = get_reg_set(ir_reg);

    /*Search for previous version in host register*/
    for (int c = 0; c < reg_set->nr_regs; c++) {
        if (!ir_reg_is_invalid(reg_set->regs[c]) && IREG_GET_REG(reg_set->regs[c].reg) == IREG_GET_REG(ir_reg.reg)) {
            if (reg_set->regs[c].version <= ir_reg.version - 1) {
# ifndef RELEASE_BUILD
                if (reg_version[IREG_GET_REG(reg_set->regs[c].reg)][reg_set->regs[c].version].refcount != 0)
                    fatal("codegen_reg_is_loaded - previous version refcount != 0\n");
# endif
                return 1;
            }
        }
    }

    return 0;
}
#endif
/* Retarget the host register currently caching 'src' so that it caches
   'dst' instead.  Any dirty value is written back first (mapping kept),
   and stale cached copies of 'dst' in other slots are dropped. */
void
codegen_reg_rename(codeblock_t *block, ir_reg_t src, ir_reg_t dst)
{
    host_reg_set_t *set = get_reg_set(src);
    int idx;
    int target;

#if 0
    pclog("rename: %i.%i -> %i.%i\n", src.reg,src.version, dst.reg, dst.version);
#endif
    /* Locate the slot holding exactly src (register number and version). */
    for (idx = 0; idx < set->nr_regs; idx++) {
        if (!ir_reg_is_invalid(set->regs[idx]) && IREG_GET_REG(set->regs[idx].reg) == IREG_GET_REG(src.reg) && set->regs[idx].version == src.version)
            break;
    }
#ifndef RELEASE_BUILD
    if (idx == set->nr_regs)
        fatal("codegen_reg_rename: Can't find register to rename\n");
#endif
    target = idx;

    if (set->dirty[target])
        codegen_reg_writeback(set, block, target, 0);
    set->regs[target]  = dst;
    set->dirty[target] = 1;
#if 0
    pclog("renamed reg %i dest=%i.%i\n", target, dst.reg, dst.version);
#endif

    /*Invalidate any stale copies of the dest register*/
    for (idx = 0; idx < set->nr_regs; idx++) {
        if (idx == target)
            continue;
        if (!ir_reg_is_invalid(set->regs[idx]) && IREG_GET_REG(set->regs[idx].reg) == IREG_GET_REG(dst.reg)) {
            set->regs[idx]  = invalid_ir_reg;
            set->dirty[idx] = 0;
        }
    }
}
/* Write back every dirty host register in both sets (mappings are kept),
   then drop any slot flagged HOST_REG_FLAG_VOLATILE. */
void
codegen_reg_flush(UNUSED(ir_data_t *ir), codeblock_t *block)
{
    host_reg_set_t *sets[2];

    sets[0] = &host_reg_set;
    sets[1] = &host_fp_reg_set;

    for (int s = 0; s < 2; s++) {
        host_reg_set_t *set = sets[s];

        for (int c = 0; c < set->nr_regs; c++) {
            if (!ir_reg_is_invalid(set->regs[c]) && set->dirty[c])
                codegen_reg_writeback(set, block, c, 0);
            if (set->reg_list[c].flags & HOST_REG_FLAG_VOLATILE) {
                set->regs[c]  = invalid_ir_reg;
                set->dirty[c] = 0;
            }
        }
    }
}
/* Write back every dirty host register in both sets, then invalidate all
   slots unconditionally (nothing stays cached afterwards). */
void
codegen_reg_flush_invalidate(UNUSED(ir_data_t *ir), codeblock_t *block)
{
    host_reg_set_t *sets[2];

    sets[0] = &host_reg_set;
    sets[1] = &host_fp_reg_set;

    for (int s = 0; s < 2; s++) {
        host_reg_set_t *set = sets[s];

        for (int c = 0; c < set->nr_regs; c++) {
            if (!ir_reg_is_invalid(set->regs[c]) && set->dirty[c])
                codegen_reg_writeback(set, block, c, 1);
            set->regs[c]  = invalid_ir_reg;
            set->dirty[c] = 0;
        }
    }
}
/*Process dead register list, and optimise out register versions and uOPs where
  possible*/
/* Fixes over the corrupted text: '&reg_version' had been mangled to the
   mojibake '(R)_version' (HTML '&reg;' entity corruption) in four places,
   which does not compile; the three identical per-source blocks are also
   folded into one loop. */
void
codegen_reg_process_dead_list(ir_data_t *ir)
{
    while (reg_dead_list) {
        /* Dead-list entries pack (reg << 8) | version; entries are chained
           through reg_version[].next. */
        int version = reg_dead_list & 0xff;
        int reg = reg_dead_list >> 8;
        reg_version_t *regv = &reg_version[reg][version];
        uop_t *uop = &ir->uops[regv->parent_uop];

        /*Barrier uOPs should be preserved*/
        if (!(uop->type & (UOP_TYPE_BARRIER | UOP_TYPE_ORDER_BARRIER))) {
            ir_reg_t *srcs[3];

            uop->type = UOP_INVALID;

            /*Adjust refcounts on source registers. If these drop to
              zero then those registers can be considered for removal*/
            srcs[0] = &uop->src_reg_a;
            srcs[1] = &uop->src_reg_b;
            srcs[2] = &uop->src_reg_c;
            for (int i = 0; i < 3; i++) {
                if (srcs[i]->reg != IREG_INVALID) {
                    reg_version_t *src_regv = &reg_version[IREG_GET_REG(srcs[i]->reg)][srcs[i]->version];

                    src_regv->refcount--;
                    if (!src_regv->refcount)
                        add_to_dead_list(src_regv, IREG_GET_REG(srcs[i]->reg), srcs[i]->version);
                }
            }

            regv->flags |= REG_FLAGS_DEAD;
        }
        reg_dead_list = regv->next;
    }
}
``` | /content/code_sandbox/src/codegen_new/codegen_reg.c | c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 10,330 |
```c
/* Recompiler front-end handlers for the x86 stack instructions (PUSH/POP of
   registers, immediates, segment registers; PUSHA/POPA; PUSHF/PUSHFD;
   LEAVE).  Each translates one guest instruction into IR uOPs and shares
   the common rop* handler signature. */
uint32_t ropPUSH_r16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPUSH_r32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPOP_r16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPOP_r32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPUSH_imm_16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPUSH_imm_32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPUSH_imm_16_8(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPUSH_imm_32_8(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPOP_W(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPOP_L(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPUSH_CS_16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPUSH_DS_16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPUSH_ES_16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPUSH_FS_16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPUSH_GS_16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPUSH_SS_16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPUSH_CS_32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPUSH_DS_32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPUSH_ES_32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPUSH_FS_32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPUSH_GS_32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPUSH_SS_32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPOP_DS_16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPOP_ES_16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPOP_FS_16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPOP_GS_16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPOP_DS_32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPOP_ES_32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPOP_FS_32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPOP_GS_32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropLEAVE_16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropLEAVE_32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPUSHA_16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPUSHA_32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPOPA_16(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPOPA_32(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPUSHF(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
uint32_t ropPUSHFD(codeblock_t *block, ir_data_t *ir, uint8_t opcode, uint32_t fetchdat, uint32_t op_32, uint32_t op_pc);
``` | /content/code_sandbox/src/codegen_new/codegen_ops_stack.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 1,691 |
```c
#ifndef _CODEGEN_BACKEND_X86_DEFS_H_
#define _CODEGEN_BACKEND_X86_DEFS_H_
/* Host-side register numbers for the x86 (IA-32) dynarec backend.
   These are the hardware encodings of the host registers. */
#define REG_EAX 0
#define REG_ECX 1
#define REG_EDX 2
#define REG_EBX 3
#define REG_ESP 4
#define REG_EBP 5
#define REG_ESI 6
#define REG_EDI 7
/* Host SSE register numbers. */
#define REG_XMM0 0
#define REG_XMM1 1
#define REG_XMM2 2
#define REG_XMM3 3
#define REG_XMM4 4
#define REG_XMM5 5
#define REG_XMM6 6
#define REG_XMM7 7
/* XMM6/XMM7 are reserved as scratch registers for the code generator,
   which is why only 6 FP registers are available for allocation below. */
#define REG_XMM_TEMP REG_XMM7
#define REG_XMM_TEMP2 REG_XMM6
/* Number of host integer / FP registers exposed to the register allocator. */
#define CODEGEN_HOST_REGS 3
#define CODEGEN_HOST_FP_REGS 6
/* Thunks emitted at backend init for guest memory access; defined in the
   backend .c file. */
extern void *codegen_mem_load_byte;
extern void *codegen_mem_load_word;
extern void *codegen_mem_load_long;
extern void *codegen_mem_load_quad;
extern void *codegen_mem_load_single;
extern void *codegen_mem_load_double;
extern void *codegen_mem_store_byte;
extern void *codegen_mem_store_word;
extern void *codegen_mem_store_long;
extern void *codegen_mem_store_quad;
extern void *codegen_mem_store_single;
extern void *codegen_mem_store_double;
/* Shared exit paths: general protection fault raiser and block epilogue. */
extern void *codegen_gpf_rout;
extern void *codegen_exit_rout;
/* Byte offsets of outgoing call arguments on the host stack. */
#define STACK_ARG0 (0)
#define STACK_ARG1 (4)
#define STACK_ARG2 (8)
#define STACK_ARG3 (12)
#endif
``` | /content/code_sandbox/src/codegen_new/codegen_backend_x86_defs.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 343 |
```objective-c
#ifdef USE_NEW_DYNAREC
/*
 * Far CALL, 16-bit operand size (new-dynarec variant).
 * Loads the new CS (via the protected-mode call path when PE is set),
 * then pushes the old CS:IP.  If a 32-bit call gate was taken (cgate32),
 * the return address is pushed as two dwords instead of two words.
 * On any abort the macro restores CS (and ESP after the first push
 * succeeded) and returns 1 from the enclosing handler.
 */
# define CALL_FAR_w(new_seg, new_pc) \
old_cs = CS; \
old_pc = cpu_state.pc; \
cpu_state.pc = new_pc; \
optype = CALL; \
cgate16 = cgate32 = 0; \
if (msw & 1) \
op_loadcscall(new_seg, old_pc); \
else { \
op_loadcs(new_seg); \
cycles -= timing_call_rm; \
} \
optype = 0; \
if (cpu_state.abrt) { \
cgate16 = cgate32 = 0; \
return 1; \
} \
oldss = ss; \
if (cgate32) { \
uint32_t old_esp = ESP; \
PUSH_L(old_cs); \
if (cpu_state.abrt) { \
CS = old_cs; \
cgate16 = cgate32 = 0; \
return 1; \
} \
PUSH_L(old_pc); \
if (cpu_state.abrt) { \
CS = old_cs; \
ESP = old_esp; \
return 1; \
} \
} else { \
uint32_t old_esp = ESP; \
PUSH_W(old_cs); \
if (cpu_state.abrt) { \
CS = old_cs; \
cgate16 = cgate32 = 0; \
return 1; \
} \
PUSH_W(old_pc); \
if (cpu_state.abrt) { \
CS = old_cs; \
ESP = old_esp; \
return 1; \
} \
}
/*
 * Far CALL, 32-bit operand size (new-dynarec variant).  Mirror of
 * CALL_FAR_w: dword pushes by default, word pushes when a 16-bit call
 * gate was taken (cgate16).
 */
# define CALL_FAR_l(new_seg, new_pc) \
old_cs = CS; \
old_pc = cpu_state.pc; \
cpu_state.pc = new_pc; \
optype = CALL; \
cgate16 = cgate32 = 0; \
if (msw & 1) \
op_loadcscall(new_seg, old_pc); \
else { \
op_loadcs(new_seg); \
cycles -= timing_call_rm; \
} \
optype = 0; \
if (cpu_state.abrt) { \
cgate16 = cgate32 = 0; \
return 1; \
} \
oldss = ss; \
if (cgate16) { \
uint32_t old_esp = ESP; \
PUSH_W(old_cs); \
if (cpu_state.abrt) { \
CS = old_cs; \
cgate16 = cgate32 = 0; \
return 1; \
} \
PUSH_W(old_pc); \
if (cpu_state.abrt) { \
CS = old_cs; \
ESP = old_esp; \
return 1; \
} \
} else { \
uint32_t old_esp = ESP; \
PUSH_L(old_cs); \
if (cpu_state.abrt) { \
CS = old_cs; \
cgate16 = cgate32 = 0; \
return 1; \
} \
PUSH_L(old_pc); \
if (cpu_state.abrt) { \
CS = old_cs; \
ESP = old_esp; \
return 1; \
} \
}
#else
/*
 * Old-dynarec variants: additionally record oxpc, do not restore CS on
 * abort, and op_loadcscall takes no return-address argument.
 */
# define CALL_FAR_w(new_seg, new_pc) \
old_cs = CS; \
old_pc = cpu_state.pc; \
oxpc = cpu_state.pc; \
cpu_state.pc = new_pc; \
optype = CALL; \
cgate16 = cgate32 = 0; \
if (msw & 1) \
op_loadcscall(new_seg); \
else { \
op_loadcs(new_seg); \
cycles -= timing_call_rm; \
} \
optype = 0; \
if (cpu_state.abrt) { \
cgate16 = cgate32 = 0; \
return 1; \
} \
oldss = ss; \
if (cgate32) { \
uint32_t old_esp = ESP; \
PUSH_L(old_cs); \
if (cpu_state.abrt) { \
cgate16 = cgate32 = 0; \
return 1; \
} \
PUSH_L(old_pc); \
if (cpu_state.abrt) { \
ESP = old_esp; \
return 1; \
} \
} else { \
uint32_t old_esp = ESP; \
PUSH_W(old_cs); \
if (cpu_state.abrt) { \
cgate16 = cgate32 = 0; \
return 1; \
} \
PUSH_W(old_pc); \
if (cpu_state.abrt) { \
ESP = old_esp; \
return 1; \
} \
}
# define CALL_FAR_l(new_seg, new_pc) \
old_cs = CS; \
old_pc = cpu_state.pc; \
oxpc = cpu_state.pc; \
cpu_state.pc = new_pc; \
optype = CALL; \
cgate16 = cgate32 = 0; \
if (msw & 1) \
op_loadcscall(new_seg); \
else { \
op_loadcs(new_seg); \
cycles -= timing_call_rm; \
} \
optype = 0; \
if (cpu_state.abrt) { \
cgate16 = cgate32 = 0; \
return 1; \
} \
oldss = ss; \
if (cgate16) { \
uint32_t old_esp = ESP; \
PUSH_W(old_cs); \
if (cpu_state.abrt) { \
cgate16 = cgate32 = 0; \
return 1; \
} \
PUSH_W(old_pc); \
if (cpu_state.abrt) { \
ESP = old_esp; \
return 1; \
} \
} else { \
uint32_t old_esp = ESP; \
PUSH_L(old_cs); \
if (cpu_state.abrt) { \
cgate16 = cgate32 = 0; \
return 1; \
} \
PUSH_L(old_pc); \
if (cpu_state.abrt) { \
ESP = old_esp; \
return 1; \
} \
}
#endif
/* 9A: CALL far ptr16:16 — immediate far call with 16-bit operand size.
   CALL_FAR_w may return 1 from inside this function on a fault. */
static int
opCALL_far_w(uint32_t fetchdat)
{
    uint32_t old_cs;
    uint32_t old_pc;
    uint16_t new_cs;
    uint16_t new_pc;
    int cycles_old = cycles; /* snapshot so PREFETCH_RUN gets the cycle delta */
    UN_USED(cycles_old);
    new_pc = getwordf();
    new_cs = getword();
    if (cpu_state.abrt)
        return 1;
    CALL_FAR_w(new_cs, new_pc);
    CPU_BLOCK_END();
    PREFETCH_RUN(cycles_old - cycles, 5, -1, 0, 0, cgate16 ? 2 : 0, cgate16 ? 0 : 2, 0);
    PREFETCH_FLUSH();
    return 0;
}
/* 9A with operand-size override: CALL far ptr16:32. */
static int
opCALL_far_l(uint32_t fetchdat)
{
    uint32_t old_cs;
    uint32_t old_pc;
    uint32_t new_cs;
    uint32_t new_pc;
    int cycles_old = cycles;
    UN_USED(cycles_old);
    new_pc = getlong();
    new_cs = getword();
    if (cpu_state.abrt)
        return 1;
    CALL_FAR_l(new_cs, new_pc);
    CPU_BLOCK_END();
    PREFETCH_RUN(cycles_old - cycles, 7, -1, 0, 0, cgate16 ? 2 : 0, cgate16 ? 0 : 2, 0);
    PREFETCH_FLUSH();
    return 0;
}
/* Opcode FF, 16-bit operand size, 16-bit addressing.  Group dispatch on
   the reg field of ModRM: INC/DEC/CALL/CALL far/JMP/JMP far/PUSH on r/m16.
   Memory operands are permission-checked (CHECK_READ/CHECK_WRITE) before
   access so faults are raised before any state is modified. */
static int
opFF_w_a16(uint32_t fetchdat)
{
    uint16_t old_cs;
    uint16_t new_cs;
    uint32_t old_pc;
    uint32_t new_pc;
    int cycles_old = cycles; /* snapshot for PREFETCH_RUN cycle deltas */
    UN_USED(cycles_old);
    uint16_t temp;
    fetch_ea_16(fetchdat);
    switch (rmdat & 0x38) {
        case 0x00: /*INC w*/
            if (cpu_mod != 3) {
                SEG_CHECK_READ(cpu_state.ea_seg);
                if (cpu_state.abrt)
                    return 1;
                CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 1UL);
            }
            temp = geteaw();
            if (cpu_state.abrt)
                return 1;
            if (cpu_mod != 3) {
                CHECK_WRITE(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 1UL);
            }
            seteaw(temp + 1);
            if (cpu_state.abrt)
                return 1;
            setadd16nc(temp, 1); /* "nc": INC leaves CF untouched */
            CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_mm);
            PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_mm, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, 0);
            break;
        case 0x08: /*DEC w*/
            if (cpu_mod != 3) {
                SEG_CHECK_READ(cpu_state.ea_seg);
                if (cpu_state.abrt)
                    return 1;
                CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 1UL);
            }
            temp = geteaw();
            if (cpu_state.abrt)
                return 1;
            if (cpu_mod != 3) {
                CHECK_WRITE(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 1UL);
            }
            seteaw(temp - 1);
            if (cpu_state.abrt)
                return 1;
            setsub16nc(temp, 1);
            CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_mm);
            PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_mm, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, 0);
            break;
        case 0x10: /*CALL*/
            if (cpu_mod != 3) {
                SEG_CHECK_READ(cpu_state.ea_seg);
                if (cpu_state.abrt)
                    return 1;
                CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 1UL);
            }
            new_pc = geteaw();
            if (cpu_state.abrt)
                return 1;
            /* Push return IP only after the target was read successfully. */
            PUSH_W(cpu_state.pc);
            cpu_state.pc = new_pc;
            CPU_BLOCK_END();
            if (is486) {
                CLOCK_CYCLES(5);
            } else {
                CLOCK_CYCLES((cpu_mod == 3) ? 7 : 10);
            }
            PREFETCH_RUN((cpu_mod == 3) ? 7 : 10, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 1, 0, 0);
            PREFETCH_FLUSH();
            break;
        case 0x18: /*CALL far*/
            if (cpu_mod != 3) {
                SEG_CHECK_READ(cpu_state.ea_seg);
                if (cpu_state.abrt)
                    return 1;
                CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 3UL);
            }
            new_pc = readmemw(easeg, cpu_state.eaaddr);
            new_cs = readmemw(easeg, (cpu_state.eaaddr + 2));
            if (cpu_state.abrt)
                return 1;
            CALL_FAR_w(new_cs, new_pc);
            CPU_BLOCK_END();
            PREFETCH_RUN(cycles_old - cycles, 2, rmdat, 2, 0, cgate16 ? 2 : 0, cgate16 ? 0 : 2, 0);
            PREFETCH_FLUSH();
            break;
        case 0x20: /*JMP*/
            if (cpu_mod != 3) {
                SEG_CHECK_READ(cpu_state.ea_seg);
                if (cpu_state.abrt)
                    return 1;
                CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 1UL);
            }
            new_pc = geteaw();
            if (cpu_state.abrt)
                return 1;
            cpu_state.pc = new_pc;
            CPU_BLOCK_END();
            if (is486) {
                CLOCK_CYCLES(5);
            } else {
                CLOCK_CYCLES((cpu_mod == 3) ? 7 : 10);
            }
            PREFETCH_RUN((cpu_mod == 3) ? 7 : 10, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 0);
            PREFETCH_FLUSH();
            break;
        case 0x28: /*JMP far*/
            if (cpu_mod != 3) {
                SEG_CHECK_READ(cpu_state.ea_seg);
                if (cpu_state.abrt)
                    return 1;
                CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 3UL);
            }
#ifdef USE_NEW_DYNAREC
            old_pc = cpu_state.pc;
#else
            oxpc = cpu_state.pc;
#endif
            new_pc = readmemw(easeg, cpu_state.eaaddr);
            new_cs = readmemw(easeg, cpu_state.eaaddr + 2);
            if (cpu_state.abrt)
                return 1;
            cpu_state.pc = new_pc;
#ifdef USE_NEW_DYNAREC
            op_loadcsjmp(new_cs, old_pc);
#else
            op_loadcsjmp(new_cs, oxpc);
#endif
            if (cpu_state.abrt)
                return 1;
            CPU_BLOCK_END();
            PREFETCH_RUN(cycles_old - cycles, 2, rmdat, 2, 0, 0, 0, 0);
            PREFETCH_FLUSH();
            break;
        case 0x30: /*PUSH w*/
            if (cpu_mod != 3) {
                SEG_CHECK_READ(cpu_state.ea_seg);
                if (cpu_state.abrt)
                    return 1;
                CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 1UL);
            }
            temp = geteaw();
            if (cpu_state.abrt)
                return 1;
            PUSH_W(temp);
            CLOCK_CYCLES((cpu_mod == 3) ? 2 : 5);
            PREFETCH_RUN((cpu_mod == 3) ? 2 : 5, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 1, 0, 0);
            break;
        default:
            /* reg field 7 is undefined for FF — raise #UD. */
            // fatal("Bad FF opcode %02X\n",rmdat&0x38);
            x86illegal();
    }
    return cpu_state.abrt;
}
/* Opcode FF, 16-bit operand size, 32-bit addressing.  Same dispatch as
   opFF_w_a16 but with fetch_ea_32 and the "32-bit EA" flag passed to
   PREFETCH_RUN in its last argument. */
static int
opFF_w_a32(uint32_t fetchdat)
{
    uint16_t old_cs;
    uint16_t new_cs;
    uint32_t old_pc;
    uint32_t new_pc;
    int cycles_old = cycles; /* snapshot for PREFETCH_RUN cycle deltas */
    UN_USED(cycles_old);
    uint16_t temp;
    fetch_ea_32(fetchdat);
    switch (rmdat & 0x38) {
        case 0x00: /*INC w*/
            if (cpu_mod != 3) {
                SEG_CHECK_READ(cpu_state.ea_seg);
                if (cpu_state.abrt)
                    return 1;
                CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 1UL);
            }
            temp = geteaw();
            if (cpu_state.abrt)
                return 1;
            if (cpu_mod != 3) {
                CHECK_WRITE(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 1UL);
            }
            seteaw(temp + 1);
            if (cpu_state.abrt)
                return 1;
            setadd16nc(temp, 1); /* "nc": INC leaves CF untouched */
            CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_mm);
            PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_mm, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, 1);
            break;
        case 0x08: /*DEC w*/
            if (cpu_mod != 3) {
                SEG_CHECK_READ(cpu_state.ea_seg);
                if (cpu_state.abrt)
                    return 1;
                CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 1UL);
            }
            temp = geteaw();
            if (cpu_state.abrt)
                return 1;
            if (cpu_mod != 3) {
                CHECK_WRITE(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 1UL);
            }
            seteaw(temp - 1);
            if (cpu_state.abrt)
                return 1;
            setsub16nc(temp, 1);
            CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_mm);
            PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_mm, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, 1);
            break;
        case 0x10: /*CALL*/
            if (cpu_mod != 3) {
                SEG_CHECK_READ(cpu_state.ea_seg);
                if (cpu_state.abrt)
                    return 1;
                CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 1UL);
            }
            new_pc = geteaw();
            if (cpu_state.abrt)
                return 1;
            /* Push return IP only after the target was read successfully. */
            PUSH_W(cpu_state.pc);
            cpu_state.pc = new_pc;
            CPU_BLOCK_END();
            if (is486) {
                CLOCK_CYCLES(5);
            } else {
                CLOCK_CYCLES((cpu_mod == 3) ? 7 : 10);
            }
            PREFETCH_RUN((cpu_mod == 3) ? 7 : 10, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 1, 0, 1);
            PREFETCH_FLUSH();
            break;
        case 0x18: /*CALL far*/
            if (cpu_mod != 3) {
                SEG_CHECK_READ(cpu_state.ea_seg);
                if (cpu_state.abrt)
                    return 1;
                CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 3UL);
            }
            new_pc = readmemw(easeg, cpu_state.eaaddr);
            new_cs = readmemw(easeg, (cpu_state.eaaddr + 2));
            if (cpu_state.abrt)
                return 1;
            CALL_FAR_w(new_cs, new_pc);
            CPU_BLOCK_END();
            PREFETCH_RUN(cycles_old - cycles, 2, rmdat, 2, 0, cgate16 ? 2 : 0, cgate16 ? 0 : 2, 1);
            PREFETCH_FLUSH();
            break;
        case 0x20: /*JMP*/
            if (cpu_mod != 3) {
                SEG_CHECK_READ(cpu_state.ea_seg);
                if (cpu_state.abrt)
                    return 1;
                CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 1UL);
            }
            new_pc = geteaw();
            if (cpu_state.abrt)
                return 1;
            cpu_state.pc = new_pc;
            CPU_BLOCK_END();
            if (is486) {
                CLOCK_CYCLES(5);
            } else {
                CLOCK_CYCLES((cpu_mod == 3) ? 7 : 10);
            }
            PREFETCH_RUN(cycles_old - cycles, 2, rmdat, 1, 0, 0, 0, 1);
            PREFETCH_FLUSH();
            break;
        case 0x28: /*JMP far*/
            if (cpu_mod != 3) {
                SEG_CHECK_READ(cpu_state.ea_seg);
                if (cpu_state.abrt)
                    return 1;
                CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 3UL);
            }
#ifdef USE_NEW_DYNAREC
            old_pc = cpu_state.pc;
#else
            oxpc = cpu_state.pc;
#endif
            new_pc = readmemw(easeg, cpu_state.eaaddr);
            new_cs = readmemw(easeg, cpu_state.eaaddr + 2);
            if (cpu_state.abrt)
                return 1;
            cpu_state.pc = new_pc;
#ifdef USE_NEW_DYNAREC
            op_loadcsjmp(new_cs, old_pc);
#else
            op_loadcsjmp(new_cs, oxpc);
#endif
            if (cpu_state.abrt)
                return 1;
            CPU_BLOCK_END();
            PREFETCH_RUN(cycles_old - cycles, 2, rmdat, 2, 0, 0, 0, 1);
            PREFETCH_FLUSH();
            break;
        case 0x30: /*PUSH w*/
            if (cpu_mod != 3) {
                SEG_CHECK_READ(cpu_state.ea_seg);
                if (cpu_state.abrt)
                    return 1;
                CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 1UL);
            }
            temp = geteaw();
            if (cpu_state.abrt)
                return 1;
            PUSH_W(temp);
            CLOCK_CYCLES((cpu_mod == 3) ? 2 : 5);
            PREFETCH_RUN((cpu_mod == 3) ? 2 : 5, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 1, 0, 1);
            break;
        default:
            /* reg field 7 is undefined for FF — raise #UD. */
            // fatal("Bad FF opcode %02X\n",rmdat&0x38);
            x86illegal();
    }
    return cpu_state.abrt;
}
/* Opcode FF, 32-bit operand size, 16-bit addressing.  Dword variant of
   opFF_w_a16; far pointers are read as dword offset + word selector. */
static int
opFF_l_a16(uint32_t fetchdat)
{
    uint16_t old_cs;
    uint16_t new_cs;
    uint32_t old_pc;
    uint32_t new_pc;
    int cycles_old = cycles; /* snapshot for PREFETCH_RUN cycle deltas */
    UN_USED(cycles_old);
    uint32_t temp;
    fetch_ea_16(fetchdat);
    switch (rmdat & 0x38) {
        case 0x00: /*INC l*/
            if (cpu_mod != 3) {
                SEG_CHECK_READ(cpu_state.ea_seg);
                if (cpu_state.abrt)
                    return 1;
                CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 3UL);
            }
            temp = geteal();
            if (cpu_state.abrt)
                return 1;
            if (cpu_mod != 3) {
                CHECK_WRITE(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 3UL);
            }
            seteal(temp + 1);
            if (cpu_state.abrt)
                return 1;
            setadd32nc(temp, 1); /* "nc": INC leaves CF untouched */
            CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_mm);
            PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_mm, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0);
            break;
        case 0x08: /*DEC l*/
            if (cpu_mod != 3) {
                SEG_CHECK_READ(cpu_state.ea_seg);
                if (cpu_state.abrt)
                    return 1;
                CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 3UL);
            }
            temp = geteal();
            if (cpu_state.abrt)
                return 1;
            if (cpu_mod != 3) {
                CHECK_WRITE(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 3UL);
            }
            seteal(temp - 1);
            if (cpu_state.abrt)
                return 1;
            setsub32nc(temp, 1);
            CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_mm);
            PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_mm, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0);
            break;
        case 0x10: /*CALL*/
            if (cpu_mod != 3) {
                SEG_CHECK_READ(cpu_state.ea_seg);
                if (cpu_state.abrt)
                    return 1;
                CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 3UL);
            }
            new_pc = geteal();
            if (cpu_state.abrt)
                return 1;
            /* Push return EIP only after the target was read successfully. */
            PUSH_L(cpu_state.pc);
            cpu_state.pc = new_pc;
            CPU_BLOCK_END();
            if (is486) {
                CLOCK_CYCLES(5);
            } else {
                CLOCK_CYCLES((cpu_mod == 3) ? 7 : 10);
            }
            PREFETCH_RUN((cpu_mod == 3) ? 7 : 10, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, 1, 0);
            PREFETCH_FLUSH();
            break;
        case 0x18: /*CALL far*/
            if (cpu_mod != 3) {
                SEG_CHECK_READ(cpu_state.ea_seg);
                if (cpu_state.abrt)
                    return 1;
                CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 5UL);
            }
            new_pc = readmeml(easeg, cpu_state.eaaddr);
            new_cs = readmemw(easeg, (cpu_state.eaaddr + 4));
            if (cpu_state.abrt)
                return 1;
            CALL_FAR_l(new_cs, new_pc);
            CPU_BLOCK_END();
            PREFETCH_RUN(cycles_old - cycles, 2, rmdat, 1, 1, cgate16 ? 2 : 0, cgate16 ? 0 : 2, 0);
            PREFETCH_FLUSH();
            break;
        case 0x20: /*JMP*/
            if (cpu_mod != 3) {
                SEG_CHECK_READ(cpu_state.ea_seg);
                if (cpu_state.abrt)
                    return 1;
                CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 3UL);
            }
            new_pc = geteal();
            if (cpu_state.abrt)
                return 1;
            cpu_state.pc = new_pc;
            CPU_BLOCK_END();
            if (is486) {
                CLOCK_CYCLES(5);
            } else {
                CLOCK_CYCLES((cpu_mod == 3) ? 7 : 10);
            }
            PREFETCH_RUN(cycles_old - cycles, 2, rmdat, 0, 1, 0, 0, 0);
            PREFETCH_FLUSH();
            break;
        case 0x28: /*JMP far*/
            if (cpu_mod != 3) {
                SEG_CHECK_READ(cpu_state.ea_seg);
                if (cpu_state.abrt)
                    return 1;
                CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 5UL);
            }
#ifdef USE_NEW_DYNAREC
            old_pc = cpu_state.pc;
#else
            oxpc = cpu_state.pc;
#endif
            new_pc = readmeml(easeg, cpu_state.eaaddr);
            new_cs = readmemw(easeg, cpu_state.eaaddr + 4);
            if (cpu_state.abrt)
                return 1;
            cpu_state.pc = new_pc;
#ifdef USE_NEW_DYNAREC
            op_loadcsjmp(new_cs, old_pc);
#else
            op_loadcsjmp(new_cs, oxpc);
#endif
            if (cpu_state.abrt)
                return 1;
            CPU_BLOCK_END();
            PREFETCH_RUN(cycles_old - cycles, 2, rmdat, 1, 1, 0, 0, 0);
            PREFETCH_FLUSH();
            break;
        case 0x30: /*PUSH l*/
            if (cpu_mod != 3) {
                SEG_CHECK_READ(cpu_state.ea_seg);
                if (cpu_state.abrt)
                    return 1;
                CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 3UL);
            }
            temp = geteal();
            if (cpu_state.abrt)
                return 1;
            PUSH_L(temp);
            CLOCK_CYCLES((cpu_mod == 3) ? 2 : 5);
            PREFETCH_RUN((cpu_mod == 3) ? 2 : 5, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, 1, 0);
            break;
        default:
            /* reg field 7 is undefined for FF — raise #UD. */
            // fatal("Bad FF opcode %02X\n",rmdat&0x38);
            x86illegal();
    }
    return cpu_state.abrt;
}
/* Opcode FF, 32-bit operand size, 32-bit addressing.  Dword variant of
   opFF_w_a32; far pointers are read as dword offset + word selector.
   Fix: the PUSH l case was missing its CLOCK_CYCLES charge, unlike the
   three sibling handlers (opFF_w_a16/opFF_w_a32/opFF_l_a16). */
static int
opFF_l_a32(uint32_t fetchdat)
{
    uint16_t old_cs;
    uint16_t new_cs;
    uint32_t old_pc;
    uint32_t new_pc;
    int cycles_old = cycles; /* snapshot for PREFETCH_RUN cycle deltas */
    UN_USED(cycles_old);
    uint32_t temp;
    fetch_ea_32(fetchdat);
    switch (rmdat & 0x38) {
        case 0x00: /*INC l*/
            if (cpu_mod != 3) {
                SEG_CHECK_READ(cpu_state.ea_seg);
                if (cpu_state.abrt)
                    return 1;
                CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 3UL);
            }
            temp = geteal();
            if (cpu_state.abrt)
                return 1;
            if (cpu_mod != 3) {
                CHECK_WRITE(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 3UL);
            }
            seteal(temp + 1);
            if (cpu_state.abrt)
                return 1;
            setadd32nc(temp, 1); /* "nc": INC leaves CF untouched */
            CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_mm);
            PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_mm, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 1);
            break;
        case 0x08: /*DEC l*/
            if (cpu_mod != 3) {
                SEG_CHECK_READ(cpu_state.ea_seg);
                if (cpu_state.abrt)
                    return 1;
                CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 3UL);
            }
            temp = geteal();
            if (cpu_state.abrt)
                return 1;
            if (cpu_mod != 3) {
                CHECK_WRITE(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 3UL);
            }
            seteal(temp - 1);
            if (cpu_state.abrt)
                return 1;
            setsub32nc(temp, 1);
            CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_mm);
            PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_mm, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 1);
            break;
        case 0x10: /*CALL*/
            if (cpu_mod != 3) {
                SEG_CHECK_READ(cpu_state.ea_seg);
                if (cpu_state.abrt)
                    return 1;
                CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 3UL);
            }
            new_pc = geteal();
            if (cpu_state.abrt)
                return 1;
            /* Push return EIP only after the target was read successfully. */
            PUSH_L(cpu_state.pc);
            if (cpu_state.abrt)
                return 1;
            cpu_state.pc = new_pc;
            CPU_BLOCK_END();
            if (is486) {
                CLOCK_CYCLES(5);
            } else {
                CLOCK_CYCLES((cpu_mod == 3) ? 7 : 10);
            }
            PREFETCH_RUN((cpu_mod == 3) ? 7 : 10, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, 1, 1);
            PREFETCH_FLUSH();
            break;
        case 0x18: /*CALL far*/
            if (cpu_mod != 3) {
                SEG_CHECK_READ(cpu_state.ea_seg);
                if (cpu_state.abrt)
                    return 1;
                CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 5UL);
            }
            new_pc = readmeml(easeg, cpu_state.eaaddr);
            new_cs = readmemw(easeg, (cpu_state.eaaddr + 4));
            if (cpu_state.abrt)
                return 1;
            CALL_FAR_l(new_cs, new_pc);
            CPU_BLOCK_END();
            PREFETCH_RUN(cycles_old - cycles, 2, rmdat, 1, 1, cgate16 ? 2 : 0, cgate16 ? 0 : 2, 1);
            PREFETCH_FLUSH();
            break;
        case 0x20: /*JMP*/
            if (cpu_mod != 3) {
                SEG_CHECK_READ(cpu_state.ea_seg);
                if (cpu_state.abrt)
                    return 1;
                CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 3UL);
            }
            new_pc = geteal();
            if (cpu_state.abrt)
                return 1;
            cpu_state.pc = new_pc;
            CPU_BLOCK_END();
            if (is486) {
                CLOCK_CYCLES(5);
            } else {
                CLOCK_CYCLES((cpu_mod == 3) ? 7 : 10);
            }
            PREFETCH_RUN(cycles_old - cycles, 2, rmdat, 1, 1, 0, 0, 1);
            PREFETCH_FLUSH();
            break;
        case 0x28: /*JMP far*/
            if (cpu_mod != 3) {
                SEG_CHECK_READ(cpu_state.ea_seg);
                if (cpu_state.abrt)
                    return 1;
                CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 5UL);
            }
#ifdef USE_NEW_DYNAREC
            old_pc = cpu_state.pc;
#else
            oxpc = cpu_state.pc;
#endif
            new_pc = readmeml(easeg, cpu_state.eaaddr);
            new_cs = readmemw(easeg, cpu_state.eaaddr + 4);
            if (cpu_state.abrt)
                return 1;
            cpu_state.pc = new_pc;
#ifdef USE_NEW_DYNAREC
            op_loadcsjmp(new_cs, old_pc);
#else
            op_loadcsjmp(new_cs, oxpc);
#endif
            if (cpu_state.abrt)
                return 1;
            CPU_BLOCK_END();
            PREFETCH_RUN(cycles_old - cycles, 2, rmdat, 1, 1, 0, 0, 1);
            PREFETCH_FLUSH();
            break;
        case 0x30: /*PUSH l*/
            if (cpu_mod != 3) {
                SEG_CHECK_READ(cpu_state.ea_seg);
                if (cpu_state.abrt)
                    return 1;
                CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 3UL);
            }
            temp = geteal();
            if (cpu_state.abrt)
                return 1;
            PUSH_L(temp);
            /* Was missing: charge the same cycles as the w_a16/w_a32/l_a16
               variants of PUSH r/m do. */
            CLOCK_CYCLES((cpu_mod == 3) ? 2 : 5);
            PREFETCH_RUN((cpu_mod == 3) ? 2 : 5, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, 1, 1);
            break;
        default:
            /* reg field 7 is undefined for FF — raise #UD. */
            // fatal("Bad FF opcode %02X\n",rmdat&0x38);
            x86illegal();
    }
    return cpu_state.abrt;
}
``` | /content/code_sandbox/src/cpu/x86_ops_call.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 8,381 |
```c
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <wchar.h>
#include <math.h>
#ifndef INFINITY
# define INFINITY (__builtin_inff())
#endif
#define HAVE_STDARG_H
#include <86box/86box.h>
#include "cpu.h"
#include <86box/timer.h>
#include "x86.h"
#include "x87_sf.h"
#include "x87.h"
#include <86box/nmi.h>
#include <86box/mem.h>
#include <86box/smram.h>
#include <86box/pic.h>
#include <86box/pit.h>
#include <86box/fdd.h>
#include <86box/fdc.h>
#include <86box/keyboard.h>
#include <86box/timer.h>
#include "x86seg_common.h"
#include "x86seg.h"
#include "386_common.h"
#include "x86_flags.h"
/* Per-register pointers to the live MMX state.  mmx_init() aims them at
   either the softfloat FPU's st_space (significand / sign-exponent halves)
   or at cpu_state.MM plus the local MME exponent words. */
MMX_REG *MMP[8];
uint16_t *MMEP[8];
/* Exponent-word backing store; used only when fpu_softfloat is off. */
static uint16_t MME[8];
/* Resolve the MMX register pointer for the active FPU implementation. */
#define MMX_GETREGP(r) fpu_softfloat ? ((MMX_REG *) &fpu_state.st_space[r].signif) : &(cpu_state.MM[r])
/* Point the MMP/MMEP register-pointer tables at the storage backing the
   active FPU implementation and mark all MME exponent words as 0xffff. */
void
mmx_init(void)
{
    uint8_t reg;

    memset(MME, 0xff, sizeof(MME));

    /* The implementation choice is fixed for the whole table, so decide
       once instead of testing fpu_softfloat on every iteration. */
    if (fpu_softfloat) {
        for (reg = 0; reg < 8; reg++) {
            MMP[reg]  = (MMX_REG *) &fpu_state.st_space[reg].signif;
            MMEP[reg] = (uint16_t *) &fpu_state.st_space[reg].signExp;
        }
    } else {
        for (reg = 0; reg < 8; reg++) {
            MMP[reg]  = &(cpu_state.MM[reg]);
            MMEP[reg] = &(MME[reg]);
        }
    }
}
``` | /content/code_sandbox/src/cpu/x86_ops_mmx.c | c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 391 |
```objective-c
/* The old dynarec tracks the pre-branch PC in oxpc; the new one does not. */
#ifdef USE_NEW_DYNAREC
# define CPU_SET_OXPC
#else
# define CPU_SET_OXPC oxpc = cpu_state.pc;
#endif
/*
 * Far return, 16-bit operand size.  In protected (non-V86) mode the whole
 * operation is delegated to op_pmoderetf().  In real/V86 mode it pops
 * IP then CS as words and releases 4 + stack_offset bytes of stack
 * (stack_offset implements RETF imm16).  Expands a `return 1` on abort.
 */
#define RETF_a16(stack_offset) \
if ((msw & 1) && !(cpu_state.eflags & VM_FLAG)) { \
op_pmoderetf(0, stack_offset); \
return 1; \
} \
CPU_SET_OXPC \
if (stack32) { \
cpu_state.pc = readmemw(ss, ESP); \
op_loadcs(readmemw(ss, ESP + 2)); \
} else { \
cpu_state.pc = readmemw(ss, SP); \
op_loadcs(readmemw(ss, SP + 2)); \
} \
if (cpu_state.abrt) \
return 1; \
if (stack32) \
ESP += 4 + stack_offset; \
else \
SP += 4 + stack_offset; \
cycles -= timing_retf_rm;
/*
 * Far return, 32-bit operand size: pops EIP then CS as dwords (the CS
 * dword's upper 16 bits are discarded) and releases 8 + stack_offset
 * bytes of stack.
 */
#define RETF_a32(stack_offset) \
if ((msw & 1) && !(cpu_state.eflags & VM_FLAG)) { \
op_pmoderetf(1, stack_offset); \
return 1; \
} \
CPU_SET_OXPC \
if (stack32) { \
cpu_state.pc = readmeml(ss, ESP); \
op_loadcs(readmeml(ss, ESP + 4) & 0xffff); \
} else { \
cpu_state.pc = readmeml(ss, SP); \
op_loadcs(readmeml(ss, SP + 4) & 0xffff); \
} \
if (cpu_state.abrt) \
return 1; \
if (stack32) \
ESP += 8 + stack_offset; \
else \
SP += 8 + stack_offset; \
cycles -= timing_retf_rm;
/* CB: RETF, 16-bit operand size.  RETF_a16 may return 1 from here. */
static int
opRETF_a16(uint32_t fetchdat)
{
    int cycles_old = cycles; /* snapshot for PREFETCH_RUN cycle delta */
    UN_USED(cycles_old);
    CPU_BLOCK_END();
    RETF_a16(0);
    PREFETCH_RUN(cycles_old - cycles, 1, -1, 2, 0, 0, 0, 0);
    PREFETCH_FLUSH();
    return 0;
}
/* CB with operand-size override: RETF, 32-bit operand size. */
static int
opRETF_a32(uint32_t fetchdat)
{
    int cycles_old = cycles;
    UN_USED(cycles_old);
    CPU_BLOCK_END();
    RETF_a32(0);
    PREFETCH_RUN(cycles_old - cycles, 1, -1, 0, 2, 0, 0, 1);
    PREFETCH_FLUSH();
    return 0;
}
/* CA iw: RETF imm16 — additionally releases `offset` bytes of stack. */
static int
opRETF_a16_imm(uint32_t fetchdat)
{
    uint16_t offset = getwordf();
    int cycles_old = cycles;
    UN_USED(cycles_old);
    CPU_BLOCK_END();
    RETF_a16(offset);
    PREFETCH_RUN(cycles_old - cycles, 3, -1, 2, 0, 0, 0, 0);
    PREFETCH_FLUSH();
    return 0;
}
/* CA iw with operand-size override: 32-bit RETF imm16. */
static int
opRETF_a32_imm(uint32_t fetchdat)
{
    uint16_t offset = getwordf();
    int cycles_old = cycles;
    UN_USED(cycles_old);
    CPU_BLOCK_END();
    RETF_a32(offset);
    PREFETCH_RUN(cycles_old - cycles, 3, -1, 0, 2, 0, 0, 1);
    PREFETCH_FLUSH();
    return 0;
}
/* CF: IRET for 186-class CPUs.  Protected mode goes through
   op_pmodeiret(); real/V86 mode pops IP, CS and FLAGS as words.
   The real-mode FLAGS mask differs between the stack32 and stack16
   paths (0xffd5 vs 0x0fd5) — NOTE(review): the 186 has no stack32, so
   only the 0x0fd5 path (top nibble preserved) should ever run here. */
static int
opIRET_186(uint32_t fetchdat)
{
    int cycles_old = cycles; /* snapshot for PREFETCH_RUN cycle delta */
    UN_USED(cycles_old);
    if ((cr0 & 1) && (cpu_state.eflags & VM_FLAG) && (IOPL != 3)) {
        x86gpf(NULL, 0);
        return 1;
    }
    if (msw & 1) {
        optype = IRET;
        op_pmodeiret(0);
        optype = 0;
    } else {
        uint16_t new_cs;
        CPU_SET_OXPC
        if (stack32) {
            cpu_state.pc = readmemw(ss, ESP);
            new_cs = readmemw(ss, ESP + 2);
            cpu_state.flags = (cpu_state.flags & 0x7000) | (readmemw(ss, ESP + 4) & 0xffd5) | 2;
            ESP += 6;
        } else {
            cpu_state.pc = readmemw(ss, SP);
            new_cs = readmemw(ss, ((SP + 2) & 0xffff));
            cpu_state.flags = (cpu_state.flags & 0x7000) | (readmemw(ss, ((SP + 4) & 0xffff)) & 0x0fd5) | 2;
            SP += 6;
        }
        op_loadcs(new_cs);
        cycles -= timing_iret_rm;
    }
    flags_extract();
    nmi_enable = 1;
    rf_flag_no_clear = 1;
    CPU_BLOCK_END();
    PREFETCH_RUN(cycles_old - cycles, 1, -1, 2, 0, 0, 0, 0);
    PREFETCH_FLUSH();
    return cpu_state.abrt;
}
/* CF: IRET for 286-class CPUs.  Body is currently identical to
   opIRET_186; kept separate so the two CPU families can diverge. */
static int
opIRET_286(uint32_t fetchdat)
{
    int cycles_old = cycles;
    UN_USED(cycles_old);
    if ((cr0 & 1) && (cpu_state.eflags & VM_FLAG) && (IOPL != 3)) {
        x86gpf(NULL, 0);
        return 1;
    }
    if (msw & 1) {
        optype = IRET;
        op_pmodeiret(0);
        optype = 0;
    } else {
        uint16_t new_cs;
        CPU_SET_OXPC
        if (stack32) {
            cpu_state.pc = readmemw(ss, ESP);
            new_cs = readmemw(ss, ESP + 2);
            cpu_state.flags = (cpu_state.flags & 0x7000) | (readmemw(ss, ESP + 4) & 0xffd5) | 2;
            ESP += 6;
        } else {
            cpu_state.pc = readmemw(ss, SP);
            new_cs = readmemw(ss, ((SP + 2) & 0xffff));
            cpu_state.flags = (cpu_state.flags & 0x7000) | (readmemw(ss, ((SP + 4) & 0xffff)) & 0x0fd5) | 2;
            SP += 6;
        }
        op_loadcs(new_cs);
        cycles -= timing_iret_rm;
    }
    flags_extract();
    nmi_enable = 1;
    rf_flag_no_clear = 1;
    CPU_BLOCK_END();
    PREFETCH_RUN(cycles_old - cycles, 1, -1, 2, 0, 0, 0, 0);
    PREFETCH_FLUSH();
    return cpu_state.abrt;
}
/* CF: IRET, 16-bit operand size, 386+.  Supports the VME (Virtual Mode
   Extensions) fast path: in V86 mode with IOPL < 3 and CR4.VME set, IRET
   is emulated without a fault, mapping the popped IF into EFLAGS.VIF.
   Without VME that case raises #GP. */
static int
opIRET(uint32_t fetchdat)
{
    int cycles_old = cycles; /* snapshot for PREFETCH_RUN cycle delta */
    UN_USED(cycles_old);
    if ((cr0 & 1) && (cpu_state.eflags & VM_FLAG) && (IOPL != 3)) {
        if (cr4 & CR4_VME) {
            uint16_t new_pc;
            uint16_t new_cs;
            uint16_t new_flags;
            new_pc = readmemw(ss, SP);
            new_cs = readmemw(ss, ((SP + 2) & 0xffff));
            new_flags = readmemw(ss, ((SP + 4) & 0xffff));
            if (cpu_state.abrt)
                return 1;
            /* TF set, or enabling IF while an interrupt is pending (VIP),
               still has to trap to the monitor. */
            if ((new_flags & T_FLAG) || ((new_flags & I_FLAG) && (cpu_state.eflags & VIP_FLAG))) {
                x86gpf(NULL, 0);
                return 1;
            }
            SP += 6;
            if (new_flags & I_FLAG)
                cpu_state.eflags |= VIF_FLAG;
            else
                cpu_state.eflags &= ~VIF_FLAG;
            cpu_state.flags = (cpu_state.flags & 0x3300) | (new_flags & 0x4cd5) | 2;
            op_loadcs(new_cs);
            cpu_state.pc = new_pc;
            cycles -= timing_iret_rm;
        } else {
            x86gpf_expected(NULL, 0);
            return 1;
        }
    } else {
        if (msw & 1) {
            optype = IRET;
            op_pmodeiret(0);
            optype = 0;
        } else {
            uint16_t new_cs;
            CPU_SET_OXPC
            if (stack32) {
                cpu_state.pc = readmemw(ss, ESP);
                new_cs = readmemw(ss, ESP + 2);
                cpu_state.flags = (readmemw(ss, ESP + 4) & 0xffd5) | 2;
                ESP += 6;
            } else {
                cpu_state.pc = readmemw(ss, SP);
                new_cs = readmemw(ss, ((SP + 2) & 0xffff));
                cpu_state.flags = (readmemw(ss, ((SP + 4) & 0xffff)) & 0xffd5) | 2;
                SP += 6;
            }
            op_loadcs(new_cs);
            cycles -= timing_iret_rm;
        }
    }
    flags_extract();
    nmi_enable = 1;
    rf_flag_no_clear = 1;
    CPU_BLOCK_END();
    PREFETCH_RUN(cycles_old - cycles, 1, -1, 2, 0, 0, 0, 0);
    PREFETCH_FLUSH();
    return cpu_state.abrt;
}
/* CF with operand-size override: IRETD.  No VME fast path here — V86
   with IOPL < 3 always faults.  Real/V86 pops EIP (dword), CS (word),
   FLAGS (word) and the high EFLAGS word, 12 bytes total. */
static int
opIRETD(uint32_t fetchdat)
{
    int cycles_old = cycles; /* snapshot for PREFETCH_RUN cycle delta */
    UN_USED(cycles_old);
    if ((cr0 & 1) && (cpu_state.eflags & VM_FLAG) && (IOPL != 3)) {
        x86gpf_expected(NULL, 0);
        return 1;
    }
    if (msw & 1) {
        optype = IRET;
        op_pmodeiret(1);
        optype = 0;
    } else {
        uint16_t new_cs;
        CPU_SET_OXPC
        if (stack32) {
            cpu_state.pc = readmeml(ss, ESP);
            new_cs = readmemw(ss, ESP + 4);
            cpu_state.flags = (readmemw(ss, ESP + 8) & 0xffd5) | 2;
            cpu_state.eflags = readmemw(ss, ESP + 10);
            ESP += 12;
        } else {
            cpu_state.pc = readmeml(ss, SP);
            new_cs = readmemw(ss, ((SP + 4) & 0xffff));
            cpu_state.flags = (readmemw(ss, (SP + 8) & 0xffff) & 0xffd5) | 2;
            cpu_state.eflags = readmemw(ss, (SP + 10) & 0xffff);
            SP += 12;
        }
        op_loadcs(new_cs);
        cycles -= timing_iret_rm;
    }
    flags_extract();
    nmi_enable = 1;
    rf_flag_no_clear = 1;
    CPU_BLOCK_END();
    PREFETCH_RUN(cycles_old - cycles, 1, -1, 0, 2, 0, 0, 1);
    PREFETCH_FLUSH();
    return cpu_state.abrt;
}
``` | /content/code_sandbox/src/cpu/x86_ops_ret_2386.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 2,590 |
```objective-c
/* B0..B7: MOV r8, imm8 — one handler per 8-bit register.  The immediate
   comes from the already-fetched instruction stream (getbytef), which
   cannot abort, so these always succeed. */
static int
opMOV_AL_imm(uint32_t fetchdat)
{
    AL = getbytef();
    CLOCK_CYCLES(timing_rr);
    PREFETCH_RUN(timing_rr, 2, -1, 0, 0, 0, 0, 0);
    return 0;
}
static int
opMOV_AH_imm(uint32_t fetchdat)
{
    AH = getbytef();
    CLOCK_CYCLES(timing_rr);
    PREFETCH_RUN(timing_rr, 2, -1, 0, 0, 0, 0, 0);
    return 0;
}
static int
opMOV_BL_imm(uint32_t fetchdat)
{
    BL = getbytef();
    CLOCK_CYCLES(timing_rr);
    PREFETCH_RUN(timing_rr, 2, -1, 0, 0, 0, 0, 0);
    return 0;
}
static int
opMOV_BH_imm(uint32_t fetchdat)
{
    BH = getbytef();
    CLOCK_CYCLES(timing_rr);
    PREFETCH_RUN(timing_rr, 2, -1, 0, 0, 0, 0, 0);
    return 0;
}
static int
opMOV_CL_imm(uint32_t fetchdat)
{
    CL = getbytef();
    CLOCK_CYCLES(timing_rr);
    PREFETCH_RUN(timing_rr, 2, -1, 0, 0, 0, 0, 0);
    return 0;
}
static int
opMOV_CH_imm(uint32_t fetchdat)
{
    CH = getbytef();
    CLOCK_CYCLES(timing_rr);
    PREFETCH_RUN(timing_rr, 2, -1, 0, 0, 0, 0, 0);
    return 0;
}
static int
opMOV_DL_imm(uint32_t fetchdat)
{
    DL = getbytef();
    CLOCK_CYCLES(timing_rr);
    PREFETCH_RUN(timing_rr, 2, -1, 0, 0, 0, 0, 0);
    return 0;
}
static int
opMOV_DH_imm(uint32_t fetchdat)
{
    DH = getbytef();
    CLOCK_CYCLES(timing_rr);
    PREFETCH_RUN(timing_rr, 2, -1, 0, 0, 0, 0, 0);
    return 0;
}
/* B8..BF (16-bit operand size): MOV r16, imm16.  getwordf() reads from
   the fetched stream and cannot abort. */
static int
opMOV_AX_imm(uint32_t fetchdat)
{
    AX = getwordf();
    CLOCK_CYCLES(timing_rr);
    PREFETCH_RUN(timing_rr, 3, -1, 0, 0, 0, 0, 0);
    return 0;
}
static int
opMOV_BX_imm(uint32_t fetchdat)
{
    BX = getwordf();
    CLOCK_CYCLES(timing_rr);
    PREFETCH_RUN(timing_rr, 3, -1, 0, 0, 0, 0, 0);
    return 0;
}
static int
opMOV_CX_imm(uint32_t fetchdat)
{
    CX = getwordf();
    CLOCK_CYCLES(timing_rr);
    PREFETCH_RUN(timing_rr, 3, -1, 0, 0, 0, 0, 0);
    return 0;
}
static int
opMOV_DX_imm(uint32_t fetchdat)
{
    DX = getwordf();
    CLOCK_CYCLES(timing_rr);
    PREFETCH_RUN(timing_rr, 3, -1, 0, 0, 0, 0, 0);
    return 0;
}
static int
opMOV_SI_imm(uint32_t fetchdat)
{
    SI = getwordf();
    CLOCK_CYCLES(timing_rr);
    PREFETCH_RUN(timing_rr, 3, -1, 0, 0, 0, 0, 0);
    return 0;
}
static int
opMOV_DI_imm(uint32_t fetchdat)
{
    DI = getwordf();
    CLOCK_CYCLES(timing_rr);
    PREFETCH_RUN(timing_rr, 3, -1, 0, 0, 0, 0, 0);
    return 0;
}
static int
opMOV_BP_imm(uint32_t fetchdat)
{
    BP = getwordf();
    CLOCK_CYCLES(timing_rr);
    PREFETCH_RUN(timing_rr, 3, -1, 0, 0, 0, 0, 0);
    return 0;
}
static int
opMOV_SP_imm(uint32_t fetchdat)
{
    SP = getwordf();
    CLOCK_CYCLES(timing_rr);
    PREFETCH_RUN(timing_rr, 3, -1, 0, 0, 0, 0, 0);
    return 0;
}
/* B8..BF (32-bit operand size): MOV r32, imm32.  Unlike the 8/16-bit
   variants, getlong() can abort (page fault on the immediate fetch), so
   the value is staged in a temporary and committed only on success. */
static int
opMOV_EAX_imm(uint32_t fetchdat)
{
    uint32_t templ = getlong();
    if (cpu_state.abrt)
        return 1;
    EAX = templ;
    CLOCK_CYCLES(timing_rr);
    PREFETCH_RUN(timing_rr, 5, -1, 0, 0, 0, 0, 0);
    return 0;
}
static int
opMOV_EBX_imm(uint32_t fetchdat)
{
    uint32_t templ = getlong();
    if (cpu_state.abrt)
        return 1;
    EBX = templ;
    CLOCK_CYCLES(timing_rr);
    PREFETCH_RUN(timing_rr, 5, -1, 0, 0, 0, 0, 0);
    return 0;
}
static int
opMOV_ECX_imm(uint32_t fetchdat)
{
    uint32_t templ = getlong();
    if (cpu_state.abrt)
        return 1;
    ECX = templ;
    CLOCK_CYCLES(timing_rr);
    PREFETCH_RUN(timing_rr, 5, -1, 0, 0, 0, 0, 0);
    return 0;
}
static int
opMOV_EDX_imm(uint32_t fetchdat)
{
    uint32_t templ = getlong();
    if (cpu_state.abrt)
        return 1;
    EDX = templ;
    CLOCK_CYCLES(timing_rr);
    PREFETCH_RUN(timing_rr, 5, -1, 0, 0, 0, 0, 0);
    return 0;
}
static int
opMOV_ESI_imm(uint32_t fetchdat)
{
    uint32_t templ = getlong();
    if (cpu_state.abrt)
        return 1;
    ESI = templ;
    CLOCK_CYCLES(timing_rr);
    PREFETCH_RUN(timing_rr, 5, -1, 0, 0, 0, 0, 0);
    return 0;
}
static int
opMOV_EDI_imm(uint32_t fetchdat)
{
    uint32_t templ = getlong();
    if (cpu_state.abrt)
        return 1;
    EDI = templ;
    CLOCK_CYCLES(timing_rr);
    PREFETCH_RUN(timing_rr, 5, -1, 0, 0, 0, 0, 0);
    return 0;
}
static int
opMOV_EBP_imm(uint32_t fetchdat)
{
    uint32_t templ = getlong();
    if (cpu_state.abrt)
        return 1;
    EBP = templ;
    CLOCK_CYCLES(timing_rr);
    PREFETCH_RUN(timing_rr, 5, -1, 0, 0, 0, 0, 0);
    return 0;
}
static int
opMOV_ESP_imm(uint32_t fetchdat)
{
    uint32_t templ = getlong();
    if (cpu_state.abrt)
        return 1;
    ESP = templ;
    CLOCK_CYCLES(timing_rr);
    PREFETCH_RUN(timing_rr, 5, -1, 0, 0, 0, 0, 0);
    return 0;
}
/* MOV r/m8, imm8 (0xC6 /0) - 16-bit addressing.  The ModR/M reg field
   must be /0 (ILLEGAL_ON rejects the rest).  The EA is decoded first,
   then the immediate is fetched, then the store is performed.
   NOTE(review): this variant fetches the immediate with
   readmemb(cs, cpu_state.pc) + explicit pc++, while the a32 variant
   below uses getbyte() - presumably equivalent paths; confirm before
   unifying. */
static int
opMOV_b_imm_a16(uint32_t fetchdat)
{
    uint8_t temp;
    fetch_ea_16(fetchdat);
    ILLEGAL_ON((rmdat & 0x38) != 0);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    temp = readmemb(cs, cpu_state.pc);
    cpu_state.pc++;
    if (cpu_state.abrt)
        return 1;
    seteab(temp);
    CLOCK_CYCLES(timing_rr);
    PREFETCH_RUN(timing_rr, 3, rmdat, 0, 0, (cpu_mod == 3) ? 1 : 0, 0, 0);
    return cpu_state.abrt;
}
/* MOV r/m8, imm8 (0xC6 /0) - 32-bit addressing. */
static int
opMOV_b_imm_a32(uint32_t fetchdat)
{
    uint8_t temp;
    fetch_ea_32(fetchdat);
    ILLEGAL_ON((rmdat & 0x38) != 0);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    temp = getbyte();
    if (cpu_state.abrt)
        return 1;
    seteab(temp);
    CLOCK_CYCLES(timing_rr);
    PREFETCH_RUN(timing_rr, 3, rmdat, 0, 0, (cpu_mod == 3) ? 1 : 0, 0, 1);
    return cpu_state.abrt;
}
/* MOV r/m16, imm16 (0xC7 /0) - 16-bit addressing. */
static int
opMOV_w_imm_a16(uint32_t fetchdat)
{
    uint16_t temp;
    fetch_ea_16(fetchdat);
    ILLEGAL_ON((rmdat & 0x38) != 0);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    temp = getword();
    if (cpu_state.abrt)
        return 1;
    seteaw(temp);
    CLOCK_CYCLES(timing_rr);
    PREFETCH_RUN(timing_rr, 4, rmdat, 0, 0, (cpu_mod == 3) ? 1 : 0, 0, 0);
    return cpu_state.abrt;
}
/* MOV r/m16, imm16 (0xC7 /0) - 32-bit addressing. */
static int
opMOV_w_imm_a32(uint32_t fetchdat)
{
    uint16_t temp;
    fetch_ea_32(fetchdat);
    ILLEGAL_ON((rmdat & 0x38) != 0);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    temp = getword();
    if (cpu_state.abrt)
        return 1;
    seteaw(temp);
    CLOCK_CYCLES(timing_rr);
    PREFETCH_RUN(timing_rr, 4, rmdat, 0, 0, (cpu_mod == 3) ? 1 : 0, 0, 1);
    return cpu_state.abrt;
}
/* MOV r/m32, imm32 (0xC7 /0, 32-bit operand size) - 16-bit addressing. */
static int
opMOV_l_imm_a16(uint32_t fetchdat)
{
    uint32_t temp;
    fetch_ea_16(fetchdat);
    ILLEGAL_ON((rmdat & 0x38) != 0);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    temp = getlong();
    if (cpu_state.abrt)
        return 1;
    seteal(temp);
    CLOCK_CYCLES(timing_rr);
    PREFETCH_RUN(timing_rr, 6, rmdat, 0, 0, 0, (cpu_mod == 3) ? 1 : 0, 0);
    return cpu_state.abrt;
}
/* MOV r/m32, imm32 (0xC7 /0, 32-bit operand size) - 32-bit addressing. */
static int
opMOV_l_imm_a32(uint32_t fetchdat)
{
    uint32_t temp;
    fetch_ea_32(fetchdat);
    ILLEGAL_ON((rmdat & 0x38) != 0);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    temp = getlong();
    if (cpu_state.abrt)
        return 1;
    seteal(temp);
    CLOCK_CYCLES(timing_rr);
    PREFETCH_RUN(timing_rr, 6, rmdat, 0, 0, 0, (cpu_mod == 3) ? 1 : 0, 1);
    return cpu_state.abrt;
}
/* MOV AL, moffs8 (0xA0) - 16-bit addressing: load AL from the absolute
   offset encoded after the opcode (no ModR/M byte).  CHECK_READ spans
   addr..addr for a byte, ..addr+1 for a word, ..addr+3 for a dword.
   NOTE(review): in the a32 variants addr from getlong() is passed to
   the access checks before cpu_state.abrt is inspected; the abort is
   still caught after readmem*, but confirm the checks are safe on a
   stale address. */
static int
opMOV_AL_a16(uint32_t fetchdat)
{
    uint8_t temp;
    uint16_t addr = getwordf();
    SEG_CHECK_READ(cpu_state.ea_seg);
    CHECK_READ(cpu_state.ea_seg, addr, addr);
    temp = readmemb(cpu_state.ea_seg->base, addr);
    if (cpu_state.abrt)
        return 1;
    AL = temp;
    CLOCK_CYCLES((is486) ? 1 : 4);
    PREFETCH_RUN(4, 3, -1, 1, 0, 0, 0, 0);
    return 0;
}
/* MOV AL, moffs8 (0xA0) - 32-bit addressing. */
static int
opMOV_AL_a32(uint32_t fetchdat)
{
    uint8_t temp;
    uint32_t addr = getlong();
    SEG_CHECK_READ(cpu_state.ea_seg);
    CHECK_READ(cpu_state.ea_seg, addr, addr);
    temp = readmemb(cpu_state.ea_seg->base, addr);
    if (cpu_state.abrt)
        return 1;
    AL = temp;
    CLOCK_CYCLES((is486) ? 1 : 4);
    PREFETCH_RUN(4, 5, -1, 1, 0, 0, 0, 1);
    return 0;
}
/* MOV AX, moffs16 (0xA1) - 16-bit addressing. */
static int
opMOV_AX_a16(uint32_t fetchdat)
{
    uint16_t temp;
    uint16_t addr = getwordf();
    SEG_CHECK_READ(cpu_state.ea_seg);
    CHECK_READ(cpu_state.ea_seg, addr, addr + 1UL);
    temp = readmemw(cpu_state.ea_seg->base, addr);
    if (cpu_state.abrt)
        return 1;
    AX = temp;
    CLOCK_CYCLES((is486) ? 1 : 4);
    PREFETCH_RUN(4, 3, -1, 1, 0, 0, 0, 0);
    return 0;
}
/* MOV AX, moffs16 (0xA1) - 32-bit addressing. */
static int
opMOV_AX_a32(uint32_t fetchdat)
{
    uint16_t temp;
    uint32_t addr = getlong();
    SEG_CHECK_READ(cpu_state.ea_seg);
    CHECK_READ(cpu_state.ea_seg, addr, addr + 1);
    temp = readmemw(cpu_state.ea_seg->base, addr);
    if (cpu_state.abrt)
        return 1;
    AX = temp;
    CLOCK_CYCLES((is486) ? 1 : 4);
    PREFETCH_RUN(4, 5, -1, 1, 0, 0, 0, 1);
    return 0;
}
/* MOV EAX, moffs32 (0xA1, 32-bit operand size) - 16-bit addressing. */
static int
opMOV_EAX_a16(uint32_t fetchdat)
{
    uint32_t temp;
    uint16_t addr = getwordf();
    SEG_CHECK_READ(cpu_state.ea_seg);
    CHECK_READ(cpu_state.ea_seg, addr, addr + 3UL);
    temp = readmeml(cpu_state.ea_seg->base, addr);
    if (cpu_state.abrt)
        return 1;
    EAX = temp;
    CLOCK_CYCLES((is486) ? 1 : 4);
    PREFETCH_RUN(4, 3, -1, 0, 1, 0, 0, 0);
    return 0;
}
/* MOV EAX, moffs32 (0xA1, 32-bit operand size) - 32-bit addressing. */
static int
opMOV_EAX_a32(uint32_t fetchdat)
{
    uint32_t temp;
    uint32_t addr = getlong();
    SEG_CHECK_READ(cpu_state.ea_seg);
    CHECK_READ(cpu_state.ea_seg, addr, addr + 3);
    temp = readmeml(cpu_state.ea_seg->base, addr);
    if (cpu_state.abrt)
        return 1;
    EAX = temp;
    CLOCK_CYCLES((is486) ? 1 : 4);
    PREFETCH_RUN(4, 5, -1, 0, 1, 0, 0, 1);
    return 0;
}
/* MOV moffs8, AL (0xA2) - 16-bit addressing: store AL at the absolute
   16-bit offset that follows the opcode. */
static int
opMOV_a16_AL(uint32_t fetchdat)
{
    uint16_t addr = getwordf();
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    CHECK_WRITE_COMMON(cpu_state.ea_seg, addr, addr);
    writememb(cpu_state.ea_seg->base, addr, AL);
    CLOCK_CYCLES((is486) ? 1 : 2);
    PREFETCH_RUN(2, 3, -1, 0, 0, 1, 0, 0);
    return cpu_state.abrt;
}
/* MOV moffs8, AL (0xA2) - 32-bit addressing: store AL at the absolute
   32-bit offset that follows the opcode.
   Fix: check cpu_state.abrt after getlong() before using the fetched
   address, exactly as the sibling opMOV_a32_AX / opMOV_a32_EAX store
   handlers do - otherwise a faulted immediate fetch falls through to
   the segment/access checks and the store with an undefined address. */
static int
opMOV_a32_AL(uint32_t fetchdat)
{
    uint32_t addr = getlong();
    if (cpu_state.abrt)
        return 1;
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    CHECK_WRITE_COMMON(cpu_state.ea_seg, addr, addr);
    writememb(cpu_state.ea_seg->base, addr, AL);
    CLOCK_CYCLES((is486) ? 1 : 2);
    PREFETCH_RUN(2, 5, -1, 0, 0, 1, 0, 1);
    return cpu_state.abrt;
}
/* MOV moffs16, AX (0xA3) - 16-bit addressing. */
static int
opMOV_a16_AX(uint32_t fetchdat)
{
    uint16_t addr = getwordf();
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    CHECK_WRITE_COMMON(cpu_state.ea_seg, addr, addr + 1UL);
    writememw(cpu_state.ea_seg->base, addr, AX);
    CLOCK_CYCLES((is486) ? 1 : 2);
    PREFETCH_RUN(2, 3, -1, 0, 0, 1, 0, 0);
    return cpu_state.abrt;
}
/* MOV moffs16, AX (0xA3) - 32-bit addressing.  The abort from
   getlong() is checked before the address is used. */
static int
opMOV_a32_AX(uint32_t fetchdat)
{
    uint32_t addr = getlong();
    if (cpu_state.abrt)
        return 1;
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    CHECK_WRITE_COMMON(cpu_state.ea_seg, addr, addr + 1);
    writememw(cpu_state.ea_seg->base, addr, AX);
    CLOCK_CYCLES((is486) ? 1 : 2);
    PREFETCH_RUN(2, 5, -1, 0, 0, 1, 0, 1);
    return cpu_state.abrt;
}
/* MOV moffs32, EAX (0xA3, 32-bit operand size) - 16-bit addressing. */
static int
opMOV_a16_EAX(uint32_t fetchdat)
{
    uint16_t addr = getwordf();
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    CHECK_WRITE_COMMON(cpu_state.ea_seg, addr, addr + 3UL);
    writememl(cpu_state.ea_seg->base, addr, EAX);
    CLOCK_CYCLES((is486) ? 1 : 2);
    PREFETCH_RUN(2, 3, -1, 0, 0, 0, 1, 0);
    return cpu_state.abrt;
}
/* MOV moffs32, EAX (0xA3, 32-bit operand size) - 32-bit addressing. */
static int
opMOV_a32_EAX(uint32_t fetchdat)
{
    uint32_t addr = getlong();
    if (cpu_state.abrt)
        return 1;
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    CHECK_WRITE_COMMON(cpu_state.ea_seg, addr, addr + 3);
    writememl(cpu_state.ea_seg->base, addr, EAX);
    CLOCK_CYCLES((is486) ? 1 : 2);
    PREFETCH_RUN(2, 5, -1, 0, 0, 0, 1, 1);
    return cpu_state.abrt;
}
/* LEA r16, m (0x8D) - 16-bit addressing: store the computed effective
   address (offset only, no segment base) in the destination register.
   A register operand (mod == 3) is an invalid encoding. */
static int
opLEA_w_a16(uint32_t fetchdat)
{
    fetch_ea_16(fetchdat);
    ILLEGAL_ON(cpu_mod == 3);
    cpu_state.regs[cpu_reg].w = cpu_state.eaaddr;
    CLOCK_CYCLES(timing_rr);
    PREFETCH_RUN(timing_rr, 2, rmdat, 0, 0, 0, 0, 0);
    return 0;
}
/* LEA r16, m - 32-bit addressing: the 32-bit EA is truncated by the
   16-bit register assignment. */
static int
opLEA_w_a32(uint32_t fetchdat)
{
    fetch_ea_32(fetchdat);
    ILLEGAL_ON(cpu_mod == 3);
    cpu_state.regs[cpu_reg].w = cpu_state.eaaddr;
    CLOCK_CYCLES(timing_rr);
    PREFETCH_RUN(timing_rr, 2, rmdat, 0, 0, 0, 0, 1);
    return 0;
}
/* LEA r32, m - 16-bit addressing: the EA is masked to 16 bits before
   being zero-extended into the 32-bit register. */
static int
opLEA_l_a16(uint32_t fetchdat)
{
    fetch_ea_16(fetchdat);
    ILLEGAL_ON(cpu_mod == 3);
    cpu_state.regs[cpu_reg].l = cpu_state.eaaddr & 0xffff;
    CLOCK_CYCLES(timing_rr);
    PREFETCH_RUN(timing_rr, 2, rmdat, 0, 0, 0, 0, 0);
    return 0;
}
/* LEA r32, m - 32-bit addressing: full 32-bit EA. */
static int
opLEA_l_a32(uint32_t fetchdat)
{
    fetch_ea_32(fetchdat);
    ILLEGAL_ON(cpu_mod == 3);
    cpu_state.regs[cpu_reg].l = cpu_state.eaaddr;
    CLOCK_CYCLES(timing_rr);
    PREFETCH_RUN(timing_rr, 2, rmdat, 0, 0, 0, 0, 1);
    return 0;
}
/* XLAT (0xD7) - 16-bit addressing: AL = [ea_seg:(BX + AL) & 0xFFFF].
   The default segment (overridable) is already selected in ea_seg. */
static int
opXLAT_a16(uint32_t fetchdat)
{
    uint32_t addr = (BX + AL) & 0xFFFF;
    uint8_t temp;
    SEG_CHECK_READ(cpu_state.ea_seg);
    temp = readmemb(cpu_state.ea_seg->base, addr);
    if (cpu_state.abrt)
        return 1;
    AL = temp;
    CLOCK_CYCLES(5);
    PREFETCH_RUN(5, 1, -1, 1, 0, 0, 0, 0);
    return 0;
}
/* XLAT - 32-bit addressing: AL = [ea_seg:EBX + AL] (no 16-bit wrap). */
static int
opXLAT_a32(uint32_t fetchdat)
{
    uint32_t addr = EBX + AL;
    uint8_t temp;
    SEG_CHECK_READ(cpu_state.ea_seg);
    temp = readmemb(cpu_state.ea_seg->base, addr);
    if (cpu_state.abrt)
        return 1;
    AL = temp;
    CLOCK_CYCLES(5);
    PREFETCH_RUN(5, 1, -1, 1, 0, 0, 0, 1);
    return 0;
}
/* MOV r/m8, r8 (0x88) - 16-bit addressing.  Register destination takes
   timing_rr; a memory destination goes through seteab() after the
   segment write check. */
static int
opMOV_b_r_a16(uint32_t fetchdat)
{
    fetch_ea_16(fetchdat);
    if (cpu_mod == 3) {
        setr8(cpu_rm, getr8(cpu_reg));
        CLOCK_CYCLES(timing_rr);
        PREFETCH_RUN(timing_rr, 2, rmdat, 0, 0, 0, 0, 0);
    } else {
        SEG_CHECK_WRITE(cpu_state.ea_seg);
        seteab(getr8(cpu_reg));
        CLOCK_CYCLES(is486 ? 1 : 2);
        PREFETCH_RUN(2, 2, rmdat, 0, 0, 1, 0, 0);
    }
    return cpu_state.abrt;
}
/* MOV r/m8, r8 (0x88) - 32-bit addressing. */
static int
opMOV_b_r_a32(uint32_t fetchdat)
{
    fetch_ea_32(fetchdat);
    if (cpu_mod == 3) {
        setr8(cpu_rm, getr8(cpu_reg));
        CLOCK_CYCLES(timing_rr);
        PREFETCH_RUN(timing_rr, 2, rmdat, 0, 0, 0, 0, 1);
    } else {
        SEG_CHECK_WRITE(cpu_state.ea_seg);
        seteab(getr8(cpu_reg));
        CLOCK_CYCLES(is486 ? 1 : 2);
        PREFETCH_RUN(2, 2, rmdat, 0, 0, 1, 0, 1);
    }
    return cpu_state.abrt;
}
/* MOV r/m16, r16 (0x89) - 16-bit addressing. */
static int
opMOV_w_r_a16(uint32_t fetchdat)
{
    fetch_ea_16(fetchdat);
    if (cpu_mod == 3) {
        cpu_state.regs[cpu_rm].w = cpu_state.regs[cpu_reg].w;
        CLOCK_CYCLES(timing_rr);
        PREFETCH_RUN(timing_rr, 2, rmdat, 0, 0, 0, 0, 0);
    } else {
        SEG_CHECK_WRITE(cpu_state.ea_seg);
        seteaw(cpu_state.regs[cpu_reg].w);
        CLOCK_CYCLES(is486 ? 1 : 2);
        PREFETCH_RUN(2, 2, rmdat, 0, 0, 1, 0, 0);
    }
    return cpu_state.abrt;
}
/* MOV r/m16, r16 (0x89) - 32-bit addressing. */
static int
opMOV_w_r_a32(uint32_t fetchdat)
{
    fetch_ea_32(fetchdat);
    if (cpu_mod == 3) {
        cpu_state.regs[cpu_rm].w = cpu_state.regs[cpu_reg].w;
        CLOCK_CYCLES(timing_rr);
        PREFETCH_RUN(timing_rr, 2, rmdat, 0, 0, 0, 0, 1);
    } else {
        SEG_CHECK_WRITE(cpu_state.ea_seg);
        seteaw(cpu_state.regs[cpu_reg].w);
        CLOCK_CYCLES(is486 ? 1 : 2);
        PREFETCH_RUN(2, 2, rmdat, 0, 0, 1, 0, 1);
    }
    return cpu_state.abrt;
}
/* MOV r/m32, r32 (0x89, 32-bit operand size) - 16-bit addressing. */
static int
opMOV_l_r_a16(uint32_t fetchdat)
{
    fetch_ea_16(fetchdat);
    if (cpu_mod == 3) {
        cpu_state.regs[cpu_rm].l = cpu_state.regs[cpu_reg].l;
        CLOCK_CYCLES(timing_rr);
        PREFETCH_RUN(timing_rr, 2, rmdat, 0, 0, 0, 0, 0);
    } else {
        SEG_CHECK_WRITE(cpu_state.ea_seg);
        seteal(cpu_state.regs[cpu_reg].l);
        CLOCK_CYCLES(is486 ? 1 : 2);
        PREFETCH_RUN(2, 2, rmdat, 0, 0, 0, 1, 0);
    }
    return cpu_state.abrt;
}
/* MOV r/m32, r32 (0x89, 32-bit operand size) - 32-bit addressing. */
static int
opMOV_l_r_a32(uint32_t fetchdat)
{
    fetch_ea_32(fetchdat);
    if (cpu_mod == 3) {
        cpu_state.regs[cpu_rm].l = cpu_state.regs[cpu_reg].l;
        CLOCK_CYCLES(timing_rr);
        PREFETCH_RUN(timing_rr, 2, rmdat, 0, 0, 0, 0, 1);
    } else {
        SEG_CHECK_WRITE(cpu_state.ea_seg);
        seteal(cpu_state.regs[cpu_reg].l);
        CLOCK_CYCLES(is486 ? 1 : 2);
        PREFETCH_RUN(2, 2, rmdat, 0, 0, 0, 1, 1);
    }
    return cpu_state.abrt;
}
/* MOV r8, r/m8 (0x8A) - 16-bit addressing.  A memory source is
   access-checked (CHECK_READ over the operand's byte range) and the
   register is only written once the read is known not to have
   aborted. */
static int
opMOV_r_b_a16(uint32_t fetchdat)
{
    fetch_ea_16(fetchdat);
    if (cpu_mod == 3) {
        setr8(cpu_reg, getr8(cpu_rm));
        CLOCK_CYCLES(timing_rr);
        PREFETCH_RUN(timing_rr, 2, rmdat, 0, 0, 0, 0, 0);
    } else {
        uint8_t temp;
        SEG_CHECK_READ(cpu_state.ea_seg);
        CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr);
        temp = geteab();
        if (cpu_state.abrt)
            return 1;
        setr8(cpu_reg, temp);
        CLOCK_CYCLES(is486 ? 1 : 4);
        PREFETCH_RUN(4, 2, rmdat, 1, 0, 0, 0, 0);
    }
    return 0;
}
/* MOV r8, r/m8 (0x8A) - 32-bit addressing. */
static int
opMOV_r_b_a32(uint32_t fetchdat)
{
    fetch_ea_32(fetchdat);
    if (cpu_mod == 3) {
        setr8(cpu_reg, getr8(cpu_rm));
        CLOCK_CYCLES(timing_rr);
        PREFETCH_RUN(timing_rr, 2, rmdat, 0, 0, 0, 0, 1);
    } else {
        uint8_t temp;
        SEG_CHECK_READ(cpu_state.ea_seg);
        CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr);
        temp = geteab();
        if (cpu_state.abrt)
            return 1;
        setr8(cpu_reg, temp);
        CLOCK_CYCLES(is486 ? 1 : 4);
        PREFETCH_RUN(4, 2, rmdat, 1, 0, 0, 0, 1);
    }
    return 0;
}
/* MOV r16, r/m16 (0x8B) - 16-bit addressing. */
static int
opMOV_r_w_a16(uint32_t fetchdat)
{
    fetch_ea_16(fetchdat);
    if (cpu_mod == 3) {
        cpu_state.regs[cpu_reg].w = cpu_state.regs[cpu_rm].w;
        CLOCK_CYCLES(timing_rr);
        PREFETCH_RUN(timing_rr, 2, rmdat, 0, 0, 0, 0, 0);
    } else {
        uint16_t temp;
        SEG_CHECK_READ(cpu_state.ea_seg);
        CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 1);
        temp = geteaw();
        if (cpu_state.abrt)
            return 1;
        cpu_state.regs[cpu_reg].w = temp;
        CLOCK_CYCLES((is486) ? 1 : 4);
        PREFETCH_RUN(4, 2, rmdat, 1, 0, 0, 0, 0);
    }
    return 0;
}
/* MOV r16, r/m16 (0x8B) - 32-bit addressing. */
static int
opMOV_r_w_a32(uint32_t fetchdat)
{
    fetch_ea_32(fetchdat);
    if (cpu_mod == 3) {
        cpu_state.regs[cpu_reg].w = cpu_state.regs[cpu_rm].w;
        CLOCK_CYCLES(timing_rr);
        PREFETCH_RUN(timing_rr, 2, rmdat, 0, 0, 0, 0, 1);
    } else {
        uint16_t temp;
        SEG_CHECK_READ(cpu_state.ea_seg);
        CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 1);
        temp = geteaw();
        if (cpu_state.abrt)
            return 1;
        cpu_state.regs[cpu_reg].w = temp;
        CLOCK_CYCLES((is486) ? 1 : 4);
        PREFETCH_RUN(4, 2, rmdat, 1, 0, 0, 0, 1);
    }
    return 0;
}
/* MOV r32, r/m32 (0x8B, 32-bit operand size) - 16-bit addressing. */
static int
opMOV_r_l_a16(uint32_t fetchdat)
{
    fetch_ea_16(fetchdat);
    if (cpu_mod == 3) {
        cpu_state.regs[cpu_reg].l = cpu_state.regs[cpu_rm].l;
        CLOCK_CYCLES(timing_rr);
        PREFETCH_RUN(timing_rr, 2, rmdat, 0, 0, 0, 0, 0);
    } else {
        uint32_t temp;
        SEG_CHECK_READ(cpu_state.ea_seg);
        CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 3);
        temp = geteal();
        if (cpu_state.abrt)
            return 1;
        cpu_state.regs[cpu_reg].l = temp;
        CLOCK_CYCLES(is486 ? 1 : 4);
        PREFETCH_RUN(4, 2, rmdat, 0, 1, 0, 0, 0);
    }
    return 0;
}
/* MOV r32, r/m32 (0x8B, 32-bit operand size) - 32-bit addressing. */
static int
opMOV_r_l_a32(uint32_t fetchdat)
{
    fetch_ea_32(fetchdat);
    if (cpu_mod == 3) {
        cpu_state.regs[cpu_reg].l = cpu_state.regs[cpu_rm].l;
        CLOCK_CYCLES(timing_rr);
        PREFETCH_RUN(timing_rr, 2, rmdat, 0, 0, 0, 0, 1);
    } else {
        uint32_t temp;
        SEG_CHECK_READ(cpu_state.ea_seg);
        CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 3);
        temp = geteal();
        if (cpu_state.abrt)
            return 1;
        cpu_state.regs[cpu_reg].l = temp;
        CLOCK_CYCLES(is486 ? 1 : 4);
        PREFETCH_RUN(4, 2, rmdat, 0, 1, 0, 0, 1);
    }
    return 0;
}
#ifndef OPS_286_386
/* CMOVcc r16, r/m16 / CMOVcc r32, r/m32 (0F 40-0F 4F; P6+).
   opCMOV(cond) expands the four handlers for one condition code
   (16/32-bit operand size x 16/32-bit address size).  The EA is always
   decoded; the memory source is read and the destination written only
   when cond_##cond holds.  Every variant takes 1 clock.
   Fix: the _l_a32 variant previously ran CHECK_READ before
   SEG_CHECK_READ; the segment validity check now comes first, matching
   the other three variants and every other handler in this file. */
# define opCMOV(condition)                                                             \
    static int opCMOV##condition##_w_a16(uint32_t fetchdat)                            \
    {                                                                                  \
        fetch_ea_16(fetchdat);                                                         \
        if (cond_##condition) {                                                        \
            if (cpu_mod == 3)                                                          \
                cpu_state.regs[cpu_reg].w = cpu_state.regs[cpu_rm].w;                  \
            else {                                                                     \
                uint16_t temp;                                                         \
                SEG_CHECK_READ(cpu_state.ea_seg);                                      \
                CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 1);  \
                temp = geteaw();                                                       \
                if (cpu_state.abrt)                                                    \
                    return 1;                                                          \
                cpu_state.regs[cpu_reg].w = temp;                                      \
            }                                                                          \
        }                                                                              \
        CLOCK_CYCLES(1);                                                               \
        return 0;                                                                      \
    }                                                                                  \
    static int opCMOV##condition##_w_a32(uint32_t fetchdat)                            \
    {                                                                                  \
        fetch_ea_32(fetchdat);                                                         \
        if (cond_##condition) {                                                        \
            if (cpu_mod == 3)                                                          \
                cpu_state.regs[cpu_reg].w = cpu_state.regs[cpu_rm].w;                  \
            else {                                                                     \
                uint16_t temp;                                                         \
                SEG_CHECK_READ(cpu_state.ea_seg);                                      \
                CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 1);  \
                temp = geteaw();                                                       \
                if (cpu_state.abrt)                                                    \
                    return 1;                                                          \
                cpu_state.regs[cpu_reg].w = temp;                                      \
            }                                                                          \
        }                                                                              \
        CLOCK_CYCLES(1);                                                               \
        return 0;                                                                      \
    }                                                                                  \
    static int opCMOV##condition##_l_a16(uint32_t fetchdat)                            \
    {                                                                                  \
        fetch_ea_16(fetchdat);                                                         \
        if (cond_##condition) {                                                        \
            if (cpu_mod == 3)                                                          \
                cpu_state.regs[cpu_reg].l = cpu_state.regs[cpu_rm].l;                  \
            else {                                                                     \
                uint32_t temp;                                                         \
                SEG_CHECK_READ(cpu_state.ea_seg);                                      \
                CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 3);  \
                temp = geteal();                                                       \
                if (cpu_state.abrt)                                                    \
                    return 1;                                                          \
                cpu_state.regs[cpu_reg].l = temp;                                      \
            }                                                                          \
        }                                                                              \
        CLOCK_CYCLES(1);                                                               \
        return 0;                                                                      \
    }                                                                                  \
    static int opCMOV##condition##_l_a32(uint32_t fetchdat)                            \
    {                                                                                  \
        fetch_ea_32(fetchdat);                                                         \
        if (cond_##condition) {                                                        \
            if (cpu_mod == 3)                                                          \
                cpu_state.regs[cpu_reg].l = cpu_state.regs[cpu_rm].l;                  \
            else {                                                                     \
                uint32_t temp;                                                         \
                SEG_CHECK_READ(cpu_state.ea_seg);                                      \
                CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 3);  \
                temp = geteal();                                                       \
                if (cpu_state.abrt)                                                    \
                    return 1;                                                          \
                cpu_state.regs[cpu_reg].l = temp;                                      \
            }                                                                          \
        }                                                                              \
        CLOCK_CYCLES(1);                                                               \
        return 0;                                                                      \
    }
// clang-format off
/* Instantiate the sixteen CMOVcc condition codes (0F 40 .. 0F 4F). */
opCMOV(O)
opCMOV(NO)
opCMOV(B)
opCMOV(NB)
opCMOV(E)
opCMOV(NE)
opCMOV(BE)
opCMOV(NBE)
opCMOV(S)
opCMOV(NS)
opCMOV(P)
opCMOV(NP)
opCMOV(L)
opCMOV(NL)
opCMOV(LE)
opCMOV(NLE)
// clang-format on
#endif
``` | /content/code_sandbox/src/cpu/x86_ops_mov.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 8,041 |
```c
#if defined __ARM_EABI__ || defined _ARM_ || defined _M_ARM
# include <math.h>
# include <stdint.h>
# include <86box/86box.h>
# include "cpu.h"
# include <86box/mem.h>
# include <86box/plat_unused.h>
# include "x86.h"
# include "x86seg_common.h"
# include "x86seg.h"
# include "x87_sf.h"
# include "x87.h"
# include "386_common.h"
# include "codegen.h"
# include "codegen_allocator.h"
# include "codegen_backend.h"
# include "codegen_backend_arm_defs.h"
# include "codegen_backend_arm_ops.h"
# include "codegen_ir_defs.h"
/* Try to encode a 32-bit constant as an ARM data-processing "modified
   immediate" operand (8-bit value plus an even right-rotation).  On
   success, stores the encoded 12-bit operand in *arm_imm and returns 1;
   returns 0 when the constant cannot be represented.  Only values whose
   significant bits fit in one byte after shifting out trailing zero
   bits are handled - rotations that wrap around the word are not. */
static inline int
get_arm_imm(uint32_t imm_data, uint32_t *arm_imm)
{
    static const int step[4] = { 16, 8, 4, 2 };
    int shift = 0;

    /* Strip trailing zero bits in progressively smaller chunks. */
    for (int i = 0; i < 4; i++) {
        uint32_t low_mask = (1u << step[i]) - 1;

        if (!(imm_data & low_mask)) {
            imm_data >>= step[i];
            shift += step[i];
        }
    }

    if (imm_data > 0xff) /* Needs a wrap-around rotation - unsupported. */
        return 0;

    *arm_imm = imm_data | ((((32 - shift) >> 1) & 15) << 8);
    return 1;
}
/* Return 1 if addr lies within +/-4095 bytes of base (the immediate
   offset reach of an ARM word LDR/STR), 0 otherwise. */
static inline int
in_range(void *addr, void *base)
{
    int offset = (uintptr_t) addr - (uintptr_t) base;

    return (offset >= -4095) && (offset <= 4095);
}
/* Return 1 if addr is 0..255 bytes above base (the immediate offset
   reach of an ARM halfword LDRH/STRH), 0 otherwise. */
static inline int
in_range_h(void *addr, void *base)
{
    int offset = (uintptr_t) addr - (uintptr_t) base;

    return (offset >= 0) && (offset <= 255);
}
/* Emit a call to an arbitrary host address: materialise the destination
   in R3, then branch-and-link through it (covers the full address
   space, unlike a direct BL). */
void
host_arm_call(codeblock_t *block, void *dst_addr)
{
    host_arm_MOV_IMM(block, REG_R3, (uintptr_t) dst_addr);
    host_arm_BLX(block, REG_R3);
}
/* Emit a no-op, encoded as MOV R0, R0 LSL #0. */
void
host_arm_nop(codeblock_t *block)
{
    host_arm_MOV_REG_LSL(block, REG_R0, REG_R0, 0);
}
/* Extract the host register number (low 4 bits) from a packed IR register id. */
# define HOST_REG_GET(reg) (IREG_GET_REG(reg) & 0xf)
/* Size-class predicates for IR registers: 32-bit long, 16-bit word,
   low byte, high byte, 64-bit double, 64-bit quad. */
# define REG_IS_L(size) (size == IREG_SIZE_L)
# define REG_IS_W(size) (size == IREG_SIZE_W)
# define REG_IS_B(size) (size == IREG_SIZE_B)
# define REG_IS_BH(size) (size == IREG_SIZE_BH)
# define REG_IS_D(size) (size == IREG_SIZE_D)
# define REG_IS_Q(size) (size == IREG_SIZE_Q)
/* Emit ARM code for the ADD uop.  Dispatches on the IR size classes of
   destination and sources; sub-32-bit results are computed into
   REG_TEMP and merged into the destination with BFI so the remaining
   bits of the host register are preserved.  The UADD8 branches (taken
   only when dest == src_a) add byte lanes in place, leaving the other
   lanes of the destination unchanged.  Unhandled combinations are a
   hard error. */
static int
codegen_ADD(codeblock_t *block, uop_t *uop)
{
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_a = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_L(dest_size) && REG_IS_L(src_size_a) && REG_IS_L(src_size_b)) {
        host_arm_ADD_REG_LSL(block, dest_reg, src_reg_a, src_reg_b, 0);
    } else if (REG_IS_W(dest_size) && REG_IS_W(src_size_a) && REG_IS_W(src_size_b)) {
        host_arm_ADD_REG(block, REG_TEMP, src_reg_a, src_reg_b);
        host_arm_BFI(block, dest_reg, REG_TEMP, 0, 16);
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size_a) && REG_IS_B(src_size_b)) {
        host_arm_ADD_REG(block, REG_TEMP, src_reg_a, src_reg_b);
        host_arm_BFI(block, dest_reg, REG_TEMP, 0, 8);
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size_a) && REG_IS_BH(src_size_b) && dest_reg == src_reg_a) {
        /* Rotate src_b's high byte into lane 0, then byte-lane add. */
        host_arm_UXTB(block, REG_TEMP, src_reg_b, 8);
        host_arm_UADD8(block, dest_reg, src_reg_a, REG_TEMP);
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size_a) && REG_IS_BH(src_size_b)) {
        host_arm_ADD_REG_LSR(block, REG_TEMP, src_reg_a, src_reg_b, 8);
        host_arm_BFI(block, dest_reg, REG_TEMP, 8, 8);
    } else if (REG_IS_BH(dest_size) && REG_IS_BH(src_size_a) && REG_IS_B(src_size_b) && dest_reg == src_reg_a) {
        host_arm_UXTB(block, REG_TEMP, src_reg_b, 0);
        host_arm_MOV_REG_LSL(block, REG_TEMP, REG_TEMP, 8);
        host_arm_UADD8(block, dest_reg, src_reg_a, REG_TEMP);
    } else if (REG_IS_BH(dest_size) && REG_IS_BH(src_size_a) && REG_IS_BH(src_size_b) && dest_reg == src_reg_a) {
        host_arm_AND_IMM(block, REG_TEMP, src_reg_b, 0x0000ff00);
        host_arm_UADD8(block, dest_reg, src_reg_a, REG_TEMP);
    } else
        fatal("ADD %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
/* Emit ARM code for ADD with an immediate operand; same size dispatch
   and BFI-merge scheme as codegen_ADD. */
static int
codegen_ADD_IMM(codeblock_t *block, uop_t *uop)
{
#    if 0
    host_arm_ADD_IMM(block, uop->dest_reg_a_real, uop->src_reg_a_real, uop->imm_data);
    return 0;
#    endif
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_L(dest_size) && REG_IS_L(src_size)) {
        host_arm_ADD_IMM(block, dest_reg, src_reg, uop->imm_data);
    } else if (REG_IS_W(dest_size) && REG_IS_W(src_size)) {
        host_arm_ADD_IMM(block, REG_TEMP, src_reg, uop->imm_data);
        host_arm_BFI(block, dest_reg, REG_TEMP, 0, 16);
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size)) {
        host_arm_ADD_IMM(block, REG_TEMP, src_reg, uop->imm_data);
        host_arm_BFI(block, dest_reg, REG_TEMP, 0, 8);
    } else if (REG_IS_BH(dest_size) && REG_IS_BH(src_size) && src_reg == dest_reg) {
        /* Byte-lane add of the immediate shifted into the high-byte lane. */
        host_arm_MOV_IMM(block, REG_TEMP, uop->imm_data << 8);
        host_arm_UADD8(block, dest_reg, src_reg, REG_TEMP);
    } else
        fatal("ADD_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
    return 0;
}
/* Emit dest = src_a + (src_b << imm).
   NOTE(review): unlike the other emitters this passes the packed IR
   register ids straight through without HOST_REG_GET or size dispatch -
   presumably only ever emitted for 32-bit host registers; confirm. */
static int
codegen_ADD_LSHIFT(codeblock_t *block, uop_t *uop)
{
    host_arm_ADD_REG_LSL(block, uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real, uop->imm_data);
    return 0;
}
/* Emit ARM code for the AND uop.  64-bit (MMX) registers use NEON
   VAND; 32-bit uses a plain AND.  For partial registers with
   dest == src_a, the MVN/BIC-with-shift pairs clear exactly the
   destination's sub-field bits that are zero in src_b while leaving the
   rest of the register intact; otherwise the result is computed in
   REG_TEMP and BFI-merged. */
static int
codegen_AND(codeblock_t *block, uop_t *uop)
{
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_a = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_a) && REG_IS_Q(src_size_b)) {
        host_arm_VAND_D(block, dest_reg, src_reg_a, src_reg_b);
    } else if (REG_IS_L(dest_size) && REG_IS_L(src_size_a) && REG_IS_L(src_size_b)) {
        host_arm_AND_REG_LSL(block, dest_reg, src_reg_a, src_reg_b, 0);
    } else if (REG_IS_W(dest_size) && REG_IS_W(src_size_a) && REG_IS_W(src_size_b) && dest_reg == src_reg_a) {
        host_arm_MVN_REG_LSL(block, REG_TEMP, src_reg_b, 16);
        host_arm_BIC_REG_LSR(block, dest_reg, src_reg_a, REG_TEMP, 16);
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size_a) && REG_IS_B(src_size_b) && dest_reg == src_reg_a) {
        host_arm_MVN_REG_LSL(block, REG_TEMP, src_reg_b, 24);
        host_arm_BIC_REG_LSR(block, dest_reg, src_reg_a, REG_TEMP, 24);
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size_a) && REG_IS_BH(src_size_b) && dest_reg == src_reg_a) {
        host_arm_MVN_REG_LSL(block, REG_TEMP, src_reg_b, 16);
        host_arm_BIC_REG_LSR(block, dest_reg, src_reg_a, REG_TEMP, 24);
    } else if (REG_IS_BH(dest_size) && REG_IS_BH(src_size_a) && REG_IS_B(src_size_b) && dest_reg == src_reg_a) {
        host_arm_MVN_REG_LSL(block, REG_TEMP, src_reg_b, 8);
        host_arm_AND_IMM(block, REG_TEMP, REG_TEMP, 0x0000ff00);
        host_arm_BIC_REG_LSL(block, dest_reg, src_reg_a, REG_TEMP, 0);
    } else if (REG_IS_BH(dest_size) && REG_IS_BH(src_size_a) && REG_IS_BH(src_size_b) && dest_reg == src_reg_a) {
        host_arm_MVN_REG_LSL(block, REG_TEMP, src_reg_b, 0);
        host_arm_AND_IMM(block, REG_TEMP, REG_TEMP, 0x0000ff00);
        host_arm_BIC_REG_LSL(block, dest_reg, src_reg_a, REG_TEMP, 0);
    } else if (REG_IS_W(dest_size) && REG_IS_W(src_size_a) && REG_IS_W(src_size_b)) {
        host_arm_AND_REG_LSL(block, REG_TEMP, src_reg_a, src_reg_b, 0);
        host_arm_BFI(block, dest_reg, REG_TEMP, 0, 16);
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size_a) && REG_IS_B(src_size_b)) {
        host_arm_AND_REG_LSL(block, REG_TEMP, src_reg_a, src_reg_b, 0);
        host_arm_BFI(block, dest_reg, REG_TEMP, 0, 8);
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size_a) && REG_IS_BH(src_size_b)) {
        host_arm_AND_REG_LSR(block, REG_TEMP, src_reg_a, src_reg_b, 8);
        host_arm_BFI(block, dest_reg, REG_TEMP, 0, 8);
    } else if (REG_IS_B(dest_size) && REG_IS_BH(src_size_a) && REG_IS_B(src_size_b)) {
        host_arm_AND_REG_LSR(block, REG_TEMP, src_reg_b, src_reg_a, 8);
        host_arm_BFI(block, dest_reg, REG_TEMP, 0, 8);
    } else if (REG_IS_B(dest_size) && REG_IS_BH(src_size_a) && REG_IS_BH(src_size_b)) {
        host_arm_AND_REG_LSL(block, REG_TEMP, src_reg_a, src_reg_b, 0);
        host_arm_MOV_REG_LSR(block, REG_TEMP, REG_TEMP, 8);
        host_arm_BFI(block, dest_reg, REG_TEMP, 0, 8);
    } else
        fatal("AND %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
/* Emit ARM code for AND with an immediate.  When dest == src the
   immediate is widened with ones outside the sub-field so the rest of
   the register is untouched by a single AND; otherwise the result is
   BFI-merged via REG_TEMP. */
static int
codegen_AND_IMM(codeblock_t *block, uop_t *uop)
{
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_L(dest_size) && REG_IS_L(src_size)) {
        host_arm_AND_IMM(block, dest_reg, src_reg, uop->imm_data);
    } else if (REG_IS_W(dest_size) && REG_IS_W(src_size) && dest_reg == src_reg) {
        host_arm_AND_IMM(block, dest_reg, src_reg, uop->imm_data | 0xffff0000);
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size) && dest_reg == src_reg) {
        host_arm_AND_IMM(block, dest_reg, src_reg, uop->imm_data | 0xffffff00);
    } else if (REG_IS_BH(dest_size) && REG_IS_BH(src_size) && dest_reg == src_reg) {
        host_arm_AND_IMM(block, dest_reg, src_reg, (uop->imm_data << 8) | 0xffff00ff);
    } else if (REG_IS_W(dest_size) && REG_IS_W(src_size)) {
        host_arm_AND_IMM(block, REG_TEMP, src_reg, uop->imm_data);
        host_arm_BFI(block, dest_reg, REG_TEMP, 0, 16);
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size)) {
        host_arm_AND_IMM(block, REG_TEMP, src_reg, uop->imm_data);
        host_arm_BFI(block, dest_reg, REG_TEMP, 0, 8);
    } else if (REG_IS_B(dest_size) && REG_IS_BH(src_size)) {
        host_arm_MOV_REG_LSR(block, REG_TEMP, src_reg, 8);
        host_arm_AND_IMM(block, REG_TEMP, REG_TEMP, uop->imm_data);
        host_arm_BFI(block, dest_reg, REG_TEMP, 0, 8);
    } else
        fatal("AND_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
    return 0;
}
/* Emit ARM code for the ANDN uop (64-bit MMX only): dest = src_b with
   the bits of src_a cleared - VBIC Rd, Rn, Rm computes Rn & ~Rm, hence
   the swapped operand order. */
static int
codegen_ANDN(codeblock_t *block, uop_t *uop)
{
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_a = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_a) && REG_IS_Q(src_size_b)) {
        host_arm_VBIC_D(block, dest_reg, src_reg_b, src_reg_a);
    } else
        fatal("ANDN %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
/* Emit a call to the host function in uop->p, ignoring any result. */
static int
codegen_CALL_FUNC(codeblock_t *block, uop_t *uop)
{
    host_arm_call(block, uop->p);
    return 0;
}
/* Emit a call to uop->p and copy its return value (R0 per the ARM
   calling convention) into the 32-bit destination register. */
static int
codegen_CALL_FUNC_RESULT(codeblock_t *block, uop_t *uop)
{
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    if (!REG_IS_L(dest_size))
        fatal("CALL_FUNC_RESULT %02x\n", uop->dest_reg_a_real);
    host_arm_call(block, uop->p);
    host_arm_MOV_REG(block, dest_reg, REG_R0);
    return 0;
}
/* Emit a call to an instruction handler; a non-zero return (abort /
   fault) branches to the common exit routine. */
static int
codegen_CALL_INSTRUCTION_FUNC(codeblock_t *block, uop_t *uop)
{
    host_arm_call(block, uop->p);
    host_arm_TST_REG(block, REG_R0, REG_R0);
    host_arm_BNE(block, (uintptr_t) codegen_exit_rout);
    return 0;
}
/* Compare a 32-bit register against an immediate and branch to the
   absolute target uop->p when equal. */
static int
codegen_CMP_IMM_JZ(codeblock_t *block, uop_t *uop)
{
    int src_reg = HOST_REG_GET(uop->src_reg_a_real);
    int src_size = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_L(src_size)) {
        host_arm_CMP_IMM(block, src_reg, uop->imm_data);
    } else
        fatal("CMP_IMM_JZ %02x\n", uop->src_reg_a_real);
    host_arm_BEQ(block, (uintptr_t) uop->p);
    return 0;
}
/* Compare against an immediate (zero-extending 16-bit operands first)
   and emit a forward BNE whose offset is patched later; the branch
   location is recorded in uop->p. */
static int
codegen_CMP_IMM_JNZ_DEST(codeblock_t *block, uop_t *uop)
{
    int src_reg = HOST_REG_GET(uop->src_reg_a_real);
    int src_size = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_L(src_size)) {
        host_arm_CMP_IMM(block, src_reg, uop->imm_data);
    } else if (REG_IS_W(src_size)) {
        host_arm_UXTH(block, REG_TEMP, src_reg, 0);
        host_arm_CMP_IMM(block, REG_TEMP, uop->imm_data);
    } else
        fatal("CMP_IMM_JNZ_DEST %02x\n", uop->src_reg_a_real);
    uop->p = host_arm_BNE_(block);
    return 0;
}
/* As above, but the recorded forward branch is BEQ. */
static int
codegen_CMP_IMM_JZ_DEST(codeblock_t *block, uop_t *uop)
{
    int src_reg = HOST_REG_GET(uop->src_reg_a_real);
    int src_size = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_L(src_size)) {
        host_arm_CMP_IMM(block, src_reg, uop->imm_data);
    } else if (REG_IS_W(src_size)) {
        host_arm_UXTH(block, REG_TEMP, src_reg, 0);
        host_arm_CMP_IMM(block, REG_TEMP, uop->imm_data);
    } else
        fatal("CMP_IMM_JZ_DEST %02x\n", uop->src_reg_a_real);
    uop->p = host_arm_BEQ_(block);
    return 0;
}
/* Compare two 32-bit registers and branch to the absolute target uop->p
   when unsigned-below (carry clear).  The BCC placeholder is patched
   immediately: (target - branch_addr - 8) accounts for the ARM PC
   reading 8 bytes ahead, and >>2 packs it into the 24-bit offset
   field. */
static int
codegen_CMP_JB(codeblock_t *block, uop_t *uop)
{
    int src_reg_a = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b = HOST_REG_GET(uop->src_reg_b_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    uint32_t *jump_p;
    if (REG_IS_L(src_size_a) && REG_IS_L(src_size_b)) {
        host_arm_CMP_REG(block, src_reg_a, src_reg_b);
    } else
        fatal("CMP_JB %02x\n", uop->src_reg_a_real);
    jump_p = host_arm_BCC_(block);
    *jump_p |= ((((uintptr_t) uop->p - (uintptr_t) jump_p) - 8) & 0x3fffffc) >> 2;
    return 0;
}
/* As above, branching when unsigned-above (BHI). */
static int
codegen_CMP_JNBE(codeblock_t *block, uop_t *uop)
{
    int src_reg_a = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b = HOST_REG_GET(uop->src_reg_b_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    uint32_t *jump_p;
    if (REG_IS_L(src_size_a) && REG_IS_L(src_size_b)) {
        host_arm_CMP_REG(block, src_reg_a, src_reg_b);
    } else
        fatal("CMP_JNBE %02x\n", uop->src_reg_a_real);
    jump_p = host_arm_BHI_(block);
    *jump_p |= ((((uintptr_t) uop->p - (uintptr_t) jump_p) - 8) & 0x3fffffc) >> 2;
    return 0;
}
static int
codegen_CMP_JNB_DEST(codeblock_t *block, uop_t *uop)
{
int src_reg_a = HOST_REG_GET(uop->src_reg_a_real);
int src_reg_b = HOST_REG_GET(uop->src_reg_b_real);
int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
if (REG_IS_L(src_size_a) && REG_IS_L(src_size_b)) {
host_arm_CMP_REG(block, src_reg_a, src_reg_b);
} else if (REG_IS_W(src_size_a) && REG_IS_W(src_size_b)) {
host_arm_MOV_REG_LSL(block, REG_TEMP, src_reg_a, 16);
host_arm_CMP_REG_LSL(block, REG_TEMP, src_reg_b, 16);
} else if (REG_IS_B(src_size_a) && REG_IS_B(src_size_b)) {
host_arm_MOV_REG_LSL(block, REG_TEMP, src_reg_a, 24);
host_arm_CMP_REG_LSL(block, REG_TEMP, src_reg_b, 24);
} else
fatal("CMP_JNB_DEST %02x\n", uop->src_reg_a_real);
uop->p = host_arm_BCS_(block);
return 0;
}
static int
codegen_CMP_JNBE_DEST(codeblock_t *block, uop_t *uop)
{
int src_reg_a = HOST_REG_GET(uop->src_reg_a_real);
int src_reg_b = HOST_REG_GET(uop->src_reg_b_real);
int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
if (REG_IS_L(src_size_a) && REG_IS_L(src_size_b)) {
host_arm_CMP_REG(block, src_reg_a, src_reg_b);
} else if (REG_IS_W(src_size_a) && REG_IS_W(src_size_b)) {
host_arm_MOV_REG_LSL(block, REG_TEMP, src_reg_a, 16);
host_arm_CMP_REG_LSL(block, REG_TEMP, src_reg_b, 16);
} else if (REG_IS_B(src_size_a) && REG_IS_B(src_size_b)) {
host_arm_MOV_REG_LSL(block, REG_TEMP, src_reg_a, 24);
host_arm_CMP_REG_LSL(block, REG_TEMP, src_reg_b, 24);
} else
fatal("CMP_JNBE_DEST %02x\n", uop->src_reg_a_real);
uop->p = host_arm_BHI_(block);
return 0;
}
static int
codegen_CMP_JNL_DEST(codeblock_t *block, uop_t *uop)
{
int src_reg_a = HOST_REG_GET(uop->src_reg_a_real);
int src_reg_b = HOST_REG_GET(uop->src_reg_b_real);
int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
if (REG_IS_L(src_size_a) && REG_IS_L(src_size_b)) {
host_arm_CMP_REG(block, src_reg_a, src_reg_b);
} else if (REG_IS_W(src_size_a) && REG_IS_W(src_size_b)) {
host_arm_MOV_REG_LSL(block, REG_TEMP, src_reg_a, 16);
host_arm_CMP_REG_LSL(block, REG_TEMP, src_reg_b, 16);
} else if (REG_IS_B(src_size_a) && REG_IS_B(src_size_b)) {
host_arm_MOV_REG_LSL(block, REG_TEMP, src_reg_a, 24);
host_arm_CMP_REG_LSL(block, REG_TEMP, src_reg_b, 24);
} else
fatal("CMP_JNL_DEST %02x\n", uop->src_reg_a_real);
uop->p = host_arm_BGE_(block);
return 0;
}
static int
codegen_CMP_JNLE_DEST(codeblock_t *block, uop_t *uop)
{
int src_reg_a = HOST_REG_GET(uop->src_reg_a_real);
int src_reg_b = HOST_REG_GET(uop->src_reg_b_real);
int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
if (REG_IS_L(src_size_a) && REG_IS_L(src_size_b)) {
host_arm_CMP_REG(block, src_reg_a, src_reg_b);
} else if (REG_IS_W(src_size_a) && REG_IS_W(src_size_b)) {
host_arm_MOV_REG_LSL(block, REG_TEMP, src_reg_a, 16);
host_arm_CMP_REG_LSL(block, REG_TEMP, src_reg_b, 16);
} else if (REG_IS_B(src_size_a) && REG_IS_B(src_size_b)) {
host_arm_MOV_REG_LSL(block, REG_TEMP, src_reg_a, 24);
host_arm_CMP_REG_LSL(block, REG_TEMP, src_reg_b, 24);
} else
fatal("CMP_JNLE_DEST %02x\n", uop->src_reg_a_real);
uop->p = host_arm_BGT_(block);
return 0;
}
static int
codegen_CMP_JNO_DEST(codeblock_t *block, uop_t *uop)
{
    /* Compare two integer IREGs and emit a patchable forward branch for the
       x86 JNO condition (no signed overflow, ARM BVC); address in uop->p. */
    int src_reg_a = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b = HOST_REG_GET(uop->src_reg_b_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_L(src_size_a) && REG_IS_L(src_size_b)) {
        host_arm_CMP_REG(block, src_reg_a, src_reg_b);
    } else if (REG_IS_W(src_size_a) && REG_IS_W(src_size_b)) {
        /* Sub-32-bit operands are shifted to the top bits so the compare's
           V flag reflects the narrower operation. */
        host_arm_MOV_REG_LSL(block, REG_TEMP, src_reg_a, 16);
        host_arm_CMP_REG_LSL(block, REG_TEMP, src_reg_b, 16);
    } else if (REG_IS_B(src_size_a) && REG_IS_B(src_size_b)) {
        host_arm_MOV_REG_LSL(block, REG_TEMP, src_reg_a, 24);
        host_arm_CMP_REG_LSL(block, REG_TEMP, src_reg_b, 24);
    } else
        fatal("CMP_JNO_DEST %02x\n", uop->src_reg_a_real);
    uop->p = host_arm_BVC_(block);
    return 0;
}
static int
codegen_CMP_JNZ_DEST(codeblock_t *block, uop_t *uop)
{
    /* Compare two integer IREGs and emit a patchable forward branch for the
       x86 JNZ condition (a != b, ARM BNE); branch address saved in uop->p. */
    int src_reg_a = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b = HOST_REG_GET(uop->src_reg_b_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_L(src_size_a) && REG_IS_L(src_size_b)) {
        host_arm_CMP_REG(block, src_reg_a, src_reg_b);
    } else if (REG_IS_W(src_size_a) && REG_IS_W(src_size_b)) {
        /* Shift narrow operands to the top bits so only the relevant bits
           participate in the comparison. */
        host_arm_MOV_REG_LSL(block, REG_TEMP, src_reg_a, 16);
        host_arm_CMP_REG_LSL(block, REG_TEMP, src_reg_b, 16);
    } else if (REG_IS_B(src_size_a) && REG_IS_B(src_size_b)) {
        host_arm_MOV_REG_LSL(block, REG_TEMP, src_reg_a, 24);
        host_arm_CMP_REG_LSL(block, REG_TEMP, src_reg_b, 24);
    } else
        fatal("CMP_JNZ_DEST %02x\n", uop->src_reg_a_real);
    uop->p = host_arm_BNE_(block);
    return 0;
}
static int
codegen_CMP_JB_DEST(codeblock_t *block, uop_t *uop)
{
    /* Compare two integer IREGs and emit a patchable forward branch for the
       x86 JB condition (unsigned a < b, ARM BCC); address saved in uop->p. */
    int src_reg_a = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b = HOST_REG_GET(uop->src_reg_b_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_L(src_size_a) && REG_IS_L(src_size_b)) {
        host_arm_CMP_REG(block, src_reg_a, src_reg_b);
    } else if (REG_IS_W(src_size_a) && REG_IS_W(src_size_b)) {
        /* Shift narrow operands to the top bits so the carry flag reflects
           the 16-bit unsigned comparison. */
        host_arm_MOV_REG_LSL(block, REG_TEMP, src_reg_a, 16);
        host_arm_CMP_REG_LSL(block, REG_TEMP, src_reg_b, 16);
    } else if (REG_IS_B(src_size_a) && REG_IS_B(src_size_b)) {
        host_arm_MOV_REG_LSL(block, REG_TEMP, src_reg_a, 24);
        host_arm_CMP_REG_LSL(block, REG_TEMP, src_reg_b, 24);
    } else
        fatal("CMP_JB_DEST %02x\n", uop->src_reg_a_real);
    uop->p = host_arm_BCC_(block);
    return 0;
}
static int
codegen_CMP_JBE_DEST(codeblock_t *block, uop_t *uop)
{
    /* Compare two integer IREGs and emit a patchable forward branch for the
       x86 JBE condition (unsigned a <= b, ARM BLS); address saved in uop->p. */
    int src_reg_a = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b = HOST_REG_GET(uop->src_reg_b_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_L(src_size_a) && REG_IS_L(src_size_b)) {
        host_arm_CMP_REG(block, src_reg_a, src_reg_b);
    } else if (REG_IS_W(src_size_a) && REG_IS_W(src_size_b)) {
        /* Narrow operands are shifted to the top bits so flags match the
           narrower unsigned compare. */
        host_arm_MOV_REG_LSL(block, REG_TEMP, src_reg_a, 16);
        host_arm_CMP_REG_LSL(block, REG_TEMP, src_reg_b, 16);
    } else if (REG_IS_B(src_size_a) && REG_IS_B(src_size_b)) {
        host_arm_MOV_REG_LSL(block, REG_TEMP, src_reg_a, 24);
        host_arm_CMP_REG_LSL(block, REG_TEMP, src_reg_b, 24);
    } else
        fatal("CMP_JBE_DEST %02x\n", uop->src_reg_a_real);
    uop->p = host_arm_BLS_(block);
    return 0;
}
static int
codegen_CMP_JL_DEST(codeblock_t *block, uop_t *uop)
{
    /* Compare two integer IREGs and emit a patchable forward branch for the
       x86 JL condition (signed a < b, ARM BLT); address saved in uop->p. */
    int src_reg_a = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b = HOST_REG_GET(uop->src_reg_b_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_L(src_size_a) && REG_IS_L(src_size_b)) {
        host_arm_CMP_REG(block, src_reg_a, src_reg_b);
    } else if (REG_IS_W(src_size_a) && REG_IS_W(src_size_b)) {
        /* Shifting to the top bits puts the sign bit of the 16-bit value in
           bit 31, so the signed compare behaves correctly. */
        host_arm_MOV_REG_LSL(block, REG_TEMP, src_reg_a, 16);
        host_arm_CMP_REG_LSL(block, REG_TEMP, src_reg_b, 16);
    } else if (REG_IS_B(src_size_a) && REG_IS_B(src_size_b)) {
        host_arm_MOV_REG_LSL(block, REG_TEMP, src_reg_a, 24);
        host_arm_CMP_REG_LSL(block, REG_TEMP, src_reg_b, 24);
    } else
        fatal("CMP_JL_DEST %02x\n", uop->src_reg_a_real);
    uop->p = host_arm_BLT_(block);
    return 0;
}
static int
codegen_CMP_JLE_DEST(codeblock_t *block, uop_t *uop)
{
    /* Compare two integer IREGs and emit a patchable forward branch for the
       x86 JLE condition (signed a <= b, ARM BLE); address saved in uop->p. */
    int src_reg_a = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b = HOST_REG_GET(uop->src_reg_b_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_L(src_size_a) && REG_IS_L(src_size_b)) {
        host_arm_CMP_REG(block, src_reg_a, src_reg_b);
    } else if (REG_IS_W(src_size_a) && REG_IS_W(src_size_b)) {
        /* Narrow operands shifted to the top bits so the signed flags match
           the narrower compare. */
        host_arm_MOV_REG_LSL(block, REG_TEMP, src_reg_a, 16);
        host_arm_CMP_REG_LSL(block, REG_TEMP, src_reg_b, 16);
    } else if (REG_IS_B(src_size_a) && REG_IS_B(src_size_b)) {
        host_arm_MOV_REG_LSL(block, REG_TEMP, src_reg_a, 24);
        host_arm_CMP_REG_LSL(block, REG_TEMP, src_reg_b, 24);
    } else
        fatal("CMP_JLE_DEST %02x\n", uop->src_reg_a_real);
    uop->p = host_arm_BLE_(block);
    return 0;
}
static int
codegen_CMP_JO_DEST(codeblock_t *block, uop_t *uop)
{
    /* Compare two integer IREGs and emit a patchable forward branch for the
       x86 JO condition (signed overflow, ARM BVS); address saved in uop->p. */
    int src_reg_a = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b = HOST_REG_GET(uop->src_reg_b_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_L(src_size_a) && REG_IS_L(src_size_b)) {
        host_arm_CMP_REG(block, src_reg_a, src_reg_b);
    } else if (REG_IS_W(src_size_a) && REG_IS_W(src_size_b)) {
        /* Narrow operands shifted to the top bits so the V flag reflects
           overflow at the narrower width. */
        host_arm_MOV_REG_LSL(block, REG_TEMP, src_reg_a, 16);
        host_arm_CMP_REG_LSL(block, REG_TEMP, src_reg_b, 16);
    } else if (REG_IS_B(src_size_a) && REG_IS_B(src_size_b)) {
        host_arm_MOV_REG_LSL(block, REG_TEMP, src_reg_a, 24);
        host_arm_CMP_REG_LSL(block, REG_TEMP, src_reg_b, 24);
    } else
        fatal("CMP_JO_DEST %02x\n", uop->src_reg_a_real);
    uop->p = host_arm_BVS_(block);
    return 0;
}
static int
codegen_CMP_JZ_DEST(codeblock_t *block, uop_t *uop)
{
    /* Compare two integer IREGs and emit a patchable forward branch for the
       x86 JZ condition (a == b, ARM BEQ); branch address saved in uop->p. */
    int src_reg_a = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b = HOST_REG_GET(uop->src_reg_b_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_L(src_size_a) && REG_IS_L(src_size_b)) {
        host_arm_CMP_REG(block, src_reg_a, src_reg_b);
    } else if (REG_IS_W(src_size_a) && REG_IS_W(src_size_b)) {
        /* Narrow operands shifted to the top bits so only the relevant bits
           are compared. */
        host_arm_MOV_REG_LSL(block, REG_TEMP, src_reg_a, 16);
        host_arm_CMP_REG_LSL(block, REG_TEMP, src_reg_b, 16);
    } else if (REG_IS_B(src_size_a) && REG_IS_B(src_size_b)) {
        host_arm_MOV_REG_LSL(block, REG_TEMP, src_reg_a, 24);
        host_arm_CMP_REG_LSL(block, REG_TEMP, src_reg_b, 24);
    } else
        fatal("CMP_JZ_DEST %02x\n", uop->src_reg_a_real);
    uop->p = host_arm_BEQ_(block);
    return 0;
}
static int
codegen_FABS(codeblock_t *block, uop_t *uop)
{
    /* Emit a double-precision absolute value: dest = |src| (VABS.F64). */
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_a = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);

    if (REG_IS_D(dest_size) && REG_IS_D(src_size_a)) {
        host_arm_VABS_D(block, dest_reg, src_reg_a);
    } else
        /* Fix: format string previously had three %02x conversions but only
           two arguments, reading an indeterminate vararg (undefined behavior). */
        fatal("codegen_FABS %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
    return 0;
}
static int
codegen_FCHS(codeblock_t *block, uop_t *uop)
{
    /* Emit a double-precision sign change: dest = -src (VNEG.F64). */
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_a = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);

    if (REG_IS_D(dest_size) && REG_IS_D(src_size_a)) {
        host_arm_VNEG_D(block, dest_reg, src_reg_a);
    } else
        /* Fix: format string previously had three %02x conversions but only
           two arguments, reading an indeterminate vararg (undefined behavior). */
        fatal("codegen_FCHS %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
    return 0;
}
static int
codegen_FSQRT(codeblock_t *block, uop_t *uop)
{
    /* Emit a double-precision square root: dest = sqrt(src) (VSQRT.F64). */
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_a = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);

    if (REG_IS_D(dest_size) && REG_IS_D(src_size_a)) {
        host_arm_VSQRT_D(block, dest_reg, src_reg_a);
    } else
        /* Fix: format string previously had three %02x conversions but only
           two arguments, reading an indeterminate vararg (undefined behavior). */
        fatal("codegen_FSQRT %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
    return 0;
}
static int
codegen_FTST(codeblock_t *block, uop_t *uop)
{
    /* Emit x87 FTST: compare src against +0.0 and build the C0/C2/C3
       condition-code bits of the FPU status word in a 16-bit dest IREG. */
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_a = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_W(dest_size) && REG_IS_D(src_size_a)) {
        /* Materialise 0.0 by subtracting D_TEMP from itself, then compare. */
        host_arm_VSUB_D(block, REG_D_TEMP, REG_D_TEMP, REG_D_TEMP);
        host_arm_VCMP_D(block, src_reg_a, REG_D_TEMP);
        host_arm_MOV_IMM(block, dest_reg, 0);
        /* Copy the FPSCR comparison flags into the CPSR, then translate:
           EQ -> C3 (equal), CC (less) -> C0, VS (unordered/NaN) -> C0|C2|C3. */
        host_arm_VMRS_APSR(block);
        host_arm_ORREQ_IMM(block, dest_reg, dest_reg, FPU_SW_C3);
        host_arm_ORRCC_IMM(block, dest_reg, dest_reg, FPU_SW_C0);
        host_arm_ORRVS_IMM(block, dest_reg, dest_reg, FPU_SW_C0 | FPU_SW_C2 | FPU_SW_C3);
    } else
        fatal("codegen_FTST %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
static int
codegen_FADD(codeblock_t *block, uop_t *uop)
{
    /* Emit a double-precision add: dest = src_a + src_b (VADD.F64). */
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_a = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_D(dest_size) && REG_IS_D(src_size_a) && REG_IS_D(src_size_b)) {
        host_arm_VADD_D(block, dest_reg, src_reg_a, src_reg_b);
    } else
        fatal("codegen_FADD %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
static int
codegen_FCOM(codeblock_t *block, uop_t *uop)
{
    /* Emit x87 FCOM: compare two doubles and build the C0/C2/C3 condition
       bits of the FPU status word in a 16-bit dest IREG. */
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_a = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_W(dest_size) && REG_IS_D(src_size_a) && REG_IS_D(src_size_b)) {
        host_arm_VCMP_D(block, src_reg_a, src_reg_b);
        host_arm_MOV_IMM(block, dest_reg, 0);
        /* Copy FPSCR flags to CPSR, then translate:
           EQ -> C3 (equal), CC (less) -> C0, VS (unordered/NaN) -> C0|C2|C3. */
        host_arm_VMRS_APSR(block);
        host_arm_ORREQ_IMM(block, dest_reg, dest_reg, FPU_SW_C3);
        host_arm_ORRCC_IMM(block, dest_reg, dest_reg, FPU_SW_C0);
        host_arm_ORRVS_IMM(block, dest_reg, dest_reg, FPU_SW_C0 | FPU_SW_C2 | FPU_SW_C3);
    } else
        fatal("codegen_FCOM %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
static int
codegen_FDIV(codeblock_t *block, uop_t *uop)
{
    /* Emit a double-precision divide: dest = src_a / src_b (VDIV.F64). */
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_a = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_D(dest_size) && REG_IS_D(src_size_a) && REG_IS_D(src_size_b)) {
        host_arm_VDIV_D(block, dest_reg, src_reg_a, src_reg_b);
    } else
        fatal("codegen_FDIV %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
static int
codegen_FMUL(codeblock_t *block, uop_t *uop)
{
    /* Emit a double-precision multiply: dest = src_a * src_b (VMUL.F64). */
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_a = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_D(dest_size) && REG_IS_D(src_size_a) && REG_IS_D(src_size_b)) {
        host_arm_VMUL_D(block, dest_reg, src_reg_a, src_reg_b);
    } else
        fatal("codegen_FMUL %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
static int
codegen_FSUB(codeblock_t *block, uop_t *uop)
{
    /* Emit a double-precision subtract: dest = src_a - src_b (VSUB.F64). */
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_a = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_D(dest_size) && REG_IS_D(src_size_a) && REG_IS_D(src_size_b)) {
        host_arm_VSUB_D(block, dest_reg, src_reg_a, src_reg_b);
    } else
        fatal("codegen_FSUB %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
static int
codegen_FP_ENTER(codeblock_t *block, uop_t *uop)
{
    /* Emitted before x87 instructions: if CR0.EM or CR0.TS is set (mask 0xc),
       raise #NM (interrupt 7) and leave the translated block instead of
       executing the FPU operation. */
    uint32_t *branch_ptr;
    /* cr0 must be addressable via an immediate offset from the cpu_state base
       register for the LDR below to encode. */
    if (!in_range(&cr0, &cpu_state))
        fatal("codegen_FP_ENTER - out of range\n");
    host_arm_LDR_IMM(block, REG_TEMP, REG_CPUSTATE, (uintptr_t) &cr0 - (uintptr_t) &cpu_state);
    host_arm_TST_IMM(block, REG_TEMP, 0xc);
    branch_ptr = host_arm_BEQ_(block);
    /* Exception path: record the faulting PC, call x86_int(7), exit block. */
    host_arm_MOV_IMM(block, REG_TEMP, uop->imm_data);
    host_arm_STR_IMM(block, REG_TEMP, REG_CPUSTATE, (uintptr_t) &cpu_state.oldpc - (uintptr_t) &cpu_state);
    host_arm_MOV_IMM(block, REG_ARG0, 7);
    host_arm_call(block, x86_int);
    host_arm_B(block, (uintptr_t) codegen_exit_rout);
    /* Back-patch the forward BEQ so the normal path skips the exception code
       (ARM branch offset: (target - branch - 8) >> 2, 24-bit field). */
    *branch_ptr |= ((((uintptr_t) &block_write_data[block_pos] - (uintptr_t) branch_ptr) - 8) & 0x3fffffc) >> 2;
    return 0;
}
static int
codegen_MMX_ENTER(codeblock_t *block, uop_t *uop)
{
    /* Emitted before MMX instructions: raise #NM if CR0.EM/TS is set, and
       otherwise switch the FPU into MMX mode (valid tags, TOP=0, ismmx=1). */
    uint32_t *branch_ptr;
    /* cr0 must be addressable via an immediate offset from the cpu_state base. */
    if (!in_range(&cr0, &cpu_state))
        fatal("codegen_MMX_ENTER - out of range\n");
    host_arm_LDR_IMM(block, REG_TEMP, REG_CPUSTATE, (uintptr_t) &cr0 - (uintptr_t) &cpu_state);
    host_arm_TST_IMM(block, REG_TEMP, 0xc);
    branch_ptr = host_arm_BEQ_(block);
    /* Exception path: record the faulting PC, call x86_int(7), exit block. */
    host_arm_MOV_IMM(block, REG_TEMP, uop->imm_data);
    host_arm_STR_IMM(block, REG_TEMP, REG_CPUSTATE, (uintptr_t) &cpu_state.oldpc - (uintptr_t) &cpu_state);
    host_arm_MOV_IMM(block, REG_ARG0, 7);
    host_arm_call(block, x86_int);
    host_arm_B(block, (uintptr_t) codegen_exit_rout);
    /* Back-patch the forward BEQ past the exception code. */
    *branch_ptr |= ((((uintptr_t) &block_write_data[block_pos] - (uintptr_t) branch_ptr) - 8) & 0x3fffffc) >> 2;
    /* Mark all eight tag bytes valid (0x01 each, written 4 at a time)... */
    host_arm_MOV_IMM(block, REG_TEMP, 0x01010101);
    host_arm_STR_IMM(block, REG_TEMP, REG_CPUSTATE, (uintptr_t) &cpu_state.tag[0] - (uintptr_t) &cpu_state);
    host_arm_STR_IMM(block, REG_TEMP, REG_CPUSTATE, (uintptr_t) &cpu_state.tag[4] - (uintptr_t) &cpu_state);
    /* ...then clear the stack top and set the in-MMX-mode flag. */
    host_arm_MOV_IMM(block, REG_TEMP, 0);
    host_arm_STR_IMM(block, REG_TEMP, REG_CPUSTATE, (uintptr_t) &cpu_state.TOP - (uintptr_t) &cpu_state);
    host_arm_STRB_IMM(block, REG_TEMP, REG_CPUSTATE, (uintptr_t) &cpu_state.ismmx - (uintptr_t) &cpu_state);
    return 0;
}
static int
codegen_JMP(codeblock_t *block, uop_t *uop)
{
    /* Emit an unconditional branch to the absolute target held in uop->p. */
    const uintptr_t target = (uintptr_t) uop->p;

    host_arm_B(block, target);
    return 0;
}
static int
codegen_LOAD_FUNC_ARG0(codeblock_t *block, uop_t *uop)
{
    /* Move a 16-bit IREG into the first C-ABI argument register,
       zero-extended to 32 bits.  Only word-sized sources are supported. */
    int reg = HOST_REG_GET(uop->src_reg_a_real);
    int size = IREG_GET_SIZE(uop->src_reg_a_real);

    if (REG_IS_W(size)) {
        host_arm_UXTH(block, REG_ARG0, reg, 0);
    } else
        fatal("codegen_LOAD_FUNC_ARG0 %02x\n", uop->src_reg_a_real);
    return 0;
}
static int
codegen_LOAD_FUNC_ARG1(codeblock_t *block, uop_t *uop)
{
    /* Unimplemented: no uop currently passes a register as the second C
       argument, so reaching this handler is a codegen bug. */
    fatal("codegen_LOAD_FUNC_ARG1 %02x\n", uop->src_reg_a_real);
    return 0;
}
static int
codegen_LOAD_FUNC_ARG2(codeblock_t *block, uop_t *uop)
{
    /* Unimplemented: no uop currently passes a register as the third C
       argument, so reaching this handler is a codegen bug. */
    fatal("codegen_LOAD_FUNC_ARG2 %02x\n", uop->src_reg_a_real);
    return 0;
}
static int
codegen_LOAD_FUNC_ARG3(codeblock_t *block, uop_t *uop)
{
    /* Unimplemented: no uop currently passes a register as the fourth C
       argument, so reaching this handler is a codegen bug. */
    fatal("codegen_LOAD_FUNC_ARG3 %02x\n", uop->src_reg_a_real);
    return 0;
}
static int
codegen_LOAD_FUNC_ARG0_IMM(codeblock_t *block, uop_t *uop)
{
    /* Load an immediate into the first C-ABI argument register. */
    const uint32_t imm = uop->imm_data;

    host_arm_MOV_IMM(block, REG_ARG0, imm);
    return 0;
}
static int
codegen_LOAD_FUNC_ARG1_IMM(codeblock_t *block, uop_t *uop)
{
    /* Load an immediate into the second C-ABI argument register. */
    const uint32_t imm = uop->imm_data;

    host_arm_MOV_IMM(block, REG_ARG1, imm);
    return 0;
}
static int
codegen_LOAD_FUNC_ARG2_IMM(codeblock_t *block, uop_t *uop)
{
    /* Load an immediate into the third C-ABI argument register. */
    const uint32_t imm = uop->imm_data;

    host_arm_MOV_IMM(block, REG_ARG2, imm);
    return 0;
}
static int
codegen_LOAD_FUNC_ARG3_IMM(codeblock_t *block, uop_t *uop)
{
    /* Load an immediate into the fourth C-ABI argument register. */
    const uint32_t imm = uop->imm_data;

    host_arm_MOV_IMM(block, REG_ARG3, imm);
    return 0;
}
static int
codegen_LOAD_SEG(codeblock_t *block, uop_t *uop)
{
    /* Emit a segment-register load: call loadseg(selector, seg) with the
       16-bit selector from an IREG and the x86seg pointer from uop->p.
       A nonzero return from loadseg means a fault was raised, so the
       translated block is exited. */
    int src_reg = HOST_REG_GET(uop->src_reg_a_real);
    int src_size = IREG_GET_SIZE(uop->src_reg_a_real);
    if (!REG_IS_W(src_size))
        fatal("LOAD_SEG %02x %p\n", uop->src_reg_a_real, uop->p);
    host_arm_UXTH(block, REG_ARG0, src_reg, 0);
    host_arm_MOV_IMM(block, REG_ARG1, (uint32_t) uop->p);
    host_arm_call(block, loadseg);
    /* Nonzero R0 -> loadseg faulted; leave the block. */
    host_arm_TST_REG(block, REG_R0, REG_R0);
    host_arm_BNE(block, (uintptr_t) codegen_exit_rout);
    return 0;
}
static int
codegen_MEM_LOAD_ABS(codeblock_t *block, uop_t *uop)
{
    /* Load from guest memory at segment base + constant offset.  The memory
       helper routine takes the effective address in R0 and returns the value
       in R0; a nonzero R1 on return indicates a fault, in which case the
       translated block is exited. */
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int seg_reg = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    host_arm_ADD_IMM(block, REG_R0, seg_reg, uop->imm_data);
    if (REG_IS_B(dest_size) || REG_IS_BH(dest_size)) {
        host_arm_BL(block, (uintptr_t) codegen_mem_load_byte);
    } else if (REG_IS_W(dest_size)) {
        host_arm_BL(block, (uintptr_t) codegen_mem_load_word);
    } else if (REG_IS_L(dest_size)) {
        host_arm_BL(block, (uintptr_t) codegen_mem_load_long);
    } else
        fatal("MEM_LOAD_ABS - %02x\n", uop->dest_reg_a_real);
    host_arm_TST_REG(block, REG_R1, REG_R1);
    host_arm_BNE(block, (uintptr_t) codegen_exit_rout);
    /* Insert the loaded value into the destination IREG, preserving the
       bits of the host register outside the destination sub-register. */
    if (REG_IS_B(dest_size)) {
        host_arm_BFI(block, dest_reg, REG_R0, 0, 8);
    } else if (REG_IS_BH(dest_size)) {
        host_arm_BFI(block, dest_reg, REG_R0, 8, 8);
    } else if (REG_IS_W(dest_size)) {
        host_arm_BFI(block, dest_reg, REG_R0, 0, 16);
    } else if (REG_IS_L(dest_size)) {
        host_arm_MOV_REG(block, dest_reg, REG_R0);
    }
    return 0;
}
static int
codegen_MEM_LOAD_REG(codeblock_t *block, uop_t *uop)
{
    /* Load from guest memory at segment base + address register (+ optional
       constant).  The helper takes the address in R0 and returns the value
       in R0 (or REG_D_TEMP for 64-bit loads); nonzero R1 signals a fault
       and exits the translated block. */
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int seg_reg = HOST_REG_GET(uop->src_reg_a_real);
    int addr_reg = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    host_arm_ADD_REG(block, REG_R0, seg_reg, addr_reg);
    if (uop->imm_data)
        host_arm_ADD_IMM(block, REG_R0, REG_R0, uop->imm_data);
    if (REG_IS_B(dest_size) || REG_IS_BH(dest_size)) {
        host_arm_BL(block, (uintptr_t) codegen_mem_load_byte);
    } else if (REG_IS_W(dest_size)) {
        host_arm_BL(block, (uintptr_t) codegen_mem_load_word);
    } else if (REG_IS_L(dest_size)) {
        host_arm_BL(block, (uintptr_t) codegen_mem_load_long);
    } else if (REG_IS_Q(dest_size)) {
        host_arm_BL(block, (uintptr_t) codegen_mem_load_quad);
    } else
        fatal("MEM_LOAD_REG - %02x\n", uop->dest_reg_a_real);
    host_arm_TST_REG(block, REG_R1, REG_R1);
    host_arm_BNE(block, (uintptr_t) codegen_exit_rout);
    /* Insert the loaded value into the destination sub-register. */
    if (REG_IS_B(dest_size)) {
        host_arm_BFI(block, dest_reg, REG_R0, 0, 8);
    } else if (REG_IS_BH(dest_size)) {
        host_arm_BFI(block, dest_reg, REG_R0, 8, 8);
    } else if (REG_IS_W(dest_size)) {
        host_arm_BFI(block, dest_reg, REG_R0, 0, 16);
    } else if (REG_IS_L(dest_size)) {
        host_arm_MOV_REG(block, dest_reg, REG_R0);
    } else if (REG_IS_Q(dest_size)) {
        host_arm_VMOV_D_D(block, dest_reg, REG_D_TEMP);
    }
    return 0;
}
static int
codegen_MEM_LOAD_SINGLE(codeblock_t *block, uop_t *uop)
{
    /* Load a 32-bit float from guest memory and widen it to a double IREG.
       Address = segment base + address register (+ optional constant); the
       helper returns the float in REG_D_TEMP, nonzero R1 = fault. */
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int seg_reg = HOST_REG_GET(uop->src_reg_a_real);
    int addr_reg = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    if (!REG_IS_D(dest_size))
        fatal("MEM_LOAD_SINGLE - %02x\n", uop->dest_reg_a_real);
    host_arm_ADD_REG(block, REG_R0, seg_reg, addr_reg);
    if (uop->imm_data)
        host_arm_ADD_IMM(block, REG_R0, REG_R0, uop->imm_data);
    host_arm_BL(block, (uintptr_t) codegen_mem_load_single);
    host_arm_TST_REG(block, REG_R1, REG_R1);
    host_arm_BNE(block, (uintptr_t) codegen_exit_rout);
    /* Convert single -> double into the destination register. */
    host_arm_VCVT_D_S(block, dest_reg, REG_D_TEMP);
    return 0;
}
static int
codegen_MEM_LOAD_DOUBLE(codeblock_t *block, uop_t *uop)
{
    /* Load a 64-bit double from guest memory into a double IREG.
       Address = segment base + address register (+ optional constant); the
       helper returns the value in REG_D_TEMP, nonzero R1 = fault. */
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int seg_reg = HOST_REG_GET(uop->src_reg_a_real);
    int addr_reg = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    if (!REG_IS_D(dest_size))
        fatal("MEM_LOAD_DOUBLE - %02x\n", uop->dest_reg_a_real);
    host_arm_ADD_REG(block, REG_R0, seg_reg, addr_reg);
    if (uop->imm_data)
        host_arm_ADD_IMM(block, REG_R0, REG_R0, uop->imm_data);
    host_arm_BL(block, (uintptr_t) codegen_mem_load_double);
    host_arm_TST_REG(block, REG_R1, REG_R1);
    host_arm_BNE(block, (uintptr_t) codegen_exit_rout);
    host_arm_VMOV_D_D(block, dest_reg, REG_D_TEMP);
    return 0;
}
static int
codegen_MEM_STORE_ABS(codeblock_t *block, uop_t *uop)
{
    /* Store an IREG to guest memory at segment base + constant offset.
       The helper takes address in R0 and value in R1; nonzero R1 on return
       indicates a fault and exits the translated block. */
    int seg_reg = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg = HOST_REG_GET(uop->src_reg_b_real);
    int src_size = IREG_GET_SIZE(uop->src_reg_b_real);
    host_arm_ADD_IMM(block, REG_R0, seg_reg, uop->imm_data);
    if (REG_IS_B(src_size)) {
        host_arm_MOV_REG(block, REG_R1, src_reg);
        host_arm_BL(block, (uintptr_t) codegen_mem_store_byte);
    } else if (REG_IS_W(src_size)) {
        host_arm_MOV_REG(block, REG_R1, src_reg);
        host_arm_BL(block, (uintptr_t) codegen_mem_store_word);
    } else if (REG_IS_L(src_size)) {
        host_arm_MOV_REG(block, REG_R1, src_reg);
        host_arm_BL(block, (uintptr_t) codegen_mem_store_long);
    } else
        fatal("MEM_STORE_ABS - %02x\n", uop->src_reg_b_real);
    host_arm_TST_REG(block, REG_R1, REG_R1);
    host_arm_BNE(block, (uintptr_t) codegen_exit_rout);
    return 0;
}
static int
codegen_MEM_STORE_REG(codeblock_t *block, uop_t *uop)
{
    /* Store an IREG to guest memory at segment base + address register
       (+ optional constant).  The helper takes address in R0 and value in
       R1 (REG_D_TEMP for 64-bit); nonzero R1 on return = fault, exit block. */
    int seg_reg = HOST_REG_GET(uop->src_reg_a_real);
    int addr_reg = HOST_REG_GET(uop->src_reg_b_real);
    int src_reg = HOST_REG_GET(uop->src_reg_c_real);
    int src_size = IREG_GET_SIZE(uop->src_reg_c_real);
    host_arm_ADD_REG(block, REG_R0, seg_reg, addr_reg);
    if (uop->imm_data)
        host_arm_ADD_IMM(block, REG_R0, REG_R0, uop->imm_data);
    if (REG_IS_B(src_size)) {
        host_arm_MOV_REG(block, REG_R1, src_reg);
        host_arm_BL(block, (uintptr_t) codegen_mem_store_byte);
    } else if (REG_IS_BH(src_size)) {
        /* High-byte sub-register: shift it down into the low byte first. */
        host_arm_MOV_REG_LSR(block, REG_R1, src_reg, 8);
        host_arm_BL(block, (uintptr_t) codegen_mem_store_byte);
    } else if (REG_IS_W(src_size)) {
        host_arm_MOV_REG(block, REG_R1, src_reg);
        host_arm_BL(block, (uintptr_t) codegen_mem_store_word);
    } else if (REG_IS_L(src_size)) {
        host_arm_MOV_REG(block, REG_R1, src_reg);
        host_arm_BL(block, (uintptr_t) codegen_mem_store_long);
    } else if (REG_IS_Q(src_size)) {
        host_arm_VMOV_D_D(block, REG_D_TEMP, src_reg);
        host_arm_BL(block, (uintptr_t) codegen_mem_store_quad);
    } else
        fatal("MEM_STORE_REG - %02x\n", uop->src_reg_c_real);
    host_arm_TST_REG(block, REG_R1, REG_R1);
    host_arm_BNE(block, (uintptr_t) codegen_exit_rout);
    return 0;
}
static int
codegen_MEM_STORE_IMM_8(codeblock_t *block, uop_t *uop)
{
    /* Store an 8-bit immediate to guest memory at segment base + address
       register.  Helper ABI: address in R0, value in R1; a nonzero R1 on
       return indicates a fault, exiting the translated block. */
    int base_reg = HOST_REG_GET(uop->src_reg_a_real);
    int ofs_reg = HOST_REG_GET(uop->src_reg_b_real);

    host_arm_ADD_REG(block, REG_R0, base_reg, ofs_reg);
    host_arm_MOV_IMM(block, REG_R1, uop->imm_data);
    host_arm_BL(block, (uintptr_t) codegen_mem_store_byte);
    host_arm_TST_REG(block, REG_R1, REG_R1);
    host_arm_BNE(block, (uintptr_t) codegen_exit_rout);
    return 0;
}
static int
codegen_MEM_STORE_IMM_16(codeblock_t *block, uop_t *uop)
{
    /* Store a 16-bit immediate to guest memory at segment base + address
       register.  Helper ABI: address in R0, value in R1; a nonzero R1 on
       return indicates a fault, exiting the translated block. */
    int base_reg = HOST_REG_GET(uop->src_reg_a_real);
    int ofs_reg = HOST_REG_GET(uop->src_reg_b_real);

    host_arm_ADD_REG(block, REG_R0, base_reg, ofs_reg);
    host_arm_MOV_IMM(block, REG_R1, uop->imm_data);
    host_arm_BL(block, (uintptr_t) codegen_mem_store_word);
    host_arm_TST_REG(block, REG_R1, REG_R1);
    host_arm_BNE(block, (uintptr_t) codegen_exit_rout);
    return 0;
}
static int
codegen_MEM_STORE_IMM_32(codeblock_t *block, uop_t *uop)
{
    /* Store a 32-bit immediate to guest memory at segment base + address
       register.  Helper ABI: address in R0, value in R1; a nonzero R1 on
       return indicates a fault, exiting the translated block. */
    int base_reg = HOST_REG_GET(uop->src_reg_a_real);
    int ofs_reg = HOST_REG_GET(uop->src_reg_b_real);

    host_arm_ADD_REG(block, REG_R0, base_reg, ofs_reg);
    host_arm_MOV_IMM(block, REG_R1, uop->imm_data);
    host_arm_BL(block, (uintptr_t) codegen_mem_store_long);
    host_arm_TST_REG(block, REG_R1, REG_R1);
    host_arm_BNE(block, (uintptr_t) codegen_exit_rout);
    return 0;
}
static int
codegen_MEM_STORE_SINGLE(codeblock_t *block, uop_t *uop)
{
    /* Narrow a double IREG to single precision and store it to guest memory
       at segment base + address register (+ optional constant).  The helper
       takes the address in R0 and the float in REG_D_TEMP; a nonzero R1 on
       return indicates a fault and exits the translated block. */
    int seg_reg = HOST_REG_GET(uop->src_reg_a_real);
    int addr_reg = HOST_REG_GET(uop->src_reg_b_real);
    int src_reg = HOST_REG_GET(uop->src_reg_c_real);
    int src_size = IREG_GET_SIZE(uop->src_reg_c_real);
    if (!REG_IS_D(src_size))
        /* Fix: message previously said "MEM_STORE_REG" and reported
           dest_reg_a_real, though the failed check is on src_reg_c_real. */
        fatal("MEM_STORE_SINGLE - %02x\n", uop->src_reg_c_real);
    host_arm_ADD_REG(block, REG_R0, seg_reg, addr_reg);
    if (uop->imm_data)
        host_arm_ADD_IMM(block, REG_R0, REG_R0, uop->imm_data);
    host_arm_VCVT_S_D(block, REG_D_TEMP, src_reg);
    host_arm_BL(block, (uintptr_t) codegen_mem_store_single);
    host_arm_TST_REG(block, REG_R1, REG_R1);
    host_arm_BNE(block, (uintptr_t) codegen_exit_rout);
    return 0;
}
static int
codegen_MEM_STORE_DOUBLE(codeblock_t *block, uop_t *uop)
{
    /* Store a double IREG to guest memory at segment base + address register
       (+ optional constant).  The helper takes the address in R0 and the
       value in REG_D_TEMP; a nonzero R1 on return indicates a fault and
       exits the translated block. */
    int seg_reg = HOST_REG_GET(uop->src_reg_a_real);
    int addr_reg = HOST_REG_GET(uop->src_reg_b_real);
    int src_reg = HOST_REG_GET(uop->src_reg_c_real);
    int src_size = IREG_GET_SIZE(uop->src_reg_c_real);
    if (!REG_IS_D(src_size))
        /* Fix: message previously said "MEM_STORE_REG" and reported
           dest_reg_a_real, though the failed check is on src_reg_c_real. */
        fatal("MEM_STORE_DOUBLE - %02x\n", uop->src_reg_c_real);
    host_arm_ADD_REG(block, REG_R0, seg_reg, addr_reg);
    if (uop->imm_data)
        host_arm_ADD_IMM(block, REG_R0, REG_R0, uop->imm_data);
    host_arm_VMOV_D_D(block, REG_D_TEMP, src_reg);
    host_arm_BL(block, (uintptr_t) codegen_mem_store_double);
    host_arm_TST_REG(block, REG_R1, REG_R1);
    host_arm_BNE(block, (uintptr_t) codegen_exit_rout);
    return 0;
}
static int
codegen_MOV(codeblock_t *block, uop_t *uop)
{
    /* Register-to-register move between IREGs of matching class.  Sub-32-bit
       destinations use BFI so the untouched bits of the host register are
       preserved; BH (high byte, bits 8-15) sources are shifted down first. */
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_L(dest_size) && REG_IS_L(src_size)) {
        host_arm_MOV_REG_LSL(block, dest_reg, src_reg, 0);
    } else if (REG_IS_W(dest_size) && REG_IS_W(src_size)) {
        host_arm_BFI(block, dest_reg, src_reg, 0, 16);
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size)) {
        host_arm_BFI(block, dest_reg, src_reg, 0, 8);
    } else if (REG_IS_BH(dest_size) && REG_IS_B(src_size)) {
        host_arm_BFI(block, dest_reg, src_reg, 8, 8);
    } else if (REG_IS_B(dest_size) && REG_IS_BH(src_size)) {
        host_arm_MOV_REG_LSR(block, REG_TEMP, src_reg, 8);
        host_arm_BFI(block, dest_reg, REG_TEMP, 0, 8);
    } else if (REG_IS_BH(dest_size) && REG_IS_BH(src_size)) {
        host_arm_MOV_REG_LSR(block, REG_TEMP, src_reg, 8);
        host_arm_BFI(block, dest_reg, REG_TEMP, 8, 8);
    } else if (REG_IS_D(dest_size) && REG_IS_D(src_size)) {
        host_arm_VMOV_D_D(block, dest_reg, src_reg);
    } else if (REG_IS_Q(dest_size) && REG_IS_Q(src_size)) {
        host_arm_VMOV_D_D(block, dest_reg, src_reg);
    } else
        fatal("MOV %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
    return 0;
}
static int
codegen_MOV_IMM(codeblock_t *block, uop_t *uop)
{
    /* Load an immediate into an IREG.  Sub-32-bit destinations clear the
       target bit range and OR the value in, preserving the other bits of
       the host register. */
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    if (REG_IS_L(dest_size)) {
        host_arm_MOV_IMM(block, dest_reg, uop->imm_data);
    } else if (REG_IS_W(dest_size)) {
        host_arm_MOVW_IMM(block, REG_TEMP, uop->imm_data);
        host_arm_BFI(block, dest_reg, REG_TEMP, 0, 16);
    } else if (REG_IS_B(dest_size)) {
        host_arm_AND_IMM(block, dest_reg, dest_reg, ~0x000000ff);
        host_arm_ORR_IMM(block, dest_reg, dest_reg, uop->imm_data);
    } else if (REG_IS_BH(dest_size)) {
        /* High byte lives in bits 8-15, so the immediate is shifted up. */
        host_arm_AND_IMM(block, dest_reg, dest_reg, ~0x0000ff00);
        host_arm_ORR_IMM(block, dest_reg, dest_reg, uop->imm_data << 8);
    } else
        fatal("MOV_IMM %02x\n", uop->dest_reg_a_real);
    return 0;
}
static int
codegen_MOV_PTR(codeblock_t *block, uop_t *uop)
{
    /* Load a host pointer constant into a register.
       NOTE(review): this is the only handler that uses dest_reg_a_real
       directly without HOST_REG_GET — presumably it is always invoked with
       a plain host register number; confirm against the uop emitters. */
    host_arm_MOV_IMM(block, uop->dest_reg_a_real, (uintptr_t) uop->p);
    return 0;
}
static int
codegen_MOVSX(codeblock_t *block, uop_t *uop)
{
    /* Sign-extending move (x86 MOVSX) between IREGs.  BH sources (high byte,
       bits 8-15) use the rotation argument of SXTB to pick the byte; 16-bit
       destinations go through REG_TEMP and BFI to preserve the upper bits. */
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_L(dest_size) && REG_IS_B(src_size)) {
        host_arm_SXTB(block, dest_reg, src_reg, 0);
    } else if (REG_IS_L(dest_size) && REG_IS_BH(src_size)) {
        host_arm_SXTB(block, dest_reg, src_reg, 8);
    } else if (REG_IS_L(dest_size) && REG_IS_W(src_size)) {
        host_arm_SXTH(block, dest_reg, src_reg, 0);
    } else if (REG_IS_W(dest_size) && REG_IS_B(src_size)) {
        host_arm_SXTB(block, REG_TEMP, src_reg, 0);
        host_arm_BFI(block, dest_reg, REG_TEMP, 0, 16);
    } else if (REG_IS_W(dest_size) && REG_IS_BH(src_size)) {
        host_arm_SXTB(block, REG_TEMP, src_reg, 8);
        host_arm_BFI(block, dest_reg, REG_TEMP, 0, 16);
    } else
        fatal("MOVSX %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
    return 0;
}
static int
codegen_MOVZX(codeblock_t *block, uop_t *uop)
{
    /* Zero-extending move (x86 MOVZX) between IREGs, including the MMX
       cases: 32-bit -> 64-bit (upper half zeroed) and 64-bit -> 32-bit
       (low word extracted). */
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_Q(dest_size) && REG_IS_L(src_size)) {
        /* Build a 64-bit value from the 32-bit source and a zero high word. */
        host_arm_MOV_IMM(block, REG_TEMP, 0);
        host_arm_VMOV_D_64(block, dest_reg, src_reg, REG_TEMP);
    } else if (REG_IS_L(dest_size) && REG_IS_Q(src_size)) {
        host_arm_VMOV_32_S(block, dest_reg, src_reg);
    } else if (REG_IS_L(dest_size) && REG_IS_B(src_size)) {
        host_arm_UXTB(block, dest_reg, src_reg, 0);
    } else if (REG_IS_L(dest_size) && REG_IS_BH(src_size)) {
        host_arm_UXTB(block, dest_reg, src_reg, 8);
    } else if (REG_IS_L(dest_size) && REG_IS_W(src_size)) {
        host_arm_UXTH(block, dest_reg, src_reg, 0);
    } else if (REG_IS_W(dest_size) && REG_IS_B(src_size)) {
        if (src_reg == dest_reg)
            /* Same host register: just clear bits 8-15 in place. */
            host_arm_BIC_IMM(block, dest_reg, dest_reg, 0xff00);
        else {
            host_arm_UXTB(block, REG_TEMP, src_reg, 0);
            host_arm_BFI(block, dest_reg, REG_TEMP, 0, 16);
        }
    } else if (REG_IS_W(dest_size) && REG_IS_BH(src_size)) {
        host_arm_MOV_REG_LSR(block, REG_TEMP, src_reg, 8);
        host_arm_BIC_IMM(block, dest_reg, dest_reg, 0xff00);
        host_arm_BFI(block, dest_reg, REG_TEMP, 0, 8);
    } else
        fatal("MOVZX %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
    return 0;
}
static double
int64_to_double(int64_t value)
{
    /* Convert a signed 64-bit integer to double.  Kept as a C helper so
       generated code can call it instead of emitting the conversion inline. */
    const double result = (double) value;

    return result;
}
static int
codegen_MOV_DOUBLE_INT(codeblock_t *block, uop_t *uop)
{
    /* Convert an integer IREG (16/32/64-bit) to a double IREG (x87 FILD).
       32/16-bit sources use the VFP int->double conversion; 64-bit sources
       go through the C helper int64_to_double. */
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_D(dest_size) && REG_IS_L(src_size)) {
        host_arm_VMOV_S_32(block, REG_D_TEMP, src_reg);
        host_arm_VCVT_D_IS(block, dest_reg, REG_D_TEMP);
    } else if (REG_IS_D(dest_size) && REG_IS_W(src_size)) {
        /* Sign-extend the 16-bit source before the 32-bit int conversion. */
        host_arm_SXTH(block, REG_TEMP, src_reg, 0);
        host_arm_VMOV_S_32(block, REG_D_TEMP, REG_TEMP);
        host_arm_VCVT_D_IS(block, dest_reg, REG_D_TEMP);
    } else if (REG_IS_D(dest_size) && REG_IS_Q(src_size)) {
        /*ARMv7 has no instructions to convert a 64-bit integer to a double.
        For simplicity, call a C function and let the compiler do it.*/
        host_arm_VMOV_64_D(block, REG_R0, REG_R1, src_reg);
        host_arm_BL(block, (uintptr_t) int64_to_double); /*Input - R0/R1, Output - D0*/
        host_arm_VMOV_D_D(block, dest_reg, REG_D0);
    } else
        fatal("MOV_DOUBLE_INT %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
    return 0;
}
static int
codegen_MOV_INT_DOUBLE(codeblock_t *block, uop_t *uop)
{
    /* Convert a double IREG to a 16/32-bit integer IREG (x87 FIST), honouring
       the current x87 rounding mode via the codegen_fp_round helper, which
       takes and returns its value in REG_D_TEMP. */
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_L(dest_size) && REG_IS_D(src_size)) {
        host_arm_VMOV_D_D(block, REG_D_TEMP, src_reg);
        host_arm_BL(block, (uintptr_t) codegen_fp_round);
        host_arm_VMOV_32_S(block, dest_reg, REG_D_TEMP);
    } else if (REG_IS_W(dest_size) && REG_IS_D(src_size)) {
        host_arm_VMOV_D_D(block, REG_D_TEMP, src_reg);
        host_arm_BL(block, (uintptr_t) codegen_fp_round);
        host_arm_VMOV_32_S(block, REG_TEMP, REG_D_TEMP);
        /* Insert only the low 16 bits into the destination sub-register. */
        host_arm_BFI(block, dest_reg, REG_TEMP, 0, 16);
    } else
        fatal("MOV_INT_DOUBLE %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
    return 0;
}
static int64_t
x87_fround64(double b)
{
    /* Round a double to a 64-bit integer according to the rounding-control
       field of the x87 control word (cpu_state.npxc bits 10-11).  Called
       from generated code where VFP lacks a double -> int64 conversion. */
    int64_t a;
    int64_t c;
    switch ((cpu_state.npxc >> 10) & 3) {
        case 0: /*Nearest*/
            /* Round-to-nearest-even: pick the closer of the two neighbouring
               integers; on an exact tie choose the even one. */
            a = (int64_t) floor(b);
            c = (int64_t) floor(b + 1.0);
            if ((b - a) < (c - b))
                return a;
            else if ((b - a) > (c - b))
                return c;
            else
                return (a & 1) ? c : a;
        case 1: /*Down*/
            return (int64_t) floor(b);
        case 2: /*Up*/
            return (int64_t) ceil(b);
        case 3: /*Chop*/
            return (int64_t) b;
    }
    /* Unreachable: the 2-bit field above covers all cases. */
    return 0;
}
static int
codegen_MOV_INT_DOUBLE_64(codeblock_t *block, uop_t *uop)
{
    /* x87 FIST to a 64-bit destination.  The source slot may hold either a
       raw 64-bit integer (MM register, TAG_UINT64 set) or a double (ST
       register); in the latter case the value is rounded via x87_fround64. */
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg = HOST_REG_GET(uop->src_reg_a_real);
    int src_64_reg = HOST_REG_GET(uop->src_reg_b_real);
    int tag_reg = HOST_REG_GET(uop->src_reg_c_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_64_size = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_D(src_size) && REG_IS_Q(src_64_size)) {
        uint32_t *branch_offset;
        /*If TAG_UINT64 is set then the source is MM[]. Otherwise it is a double in ST()*/
        host_arm_VMOV_D_D(block, dest_reg, src_64_reg);
        host_arm_TST_IMM(block, tag_reg, TAG_UINT64);
        branch_offset = host_arm_BNE_(block);
        /*VFP/NEON has no instructions to convert a float to 64-bit integer,
        so call out to C.*/
        host_arm_VMOV_D_D(block, REG_D0, src_reg);
        host_arm_call(block, x87_fround64);
        host_arm_VMOV_D_64(block, REG_D_TEMP, REG_R0, REG_R1);
        /* Back-patch the forward BNE to skip the conversion call. */
        *branch_offset |= ((((uintptr_t) &block_write_data[block_pos] - (uintptr_t) branch_offset) - 8) & 0x3fffffc) >> 2;
    } else
        fatal("MOV_INT_DOUBLE_64 %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
    return 0;
}
/* Load a 32-bit value from the fixed host address uop->p into the
   destination register. */
static int
codegen_MOV_REG_PTR(codeblock_t *block, uop_t *uop)
{
    int d = HOST_REG_GET(uop->dest_reg_a_real);

    host_arm_MOV_IMM(block, REG_TEMP, (uintptr_t) uop->p);
    if (REG_IS_L(IREG_GET_SIZE(uop->dest_reg_a_real)))
        host_arm_LDR_IMM(block, d, REG_TEMP, 0);
    else
        fatal("MOV_REG_PTR %02x\n", uop->dest_reg_a_real);
    return 0;
}
/* Zero-extending byte load from the fixed host address uop->p. For 16- and
   8-bit destinations the loaded byte is inserted into the low bits of the
   host register via BFI, preserving the remaining bits. */
static int
codegen_MOVZX_REG_PTR_8(codeblock_t *block, uop_t *uop)
{
    int d     = HOST_REG_GET(uop->dest_reg_a_real);
    int dsize = IREG_GET_SIZE(uop->dest_reg_a_real);

    host_arm_MOV_IMM(block, REG_TEMP, (uintptr_t) uop->p);
    if (REG_IS_L(dsize))
        host_arm_LDRB_IMM(block, d, REG_TEMP, 0);
    else if (REG_IS_W(dsize)) {
        host_arm_LDRB_IMM(block, REG_TEMP, REG_TEMP, 0);
        host_arm_BFI(block, d, REG_TEMP, 0, 16);
    } else if (REG_IS_B(dsize)) {
        host_arm_LDRB_IMM(block, REG_TEMP, REG_TEMP, 0);
        host_arm_BFI(block, d, REG_TEMP, 0, 8);
    } else
        fatal("MOVZX_REG_PTR_8 %02x\n", uop->dest_reg_a_real);
    return 0;
}
/* Zero-extending halfword load from the fixed host address uop->p. */
static int
codegen_MOVZX_REG_PTR_16(codeblock_t *block, uop_t *uop)
{
    int d     = HOST_REG_GET(uop->dest_reg_a_real);
    int dsize = IREG_GET_SIZE(uop->dest_reg_a_real);

    host_arm_MOV_IMM(block, REG_TEMP, (uintptr_t) uop->p);
    if (REG_IS_L(dsize))
        host_arm_LDRH_IMM(block, d, REG_TEMP, 0);
    else if (REG_IS_W(dsize)) {
        host_arm_LDRH_IMM(block, REG_TEMP, REG_TEMP, 0);
        host_arm_BFI(block, d, REG_TEMP, 0, 16);
    } else
        fatal("MOVZX_REG_PTR_16 %02x\n", uop->dest_reg_a_real);
    return 0;
}
/* No-op micro-op: emits no host code. */
static int
codegen_NOP(codeblock_t *block, uop_t *uop)
{
    return 0;
}
/* Bitwise OR of two uop registers. 64-bit (MMX) operands use NEON VORR;
   32-bit operands use a plain ORR. Sub-32-bit forms require dest_reg to
   alias src_reg_a so the untouched bits of the host register survive: the
   second source byte is first zero-extended (and shifted for high-byte
   operands) into REG_TEMP, then ORed in — the zero upper bits of REG_TEMP
   guarantee only the target byte/halfword of dest can change. */
static int
codegen_OR(codeblock_t *block, uop_t *uop)
{
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_a = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_a) && REG_IS_Q(src_size_b)) {
        host_arm_VORR_D(block, dest_reg, src_reg_a, src_reg_b);
    } else if (REG_IS_L(dest_size) && REG_IS_L(src_size_a) && REG_IS_L(src_size_b)) {
        host_arm_ORR_REG_LSL(block, dest_reg, src_reg_a, src_reg_b, 0);
    } else if (REG_IS_W(dest_size) && REG_IS_W(src_size_a) && REG_IS_W(src_size_b)) {
        /* 16-bit: compute into REG_TEMP, then insert low 16 bits only. */
        host_arm_ORR_REG_LSL(block, REG_TEMP, src_reg_a, src_reg_b, 0);
        host_arm_BFI(block, dest_reg, REG_TEMP, 0, 16);
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size_a) && REG_IS_B(src_size_b) && dest_reg == src_reg_a) {
        /* low byte |= low byte of src_b */
        host_arm_UXTB(block, REG_TEMP, src_reg_b, 0);
        host_arm_ORR_REG_LSL(block, dest_reg, src_reg_a, REG_TEMP, 0);
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size_a) && REG_IS_BH(src_size_b) && dest_reg == src_reg_a) {
        /* low byte |= high byte of src_b (UXTB with ror-8 extracts bits 8-15) */
        host_arm_UXTB(block, REG_TEMP, src_reg_b, 8);
        host_arm_ORR_REG_LSL(block, dest_reg, src_reg_a, REG_TEMP, 0);
    } else if (REG_IS_BH(dest_size) && REG_IS_BH(src_size_a) && REG_IS_B(src_size_b) && dest_reg == src_reg_a) {
        /* high byte |= low byte of src_b, shifted up into bits 8-15 */
        host_arm_UXTB(block, REG_TEMP, src_reg_b, 0);
        host_arm_ORR_REG_LSL(block, dest_reg, src_reg_a, REG_TEMP, 8);
    } else if (REG_IS_BH(dest_size) && REG_IS_BH(src_size_a) && REG_IS_BH(src_size_b) && dest_reg == src_reg_a) {
        /* high byte |= high byte of src_b */
        host_arm_UXTB(block, REG_TEMP, src_reg_b, 8);
        host_arm_ORR_REG_LSL(block, dest_reg, src_reg_a, REG_TEMP, 8);
    } else
        fatal("OR %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
/* Bitwise OR of a register with an immediate. Sub-32-bit variants require
   dest == src: ORR only sets bits, so the other bits of the shared host
   register are preserved (the BH form shifts the immediate up by 8). */
static int
codegen_OR_IMM(codeblock_t *block, uop_t *uop)
{
    int d     = HOST_REG_GET(uop->dest_reg_a_real);
    int s     = HOST_REG_GET(uop->src_reg_a_real);
    int dsize = IREG_GET_SIZE(uop->dest_reg_a_real);
    int ssize = IREG_GET_SIZE(uop->src_reg_a_real);

    if (REG_IS_L(dsize) && REG_IS_L(ssize))
        host_arm_ORR_IMM(block, d, s, uop->imm_data);
    else if (REG_IS_W(dsize) && REG_IS_W(ssize) && d == s)
        host_arm_ORR_IMM(block, d, s, uop->imm_data);
    else if (REG_IS_B(dsize) && REG_IS_B(ssize) && d == s)
        host_arm_ORR_IMM(block, d, s, uop->imm_data);
    else if (REG_IS_BH(dsize) && REG_IS_BH(ssize) && d == s)
        host_arm_ORR_IMM(block, d, s, uop->imm_data << 8);
    else
        fatal("OR_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
    return 0;
}
/* MMX PACKSSWB: narrow each source's four 16-bit lanes to signed-saturated
   bytes, then interleave the two 32-bit results into the destination. */
static int
codegen_PACKSSWB(codeblock_t *block, uop_t *uop)
{
    int d = HOST_REG_GET(uop->dest_reg_a_real);
    int a = HOST_REG_GET(uop->src_reg_a_real);
    int b = HOST_REG_GET(uop->src_reg_b_real);

    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_b_real))) {
        host_arm_VMOV_D_D(block, REG_Q_TEMP, a);
        host_arm_VMOV_D_D(block, REG_Q_TEMP_2, b);
        host_arm_VQMOVN_S16(block, d, REG_Q_TEMP);
        host_arm_VQMOVN_S16(block, REG_D_TEMP, REG_Q_TEMP_2);
        host_arm_VZIP_D32(block, d, REG_D_TEMP);
    } else
        fatal("PACKSSWB %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
/* MMX PACKSSDW: narrow each source's two 32-bit lanes to signed-saturated
   16-bit words, then interleave the two halves into the destination. */
static int
codegen_PACKSSDW(codeblock_t *block, uop_t *uop)
{
    int d = HOST_REG_GET(uop->dest_reg_a_real);
    int a = HOST_REG_GET(uop->src_reg_a_real);
    int b = HOST_REG_GET(uop->src_reg_b_real);

    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_b_real))) {
        host_arm_VMOV_D_D(block, REG_Q_TEMP, a);
        host_arm_VMOV_D_D(block, REG_Q_TEMP_2, b);
        host_arm_VQMOVN_S32(block, d, REG_Q_TEMP);
        host_arm_VQMOVN_S32(block, REG_D_TEMP, REG_Q_TEMP_2);
        host_arm_VZIP_D32(block, d, REG_D_TEMP);
    } else
        fatal("PACKSSDW %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
/* MMX PACKUSWB: narrow word lanes to unsigned-saturated bytes and
   interleave both results into the destination.
   NOTE(review): VQMOVN_U16 saturates an *unsigned* 16-bit source, but x86
   PACKUSWB saturates *signed* words to [0,255] (e.g. 0xFFFF should clamp
   to 0x00, not 0xFF). The NEON instruction matching that semantic is
   VQMOVUN.S16 — confirm against the ops header / upstream behavior. */
static int
codegen_PACKUSWB(codeblock_t *block, uop_t *uop)
{
    int dest_reg = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_a = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_a) && REG_IS_Q(src_size_b)) {
        host_arm_VMOV_D_D(block, REG_Q_TEMP, src_reg_a);
        host_arm_VMOV_D_D(block, REG_Q_TEMP_2, src_reg_b);
        host_arm_VQMOVN_U16(block, dest_reg, REG_Q_TEMP);
        host_arm_VQMOVN_U16(block, REG_D_TEMP, REG_Q_TEMP_2);
        host_arm_VZIP_D32(block, dest_reg, REG_D_TEMP);
    } else
        fatal("PACKUSWB %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
/* MMX PADDB: wraparound byte-wise add (NEON VADD.I8). */
static int
codegen_PADDB(codeblock_t *block, uop_t *uop)
{
    int d = HOST_REG_GET(uop->dest_reg_a_real);
    int a = HOST_REG_GET(uop->src_reg_a_real);
    int b = HOST_REG_GET(uop->src_reg_b_real);

    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_b_real)))
        host_arm_VADD_I8(block, d, a, b);
    else
        fatal("PADDB %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
/* MMX PADDW: wraparound 16-bit lane add (NEON VADD.I16). */
static int
codegen_PADDW(codeblock_t *block, uop_t *uop)
{
    int d = HOST_REG_GET(uop->dest_reg_a_real);
    int a = HOST_REG_GET(uop->src_reg_a_real);
    int b = HOST_REG_GET(uop->src_reg_b_real);

    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_b_real)))
        host_arm_VADD_I16(block, d, a, b);
    else
        fatal("PADDW %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
/* MMX PADDD: wraparound 32-bit lane add (NEON VADD.I32). */
static int
codegen_PADDD(codeblock_t *block, uop_t *uop)
{
    int d = HOST_REG_GET(uop->dest_reg_a_real);
    int a = HOST_REG_GET(uop->src_reg_a_real);
    int b = HOST_REG_GET(uop->src_reg_b_real);

    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_b_real)))
        host_arm_VADD_I32(block, d, a, b);
    else
        fatal("PADDD %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
/* MMX PADDSB: signed-saturating byte add (NEON VQADD.S8). */
static int
codegen_PADDSB(codeblock_t *block, uop_t *uop)
{
    int d = HOST_REG_GET(uop->dest_reg_a_real);
    int a = HOST_REG_GET(uop->src_reg_a_real);
    int b = HOST_REG_GET(uop->src_reg_b_real);

    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_b_real)))
        host_arm_VQADD_S8(block, d, a, b);
    else
        fatal("PADDSB %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
/* MMX PADDSW: signed-saturating 16-bit add (NEON VQADD.S16). */
static int
codegen_PADDSW(codeblock_t *block, uop_t *uop)
{
    int d = HOST_REG_GET(uop->dest_reg_a_real);
    int a = HOST_REG_GET(uop->src_reg_a_real);
    int b = HOST_REG_GET(uop->src_reg_b_real);

    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_b_real)))
        host_arm_VQADD_S16(block, d, a, b);
    else
        fatal("PADDSW %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
/* MMX PADDUSB: unsigned-saturating byte add (NEON VQADD.U8). */
static int
codegen_PADDUSB(codeblock_t *block, uop_t *uop)
{
    int d = HOST_REG_GET(uop->dest_reg_a_real);
    int a = HOST_REG_GET(uop->src_reg_a_real);
    int b = HOST_REG_GET(uop->src_reg_b_real);

    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_b_real)))
        host_arm_VQADD_U8(block, d, a, b);
    else
        fatal("PADDUSB %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
/* MMX PADDUSW: unsigned-saturating 16-bit add (NEON VQADD.U16). */
static int
codegen_PADDUSW(codeblock_t *block, uop_t *uop)
{
    int d = HOST_REG_GET(uop->dest_reg_a_real);
    int a = HOST_REG_GET(uop->src_reg_a_real);
    int b = HOST_REG_GET(uop->src_reg_b_real);

    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_b_real)))
        host_arm_VQADD_U16(block, d, a, b);
    else
        fatal("PADDUSW %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
/* MMX PCMPEQB: byte-wise equality compare, lanes become all-ones/all-zeros
   (NEON VCEQ.I8). */
static int
codegen_PCMPEQB(codeblock_t *block, uop_t *uop)
{
    int d = HOST_REG_GET(uop->dest_reg_a_real);
    int a = HOST_REG_GET(uop->src_reg_a_real);
    int b = HOST_REG_GET(uop->src_reg_b_real);

    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_b_real)))
        host_arm_VCEQ_I8(block, d, a, b);
    else
        fatal("PCMPEQB %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
/* MMX PCMPEQW: 16-bit lane equality compare (NEON VCEQ.I16). */
static int
codegen_PCMPEQW(codeblock_t *block, uop_t *uop)
{
    int d = HOST_REG_GET(uop->dest_reg_a_real);
    int a = HOST_REG_GET(uop->src_reg_a_real);
    int b = HOST_REG_GET(uop->src_reg_b_real);

    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_b_real)))
        host_arm_VCEQ_I16(block, d, a, b);
    else
        fatal("PCMPEQW %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
/* MMX PCMPEQD: 32-bit lane equality compare (NEON VCEQ.I32). */
static int
codegen_PCMPEQD(codeblock_t *block, uop_t *uop)
{
    int d = HOST_REG_GET(uop->dest_reg_a_real);
    int a = HOST_REG_GET(uop->src_reg_a_real);
    int b = HOST_REG_GET(uop->src_reg_b_real);

    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_b_real)))
        host_arm_VCEQ_I32(block, d, a, b);
    else
        fatal("PCMPEQD %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
/* MMX PCMPGTB: signed byte greater-than compare (NEON VCGT.S8). */
static int
codegen_PCMPGTB(codeblock_t *block, uop_t *uop)
{
    int d = HOST_REG_GET(uop->dest_reg_a_real);
    int a = HOST_REG_GET(uop->src_reg_a_real);
    int b = HOST_REG_GET(uop->src_reg_b_real);

    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_b_real)))
        host_arm_VCGT_S8(block, d, a, b);
    else
        fatal("PCMPGTB %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
/* MMX PCMPGTW: signed 16-bit greater-than compare (NEON VCGT.S16). */
static int
codegen_PCMPGTW(codeblock_t *block, uop_t *uop)
{
    int d = HOST_REG_GET(uop->dest_reg_a_real);
    int a = HOST_REG_GET(uop->src_reg_a_real);
    int b = HOST_REG_GET(uop->src_reg_b_real);

    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_b_real)))
        host_arm_VCGT_S16(block, d, a, b);
    else
        fatal("PCMPGTW %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
/* MMX PCMPGTD: signed 32-bit greater-than compare (NEON VCGT.S32). */
static int
codegen_PCMPGTD(codeblock_t *block, uop_t *uop)
{
    int d = HOST_REG_GET(uop->dest_reg_a_real);
    int a = HOST_REG_GET(uop->src_reg_a_real);
    int b = HOST_REG_GET(uop->src_reg_b_real);

    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_b_real)))
        host_arm_VCGT_S32(block, d, a, b);
    else
        fatal("PCMPGTD %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
/* 3DNow! PF2ID: convert packed single-precision floats to 32-bit integers
   (NEON VCVT f32->s32). */
static int
codegen_PF2ID(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_a  = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_a)) {
        host_arm_VCVT_S32_F32(block, dest_reg, src_reg_a);
    } else
        /* Bug fix: format string has two %02x conversions but only one
           argument was passed — undefined behavior in vfprintf. */
        fatal("PF2ID %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
    return 0;
}
/* 3DNow! PFADD: packed single-precision add (NEON VADD.F32). */
static int
codegen_PFADD(codeblock_t *block, uop_t *uop)
{
    int d = HOST_REG_GET(uop->dest_reg_a_real);
    int a = HOST_REG_GET(uop->src_reg_a_real);
    int b = HOST_REG_GET(uop->src_reg_b_real);

    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_b_real)))
        host_arm_VADD_F32(block, d, a, b);
    else
        fatal("PFADD %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
/* 3DNow! PFCMPEQ: packed float equality compare (NEON VCEQ.F32). */
static int
codegen_PFCMPEQ(codeblock_t *block, uop_t *uop)
{
    int d = HOST_REG_GET(uop->dest_reg_a_real);
    int a = HOST_REG_GET(uop->src_reg_a_real);
    int b = HOST_REG_GET(uop->src_reg_b_real);

    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_b_real)))
        host_arm_VCEQ_F32(block, d, a, b);
    else
        fatal("PFCMPEQ %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
/* 3DNow! PFCMPGE: packed float greater-or-equal compare (NEON VCGE.F32). */
static int
codegen_PFCMPGE(codeblock_t *block, uop_t *uop)
{
    int d = HOST_REG_GET(uop->dest_reg_a_real);
    int a = HOST_REG_GET(uop->src_reg_a_real);
    int b = HOST_REG_GET(uop->src_reg_b_real);

    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_b_real)))
        host_arm_VCGE_F32(block, d, a, b);
    else
        fatal("PFCMPGE %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
/* 3DNow! PFCMPGT: packed float greater-than compare (NEON VCGT.F32). */
static int
codegen_PFCMPGT(codeblock_t *block, uop_t *uop)
{
    int d = HOST_REG_GET(uop->dest_reg_a_real);
    int a = HOST_REG_GET(uop->src_reg_a_real);
    int b = HOST_REG_GET(uop->src_reg_b_real);

    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_b_real)))
        host_arm_VCGT_F32(block, d, a, b);
    else
        fatal("PFCMPGT %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
/* 3DNow! PFMAX: packed float maximum (NEON VMAX.F32). */
static int
codegen_PFMAX(codeblock_t *block, uop_t *uop)
{
    int d = HOST_REG_GET(uop->dest_reg_a_real);
    int a = HOST_REG_GET(uop->src_reg_a_real);
    int b = HOST_REG_GET(uop->src_reg_b_real);

    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_b_real)))
        host_arm_VMAX_F32(block, d, a, b);
    else
        fatal("PFMAX %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
/* 3DNow! PFMIN: packed float minimum (NEON VMIN.F32). */
static int
codegen_PFMIN(codeblock_t *block, uop_t *uop)
{
    int d = HOST_REG_GET(uop->dest_reg_a_real);
    int a = HOST_REG_GET(uop->src_reg_a_real);
    int b = HOST_REG_GET(uop->src_reg_b_real);

    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_b_real)))
        host_arm_VMIN_F32(block, d, a, b);
    else
        fatal("PFMIN %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
/* 3DNow! PFMUL: packed float multiply (NEON VMUL.F32). */
static int
codegen_PFMUL(codeblock_t *block, uop_t *uop)
{
    int d = HOST_REG_GET(uop->dest_reg_a_real);
    int a = HOST_REG_GET(uop->src_reg_a_real);
    int b = HOST_REG_GET(uop->src_reg_b_real);

    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_b_real)))
        host_arm_VMUL_F32(block, d, a, b);
    else
        fatal("PFMUL %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
/* 3DNow! PFRCP: approximate reciprocal, computed exactly as 1.0/x and
   broadcast to both lanes. */
static int
codegen_PFRCP(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_a  = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_a)) {
        /*TODO: This could be improved (use VRECPE/VRECPS)*/
        host_arm_VMOV_F32_ONE(block, REG_D_TEMP);              /* temp = 1.0 */
        host_arm_VDIV_S(block, dest_reg, REG_D_TEMP, src_reg_a); /* dest = 1/x */
        host_arm_VDUP_32(block, dest_reg, dest_reg, 0);        /* splat lane 0 */
    } else
        /* Bug fix: format string has two %02x conversions but only one
           argument was passed — undefined behavior in vfprintf. */
        fatal("PFRCP %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
    return 0;
}
/* 3DNow! PFRSQRT: approximate reciprocal square root, computed exactly as
   1.0/sqrt(x) and broadcast to both lanes. */
static int
codegen_PFRSQRT(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_a  = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_a)) {
        /*TODO: This could be improved (use VRSQRTE/VRSQRTS)*/
        /* Bug fix: the original loaded 1.0 into REG_D_TEMP, destroying the
           sqrt result just computed there, then divided the stale dest
           register by 1.0 — producing garbage. Keep sqrt(x) in REG_D_TEMP,
           put 1.0 in dest, and divide dest/temp. */
        host_arm_VSQRT_S(block, REG_D_TEMP, src_reg_a);          /* temp = sqrt(x) */
        host_arm_VMOV_F32_ONE(block, dest_reg);                  /* dest = 1.0 */
        host_arm_VDIV_S(block, dest_reg, dest_reg, REG_D_TEMP);  /* dest = 1/sqrt(x) */
        host_arm_VDUP_32(block, dest_reg, dest_reg, 0);          /* splat lane 0 */
    } else
        /* Also fix the fatal() vararg count (two %02x, one argument). */
        fatal("PFRSQRT %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
    return 0;
}
/* 3DNow! PFSUB: packed float subtract (NEON VSUB.F32). */
static int
codegen_PFSUB(codeblock_t *block, uop_t *uop)
{
    int d = HOST_REG_GET(uop->dest_reg_a_real);
    int a = HOST_REG_GET(uop->src_reg_a_real);
    int b = HOST_REG_GET(uop->src_reg_b_real);

    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_b_real)))
        host_arm_VSUB_F32(block, d, a, b);
    else
        fatal("PFSUB %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
/* 3DNow! PI2FD: convert packed 32-bit integers to single-precision floats
   (NEON VCVT s32->f32). */
static int
codegen_PI2FD(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_a  = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_a)) {
        host_arm_VCVT_F32_S32(block, dest_reg, src_reg_a);
    } else
        /* Bug fix: format string has two %02x conversions but only one
           argument was passed — undefined behavior in vfprintf. */
        fatal("PI2FD %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
    return 0;
}
/* MMX PMADDWD: widen-multiply the 16-bit lanes (VMULL.S16 -> 4x32), add
   adjacent pairs (VPADDL -> 2x64), then narrow back to 2x32 (VMOVN). */
static int
codegen_PMADDWD(codeblock_t *block, uop_t *uop)
{
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_a  = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_a) && REG_IS_Q(src_size_b)) {
        host_arm_VMULL_S16(block, REG_Q_TEMP, src_reg_a, src_reg_b);
        host_arm_VPADDL_Q_S32(block, REG_Q_TEMP, REG_Q_TEMP);
        host_arm_VMOVN_I64(block, dest_reg, REG_Q_TEMP);
    } else
        /* Bug fix: diagnostic said "PMULHW" (copy/paste from the function
           below), pointing debugging at the wrong micro-op. */
        fatal("PMADDWD %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
/* MMX PMULHW: high 16 bits of the signed 16x16 products — widen-multiply
   (VMULL.S16) then narrow with a 16-bit right shift (VSHRN). */
static int
codegen_PMULHW(codeblock_t *block, uop_t *uop)
{
    int d = HOST_REG_GET(uop->dest_reg_a_real);
    int a = HOST_REG_GET(uop->src_reg_a_real);
    int b = HOST_REG_GET(uop->src_reg_b_real);

    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_b_real))) {
        host_arm_VMULL_S16(block, REG_Q_TEMP, a, b);
        host_arm_VSHRN_32(block, d, REG_Q_TEMP, 16);
    } else
        fatal("PMULHW %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
/* MMX PMULLW: low 16 bits of the 16x16 products (NEON VMUL.S16). */
static int
codegen_PMULLW(codeblock_t *block, uop_t *uop)
{
    int d = HOST_REG_GET(uop->dest_reg_a_real);
    int a = HOST_REG_GET(uop->src_reg_a_real);
    int b = HOST_REG_GET(uop->src_reg_b_real);

    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_b_real)))
        host_arm_VMUL_S16(block, d, a, b);
    else
        fatal("PMULLW %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
/* MMX PSLLW (immediate): shift 16-bit lanes left. Count 0 is a plain copy;
   a count past the lane width zeroes the whole register, matching x86. */
static int
codegen_PSLLW_IMM(codeblock_t *block, uop_t *uop)
{
    int d = HOST_REG_GET(uop->dest_reg_a_real);
    int s = HOST_REG_GET(uop->src_reg_a_real);

    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real))) {
        if (uop->imm_data == 0)
            host_arm_VMOV_D_D(block, d, s);
        else if (uop->imm_data > 15)
            host_arm_VEOR_D(block, d, d, d);
        else
            host_arm_VSHL_D_IMM_16(block, d, s, uop->imm_data);
    } else
        fatal("PSLLW_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
    return 0;
}
/* MMX PSLLD (immediate): shift 32-bit lanes left; count > 31 zeroes the
   register, count 0 is a copy. */
static int
codegen_PSLLD_IMM(codeblock_t *block, uop_t *uop)
{
    int d = HOST_REG_GET(uop->dest_reg_a_real);
    int s = HOST_REG_GET(uop->src_reg_a_real);

    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real))) {
        if (uop->imm_data == 0)
            host_arm_VMOV_D_D(block, d, s);
        else if (uop->imm_data > 31)
            host_arm_VEOR_D(block, d, d, d);
        else
            host_arm_VSHL_D_IMM_32(block, d, s, uop->imm_data);
    } else
        fatal("PSLLD_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
    return 0;
}
/* MMX PSLLQ (immediate): shift the full 64-bit value left; count > 63
   zeroes the register, count 0 is a copy. */
static int
codegen_PSLLQ_IMM(codeblock_t *block, uop_t *uop)
{
    int d = HOST_REG_GET(uop->dest_reg_a_real);
    int s = HOST_REG_GET(uop->src_reg_a_real);

    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real))) {
        if (uop->imm_data == 0)
            host_arm_VMOV_D_D(block, d, s);
        else if (uop->imm_data > 63)
            host_arm_VEOR_D(block, d, d, d);
        else
            host_arm_VSHL_D_IMM_64(block, d, s, uop->imm_data);
    } else
        fatal("PSLLQ_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
    return 0;
}
/* MMX PSRAW (immediate): arithmetic right shift of 16-bit lanes. Counts
   past the lane width are clamped to 15 (lanes fill with sign bits). */
static int
codegen_PSRAW_IMM(codeblock_t *block, uop_t *uop)
{
    int d = HOST_REG_GET(uop->dest_reg_a_real);
    int s = HOST_REG_GET(uop->src_reg_a_real);

    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real))) {
        if (uop->imm_data == 0)
            host_arm_VMOV_D_D(block, d, s);
        else if (uop->imm_data > 15)
            host_arm_VSHR_D_S16(block, d, s, 15);
        else
            host_arm_VSHR_D_S16(block, d, s, uop->imm_data);
    } else
        fatal("PSRAW_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
    return 0;
}
/* MMX PSRAD (immediate): arithmetic right shift of 32-bit lanes; counts
   past the lane width are clamped to 31. */
static int
codegen_PSRAD_IMM(codeblock_t *block, uop_t *uop)
{
    int d = HOST_REG_GET(uop->dest_reg_a_real);
    int s = HOST_REG_GET(uop->src_reg_a_real);

    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real))) {
        if (uop->imm_data == 0)
            host_arm_VMOV_D_D(block, d, s);
        else if (uop->imm_data > 31)
            host_arm_VSHR_D_S32(block, d, s, 31);
        else
            host_arm_VSHR_D_S32(block, d, s, uop->imm_data);
    } else
        fatal("PSRAD_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
    return 0;
}
/* PSRAQ (immediate): arithmetic right shift of the 64-bit value; counts
   past 63 are clamped to 63. */
static int
codegen_PSRAQ_IMM(codeblock_t *block, uop_t *uop)
{
    int d = HOST_REG_GET(uop->dest_reg_a_real);
    int s = HOST_REG_GET(uop->src_reg_a_real);

    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real))) {
        if (uop->imm_data == 0)
            host_arm_VMOV_D_D(block, d, s);
        else if (uop->imm_data > 63)
            host_arm_VSHR_D_S64(block, d, s, 63);
        else
            host_arm_VSHR_D_S64(block, d, s, uop->imm_data);
    } else
        fatal("PSRAQ_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
    return 0;
}
/* MMX PSRLW (immediate): logical right shift of 16-bit lanes; counts past
   the lane width zero the register. */
static int
codegen_PSRLW_IMM(codeblock_t *block, uop_t *uop)
{
    int d = HOST_REG_GET(uop->dest_reg_a_real);
    int s = HOST_REG_GET(uop->src_reg_a_real);

    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real))) {
        if (uop->imm_data == 0)
            host_arm_VMOV_D_D(block, d, s);
        else if (uop->imm_data > 15)
            host_arm_VEOR_D(block, d, d, d);
        else
            host_arm_VSHR_D_U16(block, d, s, uop->imm_data);
    } else
        fatal("PSRLW_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
    return 0;
}
/* MMX PSRLD (immediate): logical right shift of 32-bit lanes; counts past
   the lane width zero the register. */
static int
codegen_PSRLD_IMM(codeblock_t *block, uop_t *uop)
{
    int d = HOST_REG_GET(uop->dest_reg_a_real);
    int s = HOST_REG_GET(uop->src_reg_a_real);

    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real))) {
        if (uop->imm_data == 0)
            host_arm_VMOV_D_D(block, d, s);
        else if (uop->imm_data > 31)
            host_arm_VEOR_D(block, d, d, d);
        else
            host_arm_VSHR_D_U32(block, d, s, uop->imm_data);
    } else
        fatal("PSRLD_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
    return 0;
}
/* MMX PSRLQ (immediate): logical right shift of the 64-bit value; counts
   past 63 zero the register. */
static int
codegen_PSRLQ_IMM(codeblock_t *block, uop_t *uop)
{
    int d = HOST_REG_GET(uop->dest_reg_a_real);
    int s = HOST_REG_GET(uop->src_reg_a_real);

    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real))) {
        if (uop->imm_data == 0)
            host_arm_VMOV_D_D(block, d, s);
        else if (uop->imm_data > 63)
            host_arm_VEOR_D(block, d, d, d);
        else
            host_arm_VSHR_D_U64(block, d, s, uop->imm_data);
    } else
        fatal("PSRLQ_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
    return 0;
}
/* MMX PSUBB: wraparound byte-wise subtract (NEON VSUB.I8). */
static int
codegen_PSUBB(codeblock_t *block, uop_t *uop)
{
    int d = HOST_REG_GET(uop->dest_reg_a_real);
    int a = HOST_REG_GET(uop->src_reg_a_real);
    int b = HOST_REG_GET(uop->src_reg_b_real);

    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_b_real)))
        host_arm_VSUB_I8(block, d, a, b);
    else
        fatal("PSUBB %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
/* MMX PSUBW: wraparound 16-bit lane subtract (NEON VSUB.I16). */
static int
codegen_PSUBW(codeblock_t *block, uop_t *uop)
{
    int d = HOST_REG_GET(uop->dest_reg_a_real);
    int a = HOST_REG_GET(uop->src_reg_a_real);
    int b = HOST_REG_GET(uop->src_reg_b_real);

    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_b_real)))
        host_arm_VSUB_I16(block, d, a, b);
    else
        fatal("PSUBW %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
/* MMX PSUBD: wraparound 32-bit lane subtract (NEON VSUB.I32). */
static int
codegen_PSUBD(codeblock_t *block, uop_t *uop)
{
    int d = HOST_REG_GET(uop->dest_reg_a_real);
    int a = HOST_REG_GET(uop->src_reg_a_real);
    int b = HOST_REG_GET(uop->src_reg_b_real);

    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_b_real)))
        host_arm_VSUB_I32(block, d, a, b);
    else
        fatal("PSUBD %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
/* MMX PSUBSB: signed-saturating byte subtract (NEON VQSUB.S8). */
static int
codegen_PSUBSB(codeblock_t *block, uop_t *uop)
{
    int d = HOST_REG_GET(uop->dest_reg_a_real);
    int a = HOST_REG_GET(uop->src_reg_a_real);
    int b = HOST_REG_GET(uop->src_reg_b_real);

    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_b_real)))
        host_arm_VQSUB_S8(block, d, a, b);
    else
        fatal("PSUBSB %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
static int
codegen_PSUBSW(codeblock_t *block, uop_t *uop)
{
    /* MMX PSUBSW: packed 16-bit subtract with signed saturation. */
    const int d  = HOST_REG_GET(uop->dest_reg_a_real);
    const int sa = HOST_REG_GET(uop->src_reg_a_real);
    const int sb = HOST_REG_GET(uop->src_reg_b_real);

    /* All three operands must be 64-bit (quad) registers. */
    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_b_real))) {
        host_arm_VQSUB_S16(block, d, sa, sb);
    } else {
        fatal("PSUBSW %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    }

    return 0;
}
static int
codegen_PSUBUSB(codeblock_t *block, uop_t *uop)
{
    /* MMX PSUBUSB: packed 8-bit subtract with unsigned saturation. */
    const int d  = HOST_REG_GET(uop->dest_reg_a_real);
    const int sa = HOST_REG_GET(uop->src_reg_a_real);
    const int sb = HOST_REG_GET(uop->src_reg_b_real);

    /* All three operands must be 64-bit (quad) registers. */
    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_b_real))) {
        host_arm_VQSUB_U8(block, d, sa, sb);
    } else {
        fatal("PSUBUSB %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    }

    return 0;
}
static int
codegen_PSUBUSW(codeblock_t *block, uop_t *uop)
{
    /* MMX PSUBUSW: packed 16-bit subtract with unsigned saturation. */
    const int d  = HOST_REG_GET(uop->dest_reg_a_real);
    const int sa = HOST_REG_GET(uop->src_reg_a_real);
    const int sb = HOST_REG_GET(uop->src_reg_b_real);

    /* All three operands must be 64-bit (quad) registers. */
    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_b_real))) {
        host_arm_VQSUB_U16(block, d, sa, sb);
    } else {
        fatal("PSUBUSW %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    }

    return 0;
}
static int
codegen_PUNPCKHBW(codeblock_t *block, uop_t *uop)
{
    /* MMX PUNPCKHBW: interleave the high-half bytes of src_a and src_b. */
    const int d  = HOST_REG_GET(uop->dest_reg_a_real);
    const int sa = HOST_REG_GET(uop->src_reg_a_real);
    const int sb = HOST_REG_GET(uop->src_reg_b_real);

    /* All three operands must be 64-bit (quad) registers. */
    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_b_real))) {
        /* VZIP interleaves the register pair in place; after it, the second
           register of the pair (REG_D_TEMP) holds the high-half interleave,
           which is the PUNPCKH result — copy it into the destination. */
        host_arm_VMOV_D_D(block, REG_D_TEMP, sb);
        if (d != sa)
            host_arm_VMOV_D_D(block, d, sa);
        host_arm_VZIP_D8(block, d, REG_D_TEMP);
        host_arm_VMOV_D_D(block, d, REG_D_TEMP);
    } else {
        fatal("PUNPCKHBW %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    }

    return 0;
}
static int
codegen_PUNPCKHWD(codeblock_t *block, uop_t *uop)
{
    /* MMX PUNPCKHWD: interleave the high-half 16-bit words of src_a and src_b. */
    const int d  = HOST_REG_GET(uop->dest_reg_a_real);
    const int sa = HOST_REG_GET(uop->src_reg_a_real);
    const int sb = HOST_REG_GET(uop->src_reg_b_real);

    /* All three operands must be 64-bit (quad) registers. */
    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_b_real))) {
        /* After VZIP, REG_D_TEMP holds the high-half interleave (the
           PUNPCKH result); copy it into the destination. */
        host_arm_VMOV_D_D(block, REG_D_TEMP, sb);
        if (d != sa)
            host_arm_VMOV_D_D(block, d, sa);
        host_arm_VZIP_D16(block, d, REG_D_TEMP);
        host_arm_VMOV_D_D(block, d, REG_D_TEMP);
    } else {
        fatal("PUNPCKHWD %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    }

    return 0;
}
static int
codegen_PUNPCKHDQ(codeblock_t *block, uop_t *uop)
{
    /* MMX PUNPCKHDQ: interleave the high 32-bit dwords of src_a and src_b. */
    const int d  = HOST_REG_GET(uop->dest_reg_a_real);
    const int sa = HOST_REG_GET(uop->src_reg_a_real);
    const int sb = HOST_REG_GET(uop->src_reg_b_real);

    /* All three operands must be 64-bit (quad) registers. */
    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_b_real))) {
        /* After VZIP, REG_D_TEMP holds the high-half interleave (the
           PUNPCKH result); copy it into the destination. */
        host_arm_VMOV_D_D(block, REG_D_TEMP, sb);
        if (d != sa)
            host_arm_VMOV_D_D(block, d, sa);
        host_arm_VZIP_D32(block, d, REG_D_TEMP);
        host_arm_VMOV_D_D(block, d, REG_D_TEMP);
    } else {
        fatal("PUNPCKHDQ %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    }

    return 0;
}
static int
codegen_PUNPCKLBW(codeblock_t *block, uop_t *uop)
{
    /* MMX PUNPCKLBW: interleave the low-half bytes of src_a and src_b. */
    const int d  = HOST_REG_GET(uop->dest_reg_a_real);
    const int sa = HOST_REG_GET(uop->src_reg_a_real);
    const int sb = HOST_REG_GET(uop->src_reg_b_real);

    /* All three operands must be 64-bit (quad) registers. */
    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_b_real))) {
        /* After VZIP, the destination register itself holds the low-half
           interleave (the PUNPCKL result) — no final copy is needed. */
        host_arm_VMOV_D_D(block, REG_D_TEMP, sb);
        if (d != sa)
            host_arm_VMOV_D_D(block, d, sa);
        host_arm_VZIP_D8(block, d, REG_D_TEMP);
    } else {
        fatal("PUNPCKLBW %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    }

    return 0;
}
static int
codegen_PUNPCKLWD(codeblock_t *block, uop_t *uop)
{
    /* MMX PUNPCKLWD: interleave the low-half 16-bit words of src_a and src_b. */
    const int d  = HOST_REG_GET(uop->dest_reg_a_real);
    const int sa = HOST_REG_GET(uop->src_reg_a_real);
    const int sb = HOST_REG_GET(uop->src_reg_b_real);

    /* All three operands must be 64-bit (quad) registers. */
    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_b_real))) {
        /* After VZIP, the destination register holds the low-half interleave
           (the PUNPCKL result) — no final copy is needed. */
        host_arm_VMOV_D_D(block, REG_D_TEMP, sb);
        if (d != sa)
            host_arm_VMOV_D_D(block, d, sa);
        host_arm_VZIP_D16(block, d, REG_D_TEMP);
    } else {
        fatal("PUNPCKLWD %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    }

    return 0;
}
static int
codegen_PUNPCKLDQ(codeblock_t *block, uop_t *uop)
{
    /* MMX PUNPCKLDQ: interleave the low 32-bit dwords of src_a and src_b. */
    const int d  = HOST_REG_GET(uop->dest_reg_a_real);
    const int sa = HOST_REG_GET(uop->src_reg_a_real);
    const int sb = HOST_REG_GET(uop->src_reg_b_real);

    /* All three operands must be 64-bit (quad) registers. */
    if (REG_IS_Q(IREG_GET_SIZE(uop->dest_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_a_real)) && REG_IS_Q(IREG_GET_SIZE(uop->src_reg_b_real))) {
        /* After VZIP, the destination register holds the low-half interleave
           (the PUNPCKL result) — no final copy is needed. */
        host_arm_VMOV_D_D(block, REG_D_TEMP, sb);
        if (d != sa)
            host_arm_VMOV_D_D(block, d, sa);
        host_arm_VZIP_D32(block, d, REG_D_TEMP);
    } else {
        fatal("PUNPCKLDQ %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    }

    return 0;
}
static int
codegen_ROL(codeblock_t *block, uop_t *uop)
{
    /* Emit an x86 ROL (rotate left) by a count held in a register.
       ARM has no rotate-left instruction, so the code rotates/shifts right
       by (width - count) instead. Partial-width results are merged back into
       the destination register with BFI, preserving the untouched bits. */
    int dest_reg  = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg   = HOST_REG_GET(uop->src_reg_a_real);
    int shift_reg = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size  = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_L(dest_size) && REG_IS_L(src_size)) {
        /* 32-bit: ROL n == ROR (32 - n). */
        host_arm_RSB_IMM(block, REG_TEMP2, shift_reg, 32);
        host_arm_MOV_REG_ROR_REG(block, dest_reg, src_reg, REG_TEMP2);
    } else if (REG_IS_W(dest_size) && REG_IS_W(src_size)) {
        /* 16-bit: duplicate the halfword into both halves of REG_TEMP,
           rotate right by (16 - n), then insert the low 16 bits. */
        host_arm_UXTH(block, REG_TEMP, src_reg, 0);
        host_arm_RSB_IMM(block, REG_TEMP2, shift_reg, 16);
        host_arm_ORR_REG_LSL(block, REG_TEMP, REG_TEMP, REG_TEMP, 16);
        host_arm_MOV_REG_ROR_REG(block, REG_TEMP, REG_TEMP, REG_TEMP2);
        host_arm_BFI(block, dest_reg, REG_TEMP, 0, 16);
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size)) {
        /* 8-bit: duplicate the byte into bits 8-15, shift right by
           (8 - n) & 7, then insert the low byte. */
        host_arm_RSB_IMM(block, REG_TEMP2, shift_reg, 8);
        host_arm_UXTB(block, REG_TEMP, src_reg, 0);
        host_arm_AND_IMM(block, REG_TEMP2, REG_TEMP2, 7);
        host_arm_ORR_REG_LSL(block, REG_TEMP, REG_TEMP, REG_TEMP, 8);
        host_arm_MOV_REG_LSR_REG(block, REG_TEMP, REG_TEMP, REG_TEMP2);
        host_arm_BFI(block, dest_reg, REG_TEMP, 0, 8);
    } else if (REG_IS_BH(dest_size) && REG_IS_BH(src_size)) {
        /* High-byte (AH/BH/...) variant: same as the 8-bit case but the
           source byte is taken from bits 8-15 and written back there. */
        host_arm_RSB_IMM(block, REG_TEMP2, shift_reg, 8);
        host_arm_UXTB(block, REG_TEMP, src_reg, 8);
        host_arm_AND_IMM(block, REG_TEMP2, REG_TEMP2, 7);
        host_arm_ORR_REG_LSL(block, REG_TEMP, REG_TEMP, REG_TEMP, 8);
        host_arm_MOV_REG_LSR_REG(block, REG_TEMP, REG_TEMP, REG_TEMP2);
        host_arm_BFI(block, dest_reg, REG_TEMP, 8, 8);
    } else
        fatal("ROL %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
    return 0;
}
static int
codegen_ROL_IMM(codeblock_t *block, uop_t *uop)
{
    /* Emit an x86 ROL by an immediate count (uop->imm_data).
       Implemented as a right rotate/shift by (width - count); a count that
       is a multiple of the width degenerates into a plain copy. */
    int dest_reg  = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg   = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size  = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_L(dest_size) && REG_IS_L(src_size)) {
        if (!(uop->imm_data & 31)) {
            /* Zero effective count: just copy if the registers differ. */
            if (src_reg != dest_reg)
                host_arm_MOV_REG(block, dest_reg, src_reg);
        } else {
            /* ROL n == ROR (32 - n). */
            host_arm_MOV_REG_ROR(block, dest_reg, src_reg, 32 - (uop->imm_data & 31));
        }
    } else if (REG_IS_W(dest_size) && REG_IS_W(src_size)) {
        if ((uop->imm_data & 15) == 0) {
            if (src_reg != dest_reg)
                host_arm_BFI(block, dest_reg, src_reg, 0, 16);
        } else {
            /* Duplicate the halfword, shift right by (16 - n), insert. */
            host_arm_UXTH(block, REG_TEMP, src_reg, 0);
            host_arm_ORR_REG_LSL(block, REG_TEMP, REG_TEMP, REG_TEMP, 16);
            host_arm_MOV_REG_LSR(block, REG_TEMP, REG_TEMP, 16 - (uop->imm_data & 15));
            host_arm_BFI(block, dest_reg, REG_TEMP, 0, 16);
        }
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size)) {
        if ((uop->imm_data & 7) == 0) {
            if (src_reg != dest_reg)
                host_arm_BFI(block, dest_reg, src_reg, 0, 8);
        } else {
            /* Duplicate the byte into bits 8-15, shift right by (8 - n), insert. */
            host_arm_UXTB(block, REG_TEMP, src_reg, 0);
            host_arm_ORR_REG_LSL(block, REG_TEMP, REG_TEMP, REG_TEMP, 8);
            host_arm_MOV_REG_LSR(block, REG_TEMP, REG_TEMP, 8 - (uop->imm_data & 7));
            host_arm_BFI(block, dest_reg, REG_TEMP, 0, 8);
        }
    } else if (REG_IS_BH(dest_size) && REG_IS_BH(src_size)) {
        if ((uop->imm_data & 7) == 0) {
            /* NOTE(review): the zero-count high-byte copy between different
               registers is not implemented — callers apparently never emit it. */
            if (src_reg != dest_reg)
                fatal("ROL_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
        } else {
            /* Same as the 8-bit case, operating on bits 8-15. */
            host_arm_UXTB(block, REG_TEMP, src_reg, 8);
            host_arm_ORR_REG_LSL(block, REG_TEMP, REG_TEMP, REG_TEMP, 8);
            host_arm_MOV_REG_LSR(block, REG_TEMP, REG_TEMP, 8 - (uop->imm_data & 7));
            host_arm_BFI(block, dest_reg, REG_TEMP, 8, 8);
        }
    } else
        fatal("ROL_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
    return 0;
}
static int
codegen_ROR(codeblock_t *block, uop_t *uop)
{
    /* Emit an x86 ROR (rotate right) by a count held in a register.
       Sub-32-bit widths duplicate the value so that bits shifted out of
       the low part re-enter from the duplicated copy above it. */
    int dest_reg  = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg   = HOST_REG_GET(uop->src_reg_a_real);
    int shift_reg = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size  = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_L(dest_size) && REG_IS_L(src_size)) {
        /* 32-bit: ARM's ROR maps directly. */
        host_arm_MOV_REG_ROR_REG(block, dest_reg, src_reg, shift_reg);
    } else if (REG_IS_W(dest_size) && REG_IS_W(src_size)) {
        /* 16-bit: duplicate halfword, shift right by (count & 15), insert. */
        host_arm_UXTH(block, REG_TEMP, src_reg, 0);
        host_arm_AND_IMM(block, REG_TEMP2, shift_reg, 15);
        host_arm_ORR_REG_LSL(block, REG_TEMP, REG_TEMP, REG_TEMP, 16);
        host_arm_MOV_REG_LSR_REG(block, REG_TEMP, REG_TEMP, REG_TEMP2);
        host_arm_BFI(block, dest_reg, REG_TEMP, 0, 16);
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size)) {
        /* 8-bit: duplicate byte, shift right by (count & 7), insert. */
        host_arm_UXTB(block, REG_TEMP, src_reg, 0);
        host_arm_AND_IMM(block, REG_TEMP2, shift_reg, 7);
        host_arm_ORR_REG_LSL(block, REG_TEMP, REG_TEMP, REG_TEMP, 8);
        host_arm_MOV_REG_LSR_REG(block, REG_TEMP, REG_TEMP, REG_TEMP2);
        host_arm_BFI(block, dest_reg, REG_TEMP, 0, 8);
    } else if (REG_IS_BH(dest_size) && REG_IS_BH(src_size)) {
        /* High-byte variant: source byte taken from bits 8-15, result
           written back to bits 8-15. */
        host_arm_UXTB(block, REG_TEMP, src_reg, 8);
        host_arm_AND_IMM(block, REG_TEMP2, shift_reg, 7);
        host_arm_ORR_REG_LSL(block, REG_TEMP, REG_TEMP, REG_TEMP, 8);
        host_arm_MOV_REG_LSR_REG(block, REG_TEMP, REG_TEMP, REG_TEMP2);
        host_arm_BFI(block, dest_reg, REG_TEMP, 8, 8);
    } else
        fatal("ROR %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
    return 0;
}
static int
codegen_ROR_IMM(codeblock_t *block, uop_t *uop)
{
    /* Emit an x86 ROR by an immediate count (uop->imm_data).
       A count that is a multiple of the width is a no-op (or a copy in the
       32-bit case); otherwise the value is duplicated and shifted right. */
    int dest_reg  = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg   = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size  = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_L(dest_size) && REG_IS_L(src_size)) {
        if (!(uop->imm_data & 31)) {
            if (src_reg != dest_reg)
                host_arm_MOV_REG(block, dest_reg, src_reg);
        } else {
            host_arm_MOV_REG_ROR(block, dest_reg, src_reg, uop->imm_data & 31);
        }
    } else if (REG_IS_W(dest_size) && REG_IS_W(src_size)) {
        if ((uop->imm_data & 15) == 0) {
            /* NOTE(review): unlike codegen_ROL_IMM, the zero-count copy
               between different registers is unimplemented here — callers
               apparently never emit that combination. */
            if (src_reg != dest_reg)
                fatal("ROR_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
        } else {
            /* Duplicate the halfword, shift right by the count, insert. */
            host_arm_UXTH(block, REG_TEMP, src_reg, 0);
            host_arm_ORR_REG_LSL(block, REG_TEMP, REG_TEMP, REG_TEMP, 16);
            host_arm_MOV_REG_LSR(block, REG_TEMP, REG_TEMP, uop->imm_data & 15);
            host_arm_BFI(block, dest_reg, REG_TEMP, 0, 16);
        }
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size)) {
        if ((uop->imm_data & 7) == 0) {
            if (src_reg != dest_reg)
                fatal("ROR_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
        } else {
            /* Duplicate the byte into bits 8-15, shift right, insert. */
            host_arm_UXTB(block, REG_TEMP, src_reg, 0);
            host_arm_ORR_REG_LSL(block, REG_TEMP, REG_TEMP, REG_TEMP, 8);
            host_arm_MOV_REG_LSR(block, REG_TEMP, REG_TEMP, uop->imm_data & 7);
            host_arm_BFI(block, dest_reg, REG_TEMP, 0, 8);
        }
    } else if (REG_IS_BH(dest_size) && REG_IS_BH(src_size)) {
        if ((uop->imm_data & 7) == 0) {
            if (src_reg != dest_reg)
                fatal("ROR_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
        } else {
            /* High-byte variant: operate on bits 8-15. */
            host_arm_UXTB(block, REG_TEMP, src_reg, 8);
            host_arm_ORR_REG_LSL(block, REG_TEMP, REG_TEMP, REG_TEMP, 8);
            host_arm_MOV_REG_LSR(block, REG_TEMP, REG_TEMP, uop->imm_data & 7);
            host_arm_BFI(block, dest_reg, REG_TEMP, 8, 8);
        }
    } else
        fatal("ROR_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
    return 0;
}
static int
codegen_SAR(codeblock_t *block, uop_t *uop)
{
    /* Emit an x86 SAR (arithmetic shift right) by a count in a register.
       Sub-32-bit operands are first shifted to the top of the 32-bit
       register so the host ASR sign-extends from the correct bit; the
       result is then extracted from the top bits and merged back. */
    int dest_reg  = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg   = HOST_REG_GET(uop->src_reg_a_real);
    int shift_reg = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size  = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_L(dest_size) && REG_IS_L(src_size)) {
        host_arm_MOV_REG_ASR_REG(block, dest_reg, src_reg, shift_reg);
    } else if (REG_IS_W(dest_size) && REG_IS_W(src_size)) {
        /* (src << 16) ASR n; the result halfword lands in bits 16-31,
           which the rotated UXTH extracts. */
        host_arm_MOV_REG_LSL(block, REG_TEMP, src_reg, 16);
        host_arm_MOV_REG_ASR_REG(block, REG_TEMP, REG_TEMP, shift_reg);
        host_arm_UXTH(block, REG_TEMP, REG_TEMP, 16);
        host_arm_BFI(block, dest_reg, REG_TEMP, 0, 16);
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size)) {
        /* (src << 24) ASR n; the result byte lands in bits 24-31,
           which the rotated UXTB extracts. */
        host_arm_MOV_REG_LSL(block, REG_TEMP, src_reg, 24);
        host_arm_MOV_REG_ASR_REG(block, REG_TEMP, REG_TEMP, shift_reg);
        host_arm_UXTB(block, REG_TEMP, REG_TEMP, 24);
        host_arm_BFI(block, dest_reg, REG_TEMP, 0, 8);
    } else if (REG_IS_BH(dest_size) && REG_IS_BH(src_size)) {
        /* High-byte variant: shifting left by 16 places the source's
           bits 8-15 at the top (24-31); result goes back to bits 8-15. */
        host_arm_MOV_REG_LSL(block, REG_TEMP, src_reg, 16);
        host_arm_MOV_REG_ASR_REG(block, REG_TEMP, REG_TEMP, shift_reg);
        host_arm_UXTB(block, REG_TEMP, REG_TEMP, 24);
        host_arm_BFI(block, dest_reg, REG_TEMP, 8, 8);
    } else
        fatal("SAR %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
    return 0;
}
static int
codegen_SAR_IMM(codeblock_t *block, uop_t *uop)
{
    /* Emit an x86 SAR by an immediate count (uop->imm_data).
       Same top-of-register technique as codegen_SAR: shift the operand to
       bit 31, ASR, then extract the result from the top bits. */
    int dest_reg  = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg   = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size  = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_L(dest_size) && REG_IS_L(src_size)) {
        host_arm_MOV_REG_ASR(block, dest_reg, src_reg, uop->imm_data);
    } else if (REG_IS_W(dest_size) && REG_IS_W(src_size)) {
        /* (src << 16) ASR n; extract bits 16-31 and merge. */
        host_arm_MOV_REG_LSL(block, REG_TEMP, src_reg, 16);
        host_arm_MOV_REG_ASR(block, REG_TEMP, REG_TEMP, uop->imm_data);
        host_arm_UXTH(block, REG_TEMP, REG_TEMP, 16);
        host_arm_BFI(block, dest_reg, REG_TEMP, 0, 16);
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size)) {
        /* (src << 24) ASR n; extract bits 24-31 and merge. */
        host_arm_MOV_REG_LSL(block, REG_TEMP, src_reg, 24);
        host_arm_MOV_REG_ASR(block, REG_TEMP, REG_TEMP, uop->imm_data);
        host_arm_UXTB(block, REG_TEMP, REG_TEMP, 24);
        host_arm_BFI(block, dest_reg, REG_TEMP, 0, 8);
    } else if (REG_IS_BH(dest_size) && REG_IS_BH(src_size)) {
        /* High-byte variant: LSL 16 places bits 8-15 at the top;
           result is written back to bits 8-15. */
        host_arm_MOV_REG_LSL(block, REG_TEMP, src_reg, 16);
        host_arm_MOV_REG_ASR(block, REG_TEMP, REG_TEMP, uop->imm_data);
        host_arm_UXTB(block, REG_TEMP, REG_TEMP, 24);
        host_arm_BFI(block, dest_reg, REG_TEMP, 8, 8);
    } else
        fatal("SAR_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
    return 0;
}
static int
codegen_SHL(codeblock_t *block, uop_t *uop)
{
    /* Emit an x86 SHL by a count held in a register; partial-width results
       are merged back into the destination with BFI. */
    const int d     = HOST_REG_GET(uop->dest_reg_a_real);
    const int s     = HOST_REG_GET(uop->src_reg_a_real);
    const int count = HOST_REG_GET(uop->src_reg_b_real);
    const int dsize = IREG_GET_SIZE(uop->dest_reg_a_real);
    const int ssize = IREG_GET_SIZE(uop->src_reg_a_real);

    if (REG_IS_L(dsize) && REG_IS_L(ssize)) {
        host_arm_MOV_REG_LSL_REG(block, d, s, count);
    } else if (REG_IS_W(dsize) && REG_IS_W(ssize)) {
        /* Shift in a temp, then insert only the low 16 bits. */
        host_arm_MOV_REG_LSL_REG(block, REG_TEMP, s, count);
        host_arm_BFI(block, d, REG_TEMP, 0, 16);
    } else if (REG_IS_B(dsize) && REG_IS_B(ssize)) {
        /* Shift in a temp, then insert only the low byte. */
        host_arm_MOV_REG_LSL_REG(block, REG_TEMP, s, count);
        host_arm_BFI(block, d, REG_TEMP, 0, 8);
    } else if (REG_IS_BH(dsize) && REG_IS_BH(ssize)) {
        /* High-byte variant: extract bits 8-15 first, shift, write back. */
        host_arm_UXTB(block, REG_TEMP, s, 8);
        host_arm_MOV_REG_LSL_REG(block, REG_TEMP, REG_TEMP, count);
        host_arm_BFI(block, d, REG_TEMP, 8, 8);
    } else {
        fatal("SHL %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
    }

    return 0;
}
static int
codegen_SHL_IMM(codeblock_t *block, uop_t *uop)
{
    /* Emit an x86 SHL by an immediate count (uop->imm_data); partial-width
       results are merged back into the destination with BFI. */
    const int d     = HOST_REG_GET(uop->dest_reg_a_real);
    const int s     = HOST_REG_GET(uop->src_reg_a_real);
    const int dsize = IREG_GET_SIZE(uop->dest_reg_a_real);
    const int ssize = IREG_GET_SIZE(uop->src_reg_a_real);

    if (REG_IS_L(dsize) && REG_IS_L(ssize)) {
        host_arm_MOV_REG_LSL(block, d, s, uop->imm_data);
    } else if (REG_IS_W(dsize) && REG_IS_W(ssize)) {
        /* Shift in a temp, then insert only the low 16 bits. */
        host_arm_MOV_REG_LSL(block, REG_TEMP, s, uop->imm_data);
        host_arm_BFI(block, d, REG_TEMP, 0, 16);
    } else if (REG_IS_B(dsize) && REG_IS_B(ssize)) {
        /* Shift in a temp, then insert only the low byte. */
        host_arm_MOV_REG_LSL(block, REG_TEMP, s, uop->imm_data);
        host_arm_BFI(block, d, REG_TEMP, 0, 8);
    } else if (REG_IS_BH(dsize) && REG_IS_BH(ssize)) {
        /* High-byte variant: extract bits 8-15 first, shift, write back. */
        host_arm_UXTB(block, REG_TEMP, s, 8);
        host_arm_MOV_REG_LSL(block, REG_TEMP, REG_TEMP, uop->imm_data);
        host_arm_BFI(block, d, REG_TEMP, 8, 8);
    } else {
        fatal("SHL_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
    }

    return 0;
}
static int
codegen_SHR(codeblock_t *block, uop_t *uop)
{
    /* Emit an x86 SHR (logical shift right) by a count held in a register.
       Sub-32-bit sources are zero-extended first so no stray high bits
       shift into the result; results are merged back with BFI. */
    const int d     = HOST_REG_GET(uop->dest_reg_a_real);
    const int s     = HOST_REG_GET(uop->src_reg_a_real);
    const int count = HOST_REG_GET(uop->src_reg_b_real);
    const int dsize = IREG_GET_SIZE(uop->dest_reg_a_real);
    const int ssize = IREG_GET_SIZE(uop->src_reg_a_real);

    if (REG_IS_L(dsize) && REG_IS_L(ssize)) {
        host_arm_MOV_REG_LSR_REG(block, d, s, count);
    } else if (REG_IS_W(dsize) && REG_IS_W(ssize)) {
        host_arm_UXTH(block, REG_TEMP, s, 0);
        host_arm_MOV_REG_LSR_REG(block, REG_TEMP, REG_TEMP, count);
        host_arm_BFI(block, d, REG_TEMP, 0, 16);
    } else if (REG_IS_B(dsize) && REG_IS_B(ssize)) {
        host_arm_UXTB(block, REG_TEMP, s, 0);
        host_arm_MOV_REG_LSR_REG(block, REG_TEMP, REG_TEMP, count);
        host_arm_BFI(block, d, REG_TEMP, 0, 8);
    } else if (REG_IS_BH(dsize) && REG_IS_BH(ssize)) {
        /* High-byte variant: source byte comes from bits 8-15 and the
           result is written back there. */
        host_arm_UXTB(block, REG_TEMP, s, 8);
        host_arm_MOV_REG_LSR_REG(block, REG_TEMP, REG_TEMP, count);
        host_arm_BFI(block, d, REG_TEMP, 8, 8);
    } else {
        fatal("SHR %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
    }

    return 0;
}
static int
codegen_SHR_IMM(codeblock_t *block, uop_t *uop)
{
    /* Emit an x86 SHR by an immediate count (uop->imm_data). Sub-32-bit
       sources are zero-extended before the shift; results merged with BFI. */
    const int d     = HOST_REG_GET(uop->dest_reg_a_real);
    const int s     = HOST_REG_GET(uop->src_reg_a_real);
    const int dsize = IREG_GET_SIZE(uop->dest_reg_a_real);
    const int ssize = IREG_GET_SIZE(uop->src_reg_a_real);

    if (REG_IS_L(dsize) && REG_IS_L(ssize)) {
        host_arm_MOV_REG_LSR(block, d, s, uop->imm_data);
    } else if (REG_IS_W(dsize) && REG_IS_W(ssize)) {
        host_arm_UXTH(block, REG_TEMP, s, 0);
        host_arm_MOV_REG_LSR(block, REG_TEMP, REG_TEMP, uop->imm_data);
        host_arm_BFI(block, d, REG_TEMP, 0, 16);
    } else if (REG_IS_B(dsize) && REG_IS_B(ssize)) {
        host_arm_UXTB(block, REG_TEMP, s, 0);
        host_arm_MOV_REG_LSR(block, REG_TEMP, REG_TEMP, uop->imm_data);
        host_arm_BFI(block, d, REG_TEMP, 0, 8);
    } else if (REG_IS_BH(dsize) && REG_IS_BH(ssize)) {
        /* High-byte variant: source byte comes from bits 8-15 and the
           result is written back there. */
        host_arm_UXTB(block, REG_TEMP, s, 8);
        host_arm_MOV_REG_LSR(block, REG_TEMP, REG_TEMP, uop->imm_data);
        host_arm_BFI(block, d, REG_TEMP, 8, 8);
    } else {
        fatal("SHR_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
    }

    return 0;
}
static int
codegen_STORE_PTR_IMM(codeblock_t *block, uop_t *uop)
{
    /* Emit a 32-bit store of the immediate uop->imm_data to the host
       address uop->p, which must be addressable as an offset from
       cpu_state (STR with a cpu_state-relative immediate offset). */
    host_arm_MOV_IMM(block, REG_R0, uop->imm_data);
    if (in_range(uop->p, &cpu_state))
        host_arm_STR_IMM(block, REG_R0, REG_CPUSTATE, (uintptr_t) uop->p - (uintptr_t) &cpu_state);
    else
        fatal("codegen_STORE_PTR_IMM - not in range\n");
    return 0;
}
static int
codegen_STORE_PTR_IMM_8(codeblock_t *block, uop_t *uop)
{
    /* Emit an 8-bit store of the immediate uop->imm_data to the host
       address uop->p, which must be addressable as an offset from
       cpu_state (STRB with a cpu_state-relative immediate offset). */
    host_arm_MOV_IMM(block, REG_R0, uop->imm_data);
    if (in_range(uop->p, &cpu_state))
        host_arm_STRB_IMM(block, REG_R0, REG_CPUSTATE, (uintptr_t) uop->p - (uintptr_t) &cpu_state);
    else
        /* Fixed: error message previously reported codegen_STORE_PTR_IMM,
           making out-of-range failures here look like the 32-bit variant. */
        fatal("codegen_STORE_PTR_IMM_8 - not in range\n");
    return 0;
}
static int
codegen_SUB(codeblock_t *block, uop_t *uop)
{
    /* Emit dest = src_a - src_b for every supported combination of 32-bit,
       16-bit, low-byte (B) and high-byte (BH, bits 8-15) operands.
       High-byte sources are aligned via an LSR-8 shifted operand; partial
       results are merged into the destination with BFI. */
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_a  = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_L(dest_size) && REG_IS_L(src_size_a) && REG_IS_L(src_size_b)) {
        host_arm_SUB_REG_LSL(block, dest_reg, src_reg_a, src_reg_b, 0);
    } else if (REG_IS_W(dest_size) && REG_IS_W(src_size_a) && REG_IS_W(src_size_b)) {
        host_arm_SUB_REG_LSL(block, REG_TEMP, src_reg_a, src_reg_b, 0);
        host_arm_BFI(block, dest_reg, REG_TEMP, 0, 16);
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size_a) && REG_IS_B(src_size_b)) {
        host_arm_SUB_REG_LSL(block, REG_TEMP, src_reg_a, src_reg_b, 0);
        host_arm_BFI(block, dest_reg, REG_TEMP, 0, 8);
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size_a) && REG_IS_BH(src_size_b)) {
        /* temp = src_a - (src_b >> 8): aligns src_b's high byte to bit 0. */
        host_arm_SUB_REG_LSR(block, REG_TEMP, src_reg_a, src_reg_b, 8);
        host_arm_BFI(block, dest_reg, REG_TEMP, 0, 8);
    } else if (REG_IS_B(dest_size) && REG_IS_BH(src_size_a) && REG_IS_B(src_size_b)) {
        /* RSB with shifted operand: temp = (src_a >> 8) - src_b. */
        host_arm_RSB_REG_LSR(block, REG_TEMP, src_reg_b, src_reg_a, 8);
        host_arm_BFI(block, dest_reg, REG_TEMP, 0, 8);
    } else if (REG_IS_B(dest_size) && REG_IS_BH(src_size_a) && REG_IS_BH(src_size_b)) {
        /* Both operands high-byte: subtract in place (the byte of interest
           stays at bits 8-15), then shift the result down to bit 0. */
        host_arm_SUB_REG_LSL(block, REG_TEMP, src_reg_a, src_reg_b, 0);
        host_arm_MOV_REG_LSR(block, REG_TEMP, REG_TEMP, 8);
        host_arm_BFI(block, dest_reg, REG_TEMP, 0, 8);
    } else if (REG_IS_BH(dest_size) && REG_IS_BH(src_size_a) && REG_IS_B(src_size_b)) {
        /* temp = (src_a >> 8) - src_b, written back to bits 8-15. */
        host_arm_RSB_REG_LSR(block, REG_TEMP, src_reg_b, src_reg_a, 8);
        host_arm_BFI(block, dest_reg, REG_TEMP, 8, 8);
    } else if (REG_IS_BH(dest_size) && REG_IS_BH(src_size_a) && REG_IS_BH(src_size_b)) {
        /* temp = (src_a >> 8) - (src_b >> 8), written back to bits 8-15. */
        host_arm_MOV_REG_LSR(block, REG_TEMP, src_reg_a, 8);
        host_arm_SUB_REG_LSR(block, REG_TEMP, REG_TEMP, src_reg_b, 8);
        host_arm_BFI(block, dest_reg, REG_TEMP, 8, 8);
    } else
        fatal("SUB %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
#    if 0
    host_arm_SUB_REG_LSL(block, uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real, 0);
    return 0;
#    endif
}
static int
codegen_SUB_IMM(codeblock_t *block, uop_t *uop)
{
    /* Emit dest = src - imm_data. For high-byte (BH) sources the immediate
       is pre-shifted by 8 so the subtraction lines up with bits 8-15; the
       result is then shifted back down before merging. */
    int dest_reg  = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg   = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size  = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_L(dest_size) && REG_IS_L(src_size)) {
        host_arm_SUB_IMM(block, dest_reg, src_reg, uop->imm_data);
    } else if (REG_IS_W(dest_size) && REG_IS_W(src_size)) {
        /* Subtract on the full register, keep only the low 16 bits. */
        host_arm_SUB_IMM(block, REG_TEMP, src_reg, uop->imm_data);
        host_arm_BFI(block, dest_reg, REG_TEMP, 0, 16);
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size)) {
        /* Subtract on the full register, keep only the low byte. */
        host_arm_SUB_IMM(block, REG_TEMP, src_reg, uop->imm_data);
        host_arm_BFI(block, dest_reg, REG_TEMP, 0, 8);
    } else if (REG_IS_B(dest_size) && REG_IS_BH(src_size)) {
        /* High-byte source, low-byte destination: subtract (imm << 8),
           then shift the result byte down to bit 0. */
        host_arm_SUB_IMM(block, REG_TEMP, src_reg, uop->imm_data << 8);
        host_arm_MOV_REG_LSR(block, REG_TEMP, REG_TEMP, 8);
        host_arm_BFI(block, dest_reg, REG_TEMP, 0, 8);
    } else if (REG_IS_BH(dest_size) && REG_IS_BH(src_size)) {
        /* High-byte source and destination: same trick, result written
           back to bits 8-15. */
        host_arm_SUB_IMM(block, REG_TEMP, src_reg, uop->imm_data << 8);
        host_arm_MOV_REG_LSR(block, REG_TEMP, REG_TEMP, 8);
        host_arm_BFI(block, dest_reg, REG_TEMP, 8, 8);
    } else
        fatal("SUB_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
    return 0;
}
static int
codegen_TEST_JNS_DEST(codeblock_t *block, uop_t *uop)
{
    /* Test the sign bit of the source register and emit a forward branch
       taken when the sign bit is clear (TST followed by BEQ). The branch
       target is patched later; its location is saved in uop->p. */
    int src_reg  = HOST_REG_GET(uop->src_reg_a_real);
    int src_size = IREG_GET_SIZE(uop->src_reg_a_real);

    if (REG_IS_L(src_size)) {
        /* 1u << 31: the original 1 << 31 left-shifts into the sign bit of a
           signed int, which is undefined behavior in C. */
        host_arm_TST_IMM(block, src_reg, 1u << 31);
    } else if (REG_IS_W(src_size)) {
        host_arm_TST_IMM(block, src_reg, 1u << 15);
    } else if (REG_IS_B(src_size)) {
        host_arm_TST_IMM(block, src_reg, 1u << 7);
    } else
        fatal("TEST_JNS_DEST %02x\n", uop->src_reg_a_real);

    uop->p = host_arm_BEQ_(block);

    return 0;
}
static int
codegen_TEST_JS_DEST(codeblock_t *block, uop_t *uop)
{
    /* Test the sign bit of the source register and emit a forward branch
       taken when the sign bit is set (TST followed by BNE). The branch
       target is patched later; its location is saved in uop->p. */
    int src_reg  = HOST_REG_GET(uop->src_reg_a_real);
    int src_size = IREG_GET_SIZE(uop->src_reg_a_real);

    if (REG_IS_L(src_size)) {
        /* 1u << 31: the original 1 << 31 left-shifts into the sign bit of a
           signed int, which is undefined behavior in C. */
        host_arm_TST_IMM(block, src_reg, 1u << 31);
    } else if (REG_IS_W(src_size)) {
        host_arm_TST_IMM(block, src_reg, 1u << 15);
    } else if (REG_IS_B(src_size)) {
        host_arm_TST_IMM(block, src_reg, 1u << 7);
    } else
        fatal("TEST_JS_DEST %02x\n", uop->src_reg_a_real);

    uop->p = host_arm_BNE_(block);

    return 0;
}
static int
codegen_XOR(codeblock_t *block, uop_t *uop)
{
    /* Emit dest = src_a ^ src_b. MMX (quad) operands use VEOR. For the
       partial-width integer cases, the other operand is zero-extended (and
       shifted for high-byte positions) so the EOR flips only the bits of
       the target sub-register; these cases therefore require that the
       destination aliases src_a (dest_reg == src_reg_a), since the
       untouched bits come from src_a itself. */
    int dest_reg   = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg_a  = HOST_REG_GET(uop->src_reg_a_real);
    int src_reg_b  = HOST_REG_GET(uop->src_reg_b_real);
    int dest_size  = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size_a = IREG_GET_SIZE(uop->src_reg_a_real);
    int src_size_b = IREG_GET_SIZE(uop->src_reg_b_real);
    if (REG_IS_Q(dest_size) && REG_IS_Q(src_size_a) && REG_IS_Q(src_size_b)) {
        host_arm_VEOR_D(block, dest_reg, src_reg_a, src_reg_b);
    } else if (REG_IS_L(dest_size) && REG_IS_L(src_size_a) && REG_IS_L(src_size_b)) {
        host_arm_EOR_REG_LSL(block, dest_reg, src_reg_a, src_reg_b, 0);
    } else if (REG_IS_W(dest_size) && REG_IS_W(src_size_a) && REG_IS_W(src_size_b) && dest_reg == src_reg_a) {
        /* Zero-extend src_b's halfword so only bits 0-15 of dest change. */
        host_arm_UXTH(block, REG_TEMP, src_reg_b, 0);
        host_arm_EOR_REG_LSL(block, dest_reg, src_reg_a, REG_TEMP, 0);
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size_a) && REG_IS_B(src_size_b) && dest_reg == src_reg_a) {
        /* Zero-extend src_b's low byte so only bits 0-7 of dest change. */
        host_arm_UXTB(block, REG_TEMP, src_reg_b, 0);
        host_arm_EOR_REG_LSL(block, dest_reg, src_reg_a, REG_TEMP, 0);
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size_a) && REG_IS_BH(src_size_b) && dest_reg == src_reg_a) {
        /* src_b is a high byte: extract bits 8-15 down to bit 0 first. */
        host_arm_UXTB(block, REG_TEMP, src_reg_b, 8);
        host_arm_EOR_REG_LSL(block, dest_reg, src_reg_a, REG_TEMP, 0);
    } else if (REG_IS_BH(dest_size) && REG_IS_BH(src_size_a) && REG_IS_B(src_size_b) && dest_reg == src_reg_a) {
        /* Destination is a high byte: shift the operand up to bits 8-15. */
        host_arm_UXTB(block, REG_TEMP, src_reg_b, 0);
        host_arm_EOR_REG_LSL(block, dest_reg, src_reg_a, REG_TEMP, 8);
    } else if (REG_IS_BH(dest_size) && REG_IS_BH(src_size_a) && REG_IS_BH(src_size_b) && dest_reg == src_reg_a) {
        /* Both high bytes: extract src_b's, then shift it back up. */
        host_arm_UXTB(block, REG_TEMP, src_reg_b, 8);
        host_arm_EOR_REG_LSL(block, dest_reg, src_reg_a, REG_TEMP, 8);
    } else
        fatal("XOR %02x %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real, uop->src_reg_b_real);
    return 0;
}
static int
codegen_XOR_IMM(codeblock_t *block, uop_t *uop)
{
    /* Emit dest = src ^ imm_data. The partial-width cases EOR the full
       register directly, so they require dest to alias src, and assume
       imm_data has no bits outside the target sub-register — bits beyond
       it would be flipped too (presumably guaranteed by the uop emitters;
       TODO confirm). BH shifts the immediate up to bits 8-15. */
    int dest_reg  = HOST_REG_GET(uop->dest_reg_a_real);
    int src_reg   = HOST_REG_GET(uop->src_reg_a_real);
    int dest_size = IREG_GET_SIZE(uop->dest_reg_a_real);
    int src_size  = IREG_GET_SIZE(uop->src_reg_a_real);
    if (REG_IS_L(dest_size) && REG_IS_L(src_size)) {
        host_arm_EOR_IMM(block, dest_reg, src_reg, uop->imm_data);
    } else if (REG_IS_W(dest_size) && REG_IS_W(src_size) && dest_reg == src_reg) {
        host_arm_EOR_IMM(block, dest_reg, src_reg, uop->imm_data);
    } else if (REG_IS_B(dest_size) && REG_IS_B(src_size) && dest_reg == src_reg) {
        host_arm_EOR_IMM(block, dest_reg, src_reg, uop->imm_data);
    } else if (REG_IS_BH(dest_size) && REG_IS_BH(src_size) && dest_reg == src_reg) {
        /* High-byte destination: align the immediate with bits 8-15. */
        host_arm_EOR_IMM(block, dest_reg, src_reg, uop->imm_data << 8);
    } else
        fatal("XOR_IMM %02x %02x\n", uop->dest_reg_a_real, uop->src_reg_a_real);
    return 0;
}
const uOpFn uop_handlers[UOP_MAX] = {
[UOP_CALL_FUNC & UOP_MASK] = codegen_CALL_FUNC,
[UOP_CALL_FUNC_RESULT &
UOP_MASK]
= codegen_CALL_FUNC_RESULT,
[UOP_CALL_INSTRUCTION_FUNC &
UOP_MASK]
= codegen_CALL_INSTRUCTION_FUNC,
[UOP_JMP &
UOP_MASK]
= codegen_JMP,
[UOP_LOAD_SEG &
UOP_MASK]
= codegen_LOAD_SEG,
[UOP_LOAD_FUNC_ARG_0 &
UOP_MASK]
= codegen_LOAD_FUNC_ARG0,
[UOP_LOAD_FUNC_ARG_1 &
UOP_MASK]
= codegen_LOAD_FUNC_ARG1,
[UOP_LOAD_FUNC_ARG_2 &
UOP_MASK]
= codegen_LOAD_FUNC_ARG2,
[UOP_LOAD_FUNC_ARG_3 &
UOP_MASK]
= codegen_LOAD_FUNC_ARG3,
[UOP_LOAD_FUNC_ARG_0_IMM &
UOP_MASK]
= codegen_LOAD_FUNC_ARG0_IMM,
[UOP_LOAD_FUNC_ARG_1_IMM &
UOP_MASK]
= codegen_LOAD_FUNC_ARG1_IMM,
[UOP_LOAD_FUNC_ARG_2_IMM &
UOP_MASK]
= codegen_LOAD_FUNC_ARG2_IMM,
[UOP_LOAD_FUNC_ARG_3_IMM &
UOP_MASK]
= codegen_LOAD_FUNC_ARG3_IMM,
[UOP_STORE_P_IMM &
UOP_MASK]
= codegen_STORE_PTR_IMM,
[UOP_STORE_P_IMM_8 &
UOP_MASK]
= codegen_STORE_PTR_IMM_8,
[UOP_MEM_LOAD_ABS &
UOP_MASK]
= codegen_MEM_LOAD_ABS,
[UOP_MEM_LOAD_REG &
UOP_MASK]
= codegen_MEM_LOAD_REG,
[UOP_MEM_LOAD_SINGLE &
UOP_MASK]
= codegen_MEM_LOAD_SINGLE,
[UOP_MEM_LOAD_DOUBLE &
UOP_MASK]
= codegen_MEM_LOAD_DOUBLE,
[UOP_MEM_STORE_ABS &
UOP_MASK]
= codegen_MEM_STORE_ABS,
[UOP_MEM_STORE_REG &
UOP_MASK]
= codegen_MEM_STORE_REG,
[UOP_MEM_STORE_IMM_8 &
UOP_MASK]
= codegen_MEM_STORE_IMM_8,
[UOP_MEM_STORE_IMM_16 &
UOP_MASK]
= codegen_MEM_STORE_IMM_16,
[UOP_MEM_STORE_IMM_32 &
UOP_MASK]
= codegen_MEM_STORE_IMM_32,
[UOP_MEM_STORE_SINGLE &
UOP_MASK]
= codegen_MEM_STORE_SINGLE,
[UOP_MEM_STORE_DOUBLE &
UOP_MASK]
= codegen_MEM_STORE_DOUBLE,
[UOP_MOV &
UOP_MASK]
= codegen_MOV,
[UOP_MOV_PTR &
UOP_MASK]
= codegen_MOV_PTR,
[UOP_MOV_IMM &
UOP_MASK]
= codegen_MOV_IMM,
[UOP_MOVSX &
UOP_MASK]
= codegen_MOVSX,
[UOP_MOVZX &
UOP_MASK]
= codegen_MOVZX,
[UOP_MOV_DOUBLE_INT &
UOP_MASK]
= codegen_MOV_DOUBLE_INT,
[UOP_MOV_INT_DOUBLE &
UOP_MASK]
= codegen_MOV_INT_DOUBLE,
[UOP_MOV_INT_DOUBLE_64 &
UOP_MASK]
= codegen_MOV_INT_DOUBLE_64,
[UOP_MOV_REG_PTR &
UOP_MASK]
= codegen_MOV_REG_PTR,
[UOP_MOVZX_REG_PTR_8 &
UOP_MASK]
= codegen_MOVZX_REG_PTR_8,
[UOP_MOVZX_REG_PTR_16 &
UOP_MASK]
= codegen_MOVZX_REG_PTR_16,
[UOP_ADD &
UOP_MASK]
= codegen_ADD,
[UOP_ADD_IMM &
UOP_MASK]
= codegen_ADD_IMM,
[UOP_ADD_LSHIFT &
UOP_MASK]
= codegen_ADD_LSHIFT,
[UOP_AND &
UOP_MASK]
= codegen_AND,
[UOP_AND_IMM &
UOP_MASK]
= codegen_AND_IMM,
[UOP_ANDN &
UOP_MASK]
= codegen_ANDN,
[UOP_OR &
UOP_MASK]
= codegen_OR,
[UOP_OR_IMM &
UOP_MASK]
= codegen_OR_IMM,
[UOP_SUB &
UOP_MASK]
= codegen_SUB,
[UOP_SUB_IMM &
UOP_MASK]
= codegen_SUB_IMM,
[UOP_XOR &
UOP_MASK]
= codegen_XOR,
[UOP_XOR_IMM &
UOP_MASK]
= codegen_XOR_IMM,
[UOP_SAR &
UOP_MASK]
= codegen_SAR,
[UOP_SAR_IMM &
UOP_MASK]
= codegen_SAR_IMM,
[UOP_SHL &
UOP_MASK]
= codegen_SHL,
[UOP_SHL_IMM &
UOP_MASK]
= codegen_SHL_IMM,
[UOP_SHR &
UOP_MASK]
= codegen_SHR,
[UOP_SHR_IMM &
UOP_MASK]
= codegen_SHR_IMM,
[UOP_ROL &
UOP_MASK]
= codegen_ROL,
[UOP_ROL_IMM &
UOP_MASK]
= codegen_ROL_IMM,
[UOP_ROR &
UOP_MASK]
= codegen_ROR,
[UOP_ROR_IMM &
UOP_MASK]
= codegen_ROR_IMM,
[UOP_CMP_IMM_JZ &
UOP_MASK]
= codegen_CMP_IMM_JZ,
[UOP_CMP_JB &
UOP_MASK]
= codegen_CMP_JB,
[UOP_CMP_JNBE &
UOP_MASK]
= codegen_CMP_JNBE,
[UOP_CMP_JNB_DEST &
UOP_MASK]
= codegen_CMP_JNB_DEST,
[UOP_CMP_JNBE_DEST &
UOP_MASK]
= codegen_CMP_JNBE_DEST,
[UOP_CMP_JNL_DEST &
UOP_MASK]
= codegen_CMP_JNL_DEST,
[UOP_CMP_JNLE_DEST &
UOP_MASK]
= codegen_CMP_JNLE_DEST,
[UOP_CMP_JNO_DEST &
UOP_MASK]
= codegen_CMP_JNO_DEST,
[UOP_CMP_JNZ_DEST &
UOP_MASK]
= codegen_CMP_JNZ_DEST,
[UOP_CMP_JB_DEST &
UOP_MASK]
= codegen_CMP_JB_DEST,
[UOP_CMP_JBE_DEST &
UOP_MASK]
= codegen_CMP_JBE_DEST,
[UOP_CMP_JL_DEST &
UOP_MASK]
= codegen_CMP_JL_DEST,
[UOP_CMP_JLE_DEST &
UOP_MASK]
= codegen_CMP_JLE_DEST,
[UOP_CMP_JO_DEST &
UOP_MASK]
= codegen_CMP_JO_DEST,
[UOP_CMP_JZ_DEST &
UOP_MASK]
= codegen_CMP_JZ_DEST,
[UOP_CMP_IMM_JNZ_DEST &
UOP_MASK]
= codegen_CMP_IMM_JNZ_DEST,
[UOP_CMP_IMM_JZ_DEST &
UOP_MASK]
= codegen_CMP_IMM_JZ_DEST,
[UOP_TEST_JNS_DEST &
UOP_MASK]
= codegen_TEST_JNS_DEST,
[UOP_TEST_JS_DEST &
UOP_MASK]
= codegen_TEST_JS_DEST,
[UOP_FP_ENTER &
UOP_MASK]
= codegen_FP_ENTER,
[UOP_MMX_ENTER &
UOP_MASK]
= codegen_MMX_ENTER,
[UOP_FADD &
UOP_MASK]
= codegen_FADD,
[UOP_FCOM &
UOP_MASK]
= codegen_FCOM,
[UOP_FDIV &
UOP_MASK]
= codegen_FDIV,
[UOP_FMUL &
UOP_MASK]
= codegen_FMUL,
[UOP_FSUB &
UOP_MASK]
= codegen_FSUB,
[UOP_FABS &
UOP_MASK]
= codegen_FABS,
[UOP_FCHS &
UOP_MASK]
= codegen_FCHS,
[UOP_FSQRT &
UOP_MASK]
= codegen_FSQRT,
[UOP_FTST &
UOP_MASK]
= codegen_FTST,
[UOP_PACKSSWB &
UOP_MASK]
= codegen_PACKSSWB,
[UOP_PACKSSDW &
UOP_MASK]
= codegen_PACKSSDW,
[UOP_PACKUSWB &
UOP_MASK]
= codegen_PACKUSWB,
[UOP_PADDB &
UOP_MASK]
= codegen_PADDB,
[UOP_PADDW &
UOP_MASK]
= codegen_PADDW,
[UOP_PADDD &
UOP_MASK]
= codegen_PADDD,
[UOP_PADDSB &
UOP_MASK]
= codegen_PADDSB,
[UOP_PADDSW &
UOP_MASK]
= codegen_PADDSW,
[UOP_PADDUSB &
UOP_MASK]
= codegen_PADDUSB,
[UOP_PADDUSW &
UOP_MASK]
= codegen_PADDUSW,
[UOP_PCMPEQB &
UOP_MASK]
= codegen_PCMPEQB,
[UOP_PCMPEQW &
UOP_MASK]
= codegen_PCMPEQW,
[UOP_PCMPEQD &
UOP_MASK]
= codegen_PCMPEQD,
[UOP_PCMPGTB &
UOP_MASK]
= codegen_PCMPGTB,
[UOP_PCMPGTW &
UOP_MASK]
= codegen_PCMPGTW,
[UOP_PCMPGTD &
UOP_MASK]
= codegen_PCMPGTD,
[UOP_PF2ID &
UOP_MASK]
= codegen_PF2ID,
[UOP_PFADD &
UOP_MASK]
= codegen_PFADD,
[UOP_PFCMPEQ &
UOP_MASK]
= codegen_PFCMPEQ,
[UOP_PFCMPGE &
UOP_MASK]
= codegen_PFCMPGE,
[UOP_PFCMPGT &
UOP_MASK]
= codegen_PFCMPGT,
[UOP_PFMAX &
UOP_MASK]
= codegen_PFMAX,
[UOP_PFMIN &
UOP_MASK]
= codegen_PFMIN,
[UOP_PFMUL &
UOP_MASK]
= codegen_PFMUL,
[UOP_PFRCP &
UOP_MASK]
= codegen_PFRCP,
[UOP_PFRSQRT &
UOP_MASK]
= codegen_PFRSQRT,
[UOP_PFSUB &
UOP_MASK]
= codegen_PFSUB,
[UOP_PI2FD &
UOP_MASK]
= codegen_PI2FD,
[UOP_PMADDWD &
UOP_MASK]
= codegen_PMADDWD,
[UOP_PMULHW &
UOP_MASK]
= codegen_PMULHW,
[UOP_PMULLW &
UOP_MASK]
= codegen_PMULLW,
[UOP_PSLLW_IMM &
UOP_MASK]
= codegen_PSLLW_IMM,
[UOP_PSLLD_IMM &
UOP_MASK]
= codegen_PSLLD_IMM,
[UOP_PSLLQ_IMM &
UOP_MASK]
= codegen_PSLLQ_IMM,
[UOP_PSRAW_IMM &
UOP_MASK]
= codegen_PSRAW_IMM,
[UOP_PSRAD_IMM &
UOP_MASK]
= codegen_PSRAD_IMM,
[UOP_PSRAQ_IMM &
UOP_MASK]
= codegen_PSRAQ_IMM,
[UOP_PSRLW_IMM &
UOP_MASK]
= codegen_PSRLW_IMM,
[UOP_PSRLD_IMM &
UOP_MASK]
= codegen_PSRLD_IMM,
[UOP_PSRLQ_IMM &
UOP_MASK]
= codegen_PSRLQ_IMM,
[UOP_PSUBB &
UOP_MASK]
= codegen_PSUBB,
[UOP_PSUBW &
UOP_MASK]
= codegen_PSUBW,
[UOP_PSUBD &
UOP_MASK]
= codegen_PSUBD,
[UOP_PSUBSB &
UOP_MASK]
= codegen_PSUBSB,
[UOP_PSUBSW &
UOP_MASK]
= codegen_PSUBSW,
[UOP_PSUBUSB &
UOP_MASK]
= codegen_PSUBUSB,
[UOP_PSUBUSW &
UOP_MASK]
= codegen_PSUBUSW,
[UOP_PUNPCKHBW &
UOP_MASK]
= codegen_PUNPCKHBW,
[UOP_PUNPCKHWD &
UOP_MASK]
= codegen_PUNPCKHWD,
[UOP_PUNPCKHDQ &
UOP_MASK]
= codegen_PUNPCKHDQ,
[UOP_PUNPCKLBW &
UOP_MASK]
= codegen_PUNPCKLBW,
[UOP_PUNPCKLWD &
UOP_MASK]
= codegen_PUNPCKLWD,
[UOP_PUNPCKLDQ &
UOP_MASK]
= codegen_PUNPCKLDQ,
[UOP_NOP_BARRIER &
UOP_MASK]
= codegen_NOP
};
/* Emit a byte load of *p (which must lie inside cpu_state) into host_reg.
   ARM LDRB takes the same 12-bit immediate offset as LDR, so the full
   in_range() window applies here — matching codegen_direct_write_8, which
   already uses in_range() with STRB.  The previous in_range_h() check
   needlessly rejected offsets that LDRB can encode. */
void
codegen_direct_read_8(codeblock_t *block, int host_reg, void *p)
{
    if (in_range(p, &cpu_state))
        host_arm_LDRB_IMM(block, host_reg, REG_CPUSTATE, (uintptr_t) p - (uintptr_t) &cpu_state);
    else
        fatal("codegen_direct_read_8 - not in range\n");
}
/* Emit a halfword load of *p into host_reg.  LDRH has a much smaller
   immediate range than LDR, so out-of-range offsets are materialized into
   R3 and added with a register-offset load instead. */
void
codegen_direct_read_16(codeblock_t *block, int host_reg, void *p)
{
    const uintptr_t off = (uintptr_t) p - (uintptr_t) &cpu_state;

    if (in_range_h(p, &cpu_state)) {
        host_arm_LDRH_IMM(block, host_reg, REG_CPUSTATE, off);
        return;
    }
    host_arm_MOV_IMM(block, REG_R3, off);
    host_arm_LDRH_REG(block, host_reg, REG_CPUSTATE, REG_R3);
}
/* Emit a 32-bit load of *p (which must lie inside cpu_state) into host_reg. */
void
codegen_direct_read_32(codeblock_t *block, int host_reg, void *p)
{
    if (!in_range(p, &cpu_state)) {
        fatal("codegen_direct_read_32 - not in range\n");
        return;
    }
    host_arm_LDR_IMM(block, host_reg, REG_CPUSTATE, (uintptr_t) p - (uintptr_t) &cpu_state);
}
/* Load a host-pointer-sized value from *p into host_reg.  On this backend a
   pointer is handled as a 32-bit quantity, so this simply delegates to the
   32-bit load. */
void
codegen_direct_read_pointer(codeblock_t *block, int host_reg, void *p)
{
    codegen_direct_read_32(block, host_reg, p);
}
/* Emit a 64-bit load of *p into a host D (double-width) register.
   No range check: VLDR's offset encoding is assumed to cover cpu_state. */
void
codegen_direct_read_64(codeblock_t *block, int host_reg, void *p)
{
    const uintptr_t off = (uintptr_t) p - (uintptr_t) &cpu_state;

    host_arm_VLDR_D(block, host_reg, REG_CPUSTATE, off);
}
/* Emit a double-precision load of *p into a host D register.
   Identical emission to codegen_direct_read_64. */
void
codegen_direct_read_double(codeblock_t *block, int host_reg, void *p)
{
    const uintptr_t off = (uintptr_t) p - (uintptr_t) &cpu_state;

    host_arm_VLDR_D(block, host_reg, REG_CPUSTATE, off);
}
/* Emit code to read one byte from the x87 per-register array at `base`,
   indexed by stack position: index = (TOP + reg_idx) & 7, with TOP reloaded
   from its host-stack slot.  The index is scaled by 8 (LSL 3), so entries in
   `base` are assumed 8 bytes apart -- NOTE(review): confirm stride against
   the actual array layout. */
void
codegen_direct_read_st_8(codeblock_t *block, int host_reg, void *base, int reg_idx)
{
    host_arm_LDR_IMM(block, REG_TEMP, REG_HOST_SP, IREG_TOP_diff_stack_offset); /* TEMP = stack-held TOP value */
    host_arm_ADD_IMM(block, REG_TEMP, REG_TEMP, reg_idx);                       /* TEMP += reg_idx */
    host_arm_AND_IMM(block, REG_TEMP, REG_TEMP, 7);                             /* wrap to 0..7 */
    host_arm_ADD_REG_LSL(block, REG_TEMP, REG_CPUSTATE, REG_TEMP, 3);           /* TEMP = &cpu_state + idx*8 */
    host_arm_LDRB_IMM(block, host_reg, REG_TEMP, (uintptr_t) base - (uintptr_t) &cpu_state);
}
/* Same addressing as codegen_direct_read_st_8, but loads a 64-bit value into
   a host D register (used for the 8-byte-stride ST/MM register file). */
void
codegen_direct_read_st_64(codeblock_t *block, int host_reg, void *base, int reg_idx)
{
    host_arm_LDR_IMM(block, REG_TEMP, REG_HOST_SP, IREG_TOP_diff_stack_offset);
    host_arm_ADD_IMM(block, REG_TEMP, REG_TEMP, reg_idx);
    host_arm_AND_IMM(block, REG_TEMP, REG_TEMP, 7);
    host_arm_ADD_REG_LSL(block, REG_TEMP, REG_CPUSTATE, REG_TEMP, 3);
    host_arm_VLDR_D(block, host_reg, REG_TEMP, (uintptr_t) base - (uintptr_t) &cpu_state);
}
/* Double-precision variant; emission is identical to the 64-bit read. */
void
codegen_direct_read_st_double(codeblock_t *block, int host_reg, void *base, int reg_idx)
{
    host_arm_LDR_IMM(block, REG_TEMP, REG_HOST_SP, IREG_TOP_diff_stack_offset);
    host_arm_ADD_IMM(block, REG_TEMP, REG_TEMP, reg_idx);
    host_arm_AND_IMM(block, REG_TEMP, REG_TEMP, 7);
    host_arm_ADD_REG_LSL(block, REG_TEMP, REG_CPUSTATE, REG_TEMP, 3);
    host_arm_VLDR_D(block, host_reg, REG_TEMP, (uintptr_t) base - (uintptr_t) &cpu_state);
}
/* Emit a byte store of host_reg into *p (which must lie inside cpu_state). */
void
codegen_direct_write_8(codeblock_t *block, void *p, int host_reg)
{
    if (!in_range(p, &cpu_state)) {
        fatal("codegen_direct_write_8 - not in range\n");
        return;
    }
    host_arm_STRB_IMM(block, host_reg, REG_CPUSTATE, (uintptr_t) p - (uintptr_t) &cpu_state);
}
/* Emit a halfword store of host_reg into *p.  STRH's immediate range is
   small, so large offsets go through R3 with a register-offset store. */
void
codegen_direct_write_16(codeblock_t *block, void *p, int host_reg)
{
    const uintptr_t off = (uintptr_t) p - (uintptr_t) &cpu_state;

    if (in_range_h(p, &cpu_state)) {
        host_arm_STRH_IMM(block, host_reg, REG_CPUSTATE, off);
        return;
    }
    host_arm_MOV_IMM(block, REG_R3, off);
    host_arm_STRH_REG(block, host_reg, REG_CPUSTATE, REG_R3);
}
/* Emit a 32-bit store of host_reg into *p (which must lie inside cpu_state). */
void
codegen_direct_write_32(codeblock_t *block, void *p, int host_reg)
{
    if (!in_range(p, &cpu_state)) {
        fatal("codegen_direct_write_32 - not in range\n");
        return;
    }
    host_arm_STR_IMM(block, host_reg, REG_CPUSTATE, (uintptr_t) p - (uintptr_t) &cpu_state);
}
/* Emit a 64-bit store of a host D register into *p. */
void
codegen_direct_write_64(codeblock_t *block, void *p, int host_reg)
{
    const uintptr_t off = (uintptr_t) p - (uintptr_t) &cpu_state;

    host_arm_VSTR_D(block, host_reg, REG_CPUSTATE, off);
}
/* Emit a double-precision store of a host D register into *p.
   Identical emission to codegen_direct_write_64. */
void
codegen_direct_write_double(codeblock_t *block, void *p, int host_reg)
{
    const uintptr_t off = (uintptr_t) p - (uintptr_t) &cpu_state;

    host_arm_VSTR_D(block, host_reg, REG_CPUSTATE, off);
}
/* Emit code to write one byte into the x87 per-register array at `base`,
   indexed by stack position (TOP + reg_idx) & 7; mirrors
   codegen_direct_read_st_8's addressing (index scaled by 8). */
void
codegen_direct_write_st_8(codeblock_t *block, void *base, int reg_idx, int host_reg)
{
    host_arm_LDR_IMM(block, REG_TEMP, REG_HOST_SP, IREG_TOP_diff_stack_offset); /* TEMP = stack-held TOP value */
    host_arm_ADD_IMM(block, REG_TEMP, REG_TEMP, reg_idx);                       /* TEMP += reg_idx */
    host_arm_AND_IMM(block, REG_TEMP, REG_TEMP, 7);                             /* wrap to 0..7 */
    host_arm_ADD_REG_LSL(block, REG_TEMP, REG_CPUSTATE, REG_TEMP, 3);           /* TEMP = &cpu_state + idx*8 */
    host_arm_STRB_IMM(block, host_reg, REG_TEMP, (uintptr_t) base - (uintptr_t) &cpu_state);
}
/* 64-bit variant: stores a host D register into the stack-indexed slot. */
void
codegen_direct_write_st_64(codeblock_t *block, void *base, int reg_idx, int host_reg)
{
    host_arm_LDR_IMM(block, REG_TEMP, REG_HOST_SP, IREG_TOP_diff_stack_offset);
    host_arm_ADD_IMM(block, REG_TEMP, REG_TEMP, reg_idx);
    host_arm_AND_IMM(block, REG_TEMP, REG_TEMP, 7);
    host_arm_ADD_REG_LSL(block, REG_TEMP, REG_CPUSTATE, REG_TEMP, 3);
    host_arm_VSTR_D(block, host_reg, REG_TEMP, (uintptr_t) base - (uintptr_t) &cpu_state);
}
/* Double-precision variant; emission is identical to the 64-bit write. */
void
codegen_direct_write_st_double(codeblock_t *block, void *base, int reg_idx, int host_reg)
{
    host_arm_LDR_IMM(block, REG_TEMP, REG_HOST_SP, IREG_TOP_diff_stack_offset);
    host_arm_ADD_IMM(block, REG_TEMP, REG_TEMP, reg_idx);
    host_arm_AND_IMM(block, REG_TEMP, REG_TEMP, 7);
    host_arm_ADD_REG_LSL(block, REG_TEMP, REG_CPUSTATE, REG_TEMP, 3);
    host_arm_VSTR_D(block, host_reg, REG_TEMP, (uintptr_t) base - (uintptr_t) &cpu_state);
}
/* Emit a pointer-sized (32-bit here) store of host_reg into *p. */
void
codegen_direct_write_ptr(codeblock_t *block, void *p, int host_reg)
{
    if (!in_range(p, &cpu_state)) {
        fatal("codegen_direct_write_ptr - not in range\n");
        return;
    }
    host_arm_STR_IMM(block, host_reg, REG_CPUSTATE, (uintptr_t) p - (uintptr_t) &cpu_state);
}
/* Emit a halfword load from the host stack frame into host_reg.
   LDRH's immediate is limited, hence the 0..255 offset window.
   Fixed the fatal() message, which previously named codegen_direct_read_32. */
void
codegen_direct_read_16_stack(codeblock_t *block, int host_reg, int stack_offset)
{
    if (stack_offset >= 0 && stack_offset < 256)
        host_arm_LDRH_IMM(block, host_reg, REG_HOST_SP, stack_offset);
    else
        fatal("codegen_direct_read_16_stack - not in range\n");
}
/* Emit a 32-bit load from the host stack frame into host_reg.
   LDR's 12-bit immediate gives the 0..4095 offset window.
   Fixed the fatal() message to name this function (was missing "_stack"). */
void
codegen_direct_read_32_stack(codeblock_t *block, int host_reg, int stack_offset)
{
    if (stack_offset >= 0 && stack_offset < 4096)
        host_arm_LDR_IMM(block, host_reg, REG_HOST_SP, stack_offset);
    else
        fatal("codegen_direct_read_32_stack - not in range\n");
}
/* Load a host-pointer-sized value from the host stack frame; pointers are
   32-bit on this backend, so this delegates to the 32-bit stack read. */
void
codegen_direct_read_pointer_stack(codeblock_t *block, int host_reg, int stack_offset)
{
    codegen_direct_read_32_stack(block, host_reg, stack_offset);
}
/* Emit a 64-bit load from the host stack frame into a D register. */
void
codegen_direct_read_64_stack(codeblock_t *block, int host_reg, int stack_offset)
{
    const int off = stack_offset;

    host_arm_VLDR_D(block, host_reg, REG_HOST_SP, off);
}
/* Emit a double-precision load from the host stack frame into a D register;
   emission is identical to the 64-bit stack read. */
void
codegen_direct_read_double_stack(codeblock_t *block, int host_reg, int stack_offset)
{
    const int off = stack_offset;

    host_arm_VLDR_D(block, host_reg, REG_HOST_SP, off);
}
/* Emit a 32-bit store of host_reg into the host stack frame.
   STR's 12-bit immediate gives the 0..4095 offset window.
   Fixed the fatal() message to name this function (was missing "_stack"). */
void
codegen_direct_write_32_stack(codeblock_t *block, int stack_offset, int host_reg)
{
    if (stack_offset >= 0 && stack_offset < 4096)
        host_arm_STR_IMM(block, host_reg, REG_HOST_SP, stack_offset);
    else
        fatal("codegen_direct_write_32_stack - not in range\n");
}
/* Emit a 64-bit store of a D register into the host stack frame. */
void
codegen_direct_write_64_stack(codeblock_t *block, int stack_offset, int host_reg)
{
    const int off = stack_offset;

    host_arm_VSTR_D(block, host_reg, REG_HOST_SP, off);
}
/* Emit a double-precision store of a D register into the host stack frame;
   emission is identical to the 64-bit stack write. */
void
codegen_direct_write_double_stack(codeblock_t *block, int stack_offset, int host_reg)
{
    const int off = stack_offset;

    host_arm_VSTR_D(block, host_reg, REG_HOST_SP, off);
}
/* Patch a previously-emitted ARM branch instruction at p so it targets the
   current emission point (block_write_data[block_pos]).
   The displacement is PC-relative; ARM reads PC as instruction address + 8,
   hence the -8.  The byte displacement is masked to the branch's reachable
   range and stored word-aligned (>> 2) by OR-ing it into the instruction's
   imm24 field, which is assumed to be zero in the placeholder. */
void
codegen_set_jump_dest(codeblock_t *block, void *p)
{
    *(uint32_t *) p |= ((((uintptr_t) &block_write_data[block_pos] - (uintptr_t) p) - 8) & 0x3fffffc) >> 2;
}
#endif
``` | /content/code_sandbox/src/codegen_new/codegen_backend_arm_uops.c | c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 44,380 |
```c
/*
* 86Box A hypervisor and IBM PC system emulator that specializes in
* running old operating systems and software designed for IBM
* PC systems and compatibles from 1981 through fairly recent
* system designs based on the PCI bus.
*
* This file is part of the 86Box distribution.
*
* FPU type handler.
*
 * Authors: Sarah Walker, <https://pcem-emulator.co.uk/>
* Miran Grca, <mgrca8@gmail.com>
*
*/
#include <math.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <wchar.h>
#define HAVE_STDARG_H
#include <86box/86box.h>
#include "cpu.h"
#ifdef ENABLE_FPU_LOG
/* Runtime switch for FPU logging; initialized from the build-time setting. */
int fpu_do_log = ENABLE_FPU_LOG;
/* Forward FPU debug messages to the common logger when logging is enabled. */
void
fpu_log(const char *fmt, ...)
{
    va_list ap;
    if (fpu_do_log) {
        va_start(ap, fmt);
        pclog_ex(fmt, ap);
        va_end(ap);
    }
}
#else
/* Logging compiled out: calls expand to nothing. */
#    define fpu_log(fmt, ...)
#endif
/* Look up an FPU type by its internal name within the given CPU model's FPU
   table.  Falls back to the first entry's type when the name is not found;
   if the name appears more than once, the last match wins (scan does not
   stop early, matching the original behavior). */
int
fpu_get_type(const cpu_family_t *cpu_family, int cpu, const char *internal_name)
{
    const FPU *fpus = cpu_family->cpus[cpu].fpus;
    int        type = fpus[0].type;
    int        i;

    for (i = 0; fpus[i].internal_name != NULL; i++) {
        if (strcmp(fpus[i].internal_name, internal_name) == 0)
            type = fpus[i].type;
    }

    return type;
}
const char *
fpu_get_internal_name(const cpu_family_t *cpu_family, int cpu, int type)
{
const CPU *cpu_s = &cpu_family->cpus[cpu];
const FPU *fpus = cpu_s->fpus;
int c = 0;
while (fpus[c].internal_name) {
if (fpus[c].type == type)
return fpus[c].internal_name;
c++;
}
return fpus[0].internal_name;
}
/* Return the display name of the c-th FPU entry for the given CPU model.
   The index is not bounds-checked; callers iterate until a NULL name. */
const char *
fpu_get_name_from_index(const cpu_family_t *cpu_family, int cpu, int c)
{
    return cpu_family->cpus[cpu].fpus[c].name;
}
/* Return the type of the c-th FPU entry for the given CPU model.
   The index is not bounds-checked; callers iterate until a NULL name. */
int
fpu_get_type_from_index(const cpu_family_t *cpu_family, int cpu, int c)
{
    return cpu_family->cpus[cpu].fpus[c].type;
}
``` | /content/code_sandbox/src/cpu/fpu.c | c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 599 |
```objective-c
/* RDTSC: load the 64-bit time-stamp counter into EDX:EAX.
   Raises #UD on CPUs without a TSC, and #GP(0) when CR4.TSD is set and the
   current privilege level is not 0. */
static int
opRDTSC(uint32_t fetchdat)
{
    if (!cpu_has_feature(CPU_FEATURE_RDTSC)) {
        cpu_state.pc = cpu_state.oldpc;
        x86illegal();
        return 1;
    }
    if ((cr4 & CR4_TSD) && CPL) {
        x86gpf("RDTSC when TSD set and CPL != 0", 0);
        return 1;
    }
    EAX = tsc & 0xffffffff;
    EDX = tsc >> 32;
    CLOCK_CYCLES(1);
#ifdef USE_DYNAREC
    /* Keep the dynarec's TSC view in sync with the interpreter's. */
    if (cpu_use_dynarec)
        update_tsc();
#endif
    return 0;
}
/* RDPMC: read a performance-monitoring counter into EDX:EAX.
   No counters are emulated, so any permitted read returns zero. */
static int
opRDPMC(uint32_t fetchdat)
{
    /* Only counters 0/1 exist; outside ring 0 in protected mode the read
       additionally requires CR4.PCE. */
    if ((ECX > 1) || (CPL && (cr0 & 1) && !(cr4 & CR4_PCE))) {
        x86gpf("RDPMC not allowed", 0);
        return 1;
    }
    EAX = 0;
    EDX = 0;
    CLOCK_CYCLES(1);
    return 0;
}
``` | /content/code_sandbox/src/cpu/x86_ops_msr.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 241 |
```objective-c
/* Signed saturation to byte/word range. */
#define SSATB(val) (((val) < -128) ? -128 : (((val) > 127) ? 127 : (val)))
#define SSATW(val) (((val) < -32768) ? -32768 : (((val) > 32767) ? 32767 : (val)))
/* Unsigned saturation to byte/word range. */
#define USATB(val) (((val) < 0) ? 0 : (((val) > 255) ? 255 : (val)))
#define USATW(val) (((val) < 0) ? 0 : (((val) > 65535) ? 65535 : (val)))
/* Access an MMX register through the MMP pointer table (by pointer / value). */
#define MMX_GETREGP(r) MMP[r]
#define MMX_GETREG(r) *(MMP[r])
/* Mark MM register r's exponent word as all-ones via the MMEP table. */
#define MMX_SETEXP(r) \
    *(MMEP[r]) = 0xffff
/* Fetch the MMX source operand into `src`: from a register when mod == 3,
   otherwise a 64-bit memory read (aborting the instruction on fault). */
#define MMX_GETSRC() \
    if (cpu_mod == 3) { \
        src = MMX_GETREG(cpu_rm); \
        CLOCK_CYCLES(1); \
    } else { \
        SEG_CHECK_READ(cpu_state.ea_seg); \
        src.q = readmemq(easeg, cpu_state.eaaddr); \
        if (cpu_state.abrt) \
            return 1; \
        CLOCK_CYCLES(2); \
    }
/* Common MMX instruction prologue: #UD without MMX support, #NM when CR0.EM
   or CR0.TS is set, then switch the x87 state into MMX mode. */
#define MMX_ENTER() \
    if (!cpu_has_feature(CPU_FEATURE_MMX)) { \
        cpu_state.pc = cpu_state.oldpc; \
        x86illegal(); \
        return 1; \
    } \
    if (cr0 & 0xc) { \
        x86_int(7); \
        return 1; \
    } \
    x87_set_mmx()
/* EMMS: leave MMX mode, marking the whole x87 tag word empty.
   Same fault checks as MMX_ENTER(), but without switching into MMX mode. */
static int
opEMMS(uint32_t fetchdat)
{
    if (!cpu_has_feature(CPU_FEATURE_MMX)) {
        cpu_state.pc = cpu_state.oldpc;
        x86illegal();
        return 1;
    }
    /* #NM when CR0.EM or CR0.TS is set. */
    if (cr0 & 0xc) {
        x86_int(7);
        return 1;
    }
    x87_emms();
    CLOCK_CYCLES(100); /*Guess*/
    return 0;
}
``` | /content/code_sandbox/src/cpu/x86_ops_mmx.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 474 |
```objective-c
#ifdef FPU_8087
/* 8087 FNENI/FNDISI: manage the interrupt-enable mask (bit 7 of the control
   word).  The bit is cleared, then set again when the modrm byte is 0xE1 --
   NOTE(review): confirm which encoding maps to FNDISI vs FNENI. */
static int
opFI(uint32_t fetchdat)
{
    FP_ENTER();
    cpu_state.pc++;
    cpu_state.npxc &= ~0x80;
    if (rmdat == 0xe1)
        cpu_state.npxc |= 0x80;
    wait(3, 0);
    return 0;
}
#else
/* FSTSW AX: copy the FPU status word into AX (not available on the 8087). */
static int
opFSTSW_AX(uint32_t fetchdat)
{
    FP_ENTER();
    cpu_state.pc++;
    AX = cpu_state.npxs;
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fstcw_sw) : (x87_timings.fstcw_sw * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fstcw_sw) : (x87_concurrency.fstcw_sw * cpu_multi));
    return 0;
}
#endif
/* FNOP: no operation, but still consumes FPU timing. */
static int
opFNOP(uint32_t fetchdat)
{
    FP_ENTER();
    cpu_state.pc++;
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fnop) : (x87_timings.fnop * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fnop) : (x87_concurrency.fnop * cpu_multi));
    return 0;
}
/* FXTRACT: split ST(0) into exponent and significand.
   ST(0) becomes the unbiased binary exponent; the significand (ST(0) scaled
   by 2^-exponent) is pushed on top. */
static int
opFXTRACT(uint32_t fetchdat)
{
    x87_conv_t conv;
    int64_t    exp_bits;
    int64_t    exponent;
    double     significand;

    FP_ENTER();
    cpu_state.pc++;
    conv.eind.d = ST(0);
    /* Isolate the IEEE-754 double exponent field, then unbias it. */
    exp_bits    = conv.eind.ll & 0x7ff0000000000000LL;
    exponent    = (exp_bits >> 52) - BIAS64;
    significand = conv.eind.d / (pow(2.0, (double) exponent));
    ST(0)       = (double) exponent;
    FP_TAG_VALID;
    x87_push(significand);
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fxtract) : (x87_timings.fxtract * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fxtract) : (x87_concurrency.fxtract * cpu_multi));
    return 0;
}
/* FNCLEX: clear the exception flags (low byte) of the status word. */
static int
opFCLEX(uint32_t fetchdat)
{
    FP_ENTER();
    cpu_state.pc++;
    cpu_state.npxs &= 0xff00;
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fnop) : (x87_timings.fnop * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fnop) : (x87_concurrency.fnop * cpu_multi));
    return 0;
}
/* FNINIT: reset control word, status word, tag word, TOP and MMX mode to
   their power-on defaults. */
static int
opFINIT(uint32_t fetchdat)
{
    uint64_t *p;
    FP_ENTER();
    cpu_state.pc++;
#ifdef FPU_8087
    cpu_state.npxc = 0x3FF;
#else
    cpu_state.npxc = 0x37F;
#endif
    codegen_set_rounding_mode(X87_ROUNDING_NEAREST);
#ifdef FPU_8087
    cpu_state.npxs &= 0x4700;
#else
    cpu_state.npxs = 0;
#endif
    /* Mark all eight tag entries empty in one 64-bit store (encoding of
       "empty" differs between the two dynarec generations). */
    p = (uint64_t *) cpu_state.tag;
#ifdef USE_NEW_DYNAREC
    *p = 0;
#else
    *p = 0x0303030303030303LL;
#endif
    cpu_state.TOP = 0;
    cpu_state.ismmx = 0;
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.finit) : (x87_timings.finit * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.finit) : (x87_concurrency.finit * cpu_multi));
    CPU_BLOCK_END();
    return 0;
}
/* FFREE ST(i): mark stack register i (relative to TOP) as empty. */
static int
opFFREE(uint32_t fetchdat)
{
    FP_ENTER();
    cpu_state.pc++;
#ifdef USE_NEW_DYNAREC
    cpu_state.tag[(cpu_state.TOP + fetchdat) & 7] = TAG_EMPTY;
#else
    cpu_state.tag[(cpu_state.TOP + fetchdat) & 7] = 3;
#endif
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.ffree) : (x87_timings.ffree * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.ffree) : (x87_concurrency.ffree * cpu_multi));
    return 0;
}
/* FFREEP ST(i): mark stack register i (relative to TOP) as empty, then pop.
   Fixed to use the same empty-tag encoding as opFFREE: under
   USE_NEW_DYNAREC the empty marker is TAG_EMPTY, not the legacy value 3. */
static int
opFFREEP(uint32_t fetchdat)
{
    FP_ENTER();
    cpu_state.pc++;
#ifdef USE_NEW_DYNAREC
    cpu_state.tag[(cpu_state.TOP + fetchdat) & 7] = TAG_EMPTY;
#else
    cpu_state.tag[(cpu_state.TOP + fetchdat) & 7] = 3;
#endif
    if (cpu_state.abrt)
        return 1;
    x87_pop();
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.ffree) : (x87_timings.ffree * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.ffree) : (x87_concurrency.ffree * cpu_multi));
    return 0;
}
/* FST ST(i): copy ST(0) into stack register i, propagating its tag. */
static int
opFST(uint32_t fetchdat)
{
    FP_ENTER();
    cpu_state.pc++;
    ST(fetchdat & 7) = ST(0);
    cpu_state.tag[(cpu_state.TOP + fetchdat) & 7] = cpu_state.tag[cpu_state.TOP & 7];
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fst) : (x87_timings.fst * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fst) : (x87_concurrency.fst * cpu_multi));
    return 0;
}
/* FSTP ST(i): same as FST, then pop the register stack. */
static int
opFSTP(uint32_t fetchdat)
{
    FP_ENTER();
    cpu_state.pc++;
    ST(fetchdat & 7) = ST(0);
    cpu_state.tag[(cpu_state.TOP + fetchdat) & 7] = cpu_state.tag[cpu_state.TOP & 7];
    x87_pop();
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fst) : (x87_timings.fst * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fst) : (x87_concurrency.fst * cpu_multi));
    return 0;
}
/* FRSTOR: reload the full FPU state from memory.
   The environment header layout depends on operand size and CPU mode; the
   eight 80-bit stack registers follow the header back to back (10 bytes
   apart).  The eight duplicated x87_ld_frstor()/eaaddr advances were
   collapsed into a loop (behavior unchanged). */
static int
FSTOR(void)
{
    uint64_t *p;
    int       reg;

    FP_ENTER();
    switch ((cr0 & 1) | (cpu_state.op32 & 0x100)) {
        case 0x000: /*16-bit real mode*/
        case 0x001: /*16-bit protected mode*/
            cpu_state.npxc = readmemw(easeg, cpu_state.eaaddr);
            codegen_set_rounding_mode((cpu_state.npxc >> 10) & 3);
            cpu_state.npxs = readmemw(easeg, cpu_state.eaaddr + 2);
            x87_settag(readmemw(easeg, cpu_state.eaaddr + 4));
            cpu_state.TOP = (cpu_state.npxs >> 11) & 7;
            cpu_state.eaaddr += 14;
            break;
        case 0x100: /*32-bit real mode*/
        case 0x101: /*32-bit protected mode*/
            cpu_state.npxc = readmemw(easeg, cpu_state.eaaddr);
            codegen_set_rounding_mode((cpu_state.npxc >> 10) & 3);
            cpu_state.npxs = readmemw(easeg, cpu_state.eaaddr + 4);
            x87_settag(readmemw(easeg, cpu_state.eaaddr + 8));
            cpu_state.TOP = (cpu_state.npxs >> 11) & 7;
            cpu_state.eaaddr += 28;
            break;
        default:
            break;
    }
    /* Reload ST(0)..ST(7); each saved register occupies 10 bytes. */
    for (reg = 0; reg < 8; reg++) {
        if (reg > 0)
            cpu_state.eaaddr += 10;
        x87_ld_frstor(reg);
    }
    cpu_state.ismmx = 0;
    /*Horrible hack, but as PCem doesn't keep the FPU stack in 80-bit precision at all times
      something like this is needed*/
    p = (uint64_t *) cpu_state.tag;
#ifdef USE_NEW_DYNAREC
    if (cpu_state.MM_w4[0] == 0xffff && cpu_state.MM_w4[1] == 0xffff && cpu_state.MM_w4[2] == 0xffff && cpu_state.MM_w4[3] == 0xffff && cpu_state.MM_w4[4] == 0xffff && cpu_state.MM_w4[5] == 0xffff && cpu_state.MM_w4[6] == 0xffff && cpu_state.MM_w4[7] == 0xffff && !cpu_state.TOP && (*p == 0x0101010101010101ULL))
#else
    if (cpu_state.MM_w4[0] == 0xffff && cpu_state.MM_w4[1] == 0xffff && cpu_state.MM_w4[2] == 0xffff && cpu_state.MM_w4[3] == 0xffff && cpu_state.MM_w4[4] == 0xffff && cpu_state.MM_w4[5] == 0xffff && cpu_state.MM_w4[6] == 0xffff && cpu_state.MM_w4[7] == 0xffff && !cpu_state.TOP && !(*p))
#endif
        cpu_state.ismmx = 1;
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.frstor) : (x87_timings.frstor * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.frstor) : (x87_concurrency.frstor * cpu_multi));
    return cpu_state.abrt;
}
/* FRSTOR with a 16-bit address operand: decode the EA, check the segment
   for reading, then run the shared reload.  FSTOR() already returns
   cpu_state.abrt, so its result is propagated directly. */
static int
opFSTOR_a16(uint32_t fetchdat)
{
    FP_ENTER();
    fetch_ea_16(fetchdat);
    SEG_CHECK_READ(cpu_state.ea_seg);
    return FSTOR();
}
#ifndef FPU_8087
/* FRSTOR with a 32-bit address operand (no 32-bit addressing on the 8087). */
static int
opFSTOR_a32(uint32_t fetchdat)
{
    FP_ENTER();
    fetch_ea_32(fetchdat);
    SEG_CHECK_READ(cpu_state.ea_seg);
    return FSTOR();
}
#endif
/* FNSAVE: write the full FPU state to memory, then reinitialize the FPU
   (as FNINIT does).  The environment header layout depends on operand size
   and CPU mode; the eight stack registers (or, in MMX mode, the eight MM
   registers in 80-bit form) follow the header, 10 bytes apart.
   The four duplicated per-mode register-dump sequences were hoisted into a
   single loop after the switch (addresses written are unchanged). */
static int
FSAVE(void)
{
    uint64_t *p;
    int       reg;

    FP_ENTER();
    /* Fold the current TOP into the status word before saving it. */
    cpu_state.npxs = (cpu_state.npxs & ~(7 << 11)) | ((cpu_state.TOP & 7) << 11);

    switch ((cr0 & 1) | (cpu_state.op32 & 0x100)) {
        case 0x000: /*16-bit real mode*/
            writememw(easeg, cpu_state.eaaddr, cpu_state.npxc);
            writememw(easeg, cpu_state.eaaddr + 2, cpu_state.npxs);
            writememw(easeg, cpu_state.eaaddr + 4, x87_gettag());
            writememw(easeg, cpu_state.eaaddr + 6, x87_pc_off);
            writememw(easeg, cpu_state.eaaddr + 10, x87_op_off);
            cpu_state.eaaddr += 14;
            break;
        case 0x001: /*16-bit protected mode*/
            writememw(easeg, cpu_state.eaaddr, cpu_state.npxc);
            writememw(easeg, cpu_state.eaaddr + 2, cpu_state.npxs);
            writememw(easeg, cpu_state.eaaddr + 4, x87_gettag());
            writememw(easeg, cpu_state.eaaddr + 6, x87_pc_off);
            writememw(easeg, cpu_state.eaaddr + 8, x87_pc_seg);
            writememw(easeg, cpu_state.eaaddr + 10, x87_op_off);
            writememw(easeg, cpu_state.eaaddr + 12, x87_op_seg);
            cpu_state.eaaddr += 14;
            break;
        case 0x100: /*32-bit real mode*/
            writememw(easeg, cpu_state.eaaddr, cpu_state.npxc);
            writememw(easeg, cpu_state.eaaddr + 4, cpu_state.npxs);
            writememw(easeg, cpu_state.eaaddr + 8, x87_gettag());
            writememw(easeg, cpu_state.eaaddr + 12, x87_pc_off);
            writememw(easeg, cpu_state.eaaddr + 20, x87_op_off);
            writememl(easeg, cpu_state.eaaddr + 24, (x87_op_off >> 16) << 12);
            cpu_state.eaaddr += 28;
            break;
        case 0x101: /*32-bit protected mode*/
            writememw(easeg, cpu_state.eaaddr, cpu_state.npxc);
            writememw(easeg, cpu_state.eaaddr + 4, cpu_state.npxs);
            writememw(easeg, cpu_state.eaaddr + 8, x87_gettag());
            writememl(easeg, cpu_state.eaaddr + 12, x87_pc_off);
            writememl(easeg, cpu_state.eaaddr + 16, x87_pc_seg);
            writememl(easeg, cpu_state.eaaddr + 20, x87_op_off);
            writememl(easeg, cpu_state.eaaddr + 24, x87_op_seg);
            cpu_state.eaaddr += 28;
            break;
        default:
            break;
    }

    /* Dump the eight registers, 10 bytes apart. */
    for (reg = 0; reg < 8; reg++) {
        if (reg > 0)
            cpu_state.eaaddr += 10;
        if (cpu_state.ismmx)
            x87_stmmx(cpu_state.MM[reg]);
        else
            x87_st_fsave(reg);
    }

    /* FNSAVE leaves the FPU in its FNINIT state. */
    cpu_state.npxc = 0x37F;
    codegen_set_rounding_mode(X87_ROUNDING_NEAREST);
#ifdef FPU_8087
    cpu_state.npxs &= 0x4700;
#else
    cpu_state.npxs = 0;
#endif
    p = (uint64_t *) cpu_state.tag;
#ifdef USE_NEW_DYNAREC
    *p = 0;
#else
    *p = 0x0303030303030303LL;
#endif
    cpu_state.TOP   = 0;
    cpu_state.ismmx = 0;
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fsave) : (x87_timings.fsave * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fsave) : (x87_concurrency.fsave * cpu_multi));
    return cpu_state.abrt;
}
/* FNSAVE with a 16-bit address operand: decode the EA, check the segment
   for writing, then run the shared save.  FSAVE() already returns
   cpu_state.abrt, so its result is propagated directly. */
static int
opFSAVE_a16(uint32_t fetchdat)
{
    FP_ENTER();
    fetch_ea_16(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    return FSAVE();
}
#ifndef FPU_8087
/* FNSAVE with a 32-bit address operand (no 32-bit addressing on the 8087). */
static int
opFSAVE_a32(uint32_t fetchdat)
{
    FP_ENTER();
    fetch_ea_32(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    return FSAVE();
}
#endif
/* FNSTSW m16 (16-bit addressing): store the status word with the current
   TOP substituted into bits 11..13. */
static int
opFSTSW_a16(uint32_t fetchdat)
{
    FP_ENTER();
    fetch_ea_16(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    seteaw((cpu_state.npxs & 0xC7FF) | ((cpu_state.TOP & 7) << 11));
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fstcw_sw) : (x87_timings.fstcw_sw * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fstcw_sw) : (x87_concurrency.fstcw_sw * cpu_multi));
    return cpu_state.abrt;
}
#ifndef FPU_8087
/* FNSTSW m16 with 32-bit addressing. */
static int
opFSTSW_a32(uint32_t fetchdat)
{
    FP_ENTER();
    fetch_ea_32(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    seteaw((cpu_state.npxs & 0xC7FF) | ((cpu_state.TOP & 7) << 11));
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fstcw_sw) : (x87_timings.fstcw_sw * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fstcw_sw) : (x87_concurrency.fstcw_sw * cpu_multi));
    return cpu_state.abrt;
}
#endif
/* FLD ST(i): push a copy of stack register i, preserving its tag and its
   MMX alias word alongside the value. */
static int
opFLD(uint32_t fetchdat)
{
    int      src_tag;
    uint64_t src_mm;

    FP_ENTER();
    cpu_state.pc++;
    /* Capture the source's tag/MM word before the push moves TOP. */
    src_tag = cpu_state.tag[(cpu_state.TOP + fetchdat) & 7];
    src_mm  = cpu_state.MM[(cpu_state.TOP + fetchdat) & 7].q;
    x87_push(ST(fetchdat & 7));
    cpu_state.tag[cpu_state.TOP & 7]  = src_tag;
    cpu_state.MM[cpu_state.TOP & 7].q = src_mm;
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fld) : (x87_timings.fld * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fld) : (x87_concurrency.fld * cpu_multi));
    return 0;
}
/* FXCH ST(i): exchange ST(0) with stack register i, swapping the value,
   the tag and the MMX alias word together. */
static int
opFXCH(uint32_t fetchdat)
{
    double   tmp_st;
    uint8_t  tmp_tag;
    uint64_t tmp_mm;

    FP_ENTER();
    cpu_state.pc++;
    tmp_st           = ST(0);
    ST(0)            = ST(fetchdat & 7);
    ST(fetchdat & 7) = tmp_st;
    tmp_tag                                       = cpu_state.tag[cpu_state.TOP & 7];
    cpu_state.tag[cpu_state.TOP & 7]              = cpu_state.tag[(cpu_state.TOP + fetchdat) & 7];
    cpu_state.tag[(cpu_state.TOP + fetchdat) & 7] = tmp_tag;
    tmp_mm                                         = cpu_state.MM[cpu_state.TOP & 7].q;
    cpu_state.MM[cpu_state.TOP & 7].q              = cpu_state.MM[(cpu_state.TOP + fetchdat) & 7].q;
    cpu_state.MM[(cpu_state.TOP + fetchdat) & 7].q = tmp_mm;
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fxch) : (x87_timings.fxch * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fxch) : (x87_concurrency.fxch * cpu_multi));
    return 0;
}
/* FCHS: negate ST(0). */
static int
opFCHS(uint32_t fetchdat)
{
    FP_ENTER();
    cpu_state.pc++;
    ST(0) = -ST(0);
    FP_TAG_VALID;
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fchs) : (x87_timings.fchs * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fchs) : (x87_concurrency.fchs * cpu_multi));
    return 0;
}
/* FABS: take the absolute value of ST(0). */
static int
opFABS(uint32_t fetchdat)
{
    FP_ENTER();
    cpu_state.pc++;
    ST(0) = fabs(ST(0));
    FP_TAG_VALID;
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fabs) : (x87_timings.fabs * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fabs) : (x87_concurrency.fabs * cpu_multi));
    return 0;
}
/* FTST: compare ST(0) against 0.0, setting C3 (equal) or C0 (below).
   NOTE(review): a NaN leaves all three flags clear here; real hardware sets
   C0|C2|C3 for an unordered compare. */
static int
opFTST(uint32_t fetchdat)
{
    FP_ENTER();
    cpu_state.pc++;
    cpu_state.npxs &= ~(FPU_SW_C0 | FPU_SW_C2 | FPU_SW_C3);
    if (ST(0) == 0.0)
        cpu_state.npxs |= FPU_SW_C3;
    else if (ST(0) < 0.0)
        cpu_state.npxs |= FPU_SW_C0;
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.ftst) : (x87_timings.ftst * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.ftst) : (x87_concurrency.ftst * cpu_multi));
    return 0;
}
/* FXAM: classify ST(0) into C0..C3 -- empty (C0|C3), zero (C3) or other
   (C2); C1 reflects the sign.  NaN/infinity/denormal classes are not
   distinguished by this implementation. */
static int
opFXAM(uint32_t fetchdat)
{
    FP_ENTER();
    cpu_state.pc++;
    cpu_state.npxs &= ~(FPU_SW_C0 | FPU_SW_C1 | FPU_SW_C2 | FPU_SW_C3);
#ifdef USE_NEW_DYNAREC
    if (cpu_state.tag[cpu_state.TOP & 7] == TAG_EMPTY)
        cpu_state.npxs |= (FPU_SW_C0 | FPU_SW_C3);
#else
    if (cpu_state.tag[cpu_state.TOP & 7] == 3)
        cpu_state.npxs |= (FPU_SW_C0 | FPU_SW_C3);
#endif
    else if (ST(0) == 0.0)
        cpu_state.npxs |= FPU_SW_C3;
    else
        cpu_state.npxs |= FPU_SW_C2;
    if (ST(0) < 0.0)
        cpu_state.npxs |= FPU_SW_C1;
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fxam) : (x87_timings.fxam * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fxam) : (x87_concurrency.fxam * cpu_multi));
    return 0;
}
/* FLD1: push +1.0. */
static int
opFLD1(uint32_t fetchdat)
{
    FP_ENTER();
    cpu_state.pc++;
    x87_push(1.0);
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fld_z1) : (x87_timings.fld_z1 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fld_z1) : (x87_concurrency.fld_z1 * cpu_multi));
    return 0;
}
/* FLDL2T: push log2(10). */
static int
opFLDL2T(uint32_t fetchdat)
{
    FP_ENTER();
    cpu_state.pc++;
    x87_push(3.3219280948873623);
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fld_const) : (x87_timings.fld_const * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fld_const) : (x87_concurrency.fld_const * cpu_multi));
    return 0;
}
/* FLDL2E: push log2(e). */
static int
opFLDL2E(uint32_t fetchdat)
{
    FP_ENTER();
    cpu_state.pc++;
    x87_push(1.4426950408889634);
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fld_const) : (x87_timings.fld_const * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fld_const) : (x87_concurrency.fld_const * cpu_multi));
    return 0;
}
/* FLDPI: push pi. */
static int
opFLDPI(uint32_t fetchdat)
{
    FP_ENTER();
    cpu_state.pc++;
    x87_push(3.141592653589793);
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fld_const) : (x87_timings.fld_const * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fld_const) : (x87_concurrency.fld_const * cpu_multi));
    return 0;
}
/* Implements FLDLG2: push log10(2) (despite the "EG2" name). */
static int
opFLDEG2(uint32_t fetchdat)
{
    FP_ENTER();
    cpu_state.pc++;
    x87_push(0.3010299956639812);
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fld_const) : (x87_timings.fld_const * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fld_const) : (x87_concurrency.fld_const * cpu_multi));
    return 0;
}
/* FLDLN2: push ln(2), given as raw IEEE-754 bits.  NOTE(review): the nearest
   double to ln 2 is 0x3FE62E42FEFA39EF; this constant is one ulp above it,
   presumably to match 80-bit-rounded hardware behavior -- confirm. */
static int
opFLDLN2(uint32_t fetchdat)
{
    FP_ENTER();
    cpu_state.pc++;
    x87_push_u64(0x3fe62e42fefa39f0ULL);
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fld_const) : (x87_timings.fld_const * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fld_const) : (x87_concurrency.fld_const * cpu_multi));
    return 0;
}
/* FLDZ: push +0.0. */
static int
opFLDZ(uint32_t fetchdat)
{
    FP_ENTER();
    cpu_state.pc++;
    x87_push(0.0);
    FP_TAG_VALID;
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fld_z1) : (x87_timings.fld_z1 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fld_z1) : (x87_concurrency.fld_z1 * cpu_multi));
    return 0;
}
/* F2XM1: ST(0) = 2^ST(0) - 1.
   NOTE(review): hardware restricts the operand to [-1, +1]; this
   implementation accepts any value — out-of-range behavior differs. */
static int
opF2XM1(uint32_t fetchdat)
{
    FP_ENTER();
    cpu_state.pc++;
    ST(0) = pow(2.0, ST(0)) - 1.0;
    FP_TAG_VALID;
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.f2xm1) : (x87_timings.f2xm1 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.f2xm1) : (x87_concurrency.f2xm1 * cpu_multi));
    return 0;
}
/* FYL2X: ST(1) = ST(1) * log2(ST(0)), then pop the stack. */
static int
opFYL2X(uint32_t fetchdat)
{
    FP_ENTER();
    cpu_state.pc++;
    ST(1) = ST(1) * (log(ST(0)) / log(2.0)); /* log2 via natural log */
    FP_TAG_VALID_N; /* tag ST(1), which becomes the new ST(0) after the pop */
    x87_pop();
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fyl2x) : (x87_timings.fyl2x * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fyl2x) : (x87_concurrency.fyl2x * cpu_multi));
    return 0;
}
/* FYL2XP1: ST(1) = ST(1) * log2(ST(0) + 1), then pop the stack.
   log1p() is used so precision holds for ST(0) values near zero. */
static int
opFYL2XP1(uint32_t fetchdat)
{
    FP_ENTER();
    cpu_state.pc++;
    ST(1) = ST(1) * (log1p(ST(0)) / log(2.0));
    FP_TAG_VALID_N;
    x87_pop();
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fyl2xp1) : (x87_timings.fyl2xp1 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fyl2xp1) : (x87_concurrency.fyl2xp1 * cpu_multi));
    return 0;
}
/* FPTAN: ST(0) = tan(ST(0)), then push 1.0 so ST(1)/ST(0) form the
   tangent as a ratio (hardware-compatible result layout).
   C2 is cleared to signal the operand was treated as in range. */
static int
opFPTAN(uint32_t fetchdat)
{
    FP_ENTER();
    cpu_state.pc++;
    ST(0) = tan(ST(0));
    FP_TAG_VALID;
    x87_push(1.0);
    cpu_state.npxs &= ~FPU_SW_C2;
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fptan) : (x87_timings.fptan * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fptan) : (x87_concurrency.fptan * cpu_multi));
    return 0;
}
/* FPATAN: ST(1) = atan2(ST(1), ST(0)), then pop the stack.
   atan2 gives the full four-quadrant result the hardware produces. */
static int
opFPATAN(uint32_t fetchdat)
{
    FP_ENTER();
    cpu_state.pc++;
    ST(1) = atan2(ST(1), ST(0));
    FP_TAG_VALID_N;
    x87_pop();
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fpatan) : (x87_timings.fpatan * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fpatan) : (x87_concurrency.fpatan * cpu_multi));
    return 0;
}
/* FDECSTP: rotate the FPU stack pointer down by one (no data is moved).
   The new dynarec keeps TOP unmasked; presumably it is masked with & 7
   at the use sites — TODO confirm. */
static int
opFDECSTP(uint32_t fetchdat)
{
    FP_ENTER();
    cpu_state.pc++;
#ifdef USE_NEW_DYNAREC
    cpu_state.TOP--;
#else
    cpu_state.TOP = (cpu_state.TOP - 1) & 7;
#endif
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fincdecstp) : (x87_timings.fincdecstp * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fincdecstp) : (x87_concurrency.fincdecstp * cpu_multi));
    return 0;
}
/* FINCSTP: rotate the FPU stack pointer up by one (no data is moved).
   Mirror of FDECSTP; see the masking note there. */
static int
opFINCSTP(uint32_t fetchdat)
{
    FP_ENTER();
    cpu_state.pc++;
#ifdef USE_NEW_DYNAREC
    cpu_state.TOP++;
#else
    cpu_state.TOP = (cpu_state.TOP + 1) & 7;
#endif
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fincdecstp) : (x87_timings.fincdecstp * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fincdecstp) : (x87_concurrency.fincdecstp * cpu_multi));
    return 0;
}
/* FPREM: partial remainder, ST(0) = ST(0) - ST(1) * trunc(ST(0)/ST(1)).
   The three low bits of the truncated quotient go to the condition
   codes: C0 = Q2, C3 = Q1, C1 = Q0. C2 is cleared (reduction reported
   as complete; the hardware's partial-reduction case is not modeled). */
static int
opFPREM(uint32_t fetchdat)
{
    int64_t temp64;
    FP_ENTER();
    cpu_state.pc++;
    temp64 = (int64_t) (ST(0) / ST(1)); /* quotient truncated toward zero */
    ST(0) = ST(0) - (ST(1) * (double) temp64);
    FP_TAG_VALID;
    cpu_state.npxs &= ~(FPU_SW_C0 | FPU_SW_C1 | FPU_SW_C2 | FPU_SW_C3);
    if (temp64 & 4)
        cpu_state.npxs |= FPU_SW_C0;
    if (temp64 & 2)
        cpu_state.npxs |= FPU_SW_C3;
    if (temp64 & 1)
        cpu_state.npxs |= FPU_SW_C1;
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fprem) : (x87_timings.fprem * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fprem) : (x87_concurrency.fprem * cpu_multi));
    return 0;
}
/* FPREM1: IEEE-754 partial remainder.
   Unlike FPREM (which truncates the quotient toward zero), FPREM1 rounds
   the quotient to the nearest integer with ties to even — i.e. the IEEE
   remainder operation.  The previous implementation was a verbatim copy
   of FPREM and therefore truncated; remquo() computes exactly the
   round-to-nearest remainder and additionally yields the low bits of the
   rounded quotient, which feed the condition codes: C0 = Q2, C3 = Q1,
   C1 = Q0.  C2 is cleared (reduction reported as complete; the
   hardware's partial-reduction case is not modeled, as before). */
static int
opFPREM1(uint32_t fetchdat)
{
    int quo;
    FP_ENTER();
    cpu_state.pc++;
    ST(0) = remquo(ST(0), ST(1), &quo);
    FP_TAG_VALID;
    cpu_state.npxs &= ~(FPU_SW_C0 | FPU_SW_C1 | FPU_SW_C2 | FPU_SW_C3);
    if (quo & 4)
        cpu_state.npxs |= FPU_SW_C0;
    if (quo & 2)
        cpu_state.npxs |= FPU_SW_C3;
    if (quo & 1)
        cpu_state.npxs |= FPU_SW_C1;
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fprem1) : (x87_timings.fprem1 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fprem1) : (x87_concurrency.fprem1 * cpu_multi));
    return 0;
}
/* FSQRT: ST(0) = sqrt(ST(0)). */
static int
opFSQRT(uint32_t fetchdat)
{
    FP_ENTER();
    cpu_state.pc++;
    ST(0) = sqrt(ST(0));
    FP_TAG_VALID;
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fsqrt) : (x87_timings.fsqrt * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fsqrt) : (x87_concurrency.fsqrt * cpu_multi));
    return 0;
}
#ifndef FPU_8087
/* FSINCOS (387+): ST(0) = sin(old ST(0)), then push cos(old ST(0)),
   so afterwards ST(0) = cos and ST(1) = sin.  The input is copied to a
   local first because ST(0) is overwritten before the cosine is taken.
   C2 is cleared (operand treated as in range). */
static int
opFSINCOS(uint32_t fetchdat)
{
    double td;
    FP_ENTER();
    cpu_state.pc++;
    td = ST(0);
    ST(0) = sin(td);
    FP_TAG_VALID;
    x87_push(cos(td));
    cpu_state.npxs &= ~FPU_SW_C2;
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fsincos) : (x87_timings.fsincos * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fsincos) : (x87_concurrency.fsincos * cpu_multi));
    return 0;
}
#endif
/* FRNDINT: round ST(0) to an integral value using the FPU's current
   rounding mode (delegated to x87_fround). */
static int
opFRNDINT(uint32_t fetchdat)
{
    FP_ENTER();
    cpu_state.pc++;
    ST(0) = (double) x87_fround(ST(0));
    FP_TAG_VALID;
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.frndint) : (x87_timings.frndint * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.frndint) : (x87_concurrency.frndint * cpu_multi));
    return 0;
}
/* FSCALE: ST(0) = ST(0) * 2^trunc(ST(1)).  The scale factor is ST(1)
   chopped toward zero, matching hardware.  Zero is left untouched so
   the sign of zero is preserved and 0 * 2^n stays 0. */
static int
opFSCALE(uint32_t fetchdat)
{
    int64_t temp64;
    FP_ENTER();
    cpu_state.pc++;
    temp64 = (int64_t) ST(1); /* truncation toward zero, per hardware */
    if (ST(0) != 0.0)
        ST(0) = ST(0) * pow(2.0, (double) temp64);
    FP_TAG_VALID;
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fscale) : (x87_timings.fscale * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fscale) : (x87_concurrency.fscale * cpu_multi));
    return 0;
}
#ifndef FPU_8087
/* FSIN (387+): ST(0) = sin(ST(0)); C2 cleared (operand in range). */
static int
opFSIN(uint32_t fetchdat)
{
    FP_ENTER();
    cpu_state.pc++;
    ST(0) = sin(ST(0));
    FP_TAG_VALID;
    cpu_state.npxs &= ~FPU_SW_C2;
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fsin_cos) : (x87_timings.fsin_cos * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fsin_cos) : (x87_concurrency.fsin_cos * cpu_multi));
    return 0;
}
/* FCOS (387+): ST(0) = cos(ST(0)); C2 cleared (operand in range). */
static int
opFCOS(uint32_t fetchdat)
{
    FP_ENTER();
    cpu_state.pc++;
    ST(0) = cos(ST(0));
    FP_TAG_VALID;
    cpu_state.npxs &= ~FPU_SW_C2;
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fsin_cos) : (x87_timings.fsin_cos * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fsin_cos) : (x87_concurrency.fsin_cos * cpu_multi));
    return 0;
}
#endif
/* Common FLDENV worker: reload the FPU environment (control word,
   status word, tag word, TOP) from memory at easeg:eaaddr.  The layout
   differs by operand size (16 vs 32 bit) and CPU mode (real vs
   protected); the switch key combines PE (cr0 bit 0) with the op-size
   bit.  Only CW/SW/tag are restored — the instruction/operand pointer
   fields of the environment image are skipped.  Returns non-zero when
   a memory access aborted. */
static int
FLDENV(void)
{
    FP_ENTER();
    switch ((cr0 & 1) | (cpu_state.op32 & 0x100)) {
        case 0x000: /*16-bit real mode*/
        case 0x001: /*16-bit protected mode*/
            cpu_state.npxc = readmemw(easeg, cpu_state.eaaddr);
            codegen_set_rounding_mode((cpu_state.npxc >> 10) & 3); /* keep dynarec rounding in sync with the new CW */
            cpu_state.npxs = readmemw(easeg, cpu_state.eaaddr + 2);
            x87_settag(readmemw(easeg, cpu_state.eaaddr + 4));
            cpu_state.TOP = (cpu_state.npxs >> 11) & 7;
            break;
        case 0x100: /*32-bit real mode*/
        case 0x101: /*32-bit protected mode*/
            cpu_state.npxc = readmemw(easeg, cpu_state.eaaddr);
            codegen_set_rounding_mode((cpu_state.npxc >> 10) & 3);
            cpu_state.npxs = readmemw(easeg, cpu_state.eaaddr + 4);
            x87_settag(readmemw(easeg, cpu_state.eaaddr + 8));
            cpu_state.TOP = (cpu_state.npxs >> 11) & 7;
            break;
    }
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fldenv) : (x87_timings.fldenv * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fldenv) : (x87_concurrency.fldenv * cpu_multi));
    return cpu_state.abrt;
}
/* FLDENV, 16-bit addressing: decode the EA, then delegate to FLDENV(). */
static int
opFLDENV_a16(uint32_t fetchdat)
{
    FP_ENTER();
    fetch_ea_16(fetchdat);
    SEG_CHECK_READ(cpu_state.ea_seg);
    FLDENV();
    return cpu_state.abrt;
}
#ifndef FPU_8087
/* FLDENV, 32-bit addressing: decode the EA, then delegate to FLDENV(). */
static int
opFLDENV_a32(uint32_t fetchdat)
{
    FP_ENTER();
    fetch_ea_32(fetchdat);
    SEG_CHECK_READ(cpu_state.ea_seg);
    FLDENV();
    return cpu_state.abrt;
}
#endif
/* FLDCW, 16-bit addressing: load the FPU control word from memory and
   propagate its rounding-control field to the dynarec backend. */
static int
opFLDCW_a16(uint32_t fetchdat)
{
    uint16_t tempw;
    FP_ENTER();
    fetch_ea_16(fetchdat);
    SEG_CHECK_READ(cpu_state.ea_seg);
    tempw = geteaw();
    if (cpu_state.abrt) /* memory fault while reading the new CW */
        return 1;
    cpu_state.npxc = tempw;
    codegen_set_rounding_mode((cpu_state.npxc >> 10) & 3); /* RC field: CW bits 11:10 */
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fldcw) : (x87_timings.fldcw * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fldcw) : (x87_concurrency.fldcw * cpu_multi));
    return 0;
}
#ifndef FPU_8087
/* FLDCW, 32-bit addressing: same as the a16 variant with a 32-bit EA. */
static int
opFLDCW_a32(uint32_t fetchdat)
{
    uint16_t tempw;
    FP_ENTER();
    fetch_ea_32(fetchdat);
    SEG_CHECK_READ(cpu_state.ea_seg);
    tempw = geteaw();
    if (cpu_state.abrt)
        return 1;
    cpu_state.npxc = tempw;
    codegen_set_rounding_mode((cpu_state.npxc >> 10) & 3); /* RC field: CW bits 11:10 */
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fldcw) : (x87_timings.fldcw * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fldcw) : (x87_concurrency.fldcw * cpu_multi));
    return 0;
}
#endif
/* Common FSTENV worker: store the FPU environment (control word, status
   word with current TOP folded in, tag word, instruction/operand
   pointers) to memory at easeg:eaaddr, in the layout selected by
   operand size and real/protected mode.
   NOTE(review): in the 16-bit real-mode image the words at +8
   (FIP[19:16]/opcode) and +12 (FOO[19:16]) are never written, and the
   32-bit real-mode image writes only partial pointer data — presumably
   a deliberate simplification; confirm against hardware dumps.
   Returns non-zero when a memory access aborted. */
static int
FSTENV(void)
{
    FP_ENTER();
    cpu_state.npxs = (cpu_state.npxs & ~(7 << 11)) | ((cpu_state.TOP & 7) << 11); /* fold TOP into SW bits 13:11 */
    switch ((cr0 & 1) | (cpu_state.op32 & 0x100)) {
        case 0x000: /*16-bit real mode*/
            writememw(easeg, cpu_state.eaaddr, cpu_state.npxc);
            writememw(easeg, cpu_state.eaaddr + 2, cpu_state.npxs);
            writememw(easeg, cpu_state.eaaddr + 4, x87_gettag());
            writememw(easeg, cpu_state.eaaddr + 6, x87_pc_off);
            writememw(easeg, cpu_state.eaaddr + 10, x87_op_off);
            break;
        case 0x001: /*16-bit protected mode*/
            writememw(easeg, cpu_state.eaaddr, cpu_state.npxc);
            writememw(easeg, cpu_state.eaaddr + 2, cpu_state.npxs);
            writememw(easeg, cpu_state.eaaddr + 4, x87_gettag());
            writememw(easeg, cpu_state.eaaddr + 6, x87_pc_off);
            writememw(easeg, cpu_state.eaaddr + 8, x87_pc_seg);
            writememw(easeg, cpu_state.eaaddr + 10, x87_op_off);
            writememw(easeg, cpu_state.eaaddr + 12, x87_op_seg);
            break;
        case 0x100: /*32-bit real mode*/
            writememw(easeg, cpu_state.eaaddr, cpu_state.npxc);
            writememw(easeg, cpu_state.eaaddr + 4, cpu_state.npxs);
            writememw(easeg, cpu_state.eaaddr + 8, x87_gettag());
            writememw(easeg, cpu_state.eaaddr + 12, x87_pc_off);
            writememw(easeg, cpu_state.eaaddr + 20, x87_op_off);
            writememl(easeg, cpu_state.eaaddr + 24, (x87_op_off >> 16) << 12); /* FOO[31:16] in bits 27:12 of the dword */
            break;
        case 0x101: /*32-bit protected mode*/
            writememw(easeg, cpu_state.eaaddr, cpu_state.npxc);
            writememw(easeg, cpu_state.eaaddr + 4, cpu_state.npxs);
            writememw(easeg, cpu_state.eaaddr + 8, x87_gettag());
            writememl(easeg, cpu_state.eaaddr + 12, x87_pc_off);
            writememl(easeg, cpu_state.eaaddr + 16, x87_pc_seg);
            writememl(easeg, cpu_state.eaaddr + 20, x87_op_off);
            writememl(easeg, cpu_state.eaaddr + 24, x87_op_seg);
            break;
    }
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fstenv) : (x87_timings.fstenv * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fstenv) : (x87_concurrency.fstenv * cpu_multi));
    return cpu_state.abrt;
}
/* FSTENV, 16-bit addressing: decode the EA, then delegate to FSTENV(). */
static int
opFSTENV_a16(uint32_t fetchdat)
{
    FP_ENTER();
    fetch_ea_16(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    FSTENV();
    return cpu_state.abrt;
}
#ifndef FPU_8087
/* FSTENV, 32-bit addressing: decode the EA, then delegate to FSTENV(). */
static int
opFSTENV_a32(uint32_t fetchdat)
{
    FP_ENTER();
    fetch_ea_32(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    FSTENV();
    return cpu_state.abrt;
}
#endif
/* FSTCW, 16-bit addressing: store the FPU control word to memory.
   Fix: CONCURRENCY_CYCLES previously charged x87_concurrency.fstenv —
   a copy-paste slip from FSTENV; the a32 variant and both
   CLOCK_CYCLES_FPU calls correctly use fstcw_sw, so this now matches. */
static int
opFSTCW_a16(uint32_t fetchdat)
{
    FP_ENTER();
    fetch_ea_16(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    seteaw(cpu_state.npxc);
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fstcw_sw) : (x87_timings.fstcw_sw * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fstcw_sw) : (x87_concurrency.fstcw_sw * cpu_multi));
    return cpu_state.abrt;
}
#ifndef FPU_8087
/* FSTCW, 32-bit addressing: store the FPU control word to memory. */
static int
opFSTCW_a32(uint32_t fetchdat)
{
    FP_ENTER();
    fetch_ea_32(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    seteaw(cpu_state.npxc);
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fstcw_sw) : (x87_timings.fstcw_sw * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fstcw_sw) : (x87_concurrency.fstcw_sw * cpu_multi));
    return cpu_state.abrt;
}
#endif
#ifndef FPU_8087
# ifndef OPS_286_386
/* FCMOVcc (P6+): conditionally copy ST(i) into ST(0), where i is the
   low three bits of fetchdat.  Both the value and the tag (and the MMX
   alias register) are copied so stack bookkeeping stays consistent.
   The cond_* predicate macros (cond_B, cond_E, ...) are presumably
   defined by an including file — only cond_U/cond_NU are local. */
#  define opFCMOV(condition)                                                                      \
        static int opFCMOV##condition(uint32_t fetchdat)                                          \
        {                                                                                         \
            FP_ENTER();                                                                           \
            cpu_state.pc++;                                                                       \
            if (cond_##condition) {                                                               \
                cpu_state.tag[cpu_state.TOP & 7] = cpu_state.tag[(cpu_state.TOP + fetchdat) & 7]; \
                cpu_state.MM[cpu_state.TOP & 7].q = cpu_state.MM[(cpu_state.TOP + fetchdat) & 7].q; \
                ST(0) = ST(fetchdat & 7);                                                         \
            }                                                                                     \
            CLOCK_CYCLES_FPU(4);                                                                  \
            return 0;                                                                             \
        }
/* FCMOVU/FCMOVNU test the parity flag (unordered result of FUCOM). */
#  define cond_U  (PF_SET())
#  define cond_NU (!PF_SET())
// clang-format off
opFCMOV(B)
opFCMOV(E)
opFCMOV(BE)
opFCMOV(U)
opFCMOV(NB)
opFCMOV(NE)
opFCMOV(NBE)
opFCMOV(NU)
// clang-format on
# endif
#endif
``` | /content/code_sandbox/src/cpu/x87_ops_misc.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 12,125 |
```objective-c
/* BS_common: shared bit-scan core for BSF/BSR.  Scans the bits of the
   local variable `temp` (which MUST be in scope at the expansion site)
   from `start` to `end` stepping by `dir`, stores the index of the
   first set bit in `dest`, and charges `time` cycles per bit examined.
   ZF is set when `temp` is zero (and `dest` is left unchanged), cleared
   otherwise — matching the x86 BSF/BSR flag contract.
   The non-dynarec variant additionally accumulates the spent cycles
   into the caller's local `instr_cycles` for PREFETCH_RUN accounting,
   which is why two nearly identical definitions exist. */
#ifdef IS_DYNAREC
#    define BS_common(start, end, dir, dest, time)   \
        flags_rebuild();                             \
        if (temp) {                                  \
            int c;                                   \
            cpu_state.flags &= ~Z_FLAG;              \
            for (c = start; c != end; c += dir) {    \
                CLOCK_CYCLES(time);                  \
                if (temp & (1 << c)) {               \
                    dest = c;                        \
                    break;                           \
                }                                    \
            }                                        \
        } else                                       \
            cpu_state.flags |= Z_FLAG;
#else
#    define BS_common(start, end, dir, dest, time)   \
        flags_rebuild();                             \
        instr_cycles = 0;                            \
        if (temp) {                                  \
            int c;                                   \
            cpu_state.flags &= ~Z_FLAG;              \
            for (c = start; c != end; c += dir) {    \
                CLOCK_CYCLES(time);                  \
                instr_cycles += time;                \
                if (temp & (1 << c)) {               \
                    dest = c;                        \
                    break;                           \
                }                                    \
            }                                        \
        } else                                       \
            cpu_state.flags |= Z_FLAG;
#endif
/* BSF r16, r/m16 (16-bit addressing): scan forward from bit 0 for the
   lowest set bit.  BS_common reads the local `temp` by name. */
static int
opBSF_w_a16(uint32_t fetchdat)
{
    uint16_t temp;
#ifndef IS_DYNAREC
    int instr_cycles = 0; /* consumed by BS_common and PREFETCH_RUN */
#endif
    fetch_ea_16(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_READ(cpu_state.ea_seg);
    temp = geteaw();
    if (cpu_state.abrt)
        return 1;
    BS_common(0, 16, 1, cpu_state.regs[cpu_reg].w, (is486) ? 1 : 3);
    CLOCK_CYCLES((is486) ? 6 : 10);
#ifndef IS_DYNAREC
    instr_cycles += ((is486) ? 6 : 10);
    PREFETCH_RUN(instr_cycles, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 0);
#endif
    return 0;
}
/* BSF r16, r/m16 (32-bit addressing variant). */
static int
opBSF_w_a32(uint32_t fetchdat)
{
    uint16_t temp;
#ifndef IS_DYNAREC
    int instr_cycles = 0; /* consumed by BS_common and PREFETCH_RUN */
#endif
    fetch_ea_32(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_READ(cpu_state.ea_seg);
    temp = geteaw();
    if (cpu_state.abrt)
        return 1;
    BS_common(0, 16, 1, cpu_state.regs[cpu_reg].w, (is486) ? 1 : 3);
    CLOCK_CYCLES((is486) ? 6 : 10);
#ifndef IS_DYNAREC
    instr_cycles += ((is486) ? 6 : 10);
    PREFETCH_RUN(instr_cycles, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 1);
#endif
    return 0;
}
/* BSF r32, r/m32 (16-bit addressing variant). */
static int
opBSF_l_a16(uint32_t fetchdat)
{
    uint32_t temp;
#ifndef IS_DYNAREC
    int instr_cycles = 0; /* consumed by BS_common and PREFETCH_RUN */
#endif
    fetch_ea_16(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_READ(cpu_state.ea_seg);
    temp = geteal();
    if (cpu_state.abrt)
        return 1;
    BS_common(0, 32, 1, cpu_state.regs[cpu_reg].l, (is486) ? 1 : 3);
    CLOCK_CYCLES((is486) ? 6 : 10);
#ifndef IS_DYNAREC
    instr_cycles += ((is486) ? 6 : 10);
    PREFETCH_RUN(instr_cycles, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, 0, 0);
#endif
    return 0;
}
/* BSF r32, r/m32 (32-bit addressing variant). */
static int
opBSF_l_a32(uint32_t fetchdat)
{
    uint32_t temp;
#ifndef IS_DYNAREC
    int instr_cycles = 0; /* consumed by BS_common and PREFETCH_RUN */
#endif
    fetch_ea_32(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_READ(cpu_state.ea_seg);
    temp = geteal();
    if (cpu_state.abrt)
        return 1;
    BS_common(0, 32, 1, cpu_state.regs[cpu_reg].l, (is486) ? 1 : 3);
    CLOCK_CYCLES((is486) ? 6 : 10);
#ifndef IS_DYNAREC
    instr_cycles += ((is486) ? 6 : 10);
    PREFETCH_RUN(instr_cycles, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, 0, 1);
#endif
    return 0;
}
/* BSR r16, r/m16 (16-bit addressing): scan backward from bit 15 for the
   highest set bit.  NOTE(review): per-bit cost is a flat 3 here, while
   BSF uses (is486) ? 1 : 3 — presumably intentional (BSR is slower on
   the 486); confirm against timing tables. */
static int
opBSR_w_a16(uint32_t fetchdat)
{
    uint16_t temp;
#ifndef IS_DYNAREC
    int instr_cycles = 0; /* consumed by BS_common and PREFETCH_RUN */
#endif
    fetch_ea_16(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_READ(cpu_state.ea_seg);
    temp = geteaw();
    if (cpu_state.abrt)
        return 1;
    BS_common(15, -1, -1, cpu_state.regs[cpu_reg].w, 3);
    CLOCK_CYCLES((is486) ? 6 : 10);
#ifndef IS_DYNAREC
    instr_cycles += ((is486) ? 6 : 10);
    PREFETCH_RUN(instr_cycles, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 0);
#endif
    return 0;
}
/* BSR r16, r/m16 (32-bit addressing variant). */
static int
opBSR_w_a32(uint32_t fetchdat)
{
    uint16_t temp;
#ifndef IS_DYNAREC
    int instr_cycles = 0; /* consumed by BS_common and PREFETCH_RUN */
#endif
    fetch_ea_32(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_READ(cpu_state.ea_seg);
    temp = geteaw();
    if (cpu_state.abrt)
        return 1;
    BS_common(15, -1, -1, cpu_state.regs[cpu_reg].w, 3);
    CLOCK_CYCLES((is486) ? 6 : 10);
#ifndef IS_DYNAREC
    instr_cycles += ((is486) ? 6 : 10);
    PREFETCH_RUN(instr_cycles, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 1);
#endif
    return 0;
}
/* BSR r32, r/m32 (16-bit addressing): scan backward from bit 31. */
static int
opBSR_l_a16(uint32_t fetchdat)
{
    uint32_t temp;
#ifndef IS_DYNAREC
    int instr_cycles = 0; /* consumed by BS_common and PREFETCH_RUN */
#endif
    fetch_ea_16(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_READ(cpu_state.ea_seg);
    temp = geteal();
    if (cpu_state.abrt)
        return 1;
    BS_common(31, -1, -1, cpu_state.regs[cpu_reg].l, 3);
    CLOCK_CYCLES((is486) ? 6 : 10);
#ifndef IS_DYNAREC
    instr_cycles += ((is486) ? 6 : 10);
    PREFETCH_RUN(instr_cycles, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, 0, 0);
#endif
    return 0;
}
/* BSR r32, r/m32 (32-bit addressing variant). */
static int
opBSR_l_a32(uint32_t fetchdat)
{
    uint32_t temp;
#ifndef IS_DYNAREC
    int instr_cycles = 0; /* consumed by BS_common and PREFETCH_RUN */
#endif
    fetch_ea_32(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_READ(cpu_state.ea_seg);
    temp = geteal();
    if (cpu_state.abrt)
        return 1;
    BS_common(31, -1, -1, cpu_state.regs[cpu_reg].l, 3);
    CLOCK_CYCLES((is486) ? 6 : 10);
#ifndef IS_DYNAREC
    instr_cycles += ((is486) ? 6 : 10);
    PREFETCH_RUN(instr_cycles, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, 0, 1);
#endif
    return 0;
}
``` | /content/code_sandbox/src/cpu/x86_ops_bitscan.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 1,879 |
```objective-c
static int
opCBW(uint32_t fetchdat)
{
AH = (AL & 0x80) ? 0xff : 0;
CLOCK_CYCLES(3);
PREFETCH_RUN(3, 1, -1, 0, 0, 0, 0, 0);
return 0;
}
/* CWDE: sign-extend AX into EAX. */
static int
opCWDE(uint32_t fetchdat)
{
    EAX = (uint32_t) (int16_t) AX;
    CLOCK_CYCLES(3);
    PREFETCH_RUN(3, 1, -1, 0, 0, 0, 0, 0);
    return 0;
}
/* CWD: fill DX with the sign of AX (DX:AX = sign-extended AX). */
static int
opCWD(uint32_t fetchdat)
{
    DX = ((int16_t) AX < 0) ? 0xFFFF : 0;
    CLOCK_CYCLES(2);
    PREFETCH_RUN(2, 1, -1, 0, 0, 0, 0, 0);
    return 0;
}
/* CDQ: fill EDX with the sign of EAX (EDX:EAX = sign-extended EAX). */
static int
opCDQ(uint32_t fetchdat)
{
    EDX = ((int32_t) EAX < 0) ? 0xffffffff : 0;
    CLOCK_CYCLES(2);
    PREFETCH_RUN(2, 1, -1, 0, 0, 0, 0, 0);
    return 0;
}
/* NOP (90h): no architectural effect, only cycle accounting. */
static int
opNOP(uint32_t fetchdat)
{
    CLOCK_CYCLES((is486) ? 1 : 3);
    PREFETCH_RUN(3, 1, -1, 0, 0, 0, 0, 0);
    return 0;
}
/* SALC/SETALC (D6h, undocumented): AL = CF ? 0xff : 0x00. */
static int
opSETALC(uint32_t fetchdat)
{
    AL = (CF_SET()) ? 0xff : 0;
    CLOCK_CYCLES(timing_rr);
    PREFETCH_RUN(timing_rr, 1, -1, 0, 0, 0, 0, 0);
    return 0;
}
/* Opcode F6, 16-bit addressing: group 3 on an 8-bit r/m operand.
   The reg field of the ModRM byte selects TEST/NOT/NEG/MUL/IMUL/DIV/
   IDIV.  Returns 1 on a memory abort or divide exception, 0 otherwise.
   NOTE(review): this variant performs CHECK_READ in addition to
   SEG_CHECK_READ, while opF6_a32 does not — confirm whether the a32
   omission is intentional. */
static int
opF6_a16(uint32_t fetchdat)
{
    int tempws = 0;
    int tempws2 = 0;
    uint16_t tempw = 0;
    uint16_t src16;
    uint8_t src;
    uint8_t dst;
    int8_t temps;
    fetch_ea_16(fetchdat);
    if (cpu_mod != 3) {
        SEG_CHECK_READ(cpu_state.ea_seg);
        CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr);
    }
    dst = geteab();
    if (cpu_state.abrt)
        return 1;
    switch (rmdat & 0x38) {
        case 0x00: /*TEST b,#8*/
        case 0x08:
            /* immediate byte follows the ModRM/displacement */
            src = readmemb(cs, cpu_state.pc);
            cpu_state.pc++;
            if (cpu_state.abrt)
                return 1;
            setznp8(src & dst);
            if (is486) {
                CLOCK_CYCLES((cpu_mod == 3) ? 1 : 2);
            } else {
                CLOCK_CYCLES((cpu_mod == 3) ? 2 : 5);
            }
            PREFETCH_RUN((cpu_mod == 3) ? 2 : 5, 3, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 0);
            break;
        case 0x10: /*NOT b*/
            if (cpu_mod != 3)
                SEG_CHECK_WRITE(cpu_state.ea_seg);
            seteab(~dst);
            if (cpu_state.abrt)
                return 1;
            CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_mm);
            PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_mm, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, 0);
            break;
        case 0x18: /*NEG b*/
            if (cpu_mod != 3)
                SEG_CHECK_WRITE(cpu_state.ea_seg);
            seteab(0 - dst);
            if (cpu_state.abrt)
                return 1;
            setsub8(0, dst); /* flags as for 0 - dst */
            CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_mm);
            PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_mm, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, 0);
            break;
        case 0x20: /*MUL AL,b*/
            AX = AL * dst;
            flags_rebuild();
            /* CF/OF set when the high half of the product is non-zero */
            if (AH)
                cpu_state.flags |= (C_FLAG | V_FLAG);
            else
                cpu_state.flags &= ~(C_FLAG | V_FLAG);
            CLOCK_CYCLES(13);
            PREFETCH_RUN(13, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 0);
            break;
        case 0x28: /*IMUL AL,b*/
            tempws = (int) ((int8_t) AL) * (int) ((int8_t) dst);
            AX = tempws & 0xffff;
            flags_rebuild();
            /* CF/OF set unless the product fits in a signed byte */
            if (((int16_t) AX >> 7) != 0 && ((int16_t) AX >> 7) != -1)
                cpu_state.flags |= (C_FLAG | V_FLAG);
            else
                cpu_state.flags &= ~(C_FLAG | V_FLAG);
            CLOCK_CYCLES(14);
            PREFETCH_RUN(14, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 0);
            break;
        case 0x30: /*DIV AL,b*/
            src16 = AX;
            if (dst)
                tempw = src16 / dst;
            /* divide by zero or quotient overflow -> #DE */
            if (dst && !(tempw & 0xff00)) {
                AH = src16 % dst;
                AL = (src16 / dst) & 0xff;
                if (!cpu_iscyrix && !is6117) {
                    flags_rebuild();
                    cpu_state.flags |= 0x8D5; /*Not a Cyrix*/
                    cpu_state.flags &= ~1;
                }
            } else {
                x86_int(0); /* #DE: divide error */
                return 1;
            }
            CLOCK_CYCLES((is486 && !cpu_iscyrix) ? 16 : 14);
            PREFETCH_RUN((is486 && !cpu_iscyrix) ? 16 : 14, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 0);
            break;
        case 0x38: /*IDIV AL,b*/
            tempws = (int) (int16_t) AX;
            if (dst != 0)
                tempws2 = tempws / (int) ((int8_t) dst);
            temps = tempws2 & 0xff;
            /* signed quotient must fit in 8 bits, else #DE */
            if (dst && ((int) temps == tempws2)) {
                AH = (tempws % (int) ((int8_t) dst)) & 0xff;
                AL = tempws2 & 0xff;
                if (!cpu_iscyrix && !is6117) {
                    flags_rebuild();
                    cpu_state.flags |= 0x8D5; /*Not a Cyrix*/
                    cpu_state.flags &= ~1;
                }
            } else {
                x86_int(0); /* #DE: divide error */
                return 1;
            }
            CLOCK_CYCLES(19);
            PREFETCH_RUN(19, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 0);
            break;
        default: /* unreachable: rmdat & 0x38 covers all eight cases */
            x86illegal();
    }
    return 0;
}
/* Opcode F6, 32-bit addressing: group 3 on an 8-bit r/m operand.
   Same dispatch as opF6_a16 (only the EA decode and the PREFETCH_RUN
   op32 flag differ).  NOTE(review): unlike the a16 variant this does
   not perform CHECK_READ after SEG_CHECK_READ — confirm intentional. */
static int
opF6_a32(uint32_t fetchdat)
{
    int tempws = 0;
    int tempws2 = 0;
    uint16_t tempw = 0;
    uint16_t src16;
    uint8_t src;
    uint8_t dst;
    int8_t temps;
    fetch_ea_32(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_READ(cpu_state.ea_seg);
    dst = geteab();
    if (cpu_state.abrt)
        return 1;
    switch (rmdat & 0x38) {
        case 0x00: /*TEST b,#8*/
        case 0x08:
            /* immediate byte follows the ModRM/displacement */
            src = readmemb(cs, cpu_state.pc);
            cpu_state.pc++;
            if (cpu_state.abrt)
                return 1;
            setznp8(src & dst);
            if (is486) {
                CLOCK_CYCLES((cpu_mod == 3) ? 1 : 2);
            } else {
                CLOCK_CYCLES((cpu_mod == 3) ? 2 : 5);
            }
            PREFETCH_RUN((cpu_mod == 3) ? 2 : 5, 3, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 1);
            break;
        case 0x10: /*NOT b*/
            if (cpu_mod != 3)
                SEG_CHECK_WRITE(cpu_state.ea_seg);
            seteab(~dst);
            if (cpu_state.abrt)
                return 1;
            CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_mm);
            PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_mm, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, 1);
            break;
        case 0x18: /*NEG b*/
            if (cpu_mod != 3)
                SEG_CHECK_WRITE(cpu_state.ea_seg);
            seteab(0 - dst);
            if (cpu_state.abrt)
                return 1;
            setsub8(0, dst); /* flags as for 0 - dst */
            CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_mm);
            PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_mm, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, 1);
            break;
        case 0x20: /*MUL AL,b*/
            AX = AL * dst;
            flags_rebuild();
            /* CF/OF set when the high half of the product is non-zero */
            if (AH)
                cpu_state.flags |= (C_FLAG | V_FLAG);
            else
                cpu_state.flags &= ~(C_FLAG | V_FLAG);
            CLOCK_CYCLES(13);
            PREFETCH_RUN(13, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 1);
            break;
        case 0x28: /*IMUL AL,b*/
            tempws = (int) ((int8_t) AL) * (int) ((int8_t) dst);
            AX = tempws & 0xffff;
            flags_rebuild();
            /* CF/OF set unless the product fits in a signed byte */
            if (((int16_t) AX >> 7) != 0 && ((int16_t) AX >> 7) != -1)
                cpu_state.flags |= (C_FLAG | V_FLAG);
            else
                cpu_state.flags &= ~(C_FLAG | V_FLAG);
            CLOCK_CYCLES(14);
            PREFETCH_RUN(14, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 1);
            break;
        case 0x30: /*DIV AL,b*/
            src16 = AX;
            if (dst)
                tempw = src16 / dst;
            /* divide by zero or quotient overflow -> #DE */
            if (dst && !(tempw & 0xff00)) {
                AH = src16 % dst;
                AL = (src16 / dst) & 0xff;
                if (!cpu_iscyrix && !is6117) {
                    flags_rebuild();
                    cpu_state.flags |= 0x8D5; /*Not a Cyrix*/
                    cpu_state.flags &= ~1;
                }
            } else {
                x86_int(0); /* #DE: divide error */
                return 1;
            }
            CLOCK_CYCLES((is486 && !cpu_iscyrix) ? 16 : 14);
            PREFETCH_RUN((is486 && !cpu_iscyrix) ? 16 : 14, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 1);
            break;
        case 0x38: /*IDIV AL,b*/
            tempws = (int) (int16_t) AX;
            if (dst != 0)
                tempws2 = tempws / (int) ((int8_t) dst);
            temps = tempws2 & 0xff;
            /* signed quotient must fit in 8 bits, else #DE */
            if (dst && ((int) temps == tempws2)) {
                AH = (tempws % (int) ((int8_t) dst)) & 0xff;
                AL = tempws2 & 0xff;
                if (!cpu_iscyrix && !is6117) {
                    flags_rebuild();
                    cpu_state.flags |= 0x8D5; /*Not a Cyrix*/
                    cpu_state.flags &= ~1;
                }
            } else {
                x86_int(0); /* #DE: divide error */
                return 1;
            }
            CLOCK_CYCLES(19);
            PREFETCH_RUN(19, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 1);
            break;
        default: /* unreachable: rmdat & 0x38 covers all eight cases */
            x86illegal();
    }
    return 0;
}
/* Opcode F7 with 16-bit operand size, 16-bit addressing: group 3 on a
   16-bit r/m operand.  The reg field selects TEST/NOT/NEG/MUL/IMUL/
   DIV/IDIV; MUL/IMUL produce DX:AX, DIV/IDIV divide DX:AX.
   Returns 1 on a memory abort or divide exception, 0 otherwise. */
static int
opF7_w_a16(uint32_t fetchdat)
{
    uint32_t templ;
    uint32_t templ2 = 0;
    int tempws;
    int tempws2 = 0;
    int16_t temps16;
    uint16_t src;
    uint16_t dst;
    fetch_ea_16(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_READ(cpu_state.ea_seg);
    dst = geteaw();
    if (cpu_state.abrt)
        return 1;
    switch (rmdat & 0x38) {
        case 0x00: /*TEST w*/
        case 0x08:
            /* 16-bit immediate follows the ModRM/displacement */
            src = getword();
            if (cpu_state.abrt)
                return 1;
            setznp16(src & dst);
            if (is486) {
                CLOCK_CYCLES((cpu_mod == 3) ? 1 : 2);
            } else {
                CLOCK_CYCLES((cpu_mod == 3) ? 2 : 5);
            }
            PREFETCH_RUN((cpu_mod == 3) ? 2 : 5, 4, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 0);
            break;
        case 0x10: /*NOT w*/
            if (cpu_mod != 3)
                SEG_CHECK_WRITE(cpu_state.ea_seg);
            seteaw(~dst);
            if (cpu_state.abrt)
                return 1;
            CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_mm);
            PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_mm, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, 0);
            break;
        case 0x18: /*NEG w*/
            if (cpu_mod != 3)
                SEG_CHECK_WRITE(cpu_state.ea_seg);
            seteaw(0 - dst);
            if (cpu_state.abrt)
                return 1;
            setsub16(0, dst); /* flags as for 0 - dst */
            CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_mm);
            PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_mm, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, 0);
            break;
        case 0x20: /*MUL AX,w*/
            templ = AX * dst;
            AX = templ & 0xFFFF;
            DX = templ >> 16;
            flags_rebuild();
            /* CF/OF set when the high half (DX) is non-zero */
            if (DX)
                cpu_state.flags |= (C_FLAG | V_FLAG);
            else
                cpu_state.flags &= ~(C_FLAG | V_FLAG);
            CLOCK_CYCLES(21);
            PREFETCH_RUN(21, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 0);
            break;
        case 0x28: /*IMUL AX,w*/
            templ = (int) ((int16_t) AX) * (int) ((int16_t) dst);
            AX = templ & 0xFFFF;
            DX = templ >> 16;
            flags_rebuild();
            /* CF/OF set unless the product fits in a signed word */
            if (((int32_t) templ >> 15) != 0 && ((int32_t) templ >> 15) != -1)
                cpu_state.flags |= (C_FLAG | V_FLAG);
            else
                cpu_state.flags &= ~(C_FLAG | V_FLAG);
            CLOCK_CYCLES(22);
            PREFETCH_RUN(22, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 0);
            break;
        case 0x30: /*DIV AX,w*/
            templ = (DX << 16) | AX;
            if (dst)
                templ2 = templ / dst;
            /* divide by zero or quotient overflow -> #DE */
            if (dst && !(templ2 & 0xffff0000)) {
                DX = templ % dst;
                AX = (templ / dst) & 0xffff;
                if (!cpu_iscyrix && !is6117)
                    setznp16(AX); /*Not a Cyrix*/
            } else {
                x86_int(0); /* #DE: divide error */
                return 1;
            }
            CLOCK_CYCLES((is486 && !cpu_iscyrix) ? 24 : 22);
            PREFETCH_RUN((is486 && !cpu_iscyrix) ? 24 : 22, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 0);
            break;
        case 0x38: /*IDIV AX,w*/
            tempws = (int) ((DX << 16) | AX);
            if (dst)
                tempws2 = tempws / (int) ((int16_t) dst);
            temps16 = tempws2 & 0xffff;
            /* signed quotient must fit in 16 bits, else #DE */
            if ((dst != 0) && ((int) temps16 == tempws2)) {
                DX = tempws % (int) ((int16_t) dst);
                AX = tempws2 & 0xffff;
                if (!cpu_iscyrix && !is6117)
                    setznp16(AX); /*Not a Cyrix*/
            } else {
                x86_int(0); /* #DE: divide error */
                return 1;
            }
            CLOCK_CYCLES(27);
            PREFETCH_RUN(27, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 0);
            break;
        default: /* unreachable: rmdat & 0x38 covers all eight cases */
            x86illegal();
    }
    return 0;
}
static int
opF7_w_a32(uint32_t fetchdat)
{
uint32_t templ;
uint32_t templ2 = 0;
int tempws;
int tempws2 = 1;
int16_t temps16;
uint16_t src;
uint16_t dst;
fetch_ea_32(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_READ(cpu_state.ea_seg);
dst = geteaw();
if (cpu_state.abrt)
return 1;
switch (rmdat & 0x38) {
case 0x00: /*TEST w*/
case 0x08:
src = getword();
if (cpu_state.abrt)
return 1;
setznp16(src & dst);
if (is486) {
CLOCK_CYCLES((cpu_mod == 3) ? 1 : 2);
} else {
CLOCK_CYCLES((cpu_mod == 3) ? 2 : 5);
}
PREFETCH_RUN((cpu_mod == 3) ? 2 : 5, 4, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 1);
break;
case 0x10: /*NOT w*/
if (cpu_mod != 3)
SEG_CHECK_WRITE(cpu_state.ea_seg);
seteaw(~dst);
if (cpu_state.abrt)
return 1;
CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_mm);
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_mm, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, 1);
break;
case 0x18: /*NEG w*/
if (cpu_mod != 3)
SEG_CHECK_WRITE(cpu_state.ea_seg);
seteaw(0 - dst);
if (cpu_state.abrt)
return 1;
setsub16(0, dst);
CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_mm);
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_mm, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, 1);
break;
case 0x20: /*MUL AX,w*/
templ = AX * dst;
AX = templ & 0xFFFF;
DX = templ >> 16;
flags_rebuild();
if (DX)
cpu_state.flags |= (C_FLAG | V_FLAG);
else
cpu_state.flags &= ~(C_FLAG | V_FLAG);
CLOCK_CYCLES(21);
PREFETCH_RUN(21, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 1);
break;
case 0x28: /*IMUL AX,w*/
templ = (int) ((int16_t) AX) * (int) ((int16_t) dst);
AX = templ & 0xFFFF;
DX = templ >> 16;
flags_rebuild();
if (((int32_t) templ >> 15) != 0 && ((int32_t) templ >> 15) != -1)
cpu_state.flags |= (C_FLAG | V_FLAG);
else
cpu_state.flags &= ~(C_FLAG | V_FLAG);
CLOCK_CYCLES(22);
PREFETCH_RUN(22, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 1);
break;
case 0x30: /*DIV AX,w*/
templ = (DX << 16) | AX;
if (dst)
templ2 = templ / dst;
if (dst && !(templ2 & 0xffff0000)) {
DX = templ % dst;
AX = (templ / dst) & 0xffff;
if (!cpu_iscyrix && !is6117)
setznp16(AX); /*Not a Cyrix*/
} else {
// fatal("DIVw BY 0 %04X:%04X %i\n",cs>>4,pc,ins);
x86_int(0);
return 1;
}
CLOCK_CYCLES((is486 && !cpu_iscyrix) ? 24 : 22);
PREFETCH_RUN((is486 && !cpu_iscyrix) ? 24 : 22, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 1);
break;
case 0x38: /*IDIV AX,w*/
tempws = (int) ((DX << 16) | AX);
if (dst)
tempws2 = tempws / (int) ((int16_t) dst);
temps16 = tempws2 & 0xffff;
if ((dst != 0) && ((int) temps16 == tempws2)) {
DX = tempws % (int) ((int16_t) dst);
AX = tempws2 & 0xffff;
if (!cpu_iscyrix && !is6117)
setznp16(AX); /*Not a Cyrix*/
} else {
x86_int(0);
return 1;
}
CLOCK_CYCLES(27);
PREFETCH_RUN(27, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 1);
break;
default:
x86illegal();
}
return 0;
}
/* Opcode F7 with 32-bit operand size, 16-bit addressing: group 3 on a
   32-bit r/m operand.  MUL/IMUL produce EDX:EAX; DIV/IDIV are
   delegated to the divl/idivl helpers, which raise #DE themselves and
   return non-zero on failure.
   NOTE(review): in the NOT/NEG cases CLOCK_CYCLES charges timing_mml
   but PREFETCH_RUN is given timing_mm — confirm whether intentional. */
static int
opF7_l_a16(uint32_t fetchdat)
{
    uint64_t temp64;
    uint32_t src;
    uint32_t dst;
    fetch_ea_16(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_READ(cpu_state.ea_seg);
    dst = geteal();
    if (cpu_state.abrt)
        return 1;
    switch (rmdat & 0x38) {
        case 0x00: /*TEST l*/
        case 0x08:
            /* 32-bit immediate follows the ModRM/displacement */
            src = getlong();
            if (cpu_state.abrt)
                return 1;
            setznp32(src & dst);
            if (is486) {
                CLOCK_CYCLES((cpu_mod == 3) ? 1 : 2);
            } else {
                CLOCK_CYCLES((cpu_mod == 3) ? 2 : 5);
            }
            PREFETCH_RUN((cpu_mod == 3) ? 2 : 5, 5, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, 0, 0);
            break;
        case 0x10: /*NOT l*/
            if (cpu_mod != 3)
                SEG_CHECK_WRITE(cpu_state.ea_seg);
            seteal(~dst);
            if (cpu_state.abrt)
                return 1;
            CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_mml);
            PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_mm, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0);
            break;
        case 0x18: /*NEG l*/
            if (cpu_mod != 3)
                SEG_CHECK_WRITE(cpu_state.ea_seg);
            seteal(0 - dst);
            if (cpu_state.abrt)
                return 1;
            setsub32(0, dst); /* flags as for 0 - dst */
            CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_mml);
            PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_mm, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0);
            break;
        case 0x20: /*MUL EAX,l*/
            temp64 = (uint64_t) EAX * (uint64_t) dst;
            EAX = temp64 & 0xffffffff;
            EDX = temp64 >> 32;
            flags_rebuild();
            /* CF/OF set when the high half (EDX) is non-zero */
            if (EDX)
                cpu_state.flags |= (C_FLAG | V_FLAG);
            else
                cpu_state.flags &= ~(C_FLAG | V_FLAG);
            CLOCK_CYCLES(21);
            PREFETCH_RUN(21, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, 0, 0);
            break;
        case 0x28: /*IMUL EAX,l*/
            temp64 = (int64_t) (int32_t) EAX * (int64_t) (int32_t) dst;
            EAX = temp64 & 0xffffffff;
            EDX = temp64 >> 32;
            flags_rebuild();
            /* CF/OF set unless the product fits in a signed dword */
            if (((int64_t) temp64 >> 31) != 0 && ((int64_t) temp64 >> 31) != -1)
                cpu_state.flags |= (C_FLAG | V_FLAG);
            else
                cpu_state.flags &= ~(C_FLAG | V_FLAG);
            CLOCK_CYCLES(38);
            PREFETCH_RUN(38, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, 0, 0);
            break;
        case 0x30: /*DIV EAX,l*/
            if (divl(dst)) /* helper raises #DE and returns non-zero on failure */
                return 1;
            if (!cpu_iscyrix && !is6117)
                setznp32(EAX); /*Not a Cyrix*/
            CLOCK_CYCLES((is486) ? 40 : 38);
            PREFETCH_RUN(is486 ? 40 : 38, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, 0, 0);
            break;
        case 0x38: /*IDIV EAX,l*/
            if (idivl((int32_t) dst)) /* helper raises #DE and returns non-zero on failure */
                return 1;
            if (!cpu_iscyrix && !is6117)
                setznp32(EAX); /*Not a Cyrix*/
            CLOCK_CYCLES(43);
            PREFETCH_RUN(43, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, 0, 0);
            break;
        default: /* unreachable: rmdat & 0x38 covers all eight cases */
            x86illegal();
    }
    return 0;
}
static int
opF7_l_a32(uint32_t fetchdat)
{
uint64_t temp64;
uint32_t src;
uint32_t dst;
fetch_ea_32(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_READ(cpu_state.ea_seg);
dst = geteal();
if (cpu_state.abrt)
return 1;
switch (rmdat & 0x38) {
case 0x00: /*TEST l*/
case 0x08:
src = getlong();
if (cpu_state.abrt)
return 1;
setznp32(src & dst);
if (is486) {
CLOCK_CYCLES((cpu_mod == 3) ? 1 : 2);
} else {
CLOCK_CYCLES((cpu_mod == 3) ? 2 : 5);
}
PREFETCH_RUN((cpu_mod == 3) ? 2 : 5, 5, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, 0, 1);
break;
case 0x10: /*NOT l*/
if (cpu_mod != 3)
SEG_CHECK_WRITE(cpu_state.ea_seg);
seteal(~dst);
if (cpu_state.abrt)
return 1;
CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_mml);
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_mm, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 1);
break;
case 0x18: /*NEG l*/
if (cpu_mod != 3)
SEG_CHECK_WRITE(cpu_state.ea_seg);
seteal(0 - dst);
if (cpu_state.abrt)
return 1;
setsub32(0, dst);
CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_mml);
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_mm, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 1);
break;
case 0x20: /*MUL EAX,l*/
temp64 = (uint64_t) EAX * (uint64_t) dst;
EAX = temp64 & 0xffffffff;
EDX = temp64 >> 32;
flags_rebuild();
if (EDX)
cpu_state.flags |= (C_FLAG | V_FLAG);
else
cpu_state.flags &= ~(C_FLAG | V_FLAG);
CLOCK_CYCLES(21);
PREFETCH_RUN(21, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, 0, 1);
break;
case 0x28: /*IMUL EAX,l*/
temp64 = (int64_t) (int32_t) EAX * (int64_t) (int32_t) dst;
EAX = temp64 & 0xffffffff;
EDX = temp64 >> 32;
flags_rebuild();
if (((int64_t) temp64 >> 31) != 0 && ((int64_t) temp64 >> 31) != -1)
cpu_state.flags |= (C_FLAG | V_FLAG);
else
cpu_state.flags &= ~(C_FLAG | V_FLAG);
CLOCK_CYCLES(38);
PREFETCH_RUN(38, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, 0, 1);
break;
case 0x30: /*DIV EAX,l*/
if (divl(dst))
return 1;
if (!cpu_iscyrix && !is6117)
setznp32(EAX); /*Not a Cyrix*/
CLOCK_CYCLES((is486) ? 40 : 38);
PREFETCH_RUN(is486 ? 40 : 38, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, 0, 1);
break;
case 0x38: /*IDIV EAX,l*/
if (idivl((int32_t) dst))
return 1;
if (!cpu_iscyrix && !is6117)
setznp32(EAX); /*Not a Cyrix*/
CLOCK_CYCLES(43);
PREFETCH_RUN(43, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, 0, 1);
break;
default:
x86illegal();
}
return 0;
}
static int
opHLT(uint32_t fetchdat)
{
if ((CPL || (cpu_state.eflags & VM_FLAG)) && (cr0 & 1)) {
x86gpf(NULL, 0);
return 1;
}
if (smi_line)
enter_smm_check(1);
else if (!((cpu_state.flags & I_FLAG) && pic.int_pending)) {
CLOCK_CYCLES_ALWAYS(100);
if (!((cpu_state.flags & I_FLAG) && pic.int_pending))
cpu_state.pc--;
} else {
CLOCK_CYCLES(5);
}
CPU_BLOCK_END();
PREFETCH_RUN(100, 1, -1, 0, 0, 0, 0, 0);
if (hlt_reset_pending)
softresetx86();
return 0;
}
#ifdef OPS_286_386
static int
opLOCK(uint32_t fetchdat)
{
int legal;
fetchdat = fastreadl_fetch(cs + cpu_state.pc);
if (cpu_state.abrt)
return 0;
cpu_state.pc++;
legal = is_lock_legal(fetchdat);
ILLEGAL_ON(legal == 0);
CLOCK_CYCLES(4);
PREFETCH_PREFIX();
return x86_2386_opcodes[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
}
#else
static int
opLOCK(uint32_t fetchdat)
{
fetchdat = fastreadl(cs + cpu_state.pc);
if (cpu_state.abrt)
return 0;
cpu_state.pc++;
ILLEGAL_ON((fetchdat & 0xff) == 0x90);
CLOCK_CYCLES(4);
PREFETCH_PREFIX();
return x86_opcodes[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
}
#endif
static int
opBOUND_w_a16(uint32_t fetchdat)
{
int16_t low;
int16_t high;
fetch_ea_16(fetchdat);
ILLEGAL_ON(cpu_mod == 3);
SEG_CHECK_READ(cpu_state.ea_seg);
low = geteaw();
high = readmemw(easeg, cpu_state.eaaddr + 2);
if (cpu_state.abrt)
return 1;
if (((int16_t) cpu_state.regs[cpu_reg].w < low) || ((int16_t) cpu_state.regs[cpu_reg].w > high)) {
x86_int(5);
return 1;
}
CLOCK_CYCLES(is486 ? 7 : 10);
PREFETCH_RUN(is486 ? 7 : 10, 2, rmdat, 2, 0, 0, 0, 0);
return 0;
}
static int
opBOUND_w_a32(uint32_t fetchdat)
{
int16_t low;
int16_t high;
fetch_ea_32(fetchdat);
ILLEGAL_ON(cpu_mod == 3);
SEG_CHECK_READ(cpu_state.ea_seg);
low = geteaw();
high = readmemw(easeg, cpu_state.eaaddr + 2);
if (cpu_state.abrt)
return 1;
if (((int16_t) cpu_state.regs[cpu_reg].w < low) || ((int16_t) cpu_state.regs[cpu_reg].w > high)) {
x86_int(5);
return 1;
}
CLOCK_CYCLES(is486 ? 7 : 10);
PREFETCH_RUN(is486 ? 7 : 10, 2, rmdat, 2, 0, 0, 0, 1);
return 0;
}
static int
opBOUND_l_a16(uint32_t fetchdat)
{
int32_t low;
int32_t high;
fetch_ea_16(fetchdat);
ILLEGAL_ON(cpu_mod == 3);
SEG_CHECK_READ(cpu_state.ea_seg);
low = geteal();
high = readmeml(easeg, cpu_state.eaaddr + 4);
if (cpu_state.abrt)
return 1;
if (((int32_t) cpu_state.regs[cpu_reg].l < low) || ((int32_t) cpu_state.regs[cpu_reg].l > high)) {
x86_int(5);
return 1;
}
CLOCK_CYCLES(is486 ? 7 : 10);
PREFETCH_RUN(is486 ? 7 : 10, 2, rmdat, 1, 1, 0, 0, 0);
return 0;
}
static int
opBOUND_l_a32(uint32_t fetchdat)
{
int32_t low;
int32_t high;
fetch_ea_32(fetchdat);
ILLEGAL_ON(cpu_mod == 3);
SEG_CHECK_READ(cpu_state.ea_seg);
low = geteal();
high = readmeml(easeg, cpu_state.eaaddr + 4);
if (cpu_state.abrt)
return 1;
if (((int32_t) cpu_state.regs[cpu_reg].l < low) || ((int32_t) cpu_state.regs[cpu_reg].l > high)) {
x86_int(5);
return 1;
}
CLOCK_CYCLES(is486 ? 7 : 10);
PREFETCH_RUN(is486 ? 7 : 10, 2, rmdat, 1, 1, 0, 0, 1);
return 0;
}
static int
opCLTS(uint32_t fetchdat)
{
if ((CPL || (cpu_state.eflags & VM_FLAG)) && (cr0 & 1)) {
x86gpf(NULL, 0);
return 1;
}
cr0 &= ~8;
CLOCK_CYCLES(5);
PREFETCH_RUN(5, 1, -1, 0, 0, 0, 0, 0);
return 0;
}
static int
opINVD(uint32_t fetchdat)
{
CLOCK_CYCLES(1000);
CPU_BLOCK_END();
return 0;
}
static int
opWBINVD(uint32_t fetchdat)
{
CLOCK_CYCLES(10000);
CPU_BLOCK_END();
return 0;
}
static int
opLOADALL(uint32_t fetchdat)
{
if (CPL && (cr0 & 1)) {
x86gpf(NULL, 0);
return 1;
}
msw = (msw & 1) | readmemw(0, 0x806);
cpu_state.flags = (readmemw(0, 0x818) & 0xffd5) | 2;
flags_extract();
tr.seg = readmemw(0, 0x816);
cpu_state.pc = readmemw(0, 0x81A);
ldt.seg = readmemw(0, 0x81C);
DS = readmemw(0, 0x81E);
SS = readmemw(0, 0x820);
CS = readmemw(0, 0x822);
ES = readmemw(0, 0x824);
DI = readmemw(0, 0x826);
SI = readmemw(0, 0x828);
BP = readmemw(0, 0x82A);
SP = readmemw(0, 0x82C);
BX = readmemw(0, 0x82E);
DX = readmemw(0, 0x830);
CX = readmemw(0, 0x832);
AX = readmemw(0, 0x834);
es = readmemw(0, 0x836) | (readmemb(0, 0x838) << 16);
cpu_state.seg_es.access = readmemb(0, 0x839);
cpu_state.seg_es.limit = readmemw(0, 0x83A);
cs = readmemw(0, 0x83C) | (readmemb(0, 0x83E) << 16);
cpu_state.seg_cs.access = readmemb(0, 0x83F);
cpu_state.seg_cs.limit = readmemw(0, 0x840);
ss = readmemw(0, 0x842) | (readmemb(0, 0x844) << 16);
cpu_state.seg_ss.access = readmemb(0, 0x845);
cpu_state.seg_ss.limit = readmemw(0, 0x846);
if (cpu_state.seg_ss.base == 0 && cpu_state.seg_ss.limit_low == 0 && cpu_state.seg_ss.limit_high == 0xffffffff)
cpu_cur_status &= ~CPU_STATUS_NOTFLATSS;
else
cpu_cur_status |= CPU_STATUS_NOTFLATSS;
ds = readmemw(0, 0x848) | (readmemb(0, 0x84A) << 16);
cpu_state.seg_ds.access = readmemb(0, 0x84B);
cpu_state.seg_ds.limit = readmemw(0, 0x84C);
if (cpu_state.seg_ds.base == 0 && cpu_state.seg_ds.limit_low == 0 && cpu_state.seg_ds.limit_high == 0xffffffff)
cpu_cur_status &= ~CPU_STATUS_NOTFLATDS;
else
cpu_cur_status |= CPU_STATUS_NOTFLATDS;
gdt.base = readmemw(0, 0x84E) | (readmemb(0, 0x850) << 16);
gdt.limit = readmemw(0, 0x852);
ldt.base = readmemw(0, 0x854) | (readmemb(0, 0x856) << 16);
ldt.access = readmemb(0, 0x857);
ldt.limit = readmemw(0, 0x858);
idt.base = readmemw(0, 0x85A) | (readmemb(0, 0x85C) << 16);
idt.limit = readmemw(0, 0x85E);
tr.base = readmemw(0, 0x860) | (readmemb(0, 0x862) << 16);
tr.access = readmemb(0, 0x863);
tr.limit = readmemw(0, 0x864);
CLOCK_CYCLES(195);
PREFETCH_RUN(195, 1, -1, 51, 0, 0, 0, 0);
return 0;
}
static void
set_segment_limit(x86seg *s, uint8_t segdat3)
{
if ((s->access & 0x18) != 0x10 || !(s->access & (1 << 2))) /*expand-down*/
{
s->limit_high = s->limit;
s->limit_low = 0;
} else {
s->limit_high = (segdat3 & 0x40) ? 0xffffffff : 0xffff;
s->limit_low = s->limit + 1;
}
}
static void
loadall_load_segment(uint32_t addr, x86seg *s)
{
uint32_t attrib = readmeml(0, addr);
uint32_t segdat3 = (attrib >> 16) & 0xff;
s->access = (attrib >> 8) & 0xff;
s->ar_high = segdat3;
s->base = readmeml(0, addr + 4);
s->limit = readmeml(0, addr + 8);
if (s == &cpu_state.seg_cs)
use32 = (segdat3 & 0x40) ? 0x300 : 0;
if (s == &cpu_state.seg_ss)
stack32 = (segdat3 & 0x40) ? 1 : 0;
cpu_cur_status &= ~(CPU_STATUS_USE32 | CPU_STATUS_STACK32);
if (use32)
cpu_cur_status |= CPU_STATUS_USE32;
if (stack32)
cpu_cur_status |= CPU_STATUS_STACK32;
set_segment_limit(s, segdat3);
if (s == &cpu_state.seg_ds) {
if (s->base == 0 && s->limit_low == 0 && s->limit_high == 0xffffffff)
cpu_cur_status &= ~CPU_STATUS_NOTFLATDS;
else
cpu_cur_status |= CPU_STATUS_NOTFLATDS;
}
if (s == &cpu_state.seg_ss) {
if (s->base == 0 && s->limit_low == 0 && s->limit_high == 0xffffffff)
cpu_cur_status &= ~CPU_STATUS_NOTFLATSS;
else
cpu_cur_status |= CPU_STATUS_NOTFLATSS;
}
}
static int
opLOADALL386(uint32_t fetchdat)
{
uint32_t la_addr = es + EDI;
cr0 = readmeml(0, la_addr);
cpu_state.flags = readmemw(0, la_addr + 4);
cpu_state.eflags = readmemw(0, la_addr + 6);
flags_extract();
cpu_state.pc = readmeml(0, la_addr + 8);
EDI = readmeml(0, la_addr + 0xC);
ESI = readmeml(0, la_addr + 0x10);
EBP = readmeml(0, la_addr + 0x14);
ESP = readmeml(0, la_addr + 0x18);
EBX = readmeml(0, la_addr + 0x1C);
EDX = readmeml(0, la_addr + 0x20);
ECX = readmeml(0, la_addr + 0x24);
EAX = readmeml(0, la_addr + 0x28);
dr[6] = readmeml(0, la_addr + 0x2C);
dr[7] = readmeml(0, la_addr + 0x30);
tr.seg = readmemw(0, la_addr + 0x34);
ldt.seg = readmemw(0, la_addr + 0x38);
GS = readmemw(0, la_addr + 0x3C);
FS = readmemw(0, la_addr + 0x40);
DS = readmemw(0, la_addr + 0x44);
SS = readmemw(0, la_addr + 0x48);
CS = readmemw(0, la_addr + 0x4C);
ES = readmemw(0, la_addr + 0x50);
loadall_load_segment(la_addr + 0x54, &tr);
loadall_load_segment(la_addr + 0x60, &idt);
loadall_load_segment(la_addr + 0x6c, &gdt);
loadall_load_segment(la_addr + 0x78, &ldt);
loadall_load_segment(la_addr + 0x84, &cpu_state.seg_gs);
loadall_load_segment(la_addr + 0x90, &cpu_state.seg_fs);
loadall_load_segment(la_addr + 0x9c, &cpu_state.seg_ds);
loadall_load_segment(la_addr + 0xa8, &cpu_state.seg_ss);
loadall_load_segment(la_addr + 0xb4, &cpu_state.seg_cs);
loadall_load_segment(la_addr + 0xc0, &cpu_state.seg_es);
if (CPL == 3 && oldcpl != 3)
flushmmucache_nopc();
oldcpl = CPL;
CLOCK_CYCLES(350);
return 0;
}
static int
opCPUID(uint32_t fetchdat)
{
if (CPUID) {
cpu_CPUID();
CLOCK_CYCLES(9);
return 0;
}
cpu_state.pc = cpu_state.oldpc;
x86illegal();
return 1;
}
static int
opRDMSR(uint32_t fetchdat)
{
if (cpu_has_feature(CPU_FEATURE_MSR)) {
cpu_RDMSR();
CLOCK_CYCLES(9);
return 0;
}
cpu_state.pc = cpu_state.oldpc;
x86illegal();
return 1;
}
static int
opWRMSR(uint32_t fetchdat)
{
if (cpu_has_feature(CPU_FEATURE_MSR)) {
cpu_WRMSR();
CLOCK_CYCLES(9);
return 0;
}
cpu_state.pc = cpu_state.oldpc;
x86illegal();
return 1;
}
static int
opRSM(uint32_t fetchdat)
{
if (in_smm) {
leave_smm();
if (smi_latched)
enter_smm(smm_in_hlt);
CPU_BLOCK_END();
return 0;
}
cpu_state.pc = cpu_state.oldpc;
x86illegal();
return 1;
}
``` | /content/code_sandbox/src/cpu/x86_ops_misc.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 11,704 |
```c
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <wchar.h>
#include <86box/86box.h>
#include "cpu.h"
#include <86box/machine.h>
#include "x87_timings.h"
x87_timings_t x87_timings;
x87_timings_t x87_concurrency;
const x87_timings_t x87_timings_8087 = {
.f2xm1 = (310 + 630) / 2,
.fabs = (10 + 17) / 2,
.fadd = (70 + 100) / 2,
.fadd_32 = (90 + 120) / 2,
.fadd_64 = (95 + 125) / 2,
.fbld = (290 + 310) / 2,
.fbstp = (520 + 540) / 2,
.fchs = (10 + 17) / 2,
.fclex = (2 + 8) / 2,
.fcom = (40 + 50) / 2,
.fcom_32 = (60 + 70) / 2,
.fcom_64 = (65 + 75) / 2,
.fcos = 0, /*387+*/
.fincdecstp = (6 + 12) / 2,
.fdisi_eni = (6 + 12) / 2,
.fdiv = (193 + 203) / 2,
.fdiv_32 = (215 + 225) / 2,
.fdiv_64 = (220 + 230) / 2,
.ffree = (9 + 16) / 2,
.fadd_i16 = (102 + 137) / 2,
.fadd_i32 = (108 + 143) / 2,
.fcom_i16 = (72 + 86) / 2,
.fcom_i32 = (78 + 91) / 2,
.fdiv_i16 = (224 + 238) / 2,
.fdiv_i32 = (230 + 243) / 2,
.fild_16 = (46 + 54) / 2,
.fild_32 = (50 + 60) / 2,
.fild_64 = (60 + 68) / 2,
.fmul_i16 = (124 + 138) / 2,
.fmul_i32 = (130 + 144) / 2,
.finit = (2 + 8) / 2,
.fist_16 = (80 + 90) / 2,
.fist_32 = (82 + 92) / 2,
.fist_64 = (94 + 105) / 2,
.fld = (17 + 22) / 2,
.fld_32 = (38 + 56) / 2,
.fld_64 = (40 + 60) / 2,
.fld_80 = (53 + 65) / 2,
.fld_z1 = (11 + 21) / 2,
.fld_const = (15 + 24) / 2,
.fldcw = (7 + 14) / 2,
.fldenv = (35 + 45) / 2,
.fmul = (90 + 145) / 2,
.fmul_32 = (110 + 125) / 2,
.fmul_64 = (154 + 168) / 2,
.fnop = (10 + 16) / 2,
.fpatan = (250 + 800) / 2,
.fprem = (15 + 190) / 2,
.fprem1 = 0, /*387+*/
.fptan = (30 + 540) / 2,
.frndint = (16 + 50) / 2,
.frstor = (197 + 207) / 2,
.fsave = (197 + 207) / 2,
.fscale = (32 + 38) / 2,
.fsetpm = 0, /*287+*/
.fsin_cos = 0, /*387+*/
.fsincos = 0, /*387+*/
.fsqrt = (180 + 186) / 2,
.fst = (15 + 22) / 2,
.fst_32 = (84 + 90) / 2,
.fst_64 = (96 + 104) / 2,
.fst_80 = (52 + 58) / 2,
.fstcw_sw = (12 + 18) / 2,
.fstenv = (40 + 50) / 2,
.ftst = (38 + 48) / 2,
.fucom = 0, /*387+*/
.fwait = 4,
.fxam = (12 + 23) / 2,
.fxch = (10 + 15) / 2,
.fxtract = (27 + 55) / 2,
.fyl2x = (900 + 1100) / 2,
.fyl2xp1 = (700 + 1000) / 2
};
const x87_timings_t x87_timings_80187 = {
.f2xm1 = (310 + 630) / 2,
.fabs = (10 + 17) / 2,
.fadd = (70 + 100) / 2,
.fadd_32 = (90 + 120) / 2,
.fadd_64 = (95 + 125) / 2,
.fbld = (290 + 310) / 2,
.fbstp = (520 + 540) / 2,
.fchs = (10 + 17) / 2,
.fclex = (2 + 8) / 2,
.fcom = (40 + 50) / 2,
.fcom_32 = (60 + 70) / 2,
.fcom_64 = (65 + 75) / 2,
.fcos = 0, /*387+*/
.fincdecstp = (6 + 12) / 2,
.fdisi_eni = (6 + 12) / 2,
.fdiv = (193 + 203) / 2,
.fdiv_32 = (215 + 225) / 2,
.fdiv_64 = (220 + 230) / 2,
.ffree = (9 + 16) / 2,
.fadd_i16 = (102 + 137) / 2,
.fadd_i32 = (108 + 143) / 2,
.fcom_i16 = (72 + 86) / 2,
.fcom_i32 = (78 + 91) / 2,
.fdiv_i16 = (224 + 238) / 2,
.fdiv_i32 = (230 + 243) / 2,
.fild_16 = (46 + 54) / 2,
.fild_32 = (50 + 60) / 2,
.fild_64 = (60 + 68) / 2,
.fmul_i16 = (124 + 138) / 2,
.fmul_i32 = (130 + 144) / 2,
.finit = (2 + 8) / 2,
.fist_16 = (80 + 90) / 2,
.fist_32 = (82 + 92) / 2,
.fist_64 = (94 + 105) / 2,
.fld = (17 + 22) / 2,
.fld_32 = (38 + 56) / 2,
.fld_64 = (40 + 60) / 2,
.fld_80 = (53 + 65) / 2,
.fld_z1 = (11 + 21) / 2,
.fld_const = (15 + 24) / 2,
.fldcw = (7 + 14) / 2,
.fldenv = (35 + 45) / 2,
.fmul = (90 + 145) / 2,
.fmul_32 = (110 + 125) / 2,
.fmul_64 = (154 + 168) / 2,
.fnop = (10 + 16) / 2,
.fpatan = (250 + 800) / 2,
.fprem = (15 + 190) / 2,
.fprem1 = 0, /*387+*/
.fptan = (30 + 540) / 2,
.frndint = (16 + 50) / 2,
.frstor = (197 + 207) / 2,
.fsave = (197 + 207) / 2,
.fscale = (32 + 38) / 2,
.fsetpm = 0, /*287+*/
.fsin_cos = 0, /*387+*/
.fsincos = 0, /*387+*/
.fsqrt = (180 + 186) / 2,
.fst = (15 + 22) / 2,
.fst_32 = (84 + 90) / 2,
.fst_64 = (96 + 104) / 2,
.fst_80 = (52 + 58) / 2,
.fstcw_sw = (12 + 18) / 2,
.fstenv = (40 + 50) / 2,
.ftst = (38 + 48) / 2,
.fucom = 0, /*387+*/
.fwait = 4,
.fxam = (12 + 23) / 2,
.fxch = (10 + 15) / 2,
.fxtract = (27 + 55) / 2,
.fyl2x = (900 + 1100) / 2,
.fyl2xp1 = (700 + 1000) / 2
};
/*Mostly the same as 8087*/
const x87_timings_t x87_timings_287 = {
.f2xm1 = (310 + 630) / 2,
.fabs = (10 + 17) / 2,
.fadd = (70 + 100) / 2,
.fadd_32 = (90 + 120) / 2,
.fadd_64 = (95 + 125) / 2,
.fbld = (290 + 310) / 2,
.fbstp = (520 + 540) / 2,
.fchs = (10 + 17) / 2,
.fclex = (2 + 8) / 2,
.fcom = (40 + 50) / 2,
.fcom_32 = (60 + 70) / 2,
.fcom_64 = (65 + 75) / 2,
.fcos = 0, /*387+*/
.fincdecstp = (6 + 12) / 2,
.fdisi_eni = 2,
.fdiv = (193 + 203) / 2,
.fdiv_32 = (215 + 225) / 2,
.fdiv_64 = (220 + 230) / 2,
.ffree = (9 + 16) / 2,
.fadd_i16 = (102 + 137) / 2,
.fadd_i32 = (108 + 143) / 2,
.fcom_i16 = (72 + 86) / 2,
.fcom_i32 = (78 + 91) / 2,
.fdiv_i16 = (224 + 238) / 2,
.fdiv_i32 = (230 + 243) / 2,
.fild_16 = (46 + 54) / 2,
.fild_32 = (50 + 60) / 2,
.fild_64 = (60 + 68) / 2,
.fmul_i16 = (124 + 138) / 2,
.fmul_i32 = (130 + 144) / 2,
.finit = (2 + 8) / 2,
.fist_16 = (80 + 90) / 2,
.fist_32 = (82 + 92) / 2,
.fist_64 = (94 + 105) / 2,
.fld = (17 + 22) / 2,
.fld_32 = (38 + 56) / 2,
.fld_64 = (40 + 60) / 2,
.fld_80 = (53 + 65) / 2,
.fld_z1 = (11 + 21) / 2,
.fld_const = (15 + 24) / 2,
.fldcw = (7 + 14) / 2,
.fldenv = (35 + 45) / 2,
.fmul = (90 + 145) / 2,
.fmul_32 = (110 + 125) / 2,
.fmul_64 = (154 + 168) / 2,
.fnop = (10 + 16) / 2,
.fpatan = (250 + 800) / 2,
.fprem = (15 + 190) / 2,
.fprem1 = 0, /*387+*/
.fptan = (30 + 540) / 2,
.frndint = (16 + 50) / 2,
.frstor = (197 + 207) / 2,
.fsave = (197 + 207) / 2,
.fscale = (32 + 38) / 2,
.fsetpm = (2 + 8) / 2, /*287+*/
.fsin_cos = 0, /*387+*/
.fsincos = 0, /*387+*/
.fsqrt = (180 + 186) / 2,
.fst = (15 + 22) / 2,
.fst_32 = (84 + 90) / 2,
.fst_64 = (96 + 104) / 2,
.fst_80 = (52 + 58) / 2,
.fstcw_sw = (12 + 18) / 2,
.fstenv = (40 + 50) / 2,
.ftst = (38 + 48) / 2,
.fucom = 0, /*387+*/
.fwait = 3,
.fxam = (12 + 23) / 2,
.fxch = (10 + 15) / 2,
.fxtract = (27 + 55) / 2,
.fyl2x = (900 + 1100) / 2,
.fyl2xp1 = (700 + 1000) / 2
};
const x87_timings_t x87_timings_387 = {
.f2xm1 = (211 + 476) / 2,
.fabs = 22,
.fadd = (23 + 34) / 2,
.fadd_32 = (24 + 32) / 2,
.fadd_64 = (29 + 37) / 2,
.fbld = (266 + 275) / 2,
.fbstp = (512 + 534) / 2,
.fchs = (24 + 25) / 2,
.fclex = 11,
.fcom = 24,
.fcom_32 = 26,
.fcom_64 = 31,
.fcos = (122 + 772) / 2,
.fincdecstp = 22,
.fdisi_eni = 2,
.fdiv = (88 + 91) / 2,
.fdiv_32 = 89,
.fdiv_64 = 94,
.ffree = 18,
.fadd_i16 = (71 + 85) / 2,
.fadd_i32 = (57 + 72) / 2,
.fcom_i16 = (71 + 75) / 2,
.fcom_i32 = (56 + 63) / 2,
.fdiv_i16 = (136 + 140) / 2,
.fdiv_i32 = (120 + 127) / 2,
.fild_16 = (61 + 65) / 2,
.fild_32 = (45 + 52) / 2,
.fild_64 = (56 + 67) / 2,
.fmul_i16 = (76 + 87) / 2,
.fmul_i32 = (61 + 82) / 2,
.finit = 33,
.fist_16 = (82 + 95) / 2,
.fist_32 = (79 + 93) / 2,
.fist_64 = (80 + 97) / 2,
.fld = 14,
.fld_32 = 20,
.fld_64 = 25,
.fld_80 = 44,
.fld_z1 = (20 + 24) / 2,
.fld_const = 40,
.fldcw = 19,
.fldenv = 71,
.fmul = (29 + 57) / 2,
.fmul_32 = (27 + 35) / 2,
.fmul_64 = (32 + 57) / 2,
.fnop = 12,
.fpatan = (314 + 487) / 2,
.fprem = (74 + 155) / 2,
.fprem1 = (95 + 185) / 2,
.fptan = (191 + 497) / 2,
.frndint = (66 + 80) / 2,
.frstor = 308,
.fsave = 375,
.fscale = (67 + 86) / 2,
.fsetpm = 12,
.fsin_cos = (122 + 771) / 2,
.fsincos = (194 + 809) / 2,
.fsqrt = (122 + 129) / 2,
.fst = 11,
.fst_32 = 44,
.fst_64 = 45,
.fst_80 = 53,
.fstcw_sw = 15,
.fstenv = 103,
.ftst = 28,
.fucom = 24,
.fwait = 6,
.fxam = (30 + 38) / 2,
.fxch = 18,
.fxtract = (70 + 76) / 2,
.fyl2x = (120 + 538) / 2,
.fyl2xp1 = (257 + 547) / 2
};
const x87_timings_t x87_timings_486 = {
.f2xm1 = (140 + 270) / 2,
.fabs = 3,
.fadd = (8 + 20) / 2,
.fadd_32 = (8 + 20) / 2,
.fadd_64 = (8 + 20) / 2,
.fbld = (70 + 103) / 2,
.fbstp = (172 + 176) / 2,
.fchs = 6,
.fclex = 7,
.fcom = 4,
.fcom_32 = 4,
.fcom_64 = 4,
.fcos = (257 + 354) / 2,
.fincdecstp = 3,
.fdisi_eni = 3,
.fdiv = 73,
.fdiv_32 = 73,
.fdiv_64 = 73,
.ffree = 3,
.fadd_i16 = (20 + 35) / 2,
.fadd_i32 = (19 + 32) / 2,
.fcom_i16 = (16 + 20) / 2,
.fcom_i32 = (15 + 17) / 2,
.fdiv_i16 = (85 + 89) / 2,
.fdiv_i32 = (84 + 86) / 2,
.fild_16 = (13 + 16) / 2,
.fild_32 = (9 + 12) / 2,
.fild_64 = (10 + 18) / 2,
.fmul_i16 = (23 + 27) / 2,
.fmul_i32 = (22 + 24) / 2,
.finit = 17,
.fist_16 = (29 + 34) / 2,
.fist_32 = (28 + 34) / 2,
.fist_64 = (29 + 34) / 2,
.fld = 4,
.fld_32 = 3,
.fld_64 = 3,
.fld_80 = 6,
.fld_z1 = 4,
.fld_const = 8,
.fldcw = 4,
.fldenv = 34,
.fmul = 16,
.fmul_32 = 11,
.fmul_64 = 14,
.fnop = 3,
.fpatan = (218 + 303) / 2,
.fprem = (70 + 138) / 2,
.fprem1 = (72 + 167) / 2,
.fptan = (200 + 273) / 2,
.frndint = (21 + 30) / 2,
.frstor = 120,
.fsave = 143,
.fscale = (30 + 32) / 2,
.fsetpm = 3,
.fsin_cos = (257 + 354) / 2,
.fsincos = (292 + 365) / 2,
.fsqrt = (83 + 87) / 2,
.fst = 3,
.fst_32 = 7,
.fst_64 = 8,
.fst_80 = 6,
.fstcw_sw = 3,
.fstenv = 56,
.ftst = 4,
.fucom = 4,
.fwait = (1 + 3) / 2,
.fxam = 8,
.fxch = 4,
.fxtract = (16 + 20) / 2,
.fyl2x = (196 + 329) / 2,
.fyl2xp1 = (171 + 326) / 2
};
/* this should be used for FPUs with no concurrency.
some pre-486DX Cyrix FPUs reportedly are like this. */
const x87_timings_t x87_concurrency_none = {
.f2xm1 = 0,
.fabs = 0,
.fadd = 0,
.fadd_32 = 0,
.fadd_64 = 0,
.fbld = 0,
.fbstp = 0,
.fchs = 0,
.fclex = 0,
.fcom = 0,
.fcom_32 = 0,
.fcom_64 = 0,
.fcos = 0,
.fincdecstp = 0,
.fdisi_eni = 0,
.fdiv = 0,
.fdiv_32 = 0,
.fdiv_64 = 0,
.ffree = 0,
.fadd_i16 = 0,
.fadd_i32 = 0,
.fcom_i16 = 0,
.fcom_i32 = 0,
.fdiv_i16 = 0,
.fdiv_i32 = 0,
.fild_16 = 0,
.fild_32 = 0,
.fild_64 = 0,
.fmul_i16 = 0,
.fmul_i32 = 0,
.finit = 0,
.fist_16 = 0,
.fist_32 = 0,
.fist_64 = 0,
.fld = 0,
.fld_32 = 0,
.fld_64 = 0,
.fld_80 = 0,
.fld_z1 = 0,
.fld_const = 0,
.fldcw = 0,
.fldenv = 0,
.fmul = 0,
.fmul_32 = 0,
.fmul_64 = 0,
.fnop = 0,
.fpatan = 0,
.fprem = 0,
.fprem1 = 0,
.fptan = 0,
.frndint = 0,
.frstor = 0,
.fsave = 0,
.fscale = 0,
.fsetpm = 0,
.fsin_cos = 0,
.fsincos = 0,
.fsqrt = 0,
.fst = 0,
.fst_32 = 0,
.fst_64 = 0,
.fst_80 = 0,
.fstcw_sw = 0,
.fstenv = 0,
.ftst = 0,
.fucom = 0,
.fwait = 0,
.fxam = 0,
.fxch = 0,
.fxtract = 0,
.fyl2x = 0,
.fyl2xp1 = 0,
};
const x87_timings_t x87_concurrency_486 = {
.f2xm1 = 2,
.fabs = 0,
.fadd = 7,
.fadd_32 = 7,
.fadd_64 = 7,
.fbld = 8,
.fbstp = 0,
.fchs = 0,
.fclex = 0,
.fcom = 1,
.fcom_32 = 1,
.fcom_64 = 1,
.fcos = 2,
.fincdecstp = 0,
.fdisi_eni = 0,
.fdiv = 70,
.fdiv_32 = 70,
.fdiv_64 = 70,
.ffree = 0,
.fadd_i16 = 7,
.fadd_i32 = 7,
.fcom_i16 = 1,
.fcom_i32 = 1,
.fdiv_i16 = 70,
.fdiv_i32 = 70,
.fild_16 = 4,
.fild_32 = 4,
.fild_64 = 8,
.fmul_i16 = 8,
.fmul_i32 = 8,
.finit = 0,
.fist_16 = 0,
.fist_32 = 0,
.fist_64 = 0,
.fld = 0,
.fld_32 = 0,
.fld_64 = 0,
.fld_80 = 0,
.fld_z1 = 0,
.fld_const = 2,
.fldcw = 0,
.fldenv = 0,
.fmul = 13,
.fmul_32 = 8,
.fmul_64 = 11,
.fnop = 0,
.fpatan = 5,
.fprem = 2,
.fprem1 = 6,
.fptan = 70,
.frndint = 0,
.frstor = 0,
.fsave = 0,
.fscale = 2,
.fsetpm = 0,
.fsin_cos = 2,
.fsincos = 2,
.fsqrt = 70,
.fst = 0,
.fst_32 = 0,
.fst_64 = 0,
.fst_80 = 0,
.fstcw_sw = 0,
.fstenv = 0,
.ftst = 1,
.fucom = 1,
.fwait = 0,
.fxam = 0,
.fxch = 0,
.fxtract = 4,
.fyl2x = 13,
.fyl2xp1 = 13,
};
``` | /content/code_sandbox/src/cpu/x87_timings.c | c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 7,196 |
```c
/*
* 86Box A hypervisor and IBM PC system emulator that specializes in
* running old operating systems and software designed for IBM
* PC systems and compatibles from 1981 through fairly recent
* system designs based on the PCI bus.
*
* This file is part of the 86Box distribution.
*
* Functions common to all emulated x86 CPU's.
*
 * Authors: Andrew Jenner,
* Miran Grca, <mgrca8@gmail.com>
*
*/
#include <math.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <wchar.h>
#define HAVE_STDARG_H
#include <86box/86box.h>
#include "cpu.h"
#include "x86.h"
#include "x86seg_common.h"
#include "x86seg.h"
#include <86box/machine.h>
#include <86box/device.h>
#include <86box/dma.h>
#include <86box/io.h>
#include <86box/keyboard.h>
#include <86box/mem.h>
#include <86box/rom.h>
#include <86box/nmi.h>
#include <86box/pic.h>
#include <86box/pci.h>
#include <86box/ppi.h>
#include <86box/timer.h>
#include <86box/video.h>
#include <86box/vid_svga.h>
/* The opcode of the instruction currently being executed. */
uint8_t opcode;
/* The tables to speed up the setting of the Z, N, and P cpu_state.flags. */
uint8_t znptable8[256];
uint16_t znptable16[65536];
/* A 16-bit zero, needed because some speed-up arrays contain pointers to it. */
uint16_t zero = 0;
/* MOD and R/M stuff. */
uint16_t *mod1add[2][8];
uint32_t *mod1seg[8];
uint32_t rmdat;
/* XT CPU multiplier. */
uint64_t xt_cpu_multi;
/* Variables for handling the non-maskable interrupts. */
int nmi = 0;
int nmi_auto_clear = 0;
/* Was the CPU ever reset? */
int x86_was_reset = 0;
int soft_reset_pci = 0;
/* Is the TRAP flag on? */
int trap = 0;
/* The current effective address's segment. */
uint32_t easeg;
/* This is for the OPTI 283 special reset handling mode. */
int reset_on_hlt;
int hlt_reset_pending;
int fpu_cycles = 0;
int in_lock = 0;
#ifdef ENABLE_X86_LOG
#if 0
void dumpregs(int);
#endif
int x86_do_log = ENABLE_X86_LOG;
int indump = 0;
/* Write a formatted message to the emulator log, but only while
   runtime x86 logging (x86_do_log) is enabled. */
static void
x86_log(const char *fmt, ...)
{
    va_list args;

    if (!x86_do_log)
        return;

    va_start(args, fmt);
    pclog_ex(fmt, args);
    va_end(args);
}
#if 0
/*
 * Debug helper: dump the complete CPU register state to the log.
 *
 * force = 1 dumps unconditionally; otherwise the dump only happens when
 * dump_on_exit is set.  The indump flag guards against re-entry while a
 * dump is already in progress.
 */
void
dumpregs(int force)
{
    int c;
    char *seg_names[4] = { "ES", "CS", "SS", "DS" };

    /* Only dump when needed, and only once.. */
    if (indump || (!force && !dump_on_exit))
        return;
    /* Fix: indump was never set, so the re-entry guard above could not work. */
    indump = 1;

    x86_log("EIP=%08X CS=%04X DS=%04X ES=%04X SS=%04X FLAGS=%04X\n",
            cpu_state.pc, CS, DS, ES, SS, cpu_state.flags);
    /* Fix: this called the non-existent x85_log(), a typo for x86_log(). */
    x86_log("Old CS:EIP: %04X:%08X; %i ins\n", oldcs, cpu_state.oldpc, ins);
    /* The four always-present segment registers. */
    for (c = 0; c < 4; c++) {
        x86_log("%s : base=%06X limit=%08X access=%02X limit_low=%08X limit_high=%08X\n",
                seg_names[c], _opseg[c]->base, _opseg[c]->limit,
                _opseg[c]->access, _opseg[c]->limit_low, _opseg[c]->limit_high);
    }
    if (is386) {
        /* FS/GS, descriptor tables and 32-bit registers only exist on the 386+. */
        x86_log("FS : base=%06X limit=%08X access=%02X limit_low=%08X limit_high=%08X\n",
                seg_fs, cpu_state.seg_fs.limit, cpu_state.seg_fs.access, cpu_state.seg_fs.limit_low,
                cpu_state.seg_fs.limit_high);
        x86_log("GS : base=%06X limit=%08X access=%02X limit_low=%08X limit_high=%08X\n",
                gs, cpu_state.seg_gs.limit, cpu_state.seg_gs.access, cpu_state.seg_gs.limit_low,
                cpu_state.seg_gs.limit_high);
        x86_log("GDT : base=%06X limit=%04X\n", gdt.base, gdt.limit);
        x86_log("LDT : base=%06X limit=%04X\n", ldt.base, ldt.limit);
        x86_log("IDT : base=%06X limit=%04X\n", idt.base, idt.limit);
        x86_log("TR : base=%06X limit=%04X\n", tr.base, tr.limit);
        x86_log("386 in %s mode: %i-bit data, %-i-bit stack\n",
                (msw & 1) ? ((cpu_state.eflags & VM_FLAG) ? "V86" : "protected") : "real",
                (use32) ? 32 : 16, (stack32) ? 32 : 16);
        x86_log("CR0=%08X CR2=%08X CR3=%08X CR4=%08x\n", cr0, cr2, cr3, cr4);
        x86_log("EAX=%08X EBX=%08X ECX=%08X EDX=%08X\nEDI=%08X ESI=%08X EBP=%08X ESP=%08X\n",
                EAX, EBX, ECX, EDX, EDI, ESI, EBP, ESP);
    } else {
        x86_log("808x/286 in %s mode\n", (msw & 1) ? "protected" : "real");
        x86_log("AX=%04X BX=%04X CX=%04X DX=%04X DI=%04X SI=%04X BP=%04X SP=%04X\n",
                AX, BX, CX, DX, DI, SI, BP, SP);
    }
    x86_log("Entries in readlookup : %i writelookup : %i\n", readlnum, writelnum);
    x87_dumpregs();

    indump = 0;
}
#endif
#else
# define x86_log(fmt, ...)
#endif
/* Preparation of the various arrays needed to speed up the MOD and R/M work. */
static void
makemod1table(void)
{
mod1add[0][0] = &BX;
mod1add[0][1] = &BX;
mod1add[0][2] = &BP;
mod1add[0][3] = &BP;
mod1add[0][4] = &SI;
mod1add[0][5] = &DI;
mod1add[0][6] = &BP;
mod1add[0][7] = &BX;
mod1add[1][0] = &SI;
mod1add[1][1] = &DI;
mod1add[1][2] = &SI;
mod1add[1][3] = &DI;
mod1add[1][4] = &zero;
mod1add[1][5] = &zero;
mod1add[1][6] = &zero;
mod1add[1][7] = &zero;
mod1seg[0] = &ds;
mod1seg[1] = &ds;
mod1seg[2] = &ss;
mod1seg[3] = &ss;
mod1seg[4] = &ds;
mod1seg[5] = &ds;
mod1seg[6] = &ss;
mod1seg[7] = &ds;
}
/* Prepare the ZNP table needed to speed up the setting of the Z, N, and P cpu_state.flags. */
/* Precompute the Z, N and P flag values for every 8-bit and 16-bit result. */
static void
makeznptable(void)
{
    /* 8-bit table. */
    for (int value = 0; value < 256; value++) {
        int ones = 0;

        for (int bit = 0; bit < 8; bit++)
            ones += (value >> bit) & 1;

        /* P is set when the population count of the low byte is even. */
        znptable8[value] = (ones & 1) ? 0 : P_FLAG;
#ifdef ENABLE_X86_LOG
        if (value == 0xb1)
            x86_log("znp8 b1 = %i %02X\n", ones, znptable8[value]);
#endif
        if (value == 0)
            znptable8[value] |= Z_FLAG;
        if (value & 0x80)
            znptable8[value] |= N_FLAG;
    }

    /* 16-bit table. */
    for (int value = 0; value < 65536; value++) {
        int ones = 0;

        /* x86 parity only ever reflects the low 8 bits, even for word results. */
        for (int bit = 0; bit < 8; bit++)
            ones += (value >> bit) & 1;

        znptable16[value] = (ones & 1) ? 0 : P_FLAG;
#ifdef ENABLE_X86_LOG
        if (value == 0xb1)
            x86_log("znp16 b1 = %i %02X\n", ones, znptable16[value]);
        if (value == 0x65b1)
            x86_log("znp16 65b1 = %i %02X\n", ones, znptable16[value]);
#endif
        if (value == 0)
            znptable16[value] |= Z_FLAG;
        if (value & 0x8000)
            znptable16[value] |= N_FLAG;
    }
}
/* Common reset function. */
/*
 * Core of both hard and soft CPU reset.
 *
 * hard = 1: power-on style reset - also rebuilds the lookup tables and
 *           resets RAM mask, A20 state, SMBASE, PPI and keyboard controller.
 * hard = 0: soft reset - only architectural CPU state is reset and the
 *           MMU cache is flushed.
 *
 * NOTE: the statements below run in a deliberate order (device resets,
 * segment reload, table rebuilds); do not reorder.
 */
static void
reset_common(int hard)
{
#ifdef ENABLE_X86_LOG
    if (hard)
        x86_log("x86 reset\n");
#endif
    /* OPTI 283 special reset handling: the first soft reset request only
       arms hlt_reset_pending; the second one is allowed through. */
    if (!hard && reset_on_hlt) {
        hlt_reset_pending++;
        if (hlt_reset_pending == 2)
            hlt_reset_pending = 0;
        else
            return;
    }
    /* Make sure to gracefully leave SMM. */
    if (in_smm)
        leave_smm();
    /* Needed for the ALi M1533. */
    if (is486 && (hard || soft_reset_pci)) {
        pci_reset();
        if (!hard && soft_reset_pci) {
            dma_reset();
            /* TODO: Hack, but will do for time being, because all AT machines currently are 286+,
               and vice-versa. */
            dma_set_at(is286);
            device_reset_all(DEVICE_ALL);
        }
    }
    /* Back to 16-bit real-mode defaults. */
    use32 = 0;
    cpu_cur_status = 0;
    stack32 = 0;
    msr.fcr = (1 << 8) | (1 << 9) | (1 << 12) | (1 << 16) | (1 << 19) | (1 << 21);
    msw = 0;
    /* CR0.CD (bit 30) starts set on CPUs with an internal cache. */
    if (hascache)
        cr0 = 1 << 30;
    else
        cr0 = 0;
    /* 386 with a 387 FPU: set CR0.ET (extension type). */
    if (is386 && !is486 && (fpu_type == FPU_387))
        cr0 |= 0x10;
    cpu_cache_int_enabled = 0;
    cpu_update_waitstates();
    cr4 = 0;
    cpu_state.eflags = 0;
    cgate32 = 0;
    /* Debug registers get their architectural reset values. */
#ifdef USE_DEBUG_REGS_486
    if (is386) {
#else
    if (is386 && !is486) {
#endif
        for (uint8_t i = 0; i < 4; i++)
            dr[i] = 0x00000000;
        dr[6] = 0xffff1ff0;
        dr[7] = 0x00000400;
    }
    if (is286) {
        /* Restart at the architectural reset vector F000:FFF0. */
        if (is486)
            loadcs(0xF000);
        else
            loadcs_2386(0xF000);
        cpu_state.pc = 0xFFF0;
        if (hard) {
            rammask = cpu_16bitbus ? 0xFFFFFF : 0xFFFFFFFF;
            if (is6117)
                rammask |= 0x03000000;
            mem_a20_key = mem_a20_alt = mem_a20_state = 0;
        }
    }
    idt.base = 0;
    cpu_state.flags = 2;
    trap = 0;
    idt.limit = is386 ? 0x03ff : 0xffff;
    if (is386 || hard)
        EAX = EBX = ECX = EDX = ESI = EDI = EBP = ESP = 0;
    if (hard) {
        /* Rebuild the speed-up tables only on a hard reset. */
        makeznptable();
        resetreadlookup();
        makemod1table();
        cpu_set_edx();
        mmu_perm = 4;
    }
    x86seg_reset();
#ifdef USE_DYNAREC
    if (hard)
        codegen_reset();
#endif
    if (!hard)
        flushmmucache();
    x86_was_reset = 1;
    cpu_alt_reset = 0;
    cpu_ven_reset();
    /* Clear all SMM/SMI-related state. */
    in_smm = smi_latched = 0;
    smi_line = smm_in_hlt = 0;
    smi_block = 0;
    if (hard) {
        if (is486)
            smbase = is_am486dxl ? 0x00060000 : 0x00030000;
        ppi_reset();
    }
    in_sys = 0;
    shadowbios = shadowbios_write = 0;
    alt_access = cpu_end_block_after_ins = 0;
    if (hard) {
        reset_on_hlt = hlt_reset_pending = 0;
        cache_index = 0;
        memset(_tr, 0x00, sizeof(_tr));
        memset(_cache, 0x00, sizeof(_cache));
        /* If we have an AT or PS/2 keyboard controller, make sure the A20 state
           is correct. */
        device_reset_all(DEVICE_KBC);
    } else
        device_reset_all(DEVICE_SOFTRESET);
    if (!is286)
        reset_808x(hard);
    in_lock = 0;
    cpu_cpurst_on_sr = 0;
}
/* Hard reset. */
/* Hard (power-on) CPU reset: runs the common reset path in hard mode,
   then re-enables soft resets by clearing the mask. */
void
resetx86(void)
{
    reset_common(1);
    soft_reset_mask = 0;
}
/* Soft reset. */
/* Soft CPU reset: ignored entirely while soft resets are masked.
   Re-enables standard VGA output if an IBM 8514/A or XGA was active,
   then runs the common reset path in soft mode. */
void
softresetx86(void)
{
    if (soft_reset_mask)
        return;

    if (ibm8514_active || xga_active)
        vga_on = 1;
    reset_common(0);
}
/* Actual hard reset. */
/* Full machine hard reset: resets DMA and every device, clears the
   alternate reset/A20 state and MMU cache, then hard-resets the CPU.
   The call order is deliberate; do not reorder. */
void
hardresetx86(void)
{
    dma_reset();
    /* TODO: Hack, but will do for time being, because all AT machines currently are 286+,
       and vice-versa. */
    dma_set_at(is286);
    device_reset_all(DEVICE_ALL);
    cpu_alt_reset = 0;
    /* Drop the alternate A20 gate state and recompute the address mask. */
    mem_a20_alt = 0;
    mem_a20_recalc();
    flushmmucache();
    resetx86();
}
``` | /content/code_sandbox/src/cpu/x86.c | c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 3,403 |
```c
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <wchar.h>
#define fplog 0
#include <math.h>
#define HAVE_STDARG_H
#include <86box/86box.h>
#include "cpu.h"
#include <86box/mem.h>
#include <86box/pic.h>
#include "x86.h"
#include "x86_flags.h"
#include "x86_ops.h"
#include "x86seg_common.h"
#include "x87_sf.h"
#include "x87.h"
#include "386_common.h"
#include "softfloat3e/config.h"
#include "softfloat3e/fpu_trans.h"
#include "softfloat3e/specialize.h"
uint32_t x87_pc_off;
uint32_t x87_op_off;
uint16_t x87_pc_seg;
uint16_t x87_op_seg;
#ifdef ENABLE_FPU_X87_LOG
int fpu_x87_do_log = ENABLE_FPU_X87_LOG;
/* Write a formatted message to the emulator log, but only while
   runtime x87 FPU logging (fpu_x87_do_log) is enabled. */
void
fpu_x87_log(const char *fmt, ...)
{
    va_list args;

    if (!fpu_x87_do_log)
        return;

    va_start(args, fmt);
    pclog_ex(fmt, args);
    va_end(args);
}
#else
# define fpu_x87_log(fmt, ...)
#endif
#ifdef USE_NEW_DYNAREC
/* Pack the internal per-register tags into the architectural x87 tag word
   (two bits per register, register 0 in the low bits). */
uint16_t
x87_gettag(void)
{
    uint16_t packed = 0;

    for (int reg = 7; reg >= 0; reg--) {
        uint16_t field;

        if (cpu_state.tag[reg] == TAG_EMPTY)
            field = X87_TAG_EMPTY;
        else if (cpu_state.tag[reg] & TAG_UINT64)
            field = 2;
        else if ((cpu_state.ST[reg] == 0.0) && !cpu_state.ismmx)
            field = X87_TAG_ZERO;
        else
            field = X87_TAG_VALID;

        packed = (uint16_t) ((packed << 2) | field);
    }

    return packed;
}
/* Unpack an architectural x87 tag word into the internal per-register tags. */
void
x87_settag(uint16_t new_tag)
{
    for (int reg = 0; reg < 8; reg++) {
        int field = (new_tag >> (reg * 2)) & 3;

        if (field == X87_TAG_EMPTY)
            cpu_state.tag[reg] = TAG_EMPTY;
        else if (field == 2)
            cpu_state.tag[reg] = TAG_VALID | TAG_UINT64;
        else
            cpu_state.tag[reg] = TAG_VALID;
    }
}
#else
/* Build the architectural 16-bit x87 tag word from the internal tag array;
   integer-valued registers (TAG_UINT64) are reported with encoding 2. */
uint16_t
x87_gettag(void)
{
    uint16_t tag_word = 0;

    for (int i = 0; i < 8; i++) {
        const uint16_t entry = (cpu_state.tag[i] & TAG_UINT64) ? 2 : cpu_state.tag[i];

        tag_word |= (uint16_t) (entry << (i * 2));
    }

    return tag_word;
}
/* Unpack an architectural x87 tag word into the internal tag array,
   two bits per register. */
void
x87_settag(uint16_t new_tag)
{
    for (int i = 0; i < 8; i++)
        cpu_state.tag[i] = (new_tag >> (i * 2)) & 3;
}
#endif
/* Resolve the NaN result of an 80-bit <op> 32-bit operation per x87 rules:
   raise #IA for any signaling NaN, then pick which NaN propagates (larger
   significand wins on a tie of kinds). Callers guarantee at least one
   operand is a NaN. */
static extFloat80_t
FPU_handle_NaN32_Func(extFloat80_t a, int aIsNaN, float32 b32, int bIsNaN, struct softfloat_status_t *status)
{
    int aIsSignalingNaN = extF80_isSignalingNaN(a);
    int bIsSignalingNaN = f32_isSignalingNaN(b32);
    /* Bitwise | is intentional: both operands are evaluated, no short-circuit. */
    if (aIsSignalingNaN | bIsSignalingNaN)
        softfloat_raiseFlags(status, softfloat_flag_invalid);
    // propagate QNaN to SNaN
    a = softfloat_propagateNaNExtF80UI(a.signExp, a.signif, 0, 0, status);
    if (aIsNaN & !bIsNaN)
        return a;
    // float32 is NaN so conversion will propagate SNaN to QNaN and raise
    // appropriate exception flags
    extFloat80_t b = f32_to_extF80(b32, status);
    if (aIsSignalingNaN) {
        if (bIsSignalingNaN)
            goto returnLargerSignificand;
        return bIsNaN ? b : a;
    } else if (aIsNaN) {
        if (bIsSignalingNaN)
            return a;
returnLargerSignificand:
        /* Both NaNs of equal "severity": prefer the larger significand,
           then the larger sign/exponent field, to pick deterministically. */
        if (a.signif < b.signif)
            return b;
        if (b.signif < a.signif)
            return a;
        return (a.signExp < b.signExp) ? a : b;
    } else {
        return b;
    }
}
/* Check an 80-bit/32-bit operand pair for NaNs or unsupported encodings.
   Returns 1 with *r set to the propagated NaN (or the default QNaN for an
   unsupported 80-bit encoding, which also raises #IA); returns 0 when
   neither operand is a NaN and the caller should compute normally. */
int
FPU_handle_NaN32(extFloat80_t a, float32 b, extFloat80_t *r, struct softfloat_status_t *status)
{
    /* The pattern for a default generated extended double-precision NaN. */
    const floatx80 floatx80_default_nan =
        packFloatx80(0, floatx80_default_nan_exp, floatx80_default_nan_fraction);

    if (extF80_isUnsupported(a)) {
        softfloat_raiseFlags(status, softfloat_flag_invalid);
        *r = floatx80_default_nan;
        return 1;
    }

    int aIsNaN = extF80_isNaN(a);
    int bIsNaN = f32_isNaN(b);
    if (aIsNaN | bIsNaN) {
        *r = FPU_handle_NaN32_Func(a, aIsNaN, b, bIsNaN, status);
        return 1;
    }
    return 0;
}
/* Resolve the NaN result of an 80-bit <op> 64-bit operation per x87 rules:
   raise #IA for any signaling NaN, then pick which NaN propagates (larger
   significand wins on a tie of kinds). Callers guarantee at least one
   operand is a NaN. */
static extFloat80_t
FPU_handle_NaN64_Func(extFloat80_t a, int aIsNaN, float64 b64, int bIsNaN, struct softfloat_status_t *status)
{
    int aIsSignalingNaN = extF80_isSignalingNaN(a);
    int bIsSignalingNaN = f64_isSignalingNaN(b64);
    /* Bitwise | is intentional: both operands are evaluated, no short-circuit. */
    if (aIsSignalingNaN | bIsSignalingNaN)
        softfloat_raiseFlags(status, softfloat_flag_invalid);
    // propagate QNaN to SNaN
    a = softfloat_propagateNaNExtF80UI(a.signExp, a.signif, 0, 0, status);
    if (aIsNaN & !bIsNaN)
        return a;
    // float64 is NaN so conversion will propagate SNaN to QNaN and raise
    // appropriate exception flags
    extFloat80_t b = f64_to_extF80(b64, status);
    if (aIsSignalingNaN) {
        if (bIsSignalingNaN)
            goto returnLargerSignificand;
        return bIsNaN ? b : a;
    } else if (aIsNaN) {
        if (bIsSignalingNaN)
            return a;
returnLargerSignificand:
        /* Both NaNs of equal "severity": prefer the larger significand,
           then the larger sign/exponent field, to pick deterministically. */
        if (a.signif < b.signif)
            return b;
        if (b.signif < a.signif)
            return a;
        return (a.signExp < b.signExp) ? a : b;
    } else {
        return b;
    }
}
/* Check an 80-bit/64-bit operand pair for NaNs or unsupported encodings.
   Returns 1 with *r set to the propagated NaN (or the default QNaN for an
   unsupported 80-bit encoding, which also raises #IA); returns 0 when
   neither operand is a NaN and the caller should compute normally. */
int
FPU_handle_NaN64(extFloat80_t a, float64 b, extFloat80_t *r, struct softfloat_status_t *status)
{
    /* The pattern for a default generated extended double-precision NaN. */
    const extFloat80_t floatx80_default_nan =
        packFloatx80(0, floatx80_default_nan_exp, floatx80_default_nan_fraction);

    if (extF80_isUnsupported(a)) {
        softfloat_raiseFlags(status, softfloat_flag_invalid);
        *r = floatx80_default_nan;
        return 1;
    }

    int aIsNaN = extF80_isNaN(a);
    int bIsNaN = f64_isNaN(b);
    if (aIsNaN | bIsNaN) {
        *r = FPU_handle_NaN64_Func(a, aIsNaN, b, bIsNaN, status);
        return 1;
    }
    return 0;
}
/* Translate an x87 control word into a fresh softfloat status block:
   precision control selects the extF80 rounding precision, RC selects the
   rounding mode, and the low control-word bits become the exception masks. */
struct softfloat_status_t
i387cw_to_softfloat_status_word(uint16_t control_word)
{
    struct softfloat_status_t status;

    switch (control_word & FPU_CW_PC) {
        case FPU_PR_32_BITS:
            status.extF80_roundingPrecision = 32;
            break;
        case FPU_PR_64_BITS:
            status.extF80_roundingPrecision = 64;
            break;
        case FPU_PR_80_BITS:
        default:
            /* With the precision control bits set to 01 "(reserved)", a
               real CPU behaves as if the precision control bits were
               set to 11 "80 bits" */
            status.extF80_roundingPrecision = 80;
            break;
    }

    status.softfloat_exceptionFlags          = 0; /* clear exceptions before execution */
    status.softfloat_roundingMode            = (control_word & FPU_CW_RC) >> 10;
    status.softfloat_flush_underflow_to_zero = 0;
    status.softfloat_suppressException       = 0;
    status.softfloat_exceptionMasks          = control_word & FPU_CW_Exceptions_Mask;
    status.softfloat_denormals_are_zeros     = 0;

    return status;
}
/* Map a softfloat comparison outcome onto x87 condition-code bits
   (C0/C2/C3) as FCOM-family instructions report them; -1 means an
   unrecognized relation (should never happen). */
int
FPU_status_word_flags_fpu_compare(int float_relation)
{
    switch (float_relation) {
        case softfloat_relation_greater:
            return 0;
        case softfloat_relation_less:
            return FPU_SW_C0;
        case softfloat_relation_equal:
            return FPU_SW_C3;
        case softfloat_relation_unordered:
            return (FPU_SW_C0 | FPU_SW_C2 | FPU_SW_C3);
        default:
            break;
    }

    return (-1); // should never get here
}
/* Set CPU arithmetic flags from a softfloat comparison outcome, as the
   FCOMI-family instructions do: ZF/PF/CF for unordered, CF for "less",
   ZF for "equal", no bits for "greater". Only ORs bits in; never clears. */
void
FPU_write_eflags_fpu_compare(int float_relation)
{
    int set_bits = 0;

    switch (float_relation) {
        case softfloat_relation_less:
            set_bits = C_FLAG;
            break;
        case softfloat_relation_equal:
            set_bits = Z_FLAG;
            break;
        case softfloat_relation_unordered:
            set_bits = (Z_FLAG | P_FLAG | C_FLAG);
            break;
        default:
            /* softfloat_relation_greater and anything unexpected: no bits. */
            break;
    }

    cpu_state.flags |= set_bits;
}
/* Record FPU exceptions in the status word and compute which of them are
   unmasked by the control word. Returns the unmasked subset; a non-zero
   return tells the caller to suppress/adjust the result per x87 rules.
   'store' is non-zero when the destination is an FPU stack register (so an
   unmasked over/underflow still delivers the adjusted value). */
uint16_t
FPU_exception(uint32_t fetchdat, uint16_t exceptions, int store)
{
    uint16_t status;
    uint16_t unmasked;
    /* Extract only the bits which we use to set the status word */
    exceptions &= FPU_SW_Exceptions_Mask;
    status = fpu_state.swd;
    unmasked = (exceptions & ~fpu_state.cwd) & FPU_CW_Exceptions_Mask;
    // if IE or DZ exception happen nothing else will be reported
    if (exceptions & (FPU_EX_Invalid | FPU_EX_Zero_Div)) {
        unmasked &= (FPU_EX_Invalid | FPU_EX_Zero_Div);
    }
    /* Set summary bits if exception isn't masked */
    if (unmasked) {
        fpu_state.swd |= (FPU_SW_Summary | FPU_SW_Backward);
    }
    if (exceptions & FPU_EX_Invalid) {
        // FPU_EX_Invalid cannot come with any other exception but x87 stack fault
        fpu_state.swd |= exceptions;
        if (exceptions & FPU_SW_Stack_Fault) {
            if (!(exceptions & FPU_SW_C1)) {
                /* This bit distinguishes over- from underflow for a stack fault,
                   and roundup from round-down for precision loss. */
                fpu_state.swd &= ~FPU_SW_C1;
            }
        }
        return unmasked;
    }
    if (exceptions & FPU_EX_Zero_Div) {
        fpu_state.swd |= FPU_EX_Zero_Div;
        if (!(fpu_state.cwd & FPU_EX_Zero_Div)) {
#ifdef FPU_8087
            /* 8087 signals unmasked exceptions through NMI rather than IRQ 13. */
            if (!(fpu_state.cwd & FPU_SW_Summary)) {
                fpu_state.cwd |= FPU_SW_Summary;
                nmi = 1;
            }
#else
            picint(1 << 13);
#endif // FPU_8087
        }
        return unmasked;
    }
    if (exceptions & FPU_EX_Denormal) {
        fpu_state.swd |= FPU_EX_Denormal;
        if (unmasked & FPU_EX_Denormal) {
            return (unmasked & FPU_EX_Denormal);
        }
    }
    /* Set the corresponding exception bits */
    fpu_state.swd |= exceptions;
    if (exceptions & FPU_EX_Precision) {
        if (!(exceptions & FPU_SW_C1)) {
            /* This bit distinguishes over- from underflow for a stack fault,
               and roundup from round-down for precision loss. */
            fpu_state.swd &= ~FPU_SW_C1;
        }
    }
    // If #P unmasked exception occurred the result still has to be
    // written to the destination.
    unmasked &= ~FPU_EX_Precision;
    if (unmasked & (FPU_EX_Underflow | FPU_EX_Overflow)) {
        // If unmasked over- or underflow occurs and dest is a memory location:
        //   - the TOS and destination operands remain unchanged
        //   - the inexact-result condition is not reported and C1 flag is cleared
        //   - no result is stored in the memory
        // If the destination is in the register stack, adjusted resulting value
        // is stored in the destination operand.
        if (!store)
            unmasked &= ~(FPU_EX_Underflow | FPU_EX_Overflow);
        else {
            fpu_state.swd &= ~FPU_SW_C1;
            if (!(status & FPU_EX_Precision))
                fpu_state.swd &= ~FPU_EX_Precision;
        }
    }
    return unmasked;
}
/* Handle an x87 stack overflow (push onto a non-empty register): when #IA is
   masked, push the default QNaN as the result; always record the stack-fault
   exception (C1 set = overflow direction). */
void
FPU_stack_overflow(uint32_t fetchdat)
{
    /* The pattern for a default generated extended double-precision NaN. */
    const floatx80 floatx80_default_nan =
        packFloatx80(0, floatx80_default_nan_exp, floatx80_default_nan_fraction);

    /* The masked response */
    if (is_IA_masked()) {
        FPU_push();
        FPU_save_regi(floatx80_default_nan, 0);
    }
    FPU_exception(fetchdat, FPU_EX_Stack_Overflow, 0);
}
/* Handle an x87 stack underflow (use of an empty register): when #IA is
   masked, store the default QNaN into ST(stnr) and optionally pop; always
   record the stack-fault exception (C1 clear = underflow direction). */
void
FPU_stack_underflow(uint32_t fetchdat, int stnr, int pop_stack)
{
    /* The pattern for a default generated extended double-precision NaN. */
    const floatx80 floatx80_default_nan =
        packFloatx80(0, floatx80_default_nan_exp, floatx80_default_nan_fraction);

    /* The masked response */
    if (is_IA_masked()) {
        FPU_save_regi(floatx80_default_nan, stnr);
        if (pop_stack)
            FPU_pop();
    }
    FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0);
}
/* -----------------------------------------------------------
* Slimmed down version used to compile against a CPU simulator
* rather than a kernel (ported by Kevin Lawton)
* ------------------------------------------------------------ */
/* Classify an 80-bit register image for the x87 tag word: zero, valid,
   or invalid/special (denormals, pseudo-denormals, infinities, NaNs and
   unsupported encodings all tag as "special"). */
int
FPU_tagof(const extFloat80_t reg)
{
    const int32_t exponent = extF80_exp(reg);

    /* Exponent all zeroes: true zero, or a (pseudo-)denormal. */
    if (exponent == 0)
        return extF80_fraction(reg) ? X87_TAG_INVALID : X87_TAG_ZERO;

    /* Exponent all ones: infinity, NaN, or an unsupported encoding. */
    if (exponent == 0x7fff)
        return X87_TAG_INVALID;

    /* Valid numbers carry an explicit integer bit of 1. */
    if (!(reg.signif & BX_CONST64(0x8000000000000000)))
        return X87_TAG_INVALID;

    return X87_TAG_VALID;
}
/* Compress the 16-bit x87 tag word into the FXSAVE 8-bit abridged form:
   one bit per register, set when the register's 2-bit tag is not
   "empty" (11b). */
uint8_t
pack_FPU_TW(uint16_t twd)
{
    uint8_t tag_byte = 0;

    for (int i = 0; i < 8; i++) {
        if (((twd >> (i * 2)) & 0x3) != 0x3)
            tag_byte |= (uint8_t) (1 << i);
    }

    return tag_byte;
}
/* Expand the FXRSTOR 8-bit abridged tag byte back into the full 16-bit x87
   tag word: empty registers tag as 11b, non-empty ones are re-classified
   from the stored 80-bit register image via FPU_tagof(). */
uint16_t
unpack_FPU_TW(uint16_t tag_byte)
{
    uint32_t twd = 0;
    /* FTW
     *
     * Note that the original format for FTW can be recreated from the stored
     * FTW valid bits and the stored 80-bit FP data (assuming the stored data
     * was not the contents of MMX registers) using the following table:
     *
     *   | Exponent | Exponent | Fraction | J,M bits | FTW valid | x87 FTW |
     *   |  all 1s  |  all 0s  |  all 0s  |          |           |         |
     *   +----------+----------+----------+----------+-----------+---------+
     *   |    0     |    0     |    0     |    0x    |     1     | S   10  |
     *   |    0     |    0     |    0     |    1x    |     1     | V   00  |
     *   +----------+----------+----------+----------+-----------+---------+
     *   |    0     |    0     |    1     |    00    |     1     | S   10  |
     *   |    0     |    0     |    1     |    10    |     1     | V   00  |
     *   +----------+----------+----------+----------+-----------+---------+
     *   |    0     |    1     |    0     |    0x    |     1     | S   10  |
     *   |    0     |    1     |    0     |    1x    |     1     | S   10  |
     *   +----------+----------+----------+----------+-----------+---------+
     *   |    0     |    1     |    1     |    00    |     1     | Z   01  |
     *   |    0     |    1     |    1     |    10    |     1     | S   10  |
     *   +----------+----------+----------+----------+-----------+---------+
     *   |    1     |    0     |    0     |    0x    |     1     | S   10  |
     *   |    1     |    0     |    0     |    1x    |     1     | S   10  |
     *   +----------+----------+----------+----------+-----------+---------+
     *   |    1     |    0     |    1     |    00    |     1     | S   10  |
     *   |    1     |    0     |    1     |    10    |     1     | S   10  |
     *   +----------+----------+----------+----------+-----------+---------+
     *   |       all combinations above   |          |     0     | E   11  |
     *
     * The J-bit is defined to be the 1-bit binary integer to the left of
     * the decimal place in the significand.
     *
     * The M-bit is defined to be the most significant bit of the fractional
     * portion of the significand (i.e., the bit immediately to the right of
     * the decimal place). When the M-bit is the most significant bit of the
     * fractional portion of the significand, it must be 0 if the fraction
     * is all 0's.
     */
    /* Walk registers 7..0; each pass shifts the accumulator left one tag
       slot and consumes the next valid bit from the top of tag_byte. */
    for (int index = 7; index >= 0; index--, twd <<= 2, tag_byte <<= 1) {
        if (tag_byte & 0x80)
            twd |= FPU_tagof(fpu_state.st_space[index & 7]);
        else
            twd |= X87_TAG_EMPTY;
    }
    return (twd >> 2);
}
#ifdef ENABLE_FPU_X87_LOG
/* Debug helper: dump the x87 register stack (or the MMX registers when the
   FPU is in MMX mode) plus status/control/tag words to the x87 log. */
void
x87_dumpregs(void)
{
    if (cpu_state.ismmx) {
        fpu_x87_log("MM0=%016llX\tMM1=%016llX\tMM2=%016llX\tMM3=%016llX\n", cpu_state.MM[0].q, cpu_state.MM[1].q, cpu_state.MM[2].q, cpu_state.MM[3].q);
        fpu_x87_log("MM4=%016llX\tMM5=%016llX\tMM6=%016llX\tMM7=%016llX\n", cpu_state.MM[4].q, cpu_state.MM[5].q, cpu_state.MM[6].q, cpu_state.MM[7].q);
    } else {
        /* Registers are printed in stack order, relative to TOP. */
        fpu_x87_log("ST(0)=%f\tST(1)=%f\tST(2)=%f\tST(3)=%f\t\n", cpu_state.ST[cpu_state.TOP], cpu_state.ST[(cpu_state.TOP + 1) & 7], cpu_state.ST[(cpu_state.TOP + 2) & 7], cpu_state.ST[(cpu_state.TOP + 3) & 7]);
        fpu_x87_log("ST(4)=%f\tST(5)=%f\tST(6)=%f\tST(7)=%f\t\n", cpu_state.ST[(cpu_state.TOP + 4) & 7], cpu_state.ST[(cpu_state.TOP + 5) & 7], cpu_state.ST[(cpu_state.TOP + 6) & 7], cpu_state.ST[(cpu_state.TOP + 7) & 7]);
    }
    fpu_x87_log("Status = %04X Control = %04X Tag = %04X\n", cpu_state.npxs, cpu_state.npxc, x87_gettag());
}
#endif
``` | /content/code_sandbox/src/cpu/x87.c | c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 5,072 |
```objective-c
/* Write the x87 environment (control/status/tag words plus instruction and
   data pointers) to memory at cpu_state.eaaddr, in the layout selected by
   CPU mode (cr0 PE bit) and operand size (op32). Returns the address just
   past the environment, where FNSAVE appends the register images. */
static uint32_t
fpu_save_environment(void)
{
    int tag;
    unsigned offset = 0;
    /* read all registers in stack order and update x87 tag word */
    for (int n = 0; n < 8; n++) {
        // update tag only if it is not empty
        if (!IS_TAG_EMPTY(n)) {
            tag = FPU_tagof(FPU_read_regi(n));
            FPU_settagi(tag, n);
        }
    }
    /* Fold the current top-of-stack into bits 13-11 of the status word. */
    fpu_state.swd = (fpu_state.swd & ~(7 << 11)) | ((fpu_state.tos & 7) << 11);
    /* Selector: bit 0 = protected mode (CR0.PE), bit 8 = 32-bit operand size. */
    switch ((cr0 & 1) | (cpu_state.op32 & 0x100)) {
        case 0x000:
            { /*16-bit real mode*/
                uint16_t tmp;
                uint32_t fp_ip;
                uint32_t fp_dp;
                /* Real mode stores linear 20-bit pointers rebuilt from seg:off. */
                fp_ip = ((uint32_t) (fpu_state.fcs << 4)) | fpu_state.fip;
                fp_dp = ((uint32_t) (fpu_state.fds << 4)) | fpu_state.fdp;
                tmp = i387_get_control_word();
                writememw(easeg, cpu_state.eaaddr + 0x00, tmp);
                tmp = i387_get_status_word();
                writememw(easeg, cpu_state.eaaddr + 0x02, tmp);
                tmp = fpu_state.tag;
                writememw(easeg, cpu_state.eaaddr + 0x04, tmp);
                tmp = fp_ip & 0xffff;
                writememw(easeg, cpu_state.eaaddr + 0x06, tmp);
                tmp = (uint16_t) ((fp_ip & 0xf0000) >> 4) | fpu_state.foo;
                writememw(easeg, cpu_state.eaaddr + 0x08, tmp);
                tmp = fp_dp & 0xffff;
                writememw(easeg, cpu_state.eaaddr + 0x0a, tmp);
                tmp = (uint16_t) ((fp_dp & 0xf0000) >> 4);
                writememw(easeg, cpu_state.eaaddr + 0x0c, tmp);
                offset = 0x0e;
            }
            break;
        case 0x001:
            { /*16-bit protected mode*/
                uint16_t tmp;
                tmp = i387_get_control_word();
                writememw(easeg, cpu_state.eaaddr + 0x00, tmp);
                tmp = i387_get_status_word();
                writememw(easeg, cpu_state.eaaddr + 0x02, tmp);
                tmp = fpu_state.tag;
                writememw(easeg, cpu_state.eaaddr + 0x04, tmp);
                tmp = (uint16_t) (fpu_state.fip) & 0xffff;
                writememw(easeg, cpu_state.eaaddr + 0x06, tmp);
                tmp = fpu_state.fcs;
                writememw(easeg, cpu_state.eaaddr + 0x08, tmp);
                tmp = (uint16_t) (fpu_state.fdp) & 0xffff;
                writememw(easeg, cpu_state.eaaddr + 0x0a, tmp);
                tmp = fpu_state.fds;
                writememw(easeg, cpu_state.eaaddr + 0x0c, tmp);
                offset = 0x0e;
            }
            break;
        case 0x100:
            { /*32-bit real mode*/
                uint32_t tmp;
                uint32_t fp_ip;
                uint32_t fp_dp;
                fp_ip = ((uint32_t) (fpu_state.fcs << 4)) | fpu_state.fip;
                fp_dp = ((uint32_t) (fpu_state.fds << 4)) | fpu_state.fdp;
                /* Unused high halves of the 32-bit slots are stored as 1s. */
                tmp = 0xffff0000 | i387_get_control_word();
                writememl(easeg, cpu_state.eaaddr + 0x00, tmp);
                tmp = 0xffff0000 | i387_get_status_word();
                writememl(easeg, cpu_state.eaaddr + 0x04, tmp);
                tmp = 0xffff0000 | fpu_state.tag;
                writememl(easeg, cpu_state.eaaddr + 0x08, tmp);
                tmp = 0xffff0000 | (fp_ip & 0xffff);
                writememl(easeg, cpu_state.eaaddr + 0x0c, tmp);
                tmp = ((fp_ip & 0xffff0000) >> 4) | fpu_state.foo;
                writememl(easeg, cpu_state.eaaddr + 0x10, tmp);
                tmp = 0xffff0000 | (fp_dp & 0xffff);
                writememl(easeg, cpu_state.eaaddr + 0x14, tmp);
                tmp = (fp_dp & 0xffff0000) >> 4;
                writememl(easeg, cpu_state.eaaddr + 0x18, tmp);
                offset = 0x1c;
            }
            break;
        case 0x101:
            { /*32-bit protected mode*/
                uint32_t tmp;
                tmp = 0xffff0000 | i387_get_control_word();
                writememl(easeg, cpu_state.eaaddr + 0x00, tmp);
                tmp = 0xffff0000 | i387_get_status_word();
                writememl(easeg, cpu_state.eaaddr + 0x04, tmp);
                tmp = 0xffff0000 | fpu_state.tag;
                writememl(easeg, cpu_state.eaaddr + 0x08, tmp);
                tmp = (uint32_t) (fpu_state.fip);
                writememl(easeg, cpu_state.eaaddr + 0x0c, tmp);
                /* CS selector in the low half, last opcode in the high half. */
                tmp = fpu_state.fcs | (((uint32_t) (fpu_state.foo)) << 16);
                writememl(easeg, cpu_state.eaaddr + 0x10, tmp);
                tmp = (uint32_t) (fpu_state.fdp);
                writememl(easeg, cpu_state.eaaddr + 0x14, tmp);
                tmp = 0xffff0000 | fpu_state.fds;
                writememl(easeg, cpu_state.eaaddr + 0x18, tmp);
                offset = 0x1c;
            }
            break;
    }
    return (cpu_state.eaaddr + offset);
}
/* Read the x87 environment (control/status/tag words plus instruction and
   data pointers) from memory at cpu_state.eaaddr, in the layout selected by
   CPU mode (cr0 PE bit) and operand size (op32). Returns the address just
   past the environment, where FRSTOR reads the register images. */
static uint32_t
fpu_load_environment(void)
{
    unsigned offset = 0;
    /* Selector: bit 0 = protected mode (CR0.PE), bit 8 = 32-bit operand size. */
    switch ((cr0 & 1) | (cpu_state.op32 & 0x100)) {
        case 0x000:
            { /*16-bit real mode*/
                uint16_t tmp;
                uint32_t fp_ip;
                uint32_t fp_dp;
                /* Real mode stores linear pointers; selectors are reset to 0. */
                tmp = readmemw(easeg, cpu_state.eaaddr + 0x0c);
                fp_dp = (tmp & 0xf000) << 4;
                tmp = readmemw(easeg, cpu_state.eaaddr + 0x0a);
                fpu_state.fdp = fp_dp | tmp;
                fpu_state.fds = 0;
                tmp = readmemw(easeg, cpu_state.eaaddr + 0x08);
                fp_ip = (tmp & 0xf000) << 4;
                tmp = readmemw(easeg, cpu_state.eaaddr + 0x06);
                fpu_state.fip = fp_ip | tmp;
                fpu_state.fcs = 0;
                tmp = readmemw(easeg, cpu_state.eaaddr + 0x04);
                fpu_state.tag = tmp;
                tmp = readmemw(easeg, cpu_state.eaaddr + 0x02);
                fpu_state.swd = tmp;
                /* Top-of-stack lives in status word bits 13-11. */
                fpu_state.tos = (tmp >> 11) & 7;
                tmp = readmemw(easeg, cpu_state.eaaddr + 0x00);
                fpu_state.cwd = tmp;
                offset = 0x0e;
            }
            break;
        case 0x001:
            { /*16-bit protected mode*/
                uint16_t tmp;
                tmp = readmemw(easeg, cpu_state.eaaddr + 0x0c);
                fpu_state.fds = tmp;
                tmp = readmemw(easeg, cpu_state.eaaddr + 0x0a);
                fpu_state.fdp = tmp;
                tmp = readmemw(easeg, cpu_state.eaaddr + 0x08);
                fpu_state.fcs = tmp;
                tmp = readmemw(easeg, cpu_state.eaaddr + 0x06);
                fpu_state.fip = tmp;
                tmp = readmemw(easeg, cpu_state.eaaddr + 0x04);
                fpu_state.tag = tmp;
                tmp = readmemw(easeg, cpu_state.eaaddr + 0x02);
                fpu_state.swd = tmp;
                fpu_state.tos = (tmp >> 11) & 7;
                tmp = readmemw(easeg, cpu_state.eaaddr + 0x00);
                fpu_state.cwd = tmp;
                offset = 0x0e;
            }
            break;
        case 0x100:
            { /*32-bit real mode*/
                uint32_t tmp;
                uint32_t fp_ip;
                uint32_t fp_dp;
                tmp = readmeml(easeg, cpu_state.eaaddr + 0x18);
                fp_dp = (tmp & 0x0ffff000) << 4;
                tmp = readmeml(easeg, cpu_state.eaaddr + 0x14);
                fp_dp |= (tmp & 0xffff);
                fpu_state.fdp = fp_dp;
                fpu_state.fds = 0;
                tmp = readmeml(easeg, cpu_state.eaaddr + 0x10);
                fpu_state.foo = tmp & 0x07ff;
                fp_ip = (tmp & 0x0ffff000) << 4;
                tmp = readmeml(easeg, cpu_state.eaaddr + 0x0c);
                fp_ip |= (tmp & 0xffff);
                fpu_state.fip = fp_ip;
                fpu_state.fcs = 0;
                tmp = readmeml(easeg, cpu_state.eaaddr + 0x08);
                fpu_state.tag = tmp & 0xffff;
                tmp = readmeml(easeg, cpu_state.eaaddr + 0x04);
                fpu_state.swd = tmp & 0xffff;
                fpu_state.tos = (tmp >> 11) & 7;
                tmp = readmeml(easeg, cpu_state.eaaddr + 0x00);
                fpu_state.cwd = tmp & 0xffff;
                offset = 0x1c;
            }
            break;
        case 0x101:
            { /*32-bit protected mode*/
                uint32_t tmp;
                tmp = readmeml(easeg, cpu_state.eaaddr + 0x18);
                fpu_state.fds = tmp & 0xffff;
                tmp = readmeml(easeg, cpu_state.eaaddr + 0x14);
                fpu_state.fdp = tmp;
                /* CS selector in the low half, last opcode in the high half. */
                tmp = readmeml(easeg, cpu_state.eaaddr + 0x10);
                fpu_state.fcs = tmp & 0xffff;
                fpu_state.foo = (tmp >> 16) & 0x07ff;
                tmp = readmeml(easeg, cpu_state.eaaddr + 0x0c);
                fpu_state.fip = tmp;
                tmp = readmeml(easeg, cpu_state.eaaddr + 0x08);
                fpu_state.tag = tmp & 0xffff;
                tmp = readmeml(easeg, cpu_state.eaaddr + 0x04);
                fpu_state.swd = tmp & 0xffff;
                fpu_state.tos = (tmp >> 11) & 7;
                tmp = readmeml(easeg, cpu_state.eaaddr + 0x00);
                fpu_state.cwd = tmp & 0xffff;
                offset = 0x1c;
            }
            break;
    }
    /* always set bit 6 as '1 */
    fpu_state.cwd = (fpu_state.cwd & ~FPU_CW_Reserved_Bits) | 0x0040;
    /* check for unmasked exceptions */
    if (fpu_state.swd & ~fpu_state.cwd & FPU_CW_Exceptions_Mask) {
        /* set the B and ES bits in the status-word */
        fpu_state.swd |= (FPU_SW_Summary | FPU_SW_Backward);
    } else {
        /* clear the B and ES bits in the status-word */
        fpu_state.swd &= ~(FPU_SW_Summary | FPU_SW_Backward);
    }
    return (cpu_state.eaaddr + offset);
}
/* FLDCW m16 (16-bit addressing): load the FPU control word from memory and
   recompute the summary (B/ES) bits against any pending exceptions.
   Returns non-zero on an aborted memory access. */
static int
sf_FLDCW_a16(uint32_t fetchdat)
{
    uint16_t tempw;
    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_16(fetchdat);
    SEG_CHECK_READ(cpu_state.ea_seg);
    tempw = geteaw();
    if (cpu_state.abrt)
        return 1;
    fpu_state.cwd = (tempw & ~FPU_CW_Reserved_Bits) | 0x0040; // bit 6 is reserved as '1
    /* check for unmasked exceptions */
    if (fpu_state.swd & (~fpu_state.cwd & FPU_CW_Exceptions_Mask)) {
        /* set the B and ES bits in the status-word */
        fpu_state.swd |= (FPU_SW_Summary | FPU_SW_Backward);
    } else {
        /* clear the B and ES bits in the status-word */
        fpu_state.swd &= ~(FPU_SW_Summary | FPU_SW_Backward);
    }
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fldcw) : (x87_timings.fldcw * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fldcw) : (x87_concurrency.fldcw * cpu_multi));
    return 0;
}
#ifndef FPU_8087
/* FLDCW m16 (32-bit addressing): same as sf_FLDCW_a16 with a 32-bit EA. */
static int
sf_FLDCW_a32(uint32_t fetchdat)
{
    uint16_t tempw;
    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_32(fetchdat);
    SEG_CHECK_READ(cpu_state.ea_seg);
    tempw = geteaw();
    if (cpu_state.abrt)
        return 1;
    fpu_state.cwd = (tempw & ~FPU_CW_Reserved_Bits) | 0x0040; // bit 6 is reserved as '1
    /* check for unmasked exceptions */
    if (fpu_state.swd & (~fpu_state.cwd & FPU_CW_Exceptions_Mask)) {
        /* set the B and ES bits in the status-word */
        fpu_state.swd |= (FPU_SW_Summary | FPU_SW_Backward);
    } else {
        /* clear the B and ES bits in the status-word */
        fpu_state.swd &= ~(FPU_SW_Summary | FPU_SW_Backward);
    }
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fldcw) : (x87_timings.fldcw * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fldcw) : (x87_concurrency.fldcw * cpu_multi));
    return 0;
}
#endif
/* FNSTCW m16 (16-bit addressing): store the FPU control word to memory.
   Returns non-zero on an aborted memory access. */
static int
sf_FNSTCW_a16(uint32_t fetchdat)
{
    uint16_t cwd = i387_get_control_word();
    FP_ENTER();
    fetch_ea_16(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    seteaw(cwd);
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fstcw_sw) : (x87_timings.fstcw_sw * cpu_multi));
    /* Use the fstcw_sw concurrency figure, matching the clock timing above
       and sf_FNSTCW_a32 (previously used fstenv by a copy/paste slip). */
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fstcw_sw) : (x87_concurrency.fstcw_sw * cpu_multi));
    return cpu_state.abrt;
}
#ifndef FPU_8087
/* FNSTCW m16 (32-bit addressing): store the FPU control word to memory. */
static int
sf_FNSTCW_a32(uint32_t fetchdat)
{
    uint16_t cwd = i387_get_control_word();
    FP_ENTER();
    fetch_ea_32(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    seteaw(cwd);
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fstcw_sw) : (x87_timings.fstcw_sw * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fstcw_sw) : (x87_concurrency.fstcw_sw * cpu_multi));
    return cpu_state.abrt;
}
#endif
/* FNSTSW m16 (16-bit addressing): store the FPU status word to memory. */
static int
sf_FNSTSW_a16(uint32_t fetchdat)
{
    uint16_t swd = i387_get_status_word();
    FP_ENTER();
    fetch_ea_16(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    seteaw(swd);
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fstcw_sw) : (x87_timings.fstcw_sw * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fstcw_sw) : (x87_concurrency.fstcw_sw * cpu_multi));
    return cpu_state.abrt;
}
#ifndef FPU_8087
/* FNSTSW m16 (32-bit addressing): store the FPU status word to memory. */
static int
sf_FNSTSW_a32(uint32_t fetchdat)
{
    uint16_t swd = i387_get_status_word();
    FP_ENTER();
    fetch_ea_32(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    seteaw(swd);
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fstcw_sw) : (x87_timings.fstcw_sw * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fstcw_sw) : (x87_concurrency.fstcw_sw * cpu_multi));
    return cpu_state.abrt;
}
#endif
#ifdef FPU_8087
/* 8087 interrupt-mask control (presumably FNENI = D9 E0 / FNDISI = D9 E1):
   clears the mask bit, then sets it again for the E1 encoding.
   NOTE(review): the bit is manipulated via FPU_SW_Summary (a status-word
   constant) applied to cwd - it appears to numerically coincide with the
   8087 control-word IEM bit; confirm this overlap is intentional. */
static int
sf_FI(uint32_t fetchdat)
{
    FP_ENTER();
    cpu_state.pc++;
    fpu_state.cwd &= ~FPU_SW_Summary;
    if (rmdat == 0xe1)
        fpu_state.cwd |= FPU_SW_Summary;
    wait(3, 0);
    return 0;
}
#else
/* FNSTSW AX: copy the FPU status word into AX (287 and later). */
static int
sf_FNSTSW_AX(uint32_t fetchdat)
{
    FP_ENTER();
    cpu_state.pc++;
    AX = i387_get_status_word();
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fstcw_sw) : (x87_timings.fstcw_sw * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fstcw_sw) : (x87_concurrency.fstcw_sw * cpu_multi));
    return 0;
}
#endif
/* FRSTOR m94/108byte (16-bit addressing): reload the full FPU state -
   environment first, then the eight 80-bit register images in stack order,
   re-deriving each register's tag unless the restored tag word says empty. */
static int
sf_FRSTOR_a16(uint32_t fetchdat)
{
    floatx80 tmp;
    int offset;
    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_16(fetchdat);
    SEG_CHECK_READ(cpu_state.ea_seg);
    offset = fpu_load_environment();
    for (int n = 0; n < 8; n++) {
        /* Each register image is 10 bytes: 8-byte significand + 2-byte sign/exponent. */
        tmp.signif = readmemq(easeg, offset + (n * 10));
        tmp.signExp = readmemw(easeg, offset + (n * 10) + 8);
        FPU_save_regi_tag(tmp, IS_TAG_EMPTY(n) ? X87_TAG_EMPTY : FPU_tagof(tmp), n);
    }
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.frstor) : (x87_timings.frstor * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.frstor) : (x87_concurrency.frstor * cpu_multi));
    return cpu_state.abrt;
}
#ifndef FPU_8087
/* FRSTOR m94/108byte (32-bit addressing): same as sf_FRSTOR_a16 with a 32-bit EA. */
static int
sf_FRSTOR_a32(uint32_t fetchdat)
{
    floatx80 tmp;
    int offset;
    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_32(fetchdat);
    SEG_CHECK_READ(cpu_state.ea_seg);
    offset = fpu_load_environment();
    for (int n = 0; n < 8; n++) {
        /* Each register image is 10 bytes: 8-byte significand + 2-byte sign/exponent. */
        tmp.signif = readmemq(easeg, offset + (n * 10));
        tmp.signExp = readmemw(easeg, offset + (n * 10) + 8);
        FPU_save_regi_tag(tmp, IS_TAG_EMPTY(n) ? X87_TAG_EMPTY : FPU_tagof(tmp), n);
    }
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.frstor) : (x87_timings.frstor * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.frstor) : (x87_concurrency.frstor * cpu_multi));
    return cpu_state.abrt;
}
#endif
/* FNSAVE m94/108byte (16-bit addressing): store the environment followed by
   the eight 80-bit register images in stack order, then reinitialize the
   FPU (same reset state as FNINIT; the 8087 additionally preserves some
   status bits per its mask 0x4700). */
static int
sf_FNSAVE_a16(uint32_t fetchdat)
{
    floatx80 stn;
    int offset;
    FP_ENTER();
    fetch_ea_16(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    offset = fpu_save_environment();
    /* save all registers in stack order. */
    for (int m = 0; m < 8; m++) {
        stn = FPU_read_regi(m);
        writememq(easeg, offset + (m * 10), stn.signif);
        writememw(easeg, offset + (m * 10) + 8, stn.signExp);
    }
#ifdef FPU_8087
    fpu_state.cwd = 0x3FF;
    fpu_state.swd &= 0x4700;
#else
    fpu_state.cwd = 0x37F;
    fpu_state.swd = 0;
#endif
    fpu_state.tos = 0;
    fpu_state.tag = 0xffff; /* all registers tagged empty */
    cpu_state.ismmx = 0;
    fpu_state.foo = 0;
    fpu_state.fds = 0;
    fpu_state.fdp = 0;
    fpu_state.fcs = 0;
    fpu_state.fip = 0;
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fsave) : (x87_timings.fsave * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fsave) : (x87_concurrency.fsave * cpu_multi));
    return cpu_state.abrt;
}
#ifndef FPU_8087
/* FNSAVE m94/108byte (32-bit addressing): same as sf_FNSAVE_a16 with a
   32-bit EA. (The inner FPU_8087 branch is unreachable here, since the
   whole function is compiled out for the 8087.) */
static int
sf_FNSAVE_a32(uint32_t fetchdat)
{
    floatx80 stn;
    int offset;
    FP_ENTER();
    fetch_ea_32(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    offset = fpu_save_environment();
    /* save all registers in stack order. */
    for (int m = 0; m < 8; m++) {
        stn = FPU_read_regi(m);
        writememq(easeg, offset + (m * 10), stn.signif);
        writememw(easeg, offset + (m * 10) + 8, stn.signExp);
    }
# ifdef FPU_8087
    fpu_state.cwd = 0x3FF;
# else
    fpu_state.cwd = 0x37F;
# endif
    fpu_state.swd = 0;
    fpu_state.tos = 0;
    fpu_state.tag = 0xffff; /* all registers tagged empty */
    cpu_state.ismmx = 0;
    fpu_state.foo = 0;
    fpu_state.fds = 0;
    fpu_state.fdp = 0;
    fpu_state.fcs = 0;
    fpu_state.fip = 0;
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fsave) : (x87_timings.fsave * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fsave) : (x87_concurrency.fsave * cpu_multi));
    return cpu_state.abrt;
}
#endif
/* FNCLEX: clear all exception flags plus the stack-fault, summary and
   busy/backward bits in the status word. */
static int
sf_FNCLEX(uint32_t fetchdat)
{
    FP_ENTER();
    cpu_state.pc++;
    fpu_state.swd &= ~(FPU_SW_Backward | FPU_SW_Summary | FPU_SW_Stack_Fault | FPU_SW_Precision | FPU_SW_Underflow | FPU_SW_Overflow | FPU_SW_Zero_Div | FPU_SW_Denormal_Op | FPU_SW_Invalid);
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fnop) : (x87_timings.fnop * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fnop) : (x87_concurrency.fnop * cpu_multi));
    return 0;
}
/* FNINIT: reset the FPU to its power-on state - default control word,
   cleared status, empty tag word, zeroed instruction/data pointers, and
   MMX mode off. Ends the current dynarec block so the new state is seen. */
static int
sf_FNINIT(uint32_t fetchdat)
{
    FP_ENTER();
    cpu_state.pc++;
#ifdef FPU_8087
    fpu_state.cwd = 0x3FF;
    fpu_state.swd &= 0x4700; /* the 8087 preserves these status bits */
#else
    fpu_state.cwd = 0x37F;
    fpu_state.swd = 0;
#endif
    fpu_state.tos = 0;
    fpu_state.tag = 0xffff; /* all registers tagged empty */
    fpu_state.foo = 0;
    fpu_state.fds = 0;
    fpu_state.fdp = 0;
    fpu_state.fcs = 0;
    fpu_state.fip = 0;
    cpu_state.ismmx = 0;
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.finit) : (x87_timings.finit * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.finit) : (x87_concurrency.finit * cpu_multi));
    CPU_BLOCK_END();
    return 0;
}
/* FLDENV m14/28byte (16-bit addressing): load the FPU environment, then
   re-derive the tag for every non-empty register from its current value. */
static int
sf_FLDENV_a16(uint32_t fetchdat)
{
    int tag;
    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_16(fetchdat);
    SEG_CHECK_READ(cpu_state.ea_seg);
    fpu_load_environment();
    /* read all registers in stack order and update x87 tag word */
    for (int n = 0; n < 8; n++) {
        // update tag only if it is not empty
        if (!IS_TAG_EMPTY(n)) {
            tag = FPU_tagof(FPU_read_regi(n));
            FPU_settagi(tag, n);
        }
    }
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fldenv) : (x87_timings.fldenv * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fldenv) : (x87_concurrency.fldenv * cpu_multi));
    return cpu_state.abrt;
}
#ifndef FPU_8087
/* FLDENV m14/28byte (32-bit addressing): same as sf_FLDENV_a16 with a 32-bit EA. */
static int
sf_FLDENV_a32(uint32_t fetchdat)
{
    int tag;
    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_32(fetchdat);
    SEG_CHECK_READ(cpu_state.ea_seg);
    fpu_load_environment();
    /* read all registers in stack order and update x87 tag word */
    for (int n = 0; n < 8; n++) {
        // update tag only if it is not empty
        if (!IS_TAG_EMPTY(n)) {
            tag = FPU_tagof(FPU_read_regi(n));
            FPU_settagi(tag, n);
        }
    }
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fldenv) : (x87_timings.fldenv * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fldenv) : (x87_concurrency.fldenv * cpu_multi));
    return cpu_state.abrt;
}
#endif
/* FNSTENV m14/28byte (16-bit addressing): store the FPU environment, then
   mask all exceptions and clear the summary bits, as the hardware does so a
   handler can run without re-triggering. */
static int
sf_FNSTENV_a16(uint32_t fetchdat)
{
    FP_ENTER();
    fetch_ea_16(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    fpu_save_environment();
    /* mask all floating point exceptions */
    fpu_state.cwd |= FPU_CW_Exceptions_Mask;
    /* clear the B and ES bits in the status word */
    fpu_state.swd &= ~(FPU_SW_Backward | FPU_SW_Summary);
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fstenv) : (x87_timings.fstenv * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fstenv) : (x87_concurrency.fstenv * cpu_multi));
    return cpu_state.abrt;
}
#ifndef FPU_8087
/* FNSTENV m14/28byte (32-bit addressing): same as sf_FNSTENV_a16 with a 32-bit EA. */
static int
sf_FNSTENV_a32(uint32_t fetchdat)
{
    FP_ENTER();
    fetch_ea_32(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    fpu_save_environment();
    /* mask all floating point exceptions */
    fpu_state.cwd |= FPU_CW_Exceptions_Mask;
    /* clear the B and ES bits in the status word */
    fpu_state.swd &= ~(FPU_SW_Backward | FPU_SW_Summary);
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fstenv) : (x87_timings.fstenv * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fstenv) : (x87_concurrency.fstenv * cpu_multi));
    return cpu_state.abrt;
}
#endif
/* FNOP: no operation apart from delivering any pending FPU exception and
   burning the documented cycle count. */
static int
sf_FNOP(uint32_t fetchdat)
{
    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++;
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fnop) : (x87_timings.fnop * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fnop) : (x87_concurrency.fnop * cpu_multi));
    return 0;
}
``` | /content/code_sandbox/src/cpu/x87_ops_sf.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 7,040 |
```objective-c
/* AAA: ASCII adjust AL after addition. Sets AF/CF and adjusts AL/AH when
   the low nibble overflows BCD range; always masks AL to its low nibble. */
static int
opAAA(uint32_t fetchdat)
{
    flags_rebuild();
    if ((cpu_state.flags & A_FLAG) || ((AL & 0xF) > 9)) {
        /* On 286, it's indeed AX - behavior difference from 808x. */
        AX += 6;
        AH++;
        cpu_state.flags |= (A_FLAG | C_FLAG);
    } else
        cpu_state.flags &= ~(A_FLAG | C_FLAG);
    AL &= 0xF;
    CLOCK_CYCLES(is486 ? 3 : 4);
    PREFETCH_RUN(is486 ? 3 : 4, 1, -1, 0, 0, 0, 0, 0);
    return 0;
}
/* AAD: ASCII adjust AX before division - AL = AH * imm8 + AL, AH = 0.
   Non-Intel CPUs ignore the immediate and always use base 10.
   NOTE(review): flags are derived from the full AX via setznp16; the SDM
   specifies SF/ZF/PF from AL - confirm this matches tested hardware. */
static int
opAAD(uint32_t fetchdat)
{
    int base = getbytef();
    if (!cpu_isintel)
        base = 10;
    AL = (AH * base) + AL;
    AH = 0;
    setznp16(AX);
    CLOCK_CYCLES((is486) ? 14 : 19);
    PREFETCH_RUN(is486 ? 14 : 19, 2, -1, 0, 0, 0, 0, 0);
    return 0;
}
/* AAM: ASCII adjust AX after multiply - AH = AL / imm8, AL = AL % imm8.
   Non-Intel CPUs ignore the immediate and always use base 10.
   NOTE(review): a zero immediate is substituted with 10 to avoid division
   by zero; real Intel CPUs raise #DE for AAM 0 - confirm intended.
   NOTE(review): flags come from AX via setznp16; SDM says from AL. */
static int
opAAM(uint32_t fetchdat)
{
    int base = getbytef();
    if (!base || !cpu_isintel)
        base = 10;
    AH = AL / base;
    AL %= base;
    setznp16(AX);
    CLOCK_CYCLES((is486) ? 15 : 17);
    PREFETCH_RUN(is486 ? 15 : 17, 2, -1, 0, 0, 0, 0, 0);
    return 0;
}
/* AAS: ASCII adjust AL after subtraction. Sets AF/CF and adjusts AL/AH when
   the low nibble underflows BCD range; always masks AL to its low nibble. */
static int
opAAS(uint32_t fetchdat)
{
    flags_rebuild();
    if ((cpu_state.flags & A_FLAG) || ((AL & 0xF) > 9)) {
        /* On 286, it's indeed AX - behavior difference from 808x. */
        AX -= 6;
        AH--;
        cpu_state.flags |= (A_FLAG | C_FLAG);
    } else
        cpu_state.flags &= ~(A_FLAG | C_FLAG);
    AL &= 0xF;
    CLOCK_CYCLES(is486 ? 3 : 4);
    PREFETCH_RUN(is486 ? 3 : 4, 1, -1, 0, 0, 0, 0, 0);
    return 0;
}
/* DAA: decimal adjust AL after addition, following the SDM two-step
   pseudocode: fix the low nibble first (+6, AF), then the high nibble
   (+0x60, CF). The saved CF/AF are re-applied after setznp8 recomputes
   the arithmetic flags from the adjusted AL. */
static int
opDAA(uint32_t fetchdat)
{
    uint16_t tempw;
    uint16_t old_AL;
    uint16_t old_CF;
    flags_rebuild();
    old_AL = AL;
    old_CF = cpu_state.flags & C_FLAG;
    cpu_state.flags &= ~C_FLAG;
    if (((AL & 0xf) > 9) || (cpu_state.flags & A_FLAG)) {
        /* tempi captures the carry out of AL + 6 before AL wraps. */
        int tempi = ((uint16_t) AL) + 6;
        AL += 6;
        if (old_CF || (tempi & 0x100))
            cpu_state.flags |= C_FLAG;
        cpu_state.flags |= A_FLAG;
    } else
        cpu_state.flags &= ~A_FLAG;
    if ((old_AL > 0x99) || old_CF) {
        AL += 0x60;
        cpu_state.flags |= C_FLAG;
    } else
        cpu_state.flags &= ~C_FLAG;
    tempw = cpu_state.flags & (C_FLAG | A_FLAG);
    setznp8(AL);
    flags_rebuild();
    cpu_state.flags = (cpu_state.flags & ~(C_FLAG | A_FLAG)) | tempw;
    CLOCK_CYCLES(4);
    PREFETCH_RUN(4, 1, -1, 0, 0, 0, 0, 0);
    return 0;
}
/* DAS - Decimal Adjust AL after Subtraction (packed BCD).
   Follows the Intel SDM pseudocode.  Note: unlike DAA, there is
   deliberately no "else CF = 0" on the second adjustment step - the
   SDM's DAS pseudocode omits it, so this is not a missing branch. */
static int
opDAS(uint32_t fetchdat)
{
    uint16_t tempw;
    uint16_t old_AL;
    uint16_t old_CF;
    flags_rebuild();
    old_AL = AL;
    old_CF = cpu_state.flags & C_FLAG;
    cpu_state.flags &= ~C_FLAG;
    if (((AL & 0xf) > 9) || (cpu_state.flags & A_FLAG)) {
        /* Widened difference so a borrow out of bit 7 shows up in bit 8. */
        int tempi = ((uint16_t) AL) - 6;
        AL -= 6;
        /* CF = entry CF OR borrow from the low-nibble adjustment. */
        if (old_CF || (tempi & 0x100))
            cpu_state.flags |= C_FLAG;
        cpu_state.flags |= A_FLAG;
    } else
        cpu_state.flags &= ~A_FLAG;
    if ((old_AL > 0x99) || old_CF) {
        AL -= 0x60;
        cpu_state.flags |= C_FLAG;
    }
    /* Preserve the CF/AF computed above across the ZF/SF/PF recompute. */
    tempw = cpu_state.flags & (C_FLAG | A_FLAG);
    setznp8(AL);
    flags_rebuild();
    cpu_state.flags = (cpu_state.flags & ~(C_FLAG | A_FLAG)) | tempw;
    CLOCK_CYCLES(4);
    PREFETCH_RUN(4, 1, -1, 0, 0, 0, 0, 0);
    return 0;
}
``` | /content/code_sandbox/src/cpu/x86_ops_bcd.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 1,110 |
```objective-c
/*
* 86Box A hypervisor and IBM PC system emulator that specializes in
* running old operating systems and software designed for IBM
* PC systems and compatibles from 1981 through fairly recent
* system designs based on the PCI bus.
*
* This file is part of the 86Box distribution.
*
* CPU type handler.
*
*
*
* Authors: Sarah Walker, <path_to_url
* leilei,
* Miran Grca, <mgrca8@gmail.com>
*
*/
#ifndef EMU_CPU_H
#define EMU_CPU_H
/* FPU types a CPU model can be paired with (see fpu_t.type below);
   FPU_INTERNAL denotes an FPU integrated into the CPU itself. */
enum {
    FPU_NONE,
    FPU_8087,
    FPU_80187,
    FPU_287,
    FPU_287XL,
    FPU_387,
    FPU_487SX,
    FPU_INTERNAL
};
/* CPU model identifiers (stored in cpu_t.cpu_type).  Numbering starts
   at 1 so 0 remains available as an invalid/unset value; entries are
   grouped by CPU class, marked by the inline comments. */
enum {
    CPU_8088 = 1, /* 808x class CPUs */
    CPU_8086,
    CPU_V20, /* NEC 808x class CPUs */
    CPU_V30,
    CPU_188, /* 18x class CPUs */
    CPU_186,
    CPU_286, /* 286 class CPUs */
    CPU_386SX, /* 386 class CPUs */
    CPU_IBM386SLC,
    CPU_IBM486SLC,
    CPU_386DX,
    CPU_IBM486BL,
    CPU_RAPIDCAD,
    CPU_486SLC,
    CPU_486DLC,
    CPU_i486SX, /* 486 class CPUs */
    CPU_Am486SX,
    CPU_Cx486S,
    CPU_i486DX,
    CPU_Am486DX,
    CPU_Am486DXL,
    CPU_Cx486DX,
    CPU_STPC,
    CPU_i486SX_SLENH,
    CPU_i486DX_SLENH,
    CPU_ENH_Am486DX,
    CPU_Cx5x86,
    CPU_P24T,
    CPU_WINCHIP, /* 586 class CPUs */
    CPU_WINCHIP2,
    CPU_PENTIUM,
    CPU_PENTIUMMMX,
    CPU_Cx6x86,
    CPU_Cx6x86MX,
    CPU_Cx6x86L,
    CPU_CxGX1,
    CPU_K5,
    CPU_5K86,
    CPU_K6,
    CPU_K6_2,
    CPU_K6_2C,
    CPU_K6_3,
    CPU_K6_2P,
    CPU_K6_3P,
    CPU_CYRIX3S,
    CPU_PENTIUMPRO, /* 686 class CPUs */
    CPU_PENTIUM2,
    CPU_PENTIUM2D
};
/* CPU package/socket types, defined as single-bit flags so that
   cpu_family_t.package can hold a mask of every package a given CPU
   family was available in. */
enum {
    CPU_PKG_8088             = (1 << 0),
    CPU_PKG_8088_EUROPC      = (1 << 1),
    CPU_PKG_8086             = (1 << 2),
    CPU_PKG_188              = (1 << 3),
    CPU_PKG_186              = (1 << 4),
    CPU_PKG_286              = (1 << 5),
    CPU_PKG_386SX            = (1 << 6),
    CPU_PKG_386DX            = (1 << 7),
    CPU_PKG_386DX_DESKPRO386 = (1 << 8),
    CPU_PKG_M6117            = (1 << 9),
    CPU_PKG_386SLC_IBM       = (1 << 10),
    CPU_PKG_486SLC           = (1 << 11),
    CPU_PKG_486SLC_IBM       = (1 << 12),
    CPU_PKG_486BL            = (1 << 13),
    CPU_PKG_486DLC           = (1 << 14),
    CPU_PKG_SOCKET1          = (1 << 15),
    CPU_PKG_SOCKET3          = (1 << 16),
    CPU_PKG_SOCKET3_PC330    = (1 << 17),
    CPU_PKG_STPC             = (1 << 18),
    CPU_PKG_SOCKET4          = (1 << 19),
    CPU_PKG_SOCKET5_7        = (1 << 20),
    CPU_PKG_SOCKET8          = (1 << 21),
    CPU_PKG_SLOT1            = (1 << 22),
    CPU_PKG_SLOT2            = (1 << 23),
    CPU_PKG_SOCKET370        = (1 << 24)
};
#define CPU_SUPPORTS_DYNAREC 1
#define CPU_REQUIRES_DYNAREC 2
#define CPU_ALTERNATE_XTAL 4
#define CPU_FIXED_MULTIPLIER 8
#if (defined __amd64__ || defined _M_X64)
# define LOOKUP_INV -1LL
#else
# define LOOKUP_INV -1
#endif
/* One selectable FPU option for a CPU model: display name, internal
   identifier string (presumably the config-file token - confirm at the
   definition site), and an FPU_* type value. */
typedef struct fpu_t {
    const char *name;
    const char *internal_name;
    const int type;
} FPU;
/* One CPU model within a cpu_family_t.
   cpu_type holds a CPU_* enum value, fpus the model's selectable FPU
   table, and cpu_flags a mask of CPU_SUPPORTS_DYNAREC /
   CPU_REQUIRES_DYNAREC / CPU_ALTERNATE_XTAL / CPU_FIXED_MULTIPLIER.
   The *_cycles fields are memory/cache access timings.  Units for
   rspeed and voltage are not visible in this header (presumably Hz
   and mV) - confirm at the table definition site. */
typedef struct cpu_t {
    const char *name;
    uint64_t cpu_type;
    const FPU *fpus;
    uint32_t rspeed;
    double multi;
    uint16_t voltage;
    uint32_t edx_reset;
    uint32_t cpuid_model;
    uint16_t cyrix_id;
    uint8_t cpu_flags;
    int8_t mem_read_cycles;
    int8_t mem_write_cycles;
    int8_t cache_read_cycles;
    int8_t cache_write_cycles;
    int8_t atclk_div;
} CPU;
/* A named family of CPU models: `package` is a mask of CPU_PKG_* bits
   naming the sockets/packages the family fits, and `cpus` points to
   the family's model table. */
typedef struct {
    const uint32_t package;
    const char *manufacturer;
    const char *name;
    const char *internal_name;
    const CPU *cpus;
} cpu_family_t;
#define C_FLAG 0x0001
#define P_FLAG 0x0004
#define A_FLAG 0x0010
#define Z_FLAG 0x0040
#define N_FLAG 0x0080
#define T_FLAG 0x0100
#define I_FLAG 0x0200
#define D_FLAG 0x0400
#define V_FLAG 0x0800
#define NT_FLAG 0x4000
#define MD_FLAG 0x8000
#define RF_FLAG 0x0001 /* in EFLAGS */
#define VM_FLAG 0x0002 /* in EFLAGS */
#define VIF_FLAG 0x0008 /* in EFLAGS */
#define VIP_FLAG 0x0010 /* in EFLAGS */
#define VID_FLAG 0x0020 /* in EFLAGS */
#define EM_FLAG 0x00004 /* in CR0 */
#define WP_FLAG 0x10000 /* in CR0 */
#define CR4_VME (1 << 0) /* Virtual 8086 Mode Extensions */
#define CR4_PVI (1 << 1) /* Protected-mode Virtual Interrupts */
#define CR4_TSD (1 << 2) /* Time Stamp Disable */
#define CR4_DE (1 << 3) /* Debugging Extensions */
#define CR4_PSE (1 << 4) /* Page Size Extension */
#define CR4_PAE (1 << 5) /* Physical Address Extension */
#define CR4_MCE (1 << 6) /* Machine Check Exception */
#define CR4_PGE (1 << 7) /* Page Global Enabled */
#define CR4_PCE (1 << 8) /* Performance-Monitoring Counter enable */
#define CR4_OSFXSR (1 << 9) /* Operating system support for FXSAVE and FXRSTOR instructions */
#define CPL ((cpu_state.seg_cs.access >> 5) & 3)
#define IOPL ((cpu_state.flags >> 12) & 3)
#define IOPLp ((!(msw & 1)) || (CPL <= IOPL))
/* General-purpose register: one 32-bit storage unit viewable as the
   full dword (l), the low word (w), or the two low bytes (b.l / b.h) -
   this is what makes the EAX/AX/AL/AH-style macros work. */
typedef union {
    uint32_t l;
    uint16_t w;
    struct {
        uint8_t l;
        uint8_t h;
    } b;
} x86reg;
/* Cached segment register state: base/limit/access bytes as loaded
   from the descriptor, `seg` the selector value, and
   limit_low/limit_high the expanded valid offset range (presumably to
   handle expand-down segments uniformly - confirm at the load code). */
typedef struct {
    uint32_t base;
    uint32_t limit;
    uint8_t access;
    uint8_t ar_high;
    uint16_t seg;
    uint32_t limit_low;
    uint32_t limit_high;
    int checked; /*Non-zero if selector is known to be valid*/
} x86seg;
/* 64-bit MMX register with every packed view: quadword, dwords,
   words and bytes (each in signed and unsigned flavors), plus two
   single-precision floats. */
typedef union {
    uint64_t q;
    int64_t sq;
    uint32_t l[2];
    int32_t sl[2];
    uint16_t w[4];
    int16_t sw[4];
    uint8_t b[8];
    int8_t sb[8];
    float f[2];
} MMX_REG;
/* Model-specific register file covering every emulated CPU family.
   The comment on each field gives the MSR index (the ECX value used
   with RDMSR/WRMSR) and which CPUs implement it; only the fields for
   the currently emulated CPU are meaningful at runtime. */
typedef struct {
    /* IBM 386SLC/486SLC/486BL MSRs */
    uint64_t ibm_por;  /* 0x00001000 - 386SLC and later */
    uint64_t ibm_crcr; /* 0x00001001 - 386SLC and later */
    uint64_t ibm_por2; /* 0x00001002 - 486SLC and later */
    uint64_t ibm_pcr;  /* 0x00001004 - 486BL3 */
    /* IDT WinChip C6/2/VIA Cyrix III MSRs */
    uint32_t fcr;      /* 0x00000107 (IDT), 0x00001107 (VIA) */
    uint64_t fcr2;     /* 0x00000108 (IDT), 0x00001108 (VIA) */
    uint64_t fcr3;     /* 0x00000108 (IDT), 0x00001108 (VIA) */
    uint64_t mcr[8];   /* 0x00000110 - 0x00000117 (IDT) */
    uint32_t mcr_ctrl; /* 0x00000120 (IDT) */
    /* AMD K5/K6 MSRs */
    uint64_t amd_aar;    /* 0x00000082 - all K5 */
    uint64_t amd_hwcr;   /* 0x00000083 - all K5 and all K6 */
    uint64_t amd_watmcr; /* 0x00000085 - K5 Model 1 and later */
    uint64_t amd_wapmrr; /* 0x00000086 - K5 Model 1 and later */
    uint64_t amd_efer;   /* 0xc0000080 - all K5 and all K6 */
    uint64_t amd_star;   /* 0xc0000081 - K6-2 and later */
    uint64_t amd_whcr;   /* 0xc0000082 - all K5 and all K6 */
    uint64_t amd_uwccr;  /* 0xc0000085 - K6-2C and later */
    uint64_t amd_epmr;   /* 0xc0000086 - K6-III+/2+ only */
    uint64_t amd_psor;   /* 0xc0000087 - K6-2C and later */
    uint64_t amd_pfir;   /* 0xc0000088 - K6-2C and later */
    uint64_t amd_l2aar;  /* 0xc0000089 - K6-III and later */
    /* Pentium/Pentium MMX MSRs */
    uint64_t mcar;          /* 0x00000000 - also on K5 and (R/W) K6 */
    uint64_t mctr;          /* 0x00000001 - also on K5 and (R/W) K6 */
    uint32_t tr1;           /* 0x00000002 - also on WinChip C6/2 */
    uint32_t tr2;           /* 0x00000004 - reserved on PMMX */
    uint32_t tr3;           /* 0x00000005 */
    uint32_t tr4;           /* 0x00000006 */
    uint32_t tr5;           /* 0x00000007 */
    uint32_t tr6;           /* 0x00000008 */
    uint32_t tr7;           /* 0x00000009 */
    uint32_t tr9;           /* 0x0000000b */
    uint32_t tr10;          /* 0x0000000c */
    uint32_t tr11;          /* 0x0000000d */
    uint32_t tr12;          /* 0x0000000e - also on WinChip C6/2 and K6 */
    uint32_t cesr;          /* 0x00000011 - also on WinChip C6/2 and Cx6x86MX */
    uint64_t pmc[2];        /* 0x00000012, 0x00000013 - also on WinChip C6/2 and Cx6x86MX */
    uint32_t fp_last_xcpt;  /* 0x8000001b - undocumented */
    uint32_t probe_ctl;     /* 0x8000001d - undocumented */
    uint32_t ecx8000001e;   /* 0x8000001e - undocumented */
    uint32_t ecx8000001f;   /* 0x8000001f - undocumented */
    /* Pentium Pro/II MSRs */
    uint64_t apic_base;          /* 0x0000001b */
    uint32_t test_ctl;           /* 0x00000033 */
    uint64_t bios_updt;          /* 0x00000079 */
    uint64_t bbl_cr_dx[4];       /* 0x00000088 - 0x0000008b */
    uint64_t perfctr[2];         /* 0x000000c1, 0x000000c2 */
    uint64_t mtrr_cap;           /* 0x000000fe */
    uint64_t bbl_cr_addr;        /* 0x00000116 */
    uint64_t bbl_cr_decc;        /* 0x00000118 */
    uint64_t bbl_cr_ctl;         /* 0x00000119 */
    uint64_t bbl_cr_trig;        /* 0x0000011a */
    uint64_t bbl_cr_busy;        /* 0x0000011b */
    uint64_t bbl_cr_ctl3;        /* 0x0000011e */
    uint16_t sysenter_cs;        /* 0x00000174 - Pentium II and later */
    uint32_t sysenter_esp;       /* 0x00000175 - Pentium II and later */
    uint32_t sysenter_eip;       /* 0x00000176 - Pentium II and later */
    uint64_t mcg_ctl;            /* 0x0000017b */
    uint64_t evntsel[2];         /* 0x00000186, 0x00000187 */
    uint32_t debug_ctl;          /* 0x000001d9 */
    uint32_t rob_cr_bkuptmpdr6;  /* 0x000001e0 */
    /* MTTR-related MSRs also present on the VIA Cyrix III */
    uint64_t mtrr_physbase[8];  /* 0x00000200 - 0x0000020f (ECX & 0) */
    uint64_t mtrr_physmask[8];  /* 0x00000200 - 0x0000020f (ECX & 1) */
    uint64_t mtrr_fix64k_8000;  /* 0x00000250 */
    uint64_t mtrr_fix16k_8000;  /* 0x00000258 */
    uint64_t mtrr_fix16k_a000;  /* 0x00000259 */
    uint64_t mtrr_fix4k[8];     /* 0x00000268 - 0x0000026f */
    uint64_t mtrr_deftype;      /* 0x000002ff */
    uint64_t pat;               /* 0x00000277 - Pentium II Deschutes and later */
    uint64_t mca_ctl[5];        /* 0x00000400, 0x00000404, 0x00000408, 0x0000040c, 0x00000410 */
    uint64_t ecx570;            /* 0x00000570 */
    /* Other/Unclassified MSRs */
    uint64_t ecx20; /* 0x00000020, really 0x40000020, but we filter out the top 18 bits
                       like a real Deschutes does. */
} msr_t;
/* Core CPU execution state.  The dynarec addresses members of the
   global `cpu_state` via cpu_state_offset() (defined below), which
   computes signed byte offsets relative to cpu_state + 128 - so the
   hot members are deliberately placed first.  Field layout should not
   be reordered casually. */
typedef struct {
    x86reg regs[8];     /* general-purpose registers, EAX..EDI order (see macros below) */
    uint8_t tag[8];     /* per-register x87 tag state - TODO confirm encoding */
    x86seg *ea_seg;     /* segment of the current effective address */
    uint32_t eaaddr;    /* offset of the current effective address */
    int flags_op;       /* lazy-flags state: last op + its result/operands, */
    uint32_t flags_res; /* consumed by flags_rebuild() - confirm at its definition */
    uint32_t flags_op1;
    uint32_t flags_op2;
    uint32_t pc;        /* instruction pointer (EIP) */
    uint32_t oldpc;     /* EIP at the start of the current instruction */
    uint32_t op32;
    int TOP;            /* x87 register stack top */
    union {
        struct {
            int8_t rm;
            int8_t mod;
            int8_t reg;
        } rm_mod_reg;
        int32_t rm_mod_reg_data; /* all three ModR/M fields as one word */
    } rm_data;
    uint8_t ssegs;
    uint8_t ismmx;
    uint8_t abrt;      /* non-zero when the current instruction aborted (fault) */
    uint8_t _smi_line;
    int _cycles;       /* cycle budget - aliased as `cycles` below */
#ifdef FPU_CYCLES
    int _fpu_cycles;
#endif
    int _in_smm;
    uint16_t npxs;     /* x87 status word */
    uint16_t npxc;     /* x87 control word */
    double ST[8];      /* x87 register stack */
    uint16_t MM_w4[8];
    MMX_REG MM[8];     /* MMX registers */
#ifdef USE_NEW_DYNAREC
# if defined(__APPLE__) && defined(__aarch64__)
    uint64_t old_fp_control;
    uint64_t new_fp_control;
# else
    uint32_t old_fp_control;
    uint32_t new_fp_control;
# endif
# if defined i386 || defined __i386 || defined __i386__ || defined _X86_ || defined _M_IX86
    uint16_t old_fp_control2;
    uint16_t new_fp_control2;
# endif
# if defined i386 || defined __i386 || defined __i386__ || defined _X86_ || defined _M_IX86 || defined __amd64__ || defined _M_X64
    uint32_t trunc_fp_control;
# endif
#else
    uint16_t old_npxc;
    uint16_t new_npxc;
#endif
    x86seg seg_cs;     /* cached segment registers */
    x86seg seg_ds;
    x86seg seg_es;
    x86seg seg_ss;
    x86seg seg_fs;
    x86seg seg_gs;
    union {
        uint32_t l;    /* CR0 (aliased as cr0) */
        uint16_t w;    /* low word = machine status word (aliased as msw) */
    } CR0;
    uint16_t flags;    /* FLAGS (low 16 bits) */
    uint16_t eflags;   /* EFLAGS high half (RF/VM/VIF/VIP/VID bits) */
    uint32_t _smbase;
} cpu_state_t;
#define in_smm cpu_state._in_smm
#define smi_line cpu_state._smi_line
#define smbase cpu_state._smbase
/*The cpu_state.flags below must match in both cpu_cur_status and block->status for a block
to be valid*/
#define CPU_STATUS_USE32 (1 << 0)
#define CPU_STATUS_STACK32 (1 << 1)
#define CPU_STATUS_PMODE (1 << 2)
#define CPU_STATUS_V86 (1 << 3)
#define CPU_STATUS_SMM (1 << 4)
#ifdef USE_NEW_DYNAREC
# define CPU_STATUS_FLAGS 0xff
#else
# define CPU_STATUS_FLAGS 0xffff
#endif
/*If the cpu_state.flags below are set in cpu_cur_status, they must be set in block->status.
Otherwise they are ignored*/
#ifdef USE_NEW_DYNAREC
# define CPU_STATUS_NOTFLATDS (1 << 8)
# define CPU_STATUS_NOTFLATSS (1 << 9)
# define CPU_STATUS_MASK 0xff00
#else
# define CPU_STATUS_NOTFLATDS (1 << 16)
# define CPU_STATUS_NOTFLATSS (1 << 17)
# define CPU_STATUS_MASK 0xffff0000
#endif
#ifdef _MSC_VER
# define COMPILE_TIME_ASSERT(expr) /*nada*/
#else
# ifdef EXTREME_DEBUG
# define COMPILE_TIME_ASSERT(expr) typedef char COMP_TIME_ASSERT[(expr) ? 1 : 0];
# else
# define COMPILE_TIME_ASSERT(expr) /*nada*/
# endif
#endif
COMPILE_TIME_ASSERT(sizeof(cpu_state_t) <= 128)
#define cpu_state_offset(MEMBER) ((uint8_t) ((uintptr_t) &cpu_state.MEMBER - (uintptr_t) &cpu_state - 128))
#define EAX cpu_state.regs[0].l
#define AX cpu_state.regs[0].w
#define AL cpu_state.regs[0].b.l
#define AH cpu_state.regs[0].b.h
#define ECX cpu_state.regs[1].l
#define CX cpu_state.regs[1].w
#define CL cpu_state.regs[1].b.l
#define CH cpu_state.regs[1].b.h
#define EDX cpu_state.regs[2].l
#define DX cpu_state.regs[2].w
#define DL cpu_state.regs[2].b.l
#define DH cpu_state.regs[2].b.h
#define EBX cpu_state.regs[3].l
#define BX cpu_state.regs[3].w
#define BL cpu_state.regs[3].b.l
#define BH cpu_state.regs[3].b.h
#define ESP cpu_state.regs[4].l
#define EBP cpu_state.regs[5].l
#define ESI cpu_state.regs[6].l
#define EDI cpu_state.regs[7].l
#define SP cpu_state.regs[4].w
#define BP cpu_state.regs[5].w
#define SI cpu_state.regs[6].w
#define DI cpu_state.regs[7].w
#define cycles cpu_state._cycles
#ifdef FPU_CYCLES
# define fpu_cycles cpu_state._fpu_cycles
#endif
#define cpu_rm cpu_state.rm_data.rm_mod_reg.rm
#define cpu_mod cpu_state.rm_data.rm_mod_reg.mod
#define cpu_reg cpu_state.rm_data.rm_mod_reg.reg
/* Global variables. */
extern cpu_state_t cpu_state;
extern const cpu_family_t cpu_families[];
extern cpu_family_t *cpu_f;
extern CPU *cpu_s;
extern int cpu_override;
extern int cpu_isintel;
extern int cpu_iscyrix;
extern int cpu_16bitbus;
extern int cpu_64bitbus;
extern int cpu_pci_speed;
extern int cpu_multi;
extern double cpu_dmulti;
extern double fpu_multi;
extern double cpu_busspeed;
extern int cpu_cyrix_alignment; /* Cyrix 5x86/6x86 only has data misalignment
penalties when crossing 8-byte boundaries. */
extern int cpu_cpurst_on_sr; /* SiS 551x and 5571: Issue CPURST on soft reset. */
extern int is8086;
extern int is186;
extern int is286;
extern int is386;
extern int is6117;
extern int is486;
extern int is_am486;
extern int is_am486dxl;
extern int is_pentium;
extern int is_k5;
extern int is_k6;
extern int is_p6;
extern int is_cxsmm;
extern int hascache;
extern int isibm486;
extern int is_nec;
extern int is_rapidcad;
extern int hasfpu;
#define CPU_FEATURE_RDTSC (1 << 0)
#define CPU_FEATURE_MSR (1 << 1)
#define CPU_FEATURE_MMX (1 << 2)
#define CPU_FEATURE_CR4 (1 << 3)
#define CPU_FEATURE_VME (1 << 4)
#define CPU_FEATURE_CX8 (1 << 5)
#define CPU_FEATURE_3DNOW (1 << 6)
#define CPU_FEATURE_SYSCALL (1 << 7)
#define CPU_FEATURE_3DNOWE (1 << 8)
extern uint32_t cpu_features;
extern int smi_latched;
extern int smm_in_hlt;
extern int smi_block;
#ifdef USE_NEW_DYNAREC
extern uint16_t cpu_cur_status;
#else
extern uint32_t cpu_cur_status;
#endif
extern uint64_t cpu_CR4_mask;
extern uint64_t tsc;
extern msr_t msr;
extern uint8_t opcode;
extern int cpl_override;
extern int CPUID;
extern uint64_t xt_cpu_multi;
extern int isa_cycles;
extern int cpu_inited;
extern uint32_t oldds;
extern uint32_t oldss;
extern uint32_t olddslimit;
extern uint32_t oldsslimit;
extern uint32_t olddslimitw;
extern uint32_t oldsslimitw;
extern uint32_t pccache;
extern uint8_t *pccache2;
extern double bus_timing;
extern double isa_timing;
extern double pci_timing;
extern double agp_timing;
extern uint16_t temp_seg_data[4];
extern uint16_t cs_msr;
extern uint32_t esp_msr;
extern uint32_t eip_msr;
/* For the AMD K6. */
extern uint64_t amd_efer;
extern uint64_t star;
#define cr0 cpu_state.CR0.l
#define msw cpu_state.CR0.w
extern uint32_t cr2;
extern uint32_t cr3;
extern uint32_t cr4;
extern uint32_t dr[8];
extern uint32_t _tr[8];
extern uint32_t cache_index;
extern uint8_t _cache[2048];
/*Segments -
_cs,_ds,_es,_ss are the segment structures
CS,DS,ES,SS is the 16-bit data
cs,ds,es,ss are defines to the bases*/
extern x86seg gdt;
extern x86seg ldt;
extern x86seg idt;
extern x86seg tr;
extern x86seg _oldds;
#define CS cpu_state.seg_cs.seg
#define DS cpu_state.seg_ds.seg
#define ES cpu_state.seg_es.seg
#define SS cpu_state.seg_ss.seg
#define FS cpu_state.seg_fs.seg
#define GS cpu_state.seg_gs.seg
#define cs cpu_state.seg_cs.base
#define ds cpu_state.seg_ds.base
#define es cpu_state.seg_es.base
#define ss cpu_state.seg_ss.base
#define fs_seg cpu_state.seg_fs.base
#define gs cpu_state.seg_gs.base
#define ISA_CYCLES(x) (x * isa_cycles)
extern int cpu_cycles_read;
extern int cpu_cycles_read_l;
extern int cpu_cycles_write;
extern int cpu_cycles_write_l;
extern int cpu_prefetch_cycles;
extern int cpu_prefetch_width;
extern int cpu_mem_prefetch_cycles;
extern int cpu_rom_prefetch_cycles;
extern int cpu_waitstates;
extern int cpu_cache_int_enabled;
extern int cpu_cache_ext_enabled;
extern int cpu_isa_speed;
extern int cpu_pci_speed;
extern int cpu_agp_speed;
extern int timing_rr;
extern int timing_mr;
extern int timing_mrl;
extern int timing_rm;
extern int timing_rml;
extern int timing_mm;
extern int timing_mml;
extern int timing_bt;
extern int timing_bnt;
extern int timing_int;
extern int timing_int_rm;
extern int timing_int_v86;
extern int timing_int_pm;
extern int timing_int_pm_outer;
extern int timing_iret_rm;
extern int timing_iret_v86;
extern int timing_iret_pm;
extern int timing_iret_pm_outer;
extern int timing_call_rm;
extern int timing_call_pm;
extern int timing_call_pm_gate;
extern int timing_call_pm_gate_inner;
extern int timing_retf_rm;
extern int timing_retf_pm;
extern int timing_retf_pm_outer;
extern int timing_jmp_rm;
extern int timing_jmp_pm;
extern int timing_jmp_pm_gate;
extern int timing_misaligned;
extern int in_sys;
extern int unmask_a20_in_smm;
extern int cycles_main;
extern uint32_t old_rammask;
#ifdef USE_ACYCS
extern int acycs;
#endif
extern int pic_pending;
extern int is_vpc;
extern int soft_reset_mask;
extern int alt_access;
extern int cpu_end_block_after_ins;
extern uint16_t cpu_fast_off_count;
extern uint16_t cpu_fast_off_val;
extern uint32_t cpu_fast_off_flags;
/* Functions. */
extern int cpu_has_feature(int feature);
extern char *cpu_current_pc(char *bufp);
extern void cpu_update_waitstates(void);
extern void cpu_set(void);
extern void cpu_close(void);
extern void cpu_set_isa_speed(int speed);
extern void cpu_set_pci_speed(int speed);
extern void cpu_set_isa_pci_div(int div);
extern void cpu_set_agp_speed(int speed);
extern void cpu_CPUID(void);
extern void cpu_RDMSR(void);
extern void cpu_WRMSR(void);
extern int checkio(uint32_t port, int mask);
extern void codegen_block_end(void);
extern void codegen_reset(void);
extern void cpu_set_edx(void);
extern int divl(uint32_t val);
extern void execx86(int32_t cycs);
extern void enter_smm(int in_hlt);
extern void enter_smm_check(int in_hlt);
extern void leave_smm(void);
extern void exec386_2386(int32_t cycs);
extern void exec386(int32_t cycs);
extern void exec386_dynarec(int32_t cycs);
extern int idivl(int32_t val);
extern void resetmcr(void);
extern void resetx86(void);
extern void refreshread(void);
extern void resetreadlookup(void);
extern void softresetx86(void);
extern void hardresetx86(void);
extern void x86_int(int num);
extern void x86_int_sw(int num);
extern int x86_int_sw_rm(int num);
#ifdef ENABLE_808X_LOG
extern void dumpregs(int __force);
extern void x87_dumpregs(void);
extern void x87_reset(void);
#endif
extern int cpu_effective;
extern int cpu_alt_reset;
extern void cpu_dynamic_switch(int new_cpu);
extern void cpu_ven_reset(void);
extern void update_tsc(void);
extern int sysenter(uint32_t fetchdat);
extern int sysexit(uint32_t fetchdat);
extern int syscall_op(uint32_t fetchdat);
extern int sysret(uint32_t fetchdat);
extern cpu_family_t *cpu_get_family(const char *internal_name);
extern uint8_t cpu_is_eligible(const cpu_family_t *cpu_family, int cpu, int machine);
extern uint8_t cpu_family_is_eligible(const cpu_family_t *cpu_family, int machine);
extern int fpu_get_type(const cpu_family_t *cpu_family, int cpu, const char *internal_name);
extern const char *fpu_get_internal_name(const cpu_family_t *cpu_family, int cpu, int type);
extern const char *fpu_get_name_from_index(const cpu_family_t *cpu_family, int cpu, int c);
extern int fpu_get_type_from_index(const cpu_family_t *cpu_family, int cpu, int c);
void cyrix_load_seg_descriptor(uint32_t addr, x86seg *seg);
void cyrix_write_seg_descriptor(uint32_t addr, x86seg *seg);
#define SMHR_VALID (1 << 0)
#define SMHR_ADDR_MASK (0xfffffffc)
/* One fetched instruction dword, addressable as a whole (fd) or as
   its four constituent bytes. */
typedef union {
    uint32_t fd;
    uint8_t b[4];
} fetch_dat_t;
/* Cyrix-specific CPU state: eight address region registers (base and
   size each) plus the SMM header pointer register (see the SMHR_*
   bit definitions above). */
typedef struct {
    struct {
        uint32_t base;
        uint64_t size;
    } arr[8];
    uint32_t smhr;
} cyrix_t;
extern uint32_t addr64;
extern uint32_t addr64_2;
extern uint32_t addr64a[8];
extern uint32_t addr64a_2[8];
extern int soft_reset_pci;
extern int reset_on_hlt;
extern int hlt_reset_pending;
extern cyrix_t cyrix;
extern int prefetch_prefixes;
extern int cpu_use_exec;
extern uint8_t use_custom_nmi_vector;
extern uint32_t custom_nmi_vector;
extern void (*cpu_exec)(int32_t cycs);
extern uint8_t do_translate;
extern uint8_t do_translate2;
extern void SF_FPU_reset(void);
extern void reset_808x(int hard);
extern void interrupt_808x(uint16_t addr);
extern void cpu_register_fast_off_handler(void *timer);
extern void cpu_fast_off_advance(void);
extern void cpu_fast_off_period_set(uint16_t vla, double period);
extern void cpu_fast_off_reset(void);
extern void smi_raise(void);
extern void nmi_raise(void);
extern MMX_REG *MMP[8];
extern uint16_t *MMEP[8];
extern int cpu_block_end;
extern int cpu_override_dynarec;
extern void mmx_init(void);
extern void prefetch_flush(void);
extern void prefetch_run(int instr_cycles, int bytes, int modrm, int reads, int reads_l, int writes, int writes_l, int ea32);
extern int lock_legal[256];
extern int lock_legal_0f[256];
extern int lock_legal_ba[8];
extern int lock_legal_80[8];
extern int lock_legal_f6[8];
extern int lock_legal_fe[8];
extern int in_lock;
extern int cpu_override_interpreter;
extern int is_lock_legal(uint32_t fetchdat);
#endif /*EMU_CPU_H*/
``` | /content/code_sandbox/src/cpu/cpu.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 7,304 |
```c
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <wchar.h>
#include <86box/86box.h>
#include "cpu.h"
#include <86box/mem.h>
#include <86box/plat_unused.h>
#include "x86.h"
#include "x86_ops.h"
#include "x87_sf.h"
#include "x87.h"
#include "codegen.h"
#include "codegen_ops.h"
#include "codegen_timing_common.h"
/* Encode an immediate cycle count as a fake "int *" timing-table entry
   (as opposed to a real pointer at one of the timing_* variables).
   CYCLES(c) stores a plain count; CYCLES2(c16, c32) packs a 16-bit-mode
   count in bits 0-7 and a 32-bit-mode count in bits 8-15, with all bits
   above 15 set.  Arguments and the expansions are fully parenthesized
   so expression arguments (e.g. CYCLES(a + b)) cannot silently turn
   into pointer arithmetic or mis-bind against the bitwise ORs. */
#define CYCLES(c)         ((int *) (c))
#define CYCLES2(c16, c32) ((int *) ((-1 & ~0xffff) | (c16) | ((c32) << 8)))
/* Per-opcode timings for the IDT WinChip, memory-operand (mod != 3)
   forms, indexed by the first opcode byte.  Each entry either points
   at one of the runtime timing_* variables or encodes an immediate
   cycle count via CYCLES()/CYCLES2() (the latter with separate 16- and
   32-bit counts).  NULL entries are opcodes whose timing is resolved
   elsewhere (prefixes, FPU escapes, 0F escape - TODO confirm). */
static int *opcode_timings_winchip[256] = {
    // clang-format off
/*00*/ &timing_mr, &timing_mr, &timing_rm, &timing_rm, &timing_rr, &timing_rr, CYCLES(2), CYCLES(3), &timing_mr, &timing_mr, &timing_rm, &timing_rm, &timing_rr, &timing_rr, CYCLES(2), NULL,
/*10*/ &timing_mr, &timing_mr, &timing_rm, &timing_rm, &timing_rr, &timing_rr, CYCLES(2), CYCLES(3), &timing_mr, &timing_mr, &timing_rm, &timing_rm, &timing_rr, &timing_rr, CYCLES(2), CYCLES(3),
/*20*/ &timing_mr, &timing_mr, &timing_rm, &timing_rm, &timing_rr, &timing_rr, CYCLES(4), CYCLES(3), &timing_mr, &timing_mr, &timing_rm, &timing_rm, &timing_rr, &timing_rr, CYCLES(4), CYCLES(3),
/*30*/ &timing_mr, &timing_mr, &timing_rm, &timing_rm, &timing_rr, &timing_rr, CYCLES(4), CYCLES(2), &timing_mr, &timing_mr, &timing_rm, &timing_rm, &timing_rr, &timing_rr, CYCLES(4), CYCLES(2),
/*40*/ &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr,
/*50*/ CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1),
/*60*/ CYCLES(11), CYCLES(9), CYCLES(7), CYCLES(9), CYCLES(4), CYCLES(4), CYCLES(2), CYCLES(2), CYCLES(1), CYCLES2(17,25), CYCLES(1), CYCLES2(17,20), CYCLES(17), CYCLES(17), CYCLES(17), CYCLES(17),
/*70*/ &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt,
/*80*/ &timing_mr, &timing_mr, &timing_mr, &timing_mr, &timing_rm, &timing_rm, CYCLES(5), CYCLES(5), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(3), CYCLES(1), CYCLES(5), CYCLES(6),
/*90*/ CYCLES(1), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(0), CYCLES(4), CYCLES(4), CYCLES(5), CYCLES(2), CYCLES(3),
/*a0*/ CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(7), CYCLES(7), CYCLES(8), CYCLES(8), CYCLES(1), CYCLES(1), CYCLES(5), CYCLES(5), CYCLES(5), CYCLES(5), CYCLES(6), CYCLES(6),
/*b0*/ &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr,
/*c0*/ CYCLES(4), CYCLES(4), CYCLES(5), CYCLES(5), CYCLES(6), CYCLES(6), CYCLES(1), CYCLES(1), CYCLES(14), CYCLES(5), CYCLES(0), CYCLES(0), &timing_int, &timing_int, CYCLES(3), CYCLES(0),
/*d0*/ CYCLES(4), CYCLES(4), CYCLES(4), CYCLES(4), CYCLES(15), CYCLES(14), CYCLES(2), CYCLES(4), NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*e0*/ CYCLES(6), CYCLES(6), CYCLES(6), CYCLES(5), CYCLES(14), CYCLES(14), CYCLES(16), CYCLES(16), CYCLES(3), CYCLES(3), CYCLES(17), CYCLES(3), CYCLES(14), CYCLES(14), CYCLES(14), CYCLES(14),
/*f0*/ CYCLES(4), CYCLES(0), CYCLES(0), CYCLES(0), CYCLES(4), CYCLES(2), NULL, NULL, CYCLES(2), CYCLES(2), CYCLES(3), CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(3), NULL
    // clang-format on
};
/* Per-opcode timings for the IDT WinChip, register-operand (mod == 3)
   forms - same encoding as opcode_timings_winchip above (timing_*
   pointers or CYCLES()/CYCLES2()-encoded immediates, NULL for opcodes
   timed elsewhere). */
static int *opcode_timings_winchip_mod3[256] = {
    // clang-format off
/*00*/ &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, CYCLES(2), CYCLES(3), &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, CYCLES(2), NULL,
/*10*/ &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, CYCLES(2), CYCLES(3), &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, CYCLES(2), CYCLES(3),
/*20*/ &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, CYCLES(4), CYCLES(3), &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, CYCLES(4), CYCLES(3),
/*30*/ &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, CYCLES(4), CYCLES(2), &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, CYCLES(4), CYCLES(2),
/*40*/ &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr,
/*50*/ CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1),
/*60*/ CYCLES(11), CYCLES(9), CYCLES(7), CYCLES(9), CYCLES(4), CYCLES(4), CYCLES(2), CYCLES(2), CYCLES(1), CYCLES2(14,25), CYCLES(1), CYCLES2(17,20), CYCLES(17), CYCLES(17), CYCLES(17), CYCLES(17),
/*70*/ &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt,
/*80*/ &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, CYCLES(5), CYCLES(5), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(2), CYCLES(1), CYCLES(2), CYCLES(1),
/*90*/ CYCLES(1), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(0), CYCLES(4), CYCLES(4), CYCLES(5), CYCLES(2), CYCLES(3),
/*a0*/ CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(7), CYCLES(7), CYCLES(8), CYCLES(8), CYCLES(1), CYCLES(1), CYCLES(5), CYCLES(5), CYCLES(5), CYCLES(5), CYCLES(6), CYCLES(6),
/*b0*/ &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr,
/*c0*/ CYCLES(4), CYCLES(4), CYCLES(5), CYCLES(5), CYCLES(6), CYCLES(6), CYCLES(1), CYCLES(1), CYCLES(14), CYCLES(5), CYCLES(0), CYCLES(0), &timing_int, &timing_int, CYCLES(3), CYCLES(0),
/*d0*/ CYCLES(4), CYCLES(4), CYCLES(4), CYCLES(4), CYCLES(15), CYCLES(14), CYCLES(2), CYCLES(4), NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*e0*/ CYCLES(6), CYCLES(6), CYCLES(6), CYCLES(5), CYCLES(14), CYCLES(14), CYCLES(16), CYCLES(16), CYCLES(3), CYCLES(3), CYCLES(17), CYCLES(3), CYCLES(14), CYCLES(14), CYCLES(14), CYCLES(14),
/*f0*/ CYCLES(4), CYCLES(0), CYCLES(0), CYCLES(0), CYCLES(4), CYCLES(2), NULL, NULL, CYCLES(2), CYCLES(2), CYCLES(3), CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(3), NULL
    // clang-format on
};
/* Per-opcode timings for the IDT WinChip, 0F-prefixed opcodes,
   memory-operand forms, indexed by the second opcode byte.  Same
   encoding as the tables above; NULL marks undefined/unhandled
   0F opcodes. */
static int *opcode_timings_winchip_0f[256] = {
    // clang-format off
/*00*/ CYCLES(20), CYCLES(11), CYCLES(11), CYCLES(10), NULL, CYCLES(195), CYCLES(7), NULL, CYCLES(1000), CYCLES(10000), NULL, NULL, NULL, NULL, NULL, NULL,
/*10*/ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*20*/ CYCLES(6), CYCLES(6), CYCLES(6), CYCLES(6), CYCLES(6), CYCLES(6), NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*30*/ CYCLES(9), CYCLES(1), CYCLES(9), NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*40*/ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*50*/ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*60*/ &timing_rm, &timing_rm, &timing_rm, &timing_rm, &timing_rm, &timing_rm, &timing_rm, &timing_rm, &timing_rm, &timing_rm, &timing_rm, &timing_rm, NULL, NULL, &timing_rm, &timing_rm,
/*70*/ NULL, &timing_rm, &timing_rm, &timing_rm, &timing_rm, &timing_rm, &timing_rm, CYCLES(100), NULL, NULL, NULL, NULL, NULL, NULL, &timing_rm, &timing_rm,
/*80*/ &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt,
/*90*/ CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3),
/*a0*/ CYCLES(3), CYCLES(3), CYCLES(14), CYCLES(8), CYCLES(3), CYCLES(4), NULL, NULL, CYCLES(3), CYCLES(3), NULL, CYCLES(13), CYCLES(3), CYCLES(3), NULL, CYCLES2(18,30),
/*b0*/ CYCLES(10), CYCLES(10), CYCLES(6), CYCLES(13), CYCLES(6), CYCLES(6), CYCLES(3), CYCLES(3), NULL, NULL, CYCLES(6), CYCLES(13), CYCLES(7), CYCLES(7), CYCLES(3), CYCLES(3),
/*c0*/ CYCLES(4), CYCLES(4), NULL, NULL, NULL, NULL, NULL, CYCLES(3), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1),
/*d0*/ NULL, &timing_rm, &timing_rm, &timing_rm, NULL, &timing_rm, NULL, NULL, &timing_rm, &timing_rm, NULL, &timing_rm, &timing_rm, &timing_rm, NULL, &timing_rm,
/*e0*/ NULL, &timing_rm, &timing_rm, NULL, NULL, &timing_rm, NULL, NULL, &timing_rm, &timing_rm, NULL, &timing_rm, &timing_rm, &timing_rm, NULL, &timing_rm,
/*f0*/ NULL, &timing_rm, &timing_rm, &timing_rm, NULL, &timing_rm, NULL, NULL, &timing_rm, &timing_rm, &timing_rm, NULL, &timing_rm, &timing_rm, &timing_rm, NULL,
    // clang-format on
};
/*
 * Timings for two-byte (0F-prefixed) opcodes when the ModRM byte selects a
 * register operand (mod == 3), indexed by the second opcode byte.  Entries
 * are decoded by COUNT(): CYCLES()/CYCLES2() values packed into the pointer
 * itself, pointers to shared timing variables, or NULL (decodes as 0 cycles).
 */
static int *opcode_timings_winchip_0f_mod3[256] = {
    // clang-format off
/*00*/ CYCLES(20), CYCLES(11), CYCLES(11), CYCLES(10), NULL, CYCLES(195), CYCLES(7), NULL, CYCLES(1000), CYCLES(10000), NULL, NULL, NULL, NULL, NULL, NULL,
/*10*/ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*20*/ CYCLES(6), CYCLES(6), CYCLES(6), CYCLES(6), CYCLES(6), CYCLES(6), NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*30*/ CYCLES(9), CYCLES(1), CYCLES(9), NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*40*/ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*50*/ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*60*/ &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, NULL, NULL, &timing_rr, &timing_rr,
/*70*/ NULL, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, CYCLES(100), NULL, NULL, NULL, NULL, NULL, NULL, &timing_rr, &timing_rr,
/*80*/ &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt,
/*90*/ CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3),
/*a0*/ CYCLES(3), CYCLES(3), CYCLES(14), CYCLES(8), CYCLES(3), CYCLES(4), NULL, NULL, CYCLES(3), CYCLES(3), NULL, CYCLES(13), CYCLES(3), CYCLES(3), NULL, CYCLES2(18,30),
/*b0*/ CYCLES(10), CYCLES(10), CYCLES(6), CYCLES(13), CYCLES(6), CYCLES(6), CYCLES(3), CYCLES(3), NULL, NULL, CYCLES(6), CYCLES(13), CYCLES(7), CYCLES(7), CYCLES(3), CYCLES(3),
/*c0*/ CYCLES(4), CYCLES(4), NULL, NULL, NULL, NULL, NULL, CYCLES(3), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1),
/*d0*/ NULL, &timing_rr, &timing_rr, &timing_rr, NULL, &timing_rr, NULL, NULL, &timing_rr, &timing_rr, NULL, &timing_rr, &timing_rr, &timing_rr, NULL, &timing_rr,
/*e0*/ NULL, &timing_rr, &timing_rr, NULL, NULL, &timing_rr, NULL, NULL, &timing_rr, &timing_rr, NULL, &timing_rr, &timing_rr, &timing_rr, NULL, &timing_rr,
/*f0*/ NULL, &timing_rr, &timing_rr, &timing_rr, NULL, &timing_rr, NULL, NULL, &timing_rr, &timing_rr, &timing_rr, NULL, &timing_rr, &timing_rr, &timing_rr, NULL,
    // clang-format on
};
/* C0/C1/D0-D3 shift group, indexed by the ModRM reg field
   (ROL/ROR/RCL/RCR/SHL/SHR/SAL/SAR), memory operand. */
static int *opcode_timings_winchip_shift[8] = {
    // clang-format off
    CYCLES(7), CYCLES(7), CYCLES(10), CYCLES(10), CYCLES(7), CYCLES(7), CYCLES(7), CYCLES(7)
    // clang-format on
};
/* Shift group, register operand. */
static int *opcode_timings_winchip_shift_mod3[8] = {
    // clang-format off
    CYCLES(3), CYCLES(3), CYCLES(9), CYCLES(9), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3)
    // clang-format on
};
/* F6 group (TEST/--/NOT/NEG/MUL/IMUL/DIV/IDIV r/m8), memory operand. */
static int *opcode_timings_winchip_f6[8] = {
    // clang-format off
    &timing_rm, NULL, &timing_mm, &timing_mm, CYCLES(13), CYCLES(14), CYCLES(16), CYCLES(19)
    // clang-format on
};
/* F6 group, register operand. */
static int *opcode_timings_winchip_f6_mod3[8] = {
    // clang-format off
    &timing_rr, NULL, &timing_rr, &timing_rr, CYCLES(13), CYCLES(14), CYCLES(16), CYCLES(19)
    // clang-format on
};
/* F7 group (16/32-bit forms); CYCLES2() packs the 16- and 32-bit counts. */
static int *opcode_timings_winchip_f7[8] = {
    // clang-format off
    &timing_rm, NULL, &timing_mm, &timing_mm, CYCLES(21), CYCLES2(22,38), CYCLES2(24,40), CYCLES2(27,43)
    // clang-format on
};
static int *opcode_timings_winchip_f7_mod3[8] = {
    // clang-format off
    &timing_rr, NULL, &timing_rr, &timing_rr, CYCLES(21), CYCLES2(22,38), CYCLES2(24,40), CYCLES2(27,43)
    // clang-format on
};
/* FF group (INC/DEC/CALL/CALL far/JMP/JMP far/PUSH), memory operand. */
static int *opcode_timings_winchip_ff[8] = {
    // clang-format off
    &timing_mm, &timing_mm, CYCLES(5), CYCLES(0), CYCLES(5), CYCLES(0), CYCLES(5), NULL
    // clang-format on
};
static int *opcode_timings_winchip_ff_mod3[8] = {
    // clang-format off
    &timing_rr, &timing_rr, CYCLES(5), CYCLES(0), CYCLES(5), CYCLES(0), CYCLES(5), NULL
    // clang-format on
};
/* D8: FPU arithmetic with a 32-bit float memory operand, reg-field indexed. */
static int *opcode_timings_winchip_d8[8] = {
    // clang-format off
/*  FADDil      FMULil      FCOMil      FCOMPil     FSUBil      FSUBRil     FDIVil      FDIVRil*/
    CYCLES(10), CYCLES(12), CYCLES(9), CYCLES(9), CYCLES(10), CYCLES(10), CYCLES(78), CYCLES(78)
    // clang-format on
};
/* D8 register forms. */
static int *opcode_timings_winchip_d8_mod3[8] = {
    // clang-format off
/*  FADD        FMUL        FCOM        FCOMP       FSUB        FSUBR       FDIV        FDIVR*/
    CYCLES(4), CYCLES(6), CYCLES(3), CYCLES(3), CYCLES(4), CYCLES(4), CYCLES(72), CYCLES(72)
    // clang-format on
};
/* D9 memory forms. */
static int *opcode_timings_winchip_d9[8] = {
    // clang-format off
/*  FLDs                    FSTs        FSTPs       FLDENV      FLDCW       FSTENV      FSTCW*/
    CYCLES(2), NULL, CYCLES(7), CYCLES(7), CYCLES(34), CYCLES(4), CYCLES(67), CYCLES(3)
    // clang-format on
};
/* D9 register forms: 64 entries indexed by the low 6 bits of the ModRM byte
   (see codegen_timing_winchip_opcode). */
static int *opcode_timings_winchip_d9_mod3[64] = {
    // clang-format off
/*FLD*/
    CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1),
/*FXCH*/
    CYCLES(4), CYCLES(4), CYCLES(4), CYCLES(4), CYCLES(4), CYCLES(4), CYCLES(4), CYCLES(4),
/*FNOP*/
    CYCLES(7), NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*FSTP*/
    CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1),
/*  opFCHS      opFABS                  opFTST      opFXAM*/
    CYCLES(2), CYCLES(2), NULL, NULL, CYCLES(5), CYCLES(7), NULL, NULL,
/*  opFLD1      opFLDL2T    opFLDL2E    opFLDPI     opFLDEG2    opFLDLN2    opFLDZ*/
    CYCLES(5), CYCLES(7), CYCLES(7), CYCLES(7), CYCLES(7), CYCLES(7), CYCLES(5), NULL,
/*  opF2XM1     opFYL2X     opFPTAN     opFPATAN                opFDECSTP   opFINCSTP*/
    CYCLES(300), CYCLES(58), CYCLES(676), CYCLES(355), NULL, NULL, CYCLES(3), CYCLES(3),
/*  opFPREM                 opFSQRT     opFSINCOS   opFRNDINT   opFSCALE    opFSIN      opFCOS*/
    CYCLES(70), NULL, CYCLES(72), CYCLES(292), CYCLES(21), CYCLES(30), CYCLES(474), CYCLES(474)
    // clang-format on
};
/* DA memory forms (32-bit integer operands). */
static int *opcode_timings_winchip_da[8] = {
    // clang-format off
/*  FADDil      FMULil      FCOMil      FCOMPil     FSUBil      FSUBRil     FDIVil      FDIVRil*/
    CYCLES(10), CYCLES(12), CYCLES(9), CYCLES(9), CYCLES(10), CYCLES(10), CYCLES(78), CYCLES(78)
    // clang-format on
};
/* DA register forms: only reg field 5 (DA E8-EF, i.e. FUCOMPP) is timed. */
static int *opcode_timings_winchip_da_mod3[8] = {
    // clang-format off
    NULL, NULL, NULL, NULL, NULL, CYCLES(5), NULL, NULL
    // clang-format on
};
/* DB memory forms. */
static int *opcode_timings_winchip_db[8] = {
    // clang-format off
/*  FLDil                   FSTil       FSTPil                  FLDe                    FSTPe*/
    CYCLES(6), NULL, CYCLES(7), CYCLES(7), NULL, CYCLES(8), NULL, CYCLES(8)
    // clang-format on
};
/*
 * DB register forms: 64 entries indexed by the low 6 bits of the ModRM byte.
 * Fixed here: the E0-E7 row previously had only seven initializers (63 in
 * total for a 64-entry array), silently shifting every following entry left
 * by one — harmless only because the remainder of the table is NULL.  The
 * row now carries the full eight entries.
 */
static int *opcode_timings_winchip_db_mod3[64] = {
    // clang-format off
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*          opFNOP      opFCLEX     opFINIT     opFNOP      opFNOP*/
    NULL, CYCLES(7), CYCLES(18), CYCLES(27), CYCLES(7), CYCLES(7), NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    // clang-format on
};
/* DC: FPU arithmetic with a 64-bit double memory operand. */
static int *opcode_timings_winchip_dc[8] = {
    // clang-format off
/*  opFADDd_a16 opFMULd_a16 opFCOMd_a16 opFCOMPd_a16 opFSUBd_a16 opFSUBRd_a16 opFDIVd_a16 opFDIVRd_a16*/
    CYCLES(6), CYCLES(8), CYCLES(5), CYCLES(5), CYCLES(6), CYCLES(6), CYCLES(74), CYCLES(74)
    // clang-format on
};
/* DC register forms. */
static int *opcode_timings_winchip_dc_mod3[8] = {
    // clang-format off
/*  opFADDr     opFMULr                             opFSUBRr    opFSUBr     opFDIVRr    opFDIVr*/
    CYCLES(4), CYCLES(6), NULL, NULL, CYCLES(4), CYCLES(4), CYCLES(72), CYCLES(72)
    // clang-format on
};
/* DD memory forms. */
static int *opcode_timings_winchip_dd[8] = {
    // clang-format off
/*  FLDd                    FSTd        FSTPd       FRSTOR                  FSAVE       FSTSW*/
    CYCLES(2), NULL, CYCLES(8), CYCLES(8), CYCLES(131), NULL, CYCLES(154), CYCLES(5)
    // clang-format on
};
/* DD register forms. */
static int *opcode_timings_winchip_dd_mod3[8] = {
    // clang-format off
/*  FFFREE                  FST         FSTP        FUCOM       FUCOMP*/
    CYCLES(3), NULL, CYCLES(1), CYCLES(1), CYCLES(3), CYCLES(3), NULL, NULL
    // clang-format on
};
/* DE memory forms (16-bit integer operands). */
static int *opcode_timings_winchip_de[8] = {
    // clang-format off
/*  FADDiw      FMULiw      FCOMiw      FCOMPiw     FSUBil      FSUBRil     FDIVil      FDIVRil*/
    CYCLES(10), CYCLES(12), CYCLES(9), CYCLES(9), CYCLES(10), CYCLES(10), CYCLES(78), CYCLES(78)
    // clang-format on
};
/* DE register forms. */
static int *opcode_timings_winchip_de_mod3[8] = {
    // clang-format off
/*  FADD        FMUL        FCOMPP      FSUB        FSUBR       FDIV        FDIVR*/
    CYCLES(4), CYCLES(6), NULL, CYCLES(3), CYCLES(4), CYCLES(4), CYCLES(72), CYCLES(72)
    // clang-format on
};
/* DF memory forms. */
static int *opcode_timings_winchip_df[8] = {
    // clang-format off
/*  FILDiw                  FISTiw      FISTPiw                 FILDiq      FBSTP       FISTPiq*/
    CYCLES(6), NULL, CYCLES(7), CYCLES(7), NULL, CYCLES(8), CYCLES(172), CYCLES(8)
    // clang-format on
};
/* DF register forms. */
static int *opcode_timings_winchip_df_mod3[8] = {
    // clang-format off
/*  FFREE                   FST         FSTP        FUCOM       FUCOMP*/
    CYCLES(3), NULL, CYCLES(1), CYCLES(1), CYCLES(3), CYCLES(3), NULL, NULL
    // clang-format on
};
/*
 * 80/82/83 and 81 immediate-ALU groups, indexed by the ModRM reg field
 * (ADD/OR/ADC/SBB/AND/SUB/XOR/CMP).  CMP (entry 7) only reads its operand,
 * hence timing_rm.  Fixed here: the "// clang-format off" opened in the
 * first table was never closed until the end of the last one, and the brace
 * placement was inconsistent with the rest of the file.
 * NOTE(review): the mod3 tables repeat the memory timings (timing_mr) while
 * other winchip mod3 tables use timing_rr — verify this is intentional.
 */
static int *opcode_timings_winchip_8x[8] = {
    // clang-format off
    &timing_mr, &timing_mr, &timing_mr, &timing_mr, &timing_mr, &timing_mr, &timing_mr, &timing_rm
    // clang-format on
};
static int *opcode_timings_winchip_8x_mod3[8] = {
    // clang-format off
    &timing_mr, &timing_mr, &timing_mr, &timing_mr, &timing_mr, &timing_mr, &timing_mr, &timing_rm
    // clang-format on
};
static int *opcode_timings_winchip_81[8] = {
    // clang-format off
    &timing_mr, &timing_mr, &timing_mr, &timing_mr, &timing_mr, &timing_mr, &timing_mr, &timing_rm
    // clang-format on
};
static int *opcode_timings_winchip_81_mod3[8] = {
    // clang-format off
    &timing_mr, &timing_mr, &timing_mr, &timing_mr, &timing_mr, &timing_mr, &timing_mr, &timing_rm
    // clang-format on
};
static int      timing_count;     /* cycles accumulated for the instruction being timed */
static uint8_t  last_prefix;      /* last prefix byte seen (0x0f, 0xd8-0xdf escapes, ...) */
static uint32_t regmask_modified; /* destination-register mask of the previous instruction (AGI tracking) */

/*
 * Decode one timing-table entry into a cycle count.  Entries use three
 * encodings:
 *   - CYCLES(n): small counts (<= 10000) stored directly in the pointer
 *     value, so NULL decodes as 0 cycles;
 *   - CYCLES2(a, b): two 8-bit counts packed into the low 16 bits of an
 *     otherwise all-ones pointer; bit 8 of op_32 selects the high byte
 *     (presumably the 32-bit-operand count, matching CYCLES2(18,30) usage —
 *     confirm against the CYCLES2 macro);
 *   - anything else: a genuine pointer to a shared timing variable.
 */
static inline int
COUNT(int *c, int op_32)
{
    if ((uintptr_t) c <= 10000)
        return (int) (uintptr_t) c;
    if (((uintptr_t) c & ~0xffff) == (-1 & ~0xffff)) {
        if (op_32 & 0x100)
            return ((uintptr_t) c >> 8) & 0xff;
        return (uintptr_t) c & 0xff;
    }
    return *c;
}
void
codegen_timing_winchip_block_start(void)
{
    /* A new code block starts with no pending register writes, so no AGI
       stall can be inherited from outside the block. */
    regmask_modified = 0;
}
void
codegen_timing_winchip_start(void)
{
    /* Begin timing a new x86 instruction: reset the cycle accumulator and
       forget any previously recorded prefix byte. */
    timing_count = 0;
    last_prefix  = 0;
}
/* Charge one prefix byte at its one-byte-opcode cost and remember it so the
   following opcode is decoded against the matching escape table. */
void
codegen_timing_winchip_prefix(uint8_t prefix, uint32_t fetchdat)
{
    int *entry = opcode_timings_winchip[prefix];

    timing_count += COUNT(entry, 0);
    last_prefix = prefix;
}
/*
 * Account for one decoded instruction.  The prefix recorded by
 * codegen_timing_winchip_prefix() (0F or a D8-DF FPU escape) selects the
 * timing/dependency table pair; group opcodes (80-83, C0/C1/D0-D3, F6/F7,
 * FF) are re-indexed by the ModRM reg field, FPU escapes by the reg field
 * (or the low 6 bits of ModRM for the D9/DB register forms).  A one-cycle
 * AGI stall is added when an addressing register of this instruction was
 * written by the previous instruction.
 */
void
codegen_timing_winchip_opcode(uint8_t opcode, uint32_t fetchdat, int op_32, UNUSED(uint32_t op_pc))
{
    int           **timings;
    const uint64_t *deps;
    int             mod3 = ((fetchdat & 0xc0) == 0xc0); /* register-form ModRM? */
    int             bit8 = !(opcode & 1);               /* even opcodes are the 8-bit forms */
    switch (last_prefix) {
        case 0x0f:
            timings = mod3 ? opcode_timings_winchip_0f_mod3 : opcode_timings_winchip_0f;
            deps = mod3 ? opcode_deps_0f_mod3 : opcode_deps_0f;
            break;
        case 0xd8:
            timings = mod3 ? opcode_timings_winchip_d8_mod3 : opcode_timings_winchip_d8;
            deps = mod3 ? opcode_deps_d8_mod3 : opcode_deps_d8;
            opcode = (opcode >> 3) & 7; /* FPU escapes index by the reg field */
            break;
        case 0xd9:
            timings = mod3 ? opcode_timings_winchip_d9_mod3 : opcode_timings_winchip_d9;
            deps = mod3 ? opcode_deps_d9_mod3 : opcode_deps_d9;
            opcode = mod3 ? opcode & 0x3f : (opcode >> 3) & 7; /* register forms use a 64-entry table */
            break;
        case 0xda:
            timings = mod3 ? opcode_timings_winchip_da_mod3 : opcode_timings_winchip_da;
            deps = mod3 ? opcode_deps_da_mod3 : opcode_deps_da;
            opcode = (opcode >> 3) & 7;
            break;
        case 0xdb:
            timings = mod3 ? opcode_timings_winchip_db_mod3 : opcode_timings_winchip_db;
            deps = mod3 ? opcode_deps_db_mod3 : opcode_deps_db;
            opcode = mod3 ? opcode & 0x3f : (opcode >> 3) & 7;
            break;
        case 0xdc:
            timings = mod3 ? opcode_timings_winchip_dc_mod3 : opcode_timings_winchip_dc;
            deps = mod3 ? opcode_deps_dc_mod3 : opcode_deps_dc;
            opcode = (opcode >> 3) & 7;
            break;
        case 0xdd:
            timings = mod3 ? opcode_timings_winchip_dd_mod3 : opcode_timings_winchip_dd;
            deps = mod3 ? opcode_deps_dd_mod3 : opcode_deps_dd;
            opcode = (opcode >> 3) & 7;
            break;
        case 0xde:
            timings = mod3 ? opcode_timings_winchip_de_mod3 : opcode_timings_winchip_de;
            deps = mod3 ? opcode_deps_de_mod3 : opcode_deps_de;
            opcode = (opcode >> 3) & 7;
            break;
        case 0xdf:
            timings = mod3 ? opcode_timings_winchip_df_mod3 : opcode_timings_winchip_df;
            deps = mod3 ? opcode_deps_df_mod3 : opcode_deps_df;
            opcode = (opcode >> 3) & 7;
            break;

        default:
            switch (opcode) {
                case 0x80:
                case 0x82:
                case 0x83:
                    timings = mod3 ? opcode_timings_winchip_8x_mod3 : opcode_timings_winchip_8x;
                    deps = mod3 ? opcode_deps_8x_mod3 : opcode_deps_8x;
                    opcode = (fetchdat >> 3) & 7; /* ALU group: index by reg field */
                    break;
                case 0x81:
                    timings = mod3 ? opcode_timings_winchip_81_mod3 : opcode_timings_winchip_81;
                    deps = mod3 ? opcode_deps_81_mod3 : opcode_deps_81;
                    opcode = (fetchdat >> 3) & 7;
                    break;
                case 0xc0:
                case 0xc1:
                case 0xd0:
                case 0xd1:
                case 0xd2:
                case 0xd3:
                    timings = mod3 ? opcode_timings_winchip_shift_mod3 : opcode_timings_winchip_shift;
                    deps = mod3 ? opcode_deps_shift_mod3 : opcode_deps_shift;
                    opcode = (fetchdat >> 3) & 7;
                    break;
                case 0xf6:
                    timings = mod3 ? opcode_timings_winchip_f6_mod3 : opcode_timings_winchip_f6;
                    deps = mod3 ? opcode_deps_f6_mod3 : opcode_deps_f6;
                    opcode = (fetchdat >> 3) & 7;
                    break;
                case 0xf7:
                    timings = mod3 ? opcode_timings_winchip_f7_mod3 : opcode_timings_winchip_f7;
                    deps = mod3 ? opcode_deps_f7_mod3 : opcode_deps_f7;
                    opcode = (fetchdat >> 3) & 7;
                    break;
                case 0xff:
                    timings = mod3 ? opcode_timings_winchip_ff_mod3 : opcode_timings_winchip_ff;
                    deps = mod3 ? opcode_deps_ff_mod3 : opcode_deps_ff;
                    opcode = (fetchdat >> 3) & 7;
                    break;

                default:
                    /* Plain one-byte opcode. */
                    timings = mod3 ? opcode_timings_winchip_mod3 : opcode_timings_winchip;
                    deps = mod3 ? opcode_deps_mod3 : opcode_deps;
                    break;
            }
    }
    timing_count += COUNT(timings[opcode], op_32);
    if (regmask_modified & get_addr_regmask(deps[opcode], fetchdat, op_32))
        timing_count++; /*AGI stall*/
    codegen_block_cycles += timing_count;
    /* Remember which registers this instruction writes for the next AGI check. */
    regmask_modified = get_dstdep_mask(deps[opcode], fetchdat, bit8);
}
void
codegen_timing_winchip_block_end(void)
{
    /* Nothing to do at the end of a block for the WinChip timing model. */
}
/* Timing-model dispatch table for the IDT WinChip.  Field order follows
   codegen_timing_t; the trailing NULL is presumably an optional extra hook
   this model does not use — confirm against the codegen_timing_t typedef. */
codegen_timing_t codegen_timing_winchip = {
    codegen_timing_winchip_start,       /* per-instruction reset */
    codegen_timing_winchip_prefix,      /* charge a prefix byte */
    codegen_timing_winchip_opcode,      /* charge the opcode itself */
    codegen_timing_winchip_block_start, /* per-block reset (AGI state) */
    codegen_timing_winchip_block_end,   /* per-block finish (no-op) */
    NULL
};
``` | /content/code_sandbox/src/cpu/codegen_timing_winchip.c | c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 10,728 |
```c
/*
* 86Box A hypervisor and IBM PC system emulator that specializes in
* running old operating systems and software designed for IBM
* PC systems and compatibles from 1981 through fairly recent
* system designs based on the PCI bus.
*
* This file is part of the 86Box distribution.
*
* 8080 CPU emulation.
*
* Authors: Cacodemon345
*
*/
#include <stdint.h>
#include <stdlib.h>
#include "cpu.h"
#include <86box/timer.h>
#include <86box/i8080.h>
#include <86box/mem.h>
#include <86box/plat_unused.h>
/* Interpreter bookkeeping for the (stubbed) 8080 core. */
static int completed = 1;  /* current instruction finished on this pass */
static int in_rep = 0;     /* inside a repeated instruction (unused by the stub) */
static int repeating = 0;  /* re-enter the same instruction on the next pass */
static int rep_c_flag = 0;
static int oldc;           /* carry flag sampled before the current instruction */
static int cycdiff;        /* cycle snapshot taken by clock_start() */
#ifdef UNUSED_8080_VARS
static int prefetching = 1;
static int refresh = 0;
static int clear_lock = 0;
static uint32_t cpu_src = 0;
static uint32_t cpu_dest = 0;
static uint32_t cpu_data = 0;
#endif
static void
clock_start(void)
{
cycdiff = cycles;
}
static void
clock_end(void)
{
int diff = cycdiff - cycles;
/* On 808x systems, clock speed is usually crystal frequency divided by an integer. */
tsc += (uint64_t) diff * (xt_cpu_multi >> 32ULL); /* Shift xt_cpu_multi by 32 bits to the right and then multiply. */
if (TIMER_VAL_LESS_THAN_VAL(timer_target, (uint32_t) tsc))
timer_process();
}
static void
i8080_wait(int c, int bus)
{
cycles -= c;
if (bus < 2) {
clock_end();
clock_start();
}
}
#ifdef UNUSED_8080_FUNCS
/* Read one data byte, charging 4 bus cycles first. */
static uint8_t
readmemb(uint32_t a)
{
    uint8_t ret;
    i8080_wait(4, 1);
    ret = read_mem_b(a);
    return ret;
}

/* Fetch the next instruction byte from the code segment and advance PC. */
static uint8_t
ins_fetch(i8080 *cpu)
{
    uint8_t ret = cpu->readmembyte(cpu->pmembase + cpu->pc);
    cpu->pc++;
    return ret;
}
#endif
/*
 * Load the 8080 register file from the 808x CPU context: HL <- BX, BC <- CX,
 * DE <- DX, A <- AL, SP <- BP; the low byte of the x86 flags doubles as the
 * 8080 PSW.  Code/data bases come from CS and DS.  (The mapping mirrors the
 * NEC V20/V30 8080 emulation mode — TODO confirm.)
 */
void
transfer_from_808x(i8080 *cpu)
{
    cpu->hl = BX;
    cpu->bc = CX;
    cpu->de = DX;
    cpu->a = AL;
    cpu->flags = cpu_state.flags & 0xFF;
    cpu->sp = BP;
    cpu->pc = cpu_state.pc;
    cpu->oldpc = cpu_state.oldpc;
    cpu->pmembase = cs;
    cpu->dmembase = ds;
}

/*
 * Write the 8080 register file back into the 808x context (inverse of
 * transfer_from_808x); only the low byte of the x86 flags is replaced.
 * NOTE(review): oldpc and the pmembase/dmembase segments are not written
 * back — confirm that is intentional.
 */
void
transfer_to_808x(i8080 *cpu)
{
    BX = cpu->hl;
    CX = cpu->bc;
    DX = cpu->de;
    AL = cpu->a;
    cpu_state.flags &= 0xFF00;
    cpu_state.flags |= cpu->flags & 0xFF;
    BP = cpu->sp;
    cpu_state.pc = cpu->pc;
}
/*
 * Read an 8080 register by its 3-bit encoding (B, C, D, E, H, L, M, A).
 * Encoding 110b is M: the memory byte addressed by the HL register pair
 * (Intel 8080 Assembly Language Programming Manual).  Fixed here: the old
 * code read M through SP, which corresponds to no 8080 register encoding.
 */
uint8_t
getreg_i8080(i8080 *cpu, uint8_t reg)
{
    uint8_t ret = 0xFF;

    switch (reg) {
        case 0x0:
            ret = cpu->b;
            break;
        case 0x1:
            ret = cpu->c;
            break;
        case 0x2:
            ret = cpu->d;
            break;
        case 0x3:
            ret = cpu->e;
            break;
        case 0x4:
            ret = cpu->h;
            break;
        case 0x5:
            ret = cpu->l;
            break;
        case 0x6:
            /* M: memory operand addressed by HL. */
            ret = cpu->readmembyte(cpu->dmembase + cpu->hl);
            break;
        case 0x7:
            ret = cpu->a;
            break;

        default:
            break;
    }
    return ret;
}
/*
 * Read an 8080 register by encoding while the register file lives in the
 * 808x registers (B=CH, C=CL, D=DH, E=DL, H=BH, L=BL, A=AL; see
 * transfer_from_808x()).  Encoding 110b is M, the byte addressed by HL,
 * which maps to BX here.  Fixed here: the old code went through BP (the
 * mapped SP) instead of BX.
 */
uint8_t
getreg_i8080_emu(i8080 *cpu, uint8_t reg)
{
    uint8_t ret = 0xFF;

    switch (reg) {
        case 0x0:
            ret = CH;
            break;
        case 0x1:
            ret = CL;
            break;
        case 0x2:
            ret = DH;
            break;
        case 0x3:
            ret = DL;
            break;
        case 0x4:
            ret = BH;
            break;
        case 0x5:
            ret = BL;
            break;
        case 0x6:
            /* M: memory operand addressed by HL (mapped to BX). */
            ret = cpu->readmembyte(cpu->dmembase + BX);
            break;
        case 0x7:
            ret = AL;
            break;

        default:
            break;
    }
    return ret;
}
/*
 * Write an 8080 register by encoding while the register file lives in the
 * 808x registers (see getreg_i8080_emu()).  Fixed here: encoding 110b (M)
 * must write through the mapped HL pair (BX), not BP (the mapped SP).
 */
void
setreg_i8080_emu(i8080 *cpu, uint8_t reg, uint8_t val)
{
    switch (reg) {
        case 0x0:
            CH = val;
            break;
        case 0x1:
            CL = val;
            break;
        case 0x2:
            DH = val;
            break;
        case 0x3:
            DL = val;
            break;
        case 0x4:
            BH = val;
            break;
        case 0x5:
            BL = val;
            break;
        case 0x6:
            /* M: memory operand addressed by HL (mapped to BX). */
            cpu->writemembyte(cpu->dmembase + BX, val);
            break;
        case 0x7:
            AL = val;
            break;

        default:
            break;
    }
}
/*
 * Write an 8080 register by its 3-bit encoding (B, C, D, E, H, L, M, A).
 * Fixed here: encoding 110b (M) must write the byte addressed by the HL
 * pair (Intel 8080 Assembly Language Programming Manual), not by SP.
 */
void
setreg_i8080(i8080 *cpu, uint8_t reg, uint8_t val)
{
    switch (reg) {
        case 0x0:
            cpu->b = val;
            break;
        case 0x1:
            cpu->c = val;
            break;
        case 0x2:
            cpu->d = val;
            break;
        case 0x3:
            cpu->e = val;
            break;
        case 0x4:
            cpu->h = val;
            break;
        case 0x5:
            cpu->l = val;
            break;
        case 0x6:
            /* M: memory operand addressed by HL. */
            cpu->writemembyte(cpu->dmembase + cpu->hl, val);
            break;
        case 0x7:
            cpu->a = val;
            break;

        default:
            break;
    }
}
/* Dispatch one 8080 opcode.  Only NOP (00h) is decoded so far. */
void
interpret_exec8080(UNUSED(i8080 *cpu), uint8_t opcode)
{
    switch (opcode) {
        case 0x00: /* NOP */
            break;

        default: /* not yet implemented */
            break;
    }
}
/*
 * Main interpreter loop: run the 8080 core for (at least) cycs cycles.
 * Still a stub: `completed` is forced to 1 every pass, so each iteration
 * fetches an instruction byte and immediately retires it without executing.
 * NOTE(review): `opcode` is not declared in this file — presumably the
 * global instruction byte shared with the x86 core; confirm.
 */
void
exec8080(i8080 *cpu, int cycs)
{
#ifdef UNUSED_8080_VARS
    uint8_t temp = 0, temp2;
    uint8_t old_af;
    uint8_t handled = 0;
    uint16_t addr, tempw;
    uint16_t new_ip;
    int bits;
#endif
    cycles += cycs;
    while (cycles > 0) {
        cpu->startclock();
        if (!repeating) {
            cpu->oldpc = cpu->pc;
            opcode = cpu->fetchinstruction(cpu);
            oldc = cpu->flags & C_FLAG_I8080; /* save carry for the instruction about to run */
            i8080_wait(1, 0);
        }
        completed = 1; /* stub: no instruction ever spans passes */
        if (completed) {
            repeating = 0;
            in_rep = 0;
            rep_c_flag = 0;
            cpu->endclock();
            if (cpu->checkinterrupts)
                cpu->checkinterrupts();
        }
    }
}
``` | /content/code_sandbox/src/cpu/8080.c | c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 1,675 |
```objective-c
/*
 * Jcc condition tests, one per condition mnemonic.  The signed conditions
 * follow the x86 definitions: L is SF != OF and LE additionally admits ZF
 * (Intel SDM vol. 2, Jcc).
 */
#define cond_O   (VF_SET())
#define cond_NO  (!VF_SET())
#define cond_B   (CF_SET())
#define cond_NB  (!CF_SET())
#define cond_E   (ZF_SET())
#define cond_NE  (!ZF_SET())
#define cond_BE  (CF_SET() || ZF_SET())
#define cond_NBE (!CF_SET() && !ZF_SET())
#define cond_S   (NF_SET())
#define cond_NS  (!NF_SET())
#define cond_P   (PF_SET())
#define cond_NP  (!PF_SET())
#define cond_L   (((NF_SET()) ? 1 : 0) != ((VF_SET()) ? 1 : 0))
#define cond_NL  (((NF_SET()) ? 1 : 0) == ((VF_SET()) ? 1 : 0))
#define cond_LE  (((NF_SET()) ? 1 : 0) != ((VF_SET()) ? 1 : 0) || (ZF_SET()))
#define cond_NLE (((NF_SET()) ? 1 : 0) == ((VF_SET()) ? 1 : 0) && (!ZF_SET()))
/*
 * Expands to the three relative-Jcc handlers for one condition:
 *   opJcc    - 8-bit displacement (IP wraps to 16 bits unless op32),
 *   opJcc_w  - 16-bit displacement (IP always truncated to 16 bits),
 *   opJcc_l  - 32-bit displacement.
 * Each charges the not-taken cycle count up front and adds the taken
 * penalty plus a prefetch flush only when the branch is taken.
 */
#define opJ(condition)                                                  \
    static int opJ##condition(uint32_t fetchdat)                        \
    {                                                                   \
        int8_t offset = (int8_t) getbytef();                            \
        CLOCK_CYCLES(timing_bnt);                                       \
        if (cond_##condition) {                                         \
            cpu_state.pc += offset;                                     \
            if (!(cpu_state.op32 & 0x100))                              \
                cpu_state.pc &= 0xffff;                                 \
            CLOCK_CYCLES_ALWAYS(timing_bt);                             \
            CPU_BLOCK_END();                                            \
            PREFETCH_RUN(timing_bt + timing_bnt, 2, -1, 0, 0, 0, 0, 0); \
            PREFETCH_FLUSH();                                           \
            return 1;                                                   \
        }                                                               \
        PREFETCH_RUN(timing_bnt, 2, -1, 0, 0, 0, 0, 0);                 \
        return 0;                                                       \
    }                                                                   \
                                                                        \
    static int opJ##condition##_w(uint32_t fetchdat)                    \
    {                                                                   \
        int16_t offset = (int16_t) getwordf();                          \
        CLOCK_CYCLES(timing_bnt);                                       \
        if (cond_##condition) {                                         \
            cpu_state.pc += offset;                                     \
            cpu_state.pc &= 0xffff;                                     \
            CLOCK_CYCLES_ALWAYS(timing_bt);                             \
            CPU_BLOCK_END();                                            \
            PREFETCH_RUN(timing_bt + timing_bnt, 3, -1, 0, 0, 0, 0, 0); \
            PREFETCH_FLUSH();                                           \
            return 1;                                                   \
        }                                                               \
        PREFETCH_RUN(timing_bnt, 3, -1, 0, 0, 0, 0, 0);                 \
        return 0;                                                       \
    }                                                                   \
                                                                        \
    static int opJ##condition##_l(uint32_t fetchdat)                    \
    {                                                                   \
        uint32_t offset = getlong();                                    \
        if (cpu_state.abrt)                                             \
            return 1;                                                   \
        CLOCK_CYCLES(timing_bnt);                                       \
        if (cond_##condition) {                                         \
            cpu_state.pc += offset;                                     \
            CLOCK_CYCLES_ALWAYS(timing_bt);                             \
            CPU_BLOCK_END();                                            \
            PREFETCH_RUN(timing_bt + timing_bnt, 5, -1, 0, 0, 0, 0, 0); \
            PREFETCH_FLUSH();                                           \
            return 1;                                                   \
        }                                                               \
        PREFETCH_RUN(timing_bnt, 5, -1, 0, 0, 0, 0, 0);                 \
        return 0;                                                       \
    }

/* Instantiate all sixteen Jcc conditions. */
// clang-format off
opJ(O)
opJ(NO)
opJ(B)
opJ(NB)
opJ(E)
opJ(NE)
opJ(BE)
opJ(NBE)
opJ(S)
opJ(NS)
opJ(P)
opJ(NP)
opJ(L)
opJ(NL)
opJ(LE)
opJ(NLE)
// clang-format on
/* LOOPNE rel8, 16-bit count: decrement CX, branch while CX != 0 and ZF clear. */
static int
opLOOPNE_w(uint32_t fetchdat)
{
    int8_t disp = (int8_t) getbytef();

    CX--;
    CLOCK_CYCLES(is486 ? 7 : 11);
    PREFETCH_RUN(11, 2, -1, 0, 0, 0, 0, 0);

    if (!CX || ZF_SET())
        return 0; /* not taken */

    cpu_state.pc += disp;
    if (!(cpu_state.op32 & 0x100))
        cpu_state.pc &= 0xffff;
    CPU_BLOCK_END();
    PREFETCH_FLUSH();
    return 1;
}

/* LOOPNE rel8, 32-bit (ECX) count. */
static int
opLOOPNE_l(uint32_t fetchdat)
{
    int8_t disp = (int8_t) getbytef();

    ECX--;
    CLOCK_CYCLES(is486 ? 7 : 11);
    PREFETCH_RUN(11, 2, -1, 0, 0, 0, 0, 0);

    if (!ECX || ZF_SET())
        return 0;

    cpu_state.pc += disp;
    if (!(cpu_state.op32 & 0x100))
        cpu_state.pc &= 0xffff;
    CPU_BLOCK_END();
    PREFETCH_FLUSH();
    return 1;
}
/* LOOPE rel8, 16-bit count: decrement CX, branch while CX != 0 and ZF set. */
static int
opLOOPE_w(uint32_t fetchdat)
{
    int8_t disp = (int8_t) getbytef();

    CX--;
    CLOCK_CYCLES(is486 ? 7 : 11);
    PREFETCH_RUN(11, 2, -1, 0, 0, 0, 0, 0);

    if (!CX || !ZF_SET())
        return 0; /* not taken */

    cpu_state.pc += disp;
    if (!(cpu_state.op32 & 0x100))
        cpu_state.pc &= 0xffff;
    CPU_BLOCK_END();
    PREFETCH_FLUSH();
    return 1;
}

/* LOOPE rel8, 32-bit (ECX) count. */
static int
opLOOPE_l(uint32_t fetchdat)
{
    int8_t disp = (int8_t) getbytef();

    ECX--;
    CLOCK_CYCLES(is486 ? 7 : 11);
    PREFETCH_RUN(11, 2, -1, 0, 0, 0, 0, 0);

    if (!ECX || !ZF_SET())
        return 0;

    cpu_state.pc += disp;
    if (!(cpu_state.op32 & 0x100))
        cpu_state.pc &= 0xffff;
    CPU_BLOCK_END();
    PREFETCH_FLUSH();
    return 1;
}
/* LOOP rel8, 16-bit count: decrement CX, branch while CX != 0 (flags ignored). */
static int
opLOOP_w(uint32_t fetchdat)
{
    int8_t disp = (int8_t) getbytef();

    CX--;
    CLOCK_CYCLES(is486 ? 7 : 11);
    PREFETCH_RUN(11, 2, -1, 0, 0, 0, 0, 0);

    if (!CX)
        return 0; /* count exhausted — fall through */

    cpu_state.pc += disp;
    if (!(cpu_state.op32 & 0x100))
        cpu_state.pc &= 0xffff;
    CPU_BLOCK_END();
    PREFETCH_FLUSH();
    return 1;
}

/* LOOP rel8, 32-bit (ECX) count. */
static int
opLOOP_l(uint32_t fetchdat)
{
    int8_t disp = (int8_t) getbytef();

    ECX--;
    CLOCK_CYCLES(is486 ? 7 : 11);
    PREFETCH_RUN(11, 2, -1, 0, 0, 0, 0, 0);

    if (!ECX)
        return 0;

    cpu_state.pc += disp;
    if (!(cpu_state.op32 & 0x100))
        cpu_state.pc &= 0xffff;
    CPU_BLOCK_END();
    PREFETCH_FLUSH();
    return 1;
}
/* JCXZ rel8: branch when CX is zero; no flags are involved. */
static int
opJCXZ(uint32_t fetchdat)
{
    int8_t disp = (int8_t) getbytef();

    CLOCK_CYCLES(5);
    if (CX) {
        PREFETCH_RUN(5, 2, -1, 0, 0, 0, 0, 0);
        return 0;
    }

    cpu_state.pc += disp;
    if (!(cpu_state.op32 & 0x100))
        cpu_state.pc &= 0xffff;
    CLOCK_CYCLES(4); /* extra taken-branch cost */
    CPU_BLOCK_END();
    PREFETCH_RUN(9, 2, -1, 0, 0, 0, 0, 0);
    PREFETCH_FLUSH();
    return 1;
}

/* JECXZ rel8: as above, testing the full ECX. */
static int
opJECXZ(uint32_t fetchdat)
{
    int8_t disp = (int8_t) getbytef();

    CLOCK_CYCLES(5);
    if (ECX) {
        PREFETCH_RUN(5, 2, -1, 0, 0, 0, 0, 0);
        return 0;
    }

    cpu_state.pc += disp;
    if (!(cpu_state.op32 & 0x100))
        cpu_state.pc &= 0xffff;
    CLOCK_CYCLES(4);
    CPU_BLOCK_END();
    PREFETCH_RUN(9, 2, -1, 0, 0, 0, 0, 0);
    PREFETCH_FLUSH();
    return 1;
}
/* JMP rel8. */
static int
opJMP_r8(uint32_t fetchdat)
{
    int8_t disp = (int8_t) getbytef();

    cpu_state.pc += disp;
    if (!(cpu_state.op32 & 0x100))
        cpu_state.pc &= 0xffff; /* 16-bit operand size wraps IP */
    CPU_BLOCK_END();
    CLOCK_CYCLES(is486 ? 3 : 7);
    PREFETCH_RUN(7, 2, -1, 0, 0, 0, 0, 0);
    PREFETCH_FLUSH();
    return 0;
}

/* JMP rel16: IP is always truncated to 16 bits. */
static int
opJMP_r16(uint32_t fetchdat)
{
    int16_t disp = (int16_t) getwordf();

    cpu_state.pc = (cpu_state.pc + disp) & 0xffff;
    CPU_BLOCK_END();
    CLOCK_CYCLES(is486 ? 3 : 7);
    PREFETCH_RUN(7, 3, -1, 0, 0, 0, 0, 0);
    PREFETCH_FLUSH();
    return 0;
}

/* JMP rel32. */
static int
opJMP_r32(uint32_t fetchdat)
{
    int32_t disp = (int32_t) getlong();

    if (cpu_state.abrt)
        return 1;
    cpu_state.pc += disp;
    CPU_BLOCK_END();
    CLOCK_CYCLES(is486 ? 3 : 7);
    PREFETCH_RUN(7, 5, -1, 0, 0, 0, 0, 0);
    PREFETCH_FLUSH();
    return 0;
}
/* JMP ptr16:16 — direct far jump, 16-bit operand size. */
static int
opJMP_far_a16(uint32_t fetchdat)
{
    uint16_t new_pc = getwordf();
    uint16_t new_cs = getword();
    uint32_t ret_pc;

    if (cpu_state.abrt)
        return 1;
    ret_pc = cpu_state.pc;
    cpu_state.pc = new_pc;
    op_loadcsjmp(new_cs, ret_pc); /* load the new CS, passing the old PC */
    CPU_BLOCK_END();
    PREFETCH_RUN(11, 5, -1, 0, 0, 0, 0, 0);
    PREFETCH_FLUSH();
    return 0;
}

/* JMP ptr16:32 — direct far jump, 32-bit operand size. */
static int
opJMP_far_a32(uint32_t fetchdat)
{
    uint32_t new_pc = getlong();
    uint16_t new_cs = getword();
    uint32_t ret_pc;

    if (cpu_state.abrt)
        return 1;
    ret_pc = cpu_state.pc;
    cpu_state.pc = new_pc;
    op_loadcsjmp(new_cs, ret_pc);
    CPU_BLOCK_END();
    PREFETCH_RUN(11, 7, -1, 0, 0, 0, 0, 0);
    PREFETCH_FLUSH();
    return 0;
}
/* CALL rel16: push the return IP, then perform an IP-relative jump. */
static int
opCALL_r16(uint32_t fetchdat)
{
    int16_t disp = (int16_t) getwordf();

    PUSH_W(cpu_state.pc);
    cpu_state.pc = (cpu_state.pc + disp) & 0xffff;
    CPU_BLOCK_END();
    CLOCK_CYCLES(is486 ? 3 : 7);
    PREFETCH_RUN(7, 3, -1, 0, 0, 1, 0, 0);
    PREFETCH_FLUSH();
    return 0;
}

/* CALL rel32. */
static int
opCALL_r32(uint32_t fetchdat)
{
    int32_t disp = getlong();

    if (cpu_state.abrt)
        return 1;
    PUSH_L(cpu_state.pc);
    cpu_state.pc += disp;
    CPU_BLOCK_END();
    CLOCK_CYCLES(is486 ? 3 : 7);
    PREFETCH_RUN(7, 5, -1, 0, 0, 0, 1, 0);
    PREFETCH_FLUSH();
    return 0;
}
/* RET (near, 16-bit): pop the return IP. */
static int
opRET_w(uint32_t fetchdat)
{
    uint16_t new_pc = POP_W();

    if (cpu_state.abrt)
        return 1;
    cpu_state.pc = new_pc;
    CPU_BLOCK_END();
    CLOCK_CYCLES(is486 ? 5 : 10);
    PREFETCH_RUN(10, 1, -1, 1, 0, 0, 0, 0);
    PREFETCH_FLUSH();
    return 0;
}

/* RET (near, 32-bit). */
static int
opRET_l(uint32_t fetchdat)
{
    uint32_t new_pc = POP_L();

    if (cpu_state.abrt)
        return 1;
    cpu_state.pc = new_pc;
    CPU_BLOCK_END();
    CLOCK_CYCLES(is486 ? 5 : 10);
    PREFETCH_RUN(10, 1, -1, 0, 1, 0, 0, 0);
    PREFETCH_FLUSH();
    return 0;
}
/* RET imm16 (near, 16-bit): pop IP, then discard imm16 bytes of stack. */
static int
opRET_w_imm(uint32_t fetchdat)
{
    uint16_t imm = getwordf();
    uint16_t new_pc = POP_W();

    if (cpu_state.abrt)
        return 1;
    if (stack32)
        ESP += imm;
    else
        SP += imm;
    cpu_state.pc = new_pc;
    CPU_BLOCK_END();
    CLOCK_CYCLES(is486 ? 5 : 10);
    PREFETCH_RUN(10, 5, -1, 1, 0, 0, 0, 0);
    PREFETCH_FLUSH();
    return 0;
}

/* RET imm16 (near, 32-bit). */
static int
opRET_l_imm(uint32_t fetchdat)
{
    uint16_t imm = getwordf();
    uint32_t new_pc = POP_L();

    if (cpu_state.abrt)
        return 1;
    if (stack32)
        ESP += imm;
    else
        SP += imm;
    cpu_state.pc = new_pc;
    CPU_BLOCK_END();
    CLOCK_CYCLES(is486 ? 5 : 10);
    PREFETCH_RUN(10, 5, -1, 0, 1, 0, 0, 0);
    PREFETCH_FLUSH();
    return 0;
}
``` | /content/code_sandbox/src/cpu/x86_ops_jump.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 3,359 |
```objective-c
#include <math.h>
/* 3DNow! PREFETCH/PREFETCHW (0F 0D), 16-bit addressing: only the effective
   address is decoded — the cache hint itself is not modelled.  A register
   operand (mod == 3) is an invalid encoding. */
static int
opPREFETCH_a16(uint32_t fetchdat)
{
    fetch_ea_16(fetchdat);
    ILLEGAL_ON(cpu_mod == 3);
    CLOCK_CYCLES(1);
    return 0;
}

/* As above, with 32-bit addressing. */
static int
opPREFETCH_a32(uint32_t fetchdat)
{
    fetch_ea_32(fetchdat);
    ILLEGAL_ON(cpu_mod == 3);
    CLOCK_CYCLES(1);
    return 0;
}
/* FEMMS: fast exit from MMX state (3DNow!).  #UD when MMX is absent; #NM
   (exception 7) when CR0.EM or CR0.TS is set (0xc masks those two bits);
   otherwise clears the FPU/MMX tag state like EMMS. */
static int
opFEMMS(uint32_t fetchdat)
{
    ILLEGAL_ON(!cpu_has_feature(CPU_FEATURE_MMX));
    if (cr0 & 0xc) {
        x86_int(7);
        return 1;
    }
    x87_emms();
    CLOCK_CYCLES(1);
    return 0;
}
/* PAVGUSB: byte-wise unsigned average with rounding up, (a + b + 1) >> 1. */
static int
opPAVGUSB(uint32_t fetchdat)
{
    MMX_REG  src;
    MMX_REG *dst = MMX_GETREGP(cpu_reg);

    MMX_GETSRC();

    for (int lane = 0; lane < 8; lane++)
        dst->b[lane] = (dst->b[lane] + src.b[lane] + 1) >> 1;

    MMX_SETEXP(cpu_reg);

    return 0;
}
/* PF2ID: convert two packed floats to 32-bit integers (truncating).
   NOTE(review): hardware saturates out-of-range values; the plain cast here
   does not — confirm whether any emulated software depends on that. */
static int
opPF2ID(uint32_t fetchdat)
{
    MMX_REG  src;
    MMX_REG *dst = MMX_GETREGP(cpu_reg);

    MMX_GETSRC();

    /* src is a local copy, so the store order is immaterial. */
    dst->sl[1] = (int32_t) src.f[1];
    dst->sl[0] = (int32_t) src.f[0];

    MMX_SETEXP(cpu_reg);

    return 0;
}
static int
opPF2IW(uint32_t fetchdat)
{
MMX_REG src;
MMX_REG *dst = MMX_GETREGP(cpu_reg);
MMX_GETSRC();
dst->sw[0] = (int32_t) src.f[0];
dst->sw[1] = (int32_t) src.f[1];
MMX_SETEXP(cpu_reg);
return 0;
}
/* PFACC: horizontal add — low lane = dst.lo + dst.hi, high = src.lo + src.hi. */
static int
opPFACC(uint32_t fetchdat)
{
    MMX_REG  src;
    MMX_REG *dst = MMX_GETREGP(cpu_reg);
    float    lo;
    float    hi;

    MMX_GETSRC();

    lo = dst->f[0] + dst->f[1];
    hi = src.f[0] + src.f[1];
    dst->f[0] = lo;
    dst->f[1] = hi;

    MMX_SETEXP(cpu_reg);

    return 0;
}

/* PFNACC: horizontal subtract — low = dst.lo - dst.hi, high = src.lo - src.hi. */
static int
opPFNACC(uint32_t fetchdat)
{
    MMX_REG  src;
    MMX_REG *dst = MMX_GETREGP(cpu_reg);
    float    lo;
    float    hi;

    MMX_GETSRC();

    lo = dst->f[0] - dst->f[1];
    hi = src.f[0] - src.f[1];
    dst->f[0] = lo;
    dst->f[1] = hi;

    MMX_SETEXP(cpu_reg);

    return 0;
}

/* PFPNACC: mixed — low = dst.lo - dst.hi, high = src.lo + src.hi. */
static int
opPFPNACC(uint32_t fetchdat)
{
    MMX_REG  src;
    MMX_REG *dst = MMX_GETREGP(cpu_reg);
    float    lo;
    float    hi;

    MMX_GETSRC();

    lo = dst->f[0] - dst->f[1];
    hi = src.f[0] + src.f[1];
    dst->f[0] = lo;
    dst->f[1] = hi;

    MMX_SETEXP(cpu_reg);

    return 0;
}
/* PSWAPD: store the source's two dwords into the destination swapped. */
static int
opPSWAPD(uint32_t fetchdat)
{
    MMX_REG  src;
    MMX_REG *dst = MMX_GETREGP(cpu_reg);
    float    lo;
    float    hi;

    MMX_GETSRC();

    /* src is already a local copy, so a dst == src overlap is safe. */
    lo = src.f[0];
    hi = src.f[1];
    dst->f[0] = hi;
    dst->f[1] = lo;

    MMX_SETEXP(cpu_reg);

    return 0;
}
/* PFADD: packed single-precision add, two lanes. */
static int
opPFADD(uint32_t fetchdat)
{
    MMX_REG  src;
    MMX_REG *dst = MMX_GETREGP(cpu_reg);

    MMX_GETSRC();

    for (int lane = 0; lane < 2; lane++)
        dst->f[lane] += src.f[lane];

    MMX_SETEXP(cpu_reg);

    return 0;
}
/* PFCMPEQ: per-lane float equality; all-ones mask on true, zero otherwise. */
static int
opPFCMPEQ(uint32_t fetchdat)
{
    MMX_REG  src;
    MMX_REG *dst = MMX_GETREGP(cpu_reg);

    MMX_GETSRC();

    for (int lane = 0; lane < 2; lane++)
        dst->l[lane] = (dst->f[lane] == src.f[lane]) ? 0xffffffff : 0;

    MMX_SETEXP(cpu_reg);

    return 0;
}

/* PFCMPGE: per-lane greater-or-equal mask. */
static int
opPFCMPGE(uint32_t fetchdat)
{
    MMX_REG  src;
    MMX_REG *dst = MMX_GETREGP(cpu_reg);

    MMX_GETSRC();

    for (int lane = 0; lane < 2; lane++)
        dst->l[lane] = (dst->f[lane] >= src.f[lane]) ? 0xffffffff : 0;

    MMX_SETEXP(cpu_reg);

    return 0;
}

/* PFCMPGT: per-lane greater-than mask. */
static int
opPFCMPGT(uint32_t fetchdat)
{
    MMX_REG  src;
    MMX_REG *dst = MMX_GETREGP(cpu_reg);

    MMX_GETSRC();

    for (int lane = 0; lane < 2; lane++)
        dst->l[lane] = (dst->f[lane] > src.f[lane]) ? 0xffffffff : 0;

    MMX_SETEXP(cpu_reg);

    return 0;
}
/* PFMAX: per-lane maximum; dst is kept when the comparison is false
   (including the NaN case), matching the original behavior. */
static int
opPFMAX(uint32_t fetchdat)
{
    MMX_REG  src;
    MMX_REG *dst = MMX_GETREGP(cpu_reg);

    MMX_GETSRC();

    for (int lane = 0; lane < 2; lane++)
        if (src.f[lane] > dst->f[lane])
            dst->f[lane] = src.f[lane];

    MMX_SETEXP(cpu_reg);

    return 0;
}

/* PFMIN: per-lane minimum, same comparison convention as PFMAX. */
static int
opPFMIN(uint32_t fetchdat)
{
    MMX_REG  src;
    MMX_REG *dst = MMX_GETREGP(cpu_reg);

    MMX_GETSRC();

    for (int lane = 0; lane < 2; lane++)
        if (src.f[lane] < dst->f[lane])
            dst->f[lane] = src.f[lane];

    MMX_SETEXP(cpu_reg);

    return 0;
}
/* PFMUL: packed single-precision multiply, two lanes. */
static int
opPFMUL(uint32_t fetchdat)
{
    MMX_REG  src;
    MMX_REG *dst = MMX_GETREGP(cpu_reg);

    MMX_GETSRC();

    for (int lane = 0; lane < 2; lane++)
        dst->f[lane] *= src.f[lane];

    MMX_SETEXP(cpu_reg);

    return 0;
}
/* PFRCP: reciprocal of the LOW source float, duplicated into both result
   lanes.  Computed at full precision here, unlike hardware's approximation
   — see the comment on the PFRCPIT handlers below. */
static int
opPFRCP(uint32_t fetchdat)
{
    MMX_REG *dst = MMX_GETREGP(cpu_reg);
    /* Union reinterprets the raw 32-bit memory word as a float. */
    union {
        uint32_t i;
        float f;
    } src;

    if (cpu_mod == 3) {
        src.f = (MMX_GETREG(cpu_rm)).f[0];
        CLOCK_CYCLES(1);
    } else {
        SEG_CHECK_READ(cpu_state.ea_seg);
        src.i = readmeml(easeg, cpu_state.eaaddr);
        if (cpu_state.abrt)
            return 1;
        CLOCK_CYCLES(2);
    }
    /* 1.0 / x is evaluated in double and rounded to float on store. */
    dst->f[0] = 1.0 / src.f;
    dst->f[1] = dst->f[0];
    MMX_SETEXP(cpu_reg);
    return 0;
}
/* Since opPFRCP() calculates a full-precision reciprocal, the Newton-Raphson
   refinement steps degenerate into plain register copies here. */
static int
opPFRCPIT1(uint32_t fetchdat)
{
    MMX_REG src;
    MMX_REG *dst = MMX_GETREGP(cpu_reg);
    MMX_GETSRC();
    dst->f[0] = src.f[0];
    dst->f[1] = src.f[1];
    MMX_SETEXP(cpu_reg);
    return 0;
}

/* Second refinement step — likewise a copy (see above). */
static int
opPFRCPIT2(uint32_t fetchdat)
{
    MMX_REG src;
    MMX_REG *dst = MMX_GETREGP(cpu_reg);
    MMX_GETSRC();
    dst->f[0] = src.f[0];
    dst->f[1] = src.f[1];
    MMX_SETEXP(cpu_reg);
    return 0;
}
/* PFRSQRT: reciprocal square root of the LOW source float, duplicated into
   both result lanes; computed at full precision (cf. PFRCP above). */
static int
opPFRSQRT(uint32_t fetchdat)
{
    MMX_REG *dst = MMX_GETREGP(cpu_reg);
    /* Union reinterprets the raw 32-bit memory word as a float. */
    union {
        uint32_t i;
        float f;
    } src;

    if (cpu_mod == 3) {
        src.f = (MMX_GETREG(cpu_rm)).f[0];
        CLOCK_CYCLES(1);
    } else {
        SEG_CHECK_READ(cpu_state.ea_seg);
        src.i = readmeml(easeg, cpu_state.eaaddr);
        if (cpu_state.abrt)
            return 1;
        CLOCK_CYCLES(2);
    }
    /* Evaluated in double via sqrt(), rounded to float on store. */
    dst->f[0] = 1.0 / sqrt(src.f);
    dst->f[1] = dst->f[0];
    MMX_SETEXP(cpu_reg);
    return 0;
}
/* Since opPFRSQRT() calculates a full-precision inverse square root, the
   follow-up refinement iteration is a NOP; the operand is still fetched so
   memory faults behave normally. */
static int
opPFRSQIT1(uint32_t fetchdat)
{
    MMX_REG src;
    MMX_GETSRC();
    UN_USED(src); /* NOTE(review): presumably a project unused-value macro — confirm the spelling vs UNUSED(). */
    return 0;
}
/* PFSUB: packed single-precision subtract, dst = dst - src. */
static int
opPFSUB(uint32_t fetchdat)
{
    MMX_REG  src;
    MMX_REG *dst = MMX_GETREGP(cpu_reg);

    MMX_GETSRC();

    for (int lane = 0; lane < 2; lane++)
        dst->f[lane] -= src.f[lane];

    MMX_SETEXP(cpu_reg);

    return 0;
}

/* PFSUBR: reversed subtract, dst = src - dst. */
static int
opPFSUBR(uint32_t fetchdat)
{
    MMX_REG  src;
    MMX_REG *dst = MMX_GETREGP(cpu_reg);

    MMX_GETSRC();

    for (int lane = 0; lane < 2; lane++)
        dst->f[lane] = src.f[lane] - dst->f[lane];

    MMX_SETEXP(cpu_reg);

    return 0;
}
/* PI2FD: convert two packed 32-bit signed integers to floats. */
static int
opPI2FD(uint32_t fetchdat)
{
    MMX_REG  src;
    MMX_REG *dst = MMX_GETREGP(cpu_reg);

    MMX_GETSRC();

    for (int lane = 0; lane < 2; lane++)
        dst->f[lane] = (float) src.sl[lane];

    MMX_SETEXP(cpu_reg);

    return 0;
}

/* PI2FW: convert the low 16-bit signed integer of each doubleword to float. */
static int
opPI2FW(uint32_t fetchdat)
{
    MMX_REG  src;
    MMX_REG *dst = MMX_GETREGP(cpu_reg);

    MMX_GETSRC();

    for (int lane = 0; lane < 2; lane++)
        dst->f[lane] = (float) src.sw[lane];

    MMX_SETEXP(cpu_reg);

    return 0;
}
/* PMULHRW: packed signed 16x16 multiply with rounding; for each word,
   dest = ((dest * src) + 0x8000) >> 16 (round-to-nearest on the high
   half of the 32-bit product).
   Fixes two issues in the previous version: the memory-operand abort
   path returned 0 instead of 1 (every other handler in this file
   signals an aborted instruction with return 1), and the multiply
   block was duplicated in both operand paths. */
static int
opPMULHRW(uint32_t fetchdat)
{
    MMX_REG src;
    MMX_REG *dst = MMX_GETREGP(cpu_reg);

    if (cpu_mod == 3) {
        src = MMX_GETREG(cpu_rm);
        CLOCK_CYCLES(1);
    } else {
        SEG_CHECK_READ(cpu_state.ea_seg);
        src.l[0] = readmeml(easeg, cpu_state.eaaddr);
        src.l[1] = readmeml(easeg, cpu_state.eaaddr + 4);
        if (cpu_state.abrt)
            return 1; /* was 0: abort must stop the instruction like all sibling ops */
        CLOCK_CYCLES(2);
    }

    dst->w[0] = (((int32_t) dst->sw[0] * (int32_t) src.sw[0]) + 0x8000) >> 16;
    dst->w[1] = (((int32_t) dst->sw[1] * (int32_t) src.sw[1]) + 0x8000) >> 16;
    dst->w[2] = (((int32_t) dst->sw[2] * (int32_t) src.sw[2]) + 0x8000) >> 16;
    dst->w[3] = (((int32_t) dst->sw[3] * (int32_t) src.sw[3]) + 0x8000) >> 16;

    MMX_SETEXP(cpu_reg);
    return 0;
}
/* Dispatch table for the base 3DNow! instruction set, indexed by the
   suffix opcode byte fetched after the modrm/EA bytes (see
   op3DNOW_a16/a32 below).  Unassigned entries raise #UD via ILLEGAL. */
const OpFn OP_TABLE(3DNOW)[256] = {
    // clang-format off
    /*      00          01        02        03        04        05        06          07           08        09        0a        0b        0c        0d        0e        0f*/
    /*00*/  ILLEGAL,    ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,    ILLEGAL,     ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  opPI2FD,  ILLEGAL,  ILLEGAL,
    /*10*/  ILLEGAL,    ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,    ILLEGAL,     ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  opPF2ID,  ILLEGAL,  ILLEGAL,
    /*20*/  ILLEGAL,    ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,    ILLEGAL,     ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,
    /*30*/  ILLEGAL,    ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,    ILLEGAL,     ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,
    /*40*/  ILLEGAL,    ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,    ILLEGAL,     ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,
    /*50*/  ILLEGAL,    ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,    ILLEGAL,     ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,
    /*60*/  ILLEGAL,    ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,    ILLEGAL,     ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,
    /*70*/  ILLEGAL,    ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,    ILLEGAL,     ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,
    /*80*/  ILLEGAL,    ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,    ILLEGAL,     ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,
    /*90*/  opPFCMPGE,  ILLEGAL,  ILLEGAL,  ILLEGAL,  opPFMIN,  ILLEGAL,  opPFRCP,    opPFRSQRT,   ILLEGAL,  ILLEGAL,  opPFSUB,  ILLEGAL,  ILLEGAL,  ILLEGAL,  opPFADD,  ILLEGAL,
    /*a0*/  opPFCMPGT,  ILLEGAL,  ILLEGAL,  ILLEGAL,  opPFMAX,  ILLEGAL,  opPFRCPIT1, opPFRSQIT1,  ILLEGAL,  ILLEGAL,  opPFSUBR, ILLEGAL,  ILLEGAL,  ILLEGAL,  opPFACC,  ILLEGAL,
    /*b0*/  opPFCMPEQ,  ILLEGAL,  ILLEGAL,  ILLEGAL,  opPFMUL,  ILLEGAL,  opPFRCPIT2, opPMULHRW,   ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  opPAVGUSB,
    /*c0*/  ILLEGAL,    ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,    ILLEGAL,     ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,
    /*d0*/  ILLEGAL,    ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,    ILLEGAL,     ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,
    /*e0*/  ILLEGAL,    ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,    ILLEGAL,     ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,
    /*f0*/  ILLEGAL,    ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,    ILLEGAL,     ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,
    // clang-format on
};
/* Dispatch table for the extended 3DNow! set: same as OP_TABLE(3DNOW)
   but additionally maps opPI2FW (0x0c), opPF2IW (0x1c), opPFNACC
   (0x8a), opPFPNACC (0x8e) and opPSWAPD (0xbb). */
const OpFn OP_TABLE(3DNOWE)[256] = {
    // clang-format off
    /*      00          01        02        03        04        05        06          07           08        09        0a        0b        0c        0d        0e         0f*/
    /*00*/  ILLEGAL,    ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,    ILLEGAL,     ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  opPI2FW,  opPI2FD,  ILLEGAL,   ILLEGAL,
    /*10*/  ILLEGAL,    ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,    ILLEGAL,     ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  opPF2IW,  opPF2ID,  ILLEGAL,   ILLEGAL,
    /*20*/  ILLEGAL,    ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,    ILLEGAL,     ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,   ILLEGAL,
    /*30*/  ILLEGAL,    ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,    ILLEGAL,     ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,   ILLEGAL,
    /*40*/  ILLEGAL,    ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,    ILLEGAL,     ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,   ILLEGAL,
    /*50*/  ILLEGAL,    ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,    ILLEGAL,     ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,   ILLEGAL,
    /*60*/  ILLEGAL,    ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,    ILLEGAL,     ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,   ILLEGAL,
    /*70*/  ILLEGAL,    ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,    ILLEGAL,     ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,   ILLEGAL,
    /*80*/  ILLEGAL,    ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,    ILLEGAL,     ILLEGAL,  ILLEGAL,  opPFNACC, ILLEGAL,  ILLEGAL,  ILLEGAL,  opPFPNACC, ILLEGAL,
    /*90*/  opPFCMPGE,  ILLEGAL,  ILLEGAL,  ILLEGAL,  opPFMIN,  ILLEGAL,  opPFRCP,    opPFRSQRT,   ILLEGAL,  ILLEGAL,  opPFSUB,  ILLEGAL,  ILLEGAL,  ILLEGAL,  opPFADD,   ILLEGAL,
    /*a0*/  opPFCMPGT,  ILLEGAL,  ILLEGAL,  ILLEGAL,  opPFMAX,  ILLEGAL,  opPFRCPIT1, opPFRSQIT1,  ILLEGAL,  ILLEGAL,  opPFSUBR, ILLEGAL,  ILLEGAL,  ILLEGAL,  opPFACC,   ILLEGAL,
    /*b0*/  opPFCMPEQ,  ILLEGAL,  ILLEGAL,  ILLEGAL,  opPFMUL,  ILLEGAL,  opPFRCPIT2, opPMULHRW,   ILLEGAL,  ILLEGAL,  ILLEGAL,  opPSWAPD, ILLEGAL,  ILLEGAL,  ILLEGAL,   opPAVGUSB,
    /*c0*/  ILLEGAL,    ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,    ILLEGAL,     ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,   ILLEGAL,
    /*d0*/  ILLEGAL,    ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,    ILLEGAL,     ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,   ILLEGAL,
    /*e0*/  ILLEGAL,    ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,    ILLEGAL,     ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,   ILLEGAL,
    /*f0*/  ILLEGAL,    ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,    ILLEGAL,     ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,  ILLEGAL,   ILLEGAL,
    // clang-format on
};
/* 3DNow! front end (16-bit addressing): decodes the EA from the modrm
   bytes, then fetches the trailing suffix byte that selects the actual
   3DNow! operation and dispatches through x86_opcodes_3DNOW[]. */
static int
op3DNOW_a16(uint32_t fetchdat)
{
    uint8_t opcode;
    MMX_ENTER();
    fetch_ea_16(fetchdat);
    /* The real opcode is the byte AFTER the EA bytes. */
    opcode = fastreadb(cs + cpu_state.pc);
    if (cpu_state.abrt)
        return 1;
    cpu_state.pc++;
    return x86_opcodes_3DNOW[opcode](0);
}
/* 32-bit-addressing form of op3DNOW_a16: decode EA, fetch the 3DNow!
   suffix opcode byte and dispatch. */
static int
op3DNOW_a32(uint32_t fetchdat)
{
    uint8_t opcode;
    MMX_ENTER();
    fetch_ea_32(fetchdat);
    /* The real opcode is the byte AFTER the EA bytes. */
    opcode = fastreadb(cs + cpu_state.pc);
    if (cpu_state.abrt)
        return 1;
    cpu_state.pc++;
    return x86_opcodes_3DNOW[opcode](0);
}
``` | /content/code_sandbox/src/cpu/x86_ops_3dnow.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 5,913 |
```objective-c
/*
* 86Box A hypervisor and IBM PC system emulator that specializes in
* running old operating systems and software designed for IBM
* PC systems and compatibles from 1981 through fairly recent
* system designs based on the PCI bus.
*
* This file is part of the 86Box distribution.
*
* x87 FPU instructions core.
*
*
*
 * Authors: Sarah Walker, <path_to_url>
* Miran Grca, <mgrca8@gmail.com>
*
*/
/* Swap two uint16_t lvalues in place.  Wrapped in do { } while (0) so
   the macro expands to a single statement and is safe in unbraced
   if/else bodies; arguments are parenthesized against operator-
   precedence surprises. */
#define swap_values16u(a, b) \
    do {                     \
        uint16_t tmp = (a);  \
        (a) = (b);           \
        (b) = tmp;           \
    } while (0)
/* FILD m16int (16-bit addressing): load a signed 16-bit integer from
   memory and push it onto the x87 stack as an extended double.  The
   integer->extF80 conversion is exact, so only stack overflow can
   occur. */
static int
sf_FILDiw_a16(uint32_t fetchdat)
{
    floatx80 result;
    int16_t temp;
    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_16(fetchdat);
    SEG_CHECK_READ(cpu_state.ea_seg);
    temp = geteaw();
    if (cpu_state.abrt)
        return 1;
    clear_C1();
    /* -1 = slot FPU_push() is about to occupy. */
    if (!IS_TAG_EMPTY(-1))
        FPU_stack_overflow(fetchdat);
    else {
        result = i32_to_extF80(temp); /* int16 widens losslessly to int32 */
        FPU_push();
        FPU_save_regi(result, 0);
    }
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fild_16) : (x87_timings.fild_16 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fild_16) : (x87_concurrency.fild_16 * cpu_multi));
    return 0;
}
#ifndef FPU_8087
/* 32-bit-addressing form of sf_FILDiw_a16 (not built for the 8087
   core, which has no 32-bit addressing). */
static int
sf_FILDiw_a32(uint32_t fetchdat)
{
    floatx80 result;
    int16_t temp;
    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_32(fetchdat);
    SEG_CHECK_READ(cpu_state.ea_seg);
    temp = geteaw();
    if (cpu_state.abrt)
        return 1;
    clear_C1();
    if (!IS_TAG_EMPTY(-1)) {
        FPU_stack_overflow(fetchdat);
    } else {
        result = i32_to_extF80(temp); /* exact conversion */
        FPU_push();
        FPU_save_regi(result, 0);
    }
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fild_16) : (x87_timings.fild_16 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fild_16) : (x87_concurrency.fild_16 * cpu_multi));
    return 0;
}
#endif
/* FILD m32int (16-bit addressing): load a signed 32-bit integer and
   push it as an extended double; conversion is exact. */
static int
sf_FILDil_a16(uint32_t fetchdat)
{
    floatx80 result;
    int32_t templ;
    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_16(fetchdat);
    SEG_CHECK_READ(cpu_state.ea_seg);
    templ = geteal();
    if (cpu_state.abrt)
        return 1;
    clear_C1();
    if (!IS_TAG_EMPTY(-1)) {
        FPU_stack_overflow(fetchdat);
    } else {
        result = i32_to_extF80(templ);
        FPU_push();
        FPU_save_regi(result, 0);
    }
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fild_32) : (x87_timings.fild_32 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fild_32) : (x87_concurrency.fild_32 * cpu_multi));
    return 0;
}
#ifndef FPU_8087
/* 32-bit-addressing form of sf_FILDil_a16. */
static int
sf_FILDil_a32(uint32_t fetchdat)
{
    floatx80 result;
    int32_t templ;
    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_32(fetchdat);
    SEG_CHECK_READ(cpu_state.ea_seg);
    templ = geteal();
    if (cpu_state.abrt)
        return 1;
    clear_C1();
    if (!IS_TAG_EMPTY(-1)) {
        FPU_stack_overflow(fetchdat);
    } else {
        result = i32_to_extF80(templ);
        FPU_push();
        FPU_save_regi(result, 0);
    }
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fild_32) : (x87_timings.fild_32 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fild_32) : (x87_concurrency.fild_32 * cpu_multi));
    return 0;
}
#endif
/* FILD m64int (16-bit addressing): load a signed 64-bit integer and
   push it as an extended double; extF80's 64-bit significand makes the
   conversion exact. */
static int
sf_FILDiq_a16(uint32_t fetchdat)
{
    floatx80 result;
    int64_t temp64;
    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_16(fetchdat);
    SEG_CHECK_READ(cpu_state.ea_seg);
    temp64 = geteaq();
    if (cpu_state.abrt)
        return 1;
    clear_C1();
    if (!IS_TAG_EMPTY(-1)) {
        FPU_stack_overflow(fetchdat);
    } else {
        result = i64_to_extF80(temp64);
        FPU_push();
        FPU_save_regi(result, 0);
    }
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fild_64) : (x87_timings.fild_64 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fild_64) : (x87_concurrency.fild_64 * cpu_multi));
    return 0;
}
#ifndef FPU_8087
/* 32-bit-addressing form of sf_FILDiq_a16. */
static int
sf_FILDiq_a32(uint32_t fetchdat)
{
    floatx80 result;
    int64_t temp64;
    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_32(fetchdat);
    SEG_CHECK_READ(cpu_state.ea_seg);
    temp64 = geteaq();
    if (cpu_state.abrt)
        return 1;
    clear_C1();
    if (!IS_TAG_EMPTY(-1)) {
        FPU_stack_overflow(fetchdat);
    } else {
        result = i64_to_extF80(temp64);
        FPU_push();
        FPU_save_regi(result, 0);
    }
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fild_64) : (x87_timings.fild_64 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fild_64) : (x87_concurrency.fild_64 * cpu_multi));
    return 0;
}
#endif
/* FBLD m80bcd (16-bit addressing): load a ten-byte packed BCD value
   (18 decimal digits in the low 9 bytes, sign in bit 15 of the high
   word) and push it onto the x87 stack as an extended double. */
static int
sf_FBLD_PACKED_BCD_a16(uint32_t fetchdat)
{
    uint16_t load_reg_hi;  /* sign + digits 17..18 */
    uint64_t load_reg_lo;  /* digits 1..16, one nibble each */
    int64_t val64 = 0;
    int64_t scale = 1;
    floatx80 result;
    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_16(fetchdat);
    SEG_CHECK_READ(cpu_state.ea_seg);
    /* & 0xffff: offsets wrap at 64 KB under 16-bit addressing. */
    load_reg_hi = readmemw(easeg, (cpu_state.eaaddr + 8) & 0xffff);
    load_reg_lo = readmemq(easeg, cpu_state.eaaddr);
    if (cpu_state.abrt)
        return 1;
    clear_C1();
    if (!IS_TAG_EMPTY(-1)) {
        FPU_stack_overflow(fetchdat);
    } else {
        /* Accumulate the 16 low digits, least significant first... */
        for (int n = 0; n < 16; n++) {
            val64 += ((load_reg_lo & 0x0f) * scale);
            load_reg_lo >>= 4;
            scale *= 10;
        }
        /* ...then digits 17 and 18 from the high word. */
        val64 += ((load_reg_hi & 0x0f) * scale);
        val64 += (((load_reg_hi >> 4) & 0x0f) * scale * 10);
        result = (floatx80) i64_to_extF80(val64);
        /* NOTE(review): confirm floatx80_chs() negates its argument in
           place; if it returns the negated value, it is discarded here. */
        if (load_reg_hi & 0x8000)
            floatx80_chs(result);
        FPU_push();
        FPU_save_regi(result, 0);
    }
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fild_64) : (x87_timings.fild_64 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fild_64) : (x87_concurrency.fild_64 * cpu_multi));
    return 0;
}
#ifndef FPU_8087
/* FBLD m80bcd (32-bit addressing): load a ten-byte packed BCD value
   (18 decimal digits, sign in bit 15 of the high word) and push it
   onto the x87 stack as an extended double.
   Fixes a copy-paste from the a16 variant: the effective address must
   be decoded with fetch_ea_32(), and the 64 KB segment-wrap mask on
   the high-word read does not apply under 32-bit addressing. */
static int
sf_FBLD_PACKED_BCD_a32(uint32_t fetchdat)
{
    uint16_t load_reg_hi;  /* sign + digits 17..18 */
    uint64_t load_reg_lo;  /* digits 1..16, one nibble each */
    int64_t val64 = 0;
    int64_t scale = 1;
    floatx80 result;
    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_32(fetchdat); /* was fetch_ea_16() - wrong decoder for a32 */
    SEG_CHECK_READ(cpu_state.ea_seg);
    load_reg_hi = readmemw(easeg, cpu_state.eaaddr + 8); /* no 16-bit wrap in a32 */
    load_reg_lo = readmemq(easeg, cpu_state.eaaddr);
    if (cpu_state.abrt)
        return 1;
    clear_C1();
    if (!IS_TAG_EMPTY(-1)) {
        FPU_stack_overflow(fetchdat);
    } else {
        /* Accumulate the 16 low digits, least significant first... */
        for (int n = 0; n < 16; n++) {
            val64 += ((load_reg_lo & 0x0f) * scale);
            load_reg_lo >>= 4;
            scale *= 10;
        }
        /* ...then digits 17 and 18 from the high word. */
        val64 += ((load_reg_hi & 0x0f) * scale);
        val64 += (((load_reg_hi >> 4) & 0x0f) * scale * 10);
        result = (floatx80) i64_to_extF80(val64);
        /* NOTE(review): confirm floatx80_chs() negates in place. */
        if (load_reg_hi & 0x8000)
            floatx80_chs(result);
        FPU_push();
        FPU_save_regi(result, 0);
    }
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fild_64) : (x87_timings.fild_64 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fild_64) : (x87_concurrency.fild_64 * cpu_multi));
    return 0;
}
#endif
/* FLD m32fp (16-bit addressing): load a single-precision float,
   convert to extended double and push.  Conversion flags are routed
   through FPU_exception(); the push is suppressed when an unmasked
   invalid exception occurs. */
static int
sf_FLDs_a16(uint32_t fetchdat)
{
    struct softfloat_status_t status;
    floatx80 result;
    float32 load_reg;
    unsigned unmasked;
    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_16(fetchdat);
    SEG_CHECK_READ(cpu_state.ea_seg);
    load_reg = geteal();
    if (cpu_state.abrt)
        return 1;
    clear_C1();
    if (!IS_TAG_EMPTY(-1)) {
        FPU_stack_overflow(fetchdat);
        goto next_ins;
    }
    status = i387cw_to_softfloat_status_word(i387_get_control_word());
    result = f32_to_extF80(load_reg, &status);
    unmasked = FPU_exception(fetchdat, status.softfloat_exceptionFlags, 0);
    if (!(unmasked & FPU_CW_Invalid)) {
        FPU_push();
        FPU_save_regi(result, 0);
    }
next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fst_32) : (x87_timings.fst_32 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fst_32) : (x87_concurrency.fst_32 * cpu_multi));
    return 0;
}
#ifndef FPU_8087
/* 32-bit-addressing form of sf_FLDs_a16. */
static int
sf_FLDs_a32(uint32_t fetchdat)
{
    struct softfloat_status_t status;
    floatx80 result;
    float32 load_reg;
    unsigned unmasked;
    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_32(fetchdat);
    SEG_CHECK_READ(cpu_state.ea_seg);
    load_reg = geteal();
    if (cpu_state.abrt)
        return 1;
    clear_C1();
    if (!IS_TAG_EMPTY(-1)) {
        FPU_stack_overflow(fetchdat);
        goto next_ins;
    }
    status = i387cw_to_softfloat_status_word(i387_get_control_word());
    result = f32_to_extF80(load_reg, &status);
    unmasked = FPU_exception(fetchdat, status.softfloat_exceptionFlags, 0);
    /* Only push when no unmasked invalid exception was raised. */
    if (!(unmasked & FPU_CW_Invalid)) {
        FPU_push();
        FPU_save_regi(result, 0);
    }
next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fst_32) : (x87_timings.fst_32 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fst_32) : (x87_concurrency.fst_32 * cpu_multi));
    return 0;
}
#endif
/* FLD m64fp (16-bit addressing): load a double-precision float,
   convert to extended double and push; same exception handling as
   sf_FLDs_a16. */
static int
sf_FLDd_a16(uint32_t fetchdat)
{
    struct softfloat_status_t status;
    floatx80 result;
    float64 load_reg;
    unsigned unmasked;
    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_16(fetchdat);
    SEG_CHECK_READ(cpu_state.ea_seg);
    load_reg = geteaq();
    if (cpu_state.abrt)
        return 1;
    clear_C1();
    if (!IS_TAG_EMPTY(-1)) {
        FPU_stack_overflow(fetchdat);
        goto next_ins;
    }
    status = i387cw_to_softfloat_status_word(i387_get_control_word());
    result = f64_to_extF80(load_reg, &status);
    unmasked = FPU_exception(fetchdat, status.softfloat_exceptionFlags, 0);
    if (!(unmasked & FPU_CW_Invalid)) {
        FPU_push();
        FPU_save_regi(result, 0);
    }
next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fld_64) : (x87_timings.fld_64 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fld_64) : (x87_concurrency.fld_64 * cpu_multi));
    return 0;
}
#ifndef FPU_8087
/* 32-bit-addressing form of sf_FLDd_a16. */
static int
sf_FLDd_a32(uint32_t fetchdat)
{
    struct softfloat_status_t status;
    floatx80 result;
    float64 load_reg;
    unsigned unmasked;
    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_32(fetchdat);
    SEG_CHECK_READ(cpu_state.ea_seg);
    load_reg = geteaq();
    if (cpu_state.abrt)
        return 1;
    clear_C1();
    if (!IS_TAG_EMPTY(-1)) {
        FPU_stack_overflow(fetchdat);
        goto next_ins;
    }
    status = i387cw_to_softfloat_status_word(i387_get_control_word());
    result = f64_to_extF80(load_reg, &status);
    unmasked = FPU_exception(fetchdat, status.softfloat_exceptionFlags, 0);
    if (!(unmasked & FPU_CW_Invalid)) {
        FPU_push();
        FPU_save_regi(result, 0);
    }
next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fld_64) : (x87_timings.fld_64 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fld_64) : (x87_concurrency.fld_64 * cpu_multi));
    return 0;
}
#endif
/* FLD m80fp (16-bit addressing): raw 80-bit load - significand and
   sign/exponent words are read directly, so there is no conversion and
   no arithmetic exception to handle; only stack overflow is checked. */
static int
sf_FLDe_a16(uint32_t fetchdat)
{
    floatx80 result;
    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_16(fetchdat);
    SEG_CHECK_READ(cpu_state.ea_seg);
    result.signif = readmemq(easeg, cpu_state.eaaddr);
    result.signExp = readmemw(easeg, cpu_state.eaaddr + 8);
    if (cpu_state.abrt)
        return 1;
    clear_C1();
    if (!IS_TAG_EMPTY(-1)) {
        FPU_stack_overflow(fetchdat);
    } else {
        FPU_push();
        FPU_save_regi(result, 0);
    }
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fld_80) : (x87_timings.fld_80 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fld_80) : (x87_concurrency.fld_80 * cpu_multi));
    return 0;
}
#ifndef FPU_8087
/* 32-bit-addressing form of sf_FLDe_a16. */
static int
sf_FLDe_a32(uint32_t fetchdat)
{
    floatx80 result;
    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_32(fetchdat);
    SEG_CHECK_READ(cpu_state.ea_seg);
    result.signif = readmemq(easeg, cpu_state.eaaddr);
    result.signExp = readmemw(easeg, cpu_state.eaaddr + 8);
    if (cpu_state.abrt)
        return 1;
    clear_C1();
    if (!IS_TAG_EMPTY(-1)) {
        FPU_stack_overflow(fetchdat);
    } else {
        FPU_push();
        FPU_save_regi(result, 0);
    }
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fld_80) : (x87_timings.fld_80 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fld_80) : (x87_concurrency.fld_80 * cpu_multi));
    return 0;
}
#endif
/* FLD ST(i): push a copy of stack register i (low 3 bits of the modrm
   byte).  An empty source raises stack underflow; if the invalid
   exception is masked, the default QNaN is pushed instead. */
static int
sf_FLD_sti(uint32_t fetchdat)
{
    const floatx80 floatx80_default_nan = packFloatx80(0, floatx80_default_nan_exp, floatx80_default_nan_fraction);
    floatx80 sti_reg;
    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++;
    clear_C1();
    if (!IS_TAG_EMPTY(-1)) {
        FPU_stack_overflow(fetchdat);
        goto next_ins;
    }
    sti_reg = floatx80_default_nan;
    if (IS_TAG_EMPTY(fetchdat & 7)) {
        FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0);
        /* Unmasked invalid: abort the push entirely. */
        if (!is_IA_masked())
            goto next_ins;
    } else
        sti_reg = FPU_read_regi(fetchdat & 7);
    FPU_push();
    FPU_save_regi(sti_reg, 0);
next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fld) : (x87_timings.fld * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fld) : (x87_concurrency.fld * cpu_multi));
    return 0;
}
/* FIST m16int (16-bit addressing): round ST(0) to a signed 16-bit
   integer (rounding mode taken from the FPU control word via
   i387cw_to_softfloat_status_word) and store it; does not pop.  On
   masked stack underflow the int16 indefinite value is stored. */
static int
sf_FISTiw_a16(uint32_t fetchdat)
{
    struct softfloat_status_t status;
    uint16_t sw = fpu_state.swd;
    int16_t save_reg = int16_indefinite;
    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_16(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    clear_C1();
    if (IS_TAG_EMPTY(0)) {
        FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0);
        if (!is_IA_masked()) {
            goto next_ins;
        }
    } else {
        status = i387cw_to_softfloat_status_word(i387_get_control_word());
        save_reg = extF80_to_i16(FPU_read_regi(0), &status);
        if (FPU_exception(fetchdat, status.softfloat_exceptionFlags, 1)) {
            goto next_ins;
        }
    }
    // store to the memory might generate an exception, in this case original FPU_SW must be kept
    swap_values16u(sw, fpu_state.swd);
    seteaw(save_reg);
    fpu_state.swd = sw;
next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fist_16) : (x87_timings.fist_16 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fist_16) : (x87_concurrency.fist_16 * cpu_multi));
    return cpu_state.abrt;
}
#ifndef FPU_8087
/* 32-bit-addressing form of sf_FISTiw_a16. */
static int
sf_FISTiw_a32(uint32_t fetchdat)
{
    struct softfloat_status_t status;
    uint16_t sw = fpu_state.swd;
    int16_t save_reg = int16_indefinite;
    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_32(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    clear_C1();
    if (IS_TAG_EMPTY(0)) {
        FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0);
        if (!is_IA_masked())
            goto next_ins;
    } else {
        status = i387cw_to_softfloat_status_word(i387_get_control_word());
        save_reg = extF80_to_i16(FPU_read_regi(0), &status);
        if (FPU_exception(fetchdat, status.softfloat_exceptionFlags, 1))
            goto next_ins;
    }
    // store to the memory might generate an exception, in this case original FPU_SW must be kept
    swap_values16u(sw, fpu_state.swd);
    seteaw(save_reg);
    fpu_state.swd = sw;
next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fist_16) : (x87_timings.fist_16 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fist_16) : (x87_concurrency.fist_16 * cpu_multi));
    return cpu_state.abrt;
}
#endif
/* FISTP m16int (16-bit addressing): like sf_FISTiw_a16, but pops ST(0)
   after a successful store (the pop is skipped if the store aborts). */
static int
sf_FISTPiw_a16(uint32_t fetchdat)
{
    struct softfloat_status_t status;
    uint16_t sw = fpu_state.swd;
    int16_t save_reg = int16_indefinite;
    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_16(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    clear_C1();
    if (IS_TAG_EMPTY(0)) {
        FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0);
        if (!is_IA_masked())
            goto next_ins;
    } else {
        status = i387cw_to_softfloat_status_word(i387_get_control_word());
        save_reg = extF80_to_i16(FPU_read_regi(0), &status);
        if (FPU_exception(fetchdat, status.softfloat_exceptionFlags, 1)) {
            goto next_ins;
        }
    }
    // store to the memory might generate an exception, in this case original FPU_SW must be kept
    swap_values16u(sw, fpu_state.swd);
    seteaw(save_reg);
    if (cpu_state.abrt)
        return 1;
    fpu_state.swd = sw;
    FPU_pop();
next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fist_16) : (x87_timings.fist_16 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fist_16) : (x87_concurrency.fist_16 * cpu_multi));
    return 0;
}
#ifndef FPU_8087
/* 32-bit-addressing form of sf_FISTPiw_a16. */
static int
sf_FISTPiw_a32(uint32_t fetchdat)
{
    struct softfloat_status_t status;
    uint16_t sw = fpu_state.swd;
    int16_t save_reg = int16_indefinite;
    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_32(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    clear_C1();
    if (IS_TAG_EMPTY(0)) {
        FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0);
        if (!is_IA_masked())
            goto next_ins;
    } else {
        status = i387cw_to_softfloat_status_word(i387_get_control_word());
        save_reg = extF80_to_i16(FPU_read_regi(0), &status);
        if (FPU_exception(fetchdat, status.softfloat_exceptionFlags, 1))
            goto next_ins;
    }
    // store to the memory might generate an exception, in this case original FPU_SW must be kept
    swap_values16u(sw, fpu_state.swd);
    seteaw(save_reg);
    if (cpu_state.abrt)
        return 1;
    fpu_state.swd = sw;
    FPU_pop();
next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fist_16) : (x87_timings.fist_16 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fist_16) : (x87_concurrency.fist_16 * cpu_multi));
    return 0;
}
#endif
/* FIST m32int (16-bit addressing): round ST(0) to a signed 32-bit
   integer and store it; does not pop.  On masked stack underflow the
   int32 indefinite value is stored. */
static int
sf_FISTil_a16(uint32_t fetchdat)
{
    struct softfloat_status_t status;
    uint16_t sw = fpu_state.swd;
    int32_t save_reg = int32_indefinite;
    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_16(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    clear_C1();
    if (IS_TAG_EMPTY(0)) {
        FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0);
        if (!is_IA_masked())
            goto next_ins;
    } else {
        status = i387cw_to_softfloat_status_word(i387_get_control_word());
        save_reg = extF80_to_i32_normal(FPU_read_regi(0), &status);
        if (FPU_exception(fetchdat, status.softfloat_exceptionFlags, 1)) {
            goto next_ins;
        }
    }
    // store to the memory might generate an exception, in this case original FPU_SW must be kept
    swap_values16u(sw, fpu_state.swd);
    seteal(save_reg);
    fpu_state.swd = sw;
next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fist_32) : (x87_timings.fist_32 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fist_32) : (x87_concurrency.fist_32 * cpu_multi));
    return cpu_state.abrt;
}
#ifndef FPU_8087
/* 32-bit-addressing form of sf_FISTil_a16. */
static int
sf_FISTil_a32(uint32_t fetchdat)
{
    struct softfloat_status_t status;
    uint16_t sw = fpu_state.swd;
    int32_t save_reg = int32_indefinite;
    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_32(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    clear_C1();
    if (IS_TAG_EMPTY(0)) {
        FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0);
        if (!is_IA_masked())
            goto next_ins;
    } else {
        status = i387cw_to_softfloat_status_word(i387_get_control_word());
        save_reg = extF80_to_i32_normal(FPU_read_regi(0), &status);
        if (FPU_exception(fetchdat, status.softfloat_exceptionFlags, 1))
            goto next_ins;
    }
    // store to the memory might generate an exception, in this case original FPU_SW must be kept
    swap_values16u(sw, fpu_state.swd);
    seteal(save_reg);
    fpu_state.swd = sw;
next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fist_32) : (x87_timings.fist_32 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fist_32) : (x87_concurrency.fist_32 * cpu_multi));
    return cpu_state.abrt;
}
#endif
/* FISTP m32int (16-bit addressing): like sf_FISTil_a16, but pops ST(0)
   after a successful store. */
static int
sf_FISTPil_a16(uint32_t fetchdat)
{
    struct softfloat_status_t status;
    uint16_t sw = fpu_state.swd;
    int32_t save_reg = int32_indefinite;
    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_16(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    clear_C1();
    if (IS_TAG_EMPTY(0)) {
        FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0);
        if (!is_IA_masked())
            goto next_ins;
    } else {
        status = i387cw_to_softfloat_status_word(i387_get_control_word());
        save_reg = extF80_to_i32_normal(FPU_read_regi(0), &status);
        if (FPU_exception(fetchdat, status.softfloat_exceptionFlags, 1)) {
            goto next_ins;
        }
    }
    // store to the memory might generate an exception, in this case original FPU_SW must be kept
    swap_values16u(sw, fpu_state.swd);
    seteal(save_reg);
    if (cpu_state.abrt)
        return 1;
    fpu_state.swd = sw;
    FPU_pop();
next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fist_32) : (x87_timings.fist_32 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fist_32) : (x87_concurrency.fist_32 * cpu_multi));
    return 0;
}
#ifndef FPU_8087
/* 32-bit-addressing form of sf_FISTPil_a16. */
static int
sf_FISTPil_a32(uint32_t fetchdat)
{
    struct softfloat_status_t status;
    uint16_t sw = fpu_state.swd;
    int32_t save_reg = int32_indefinite;
    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_32(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    clear_C1();
    if (IS_TAG_EMPTY(0)) {
        FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0);
        if (!is_IA_masked())
            goto next_ins;
    } else {
        status = i387cw_to_softfloat_status_word(i387_get_control_word());
        save_reg = extF80_to_i32_normal(FPU_read_regi(0), &status);
        if (FPU_exception(fetchdat, status.softfloat_exceptionFlags, 1))
            goto next_ins;
    }
    // store to the memory might generate an exception, in this case original FPU_SW must be kept
    swap_values16u(sw, fpu_state.swd);
    seteal(save_reg);
    if (cpu_state.abrt)
        return 1;
    fpu_state.swd = sw;
    FPU_pop();
next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fist_32) : (x87_timings.fist_32 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fist_32) : (x87_concurrency.fist_32 * cpu_multi));
    return 0;
}
#endif
/* FISTP m64int (16-bit addressing): round ST(0) to a signed 64-bit
   integer, store it and pop.  On masked stack underflow the int64
   indefinite value is stored. */
static int
sf_FISTPiq_a16(uint32_t fetchdat)
{
    struct softfloat_status_t status;
    uint16_t sw = fpu_state.swd;
    int64_t save_reg = int64_indefinite;
    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_16(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    clear_C1();
    if (IS_TAG_EMPTY(0)) {
        FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0);
        if (!is_IA_masked())
            goto next_ins;
    } else {
        status = i387cw_to_softfloat_status_word(i387_get_control_word());
        save_reg = extF80_to_i64_normal(FPU_read_regi(0), &status);
        if (FPU_exception(fetchdat, status.softfloat_exceptionFlags, 1)) {
            goto next_ins;
        }
    }
    // store to the memory might generate an exception, in this case original FPU_SW must be kept
    swap_values16u(sw, fpu_state.swd);
    seteaq(save_reg);
    if (cpu_state.abrt)
        return 1;
    fpu_state.swd = sw;
    FPU_pop();
next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fist_64) : (x87_timings.fist_64 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fist_64) : (x87_concurrency.fist_64 * cpu_multi));
    return 0;
}
#ifndef FPU_8087
/* 32-bit-addressing form of sf_FISTPiq_a16. */
static int
sf_FISTPiq_a32(uint32_t fetchdat)
{
    struct softfloat_status_t status;
    uint16_t sw = fpu_state.swd;
    int64_t save_reg = int64_indefinite;
    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_32(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    clear_C1();
    if (IS_TAG_EMPTY(0)) {
        FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0);
        if (!is_IA_masked())
            goto next_ins;
    } else {
        status = i387cw_to_softfloat_status_word(i387_get_control_word());
        save_reg = extF80_to_i64_normal(FPU_read_regi(0), &status);
        if (FPU_exception(fetchdat, status.softfloat_exceptionFlags, 1))
            goto next_ins;
    }
    // store to the memory might generate an exception, in this case original FPU_SW must be kept
    swap_values16u(sw, fpu_state.swd);
    seteaq(save_reg);
    if (cpu_state.abrt)
        return 1;
    fpu_state.swd = sw;
    FPU_pop();
next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fist_64) : (x87_timings.fist_64 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fist_64) : (x87_concurrency.fist_64 * cpu_multi));
    return 0;
}
#endif
/* FBSTP m80bcd (16-bit addressing): round ST(0) to integer, store it
   as 18-digit packed BCD (sign in bit 15 of the high word), then pop.
   Magnitudes over 18 decimal digits raise invalid; on masked invalid
   the BCD indefinite pattern preset in save_reg_hi/save_reg_lo is
   written instead. */
static int
sf_FBSTP_PACKED_BCD_a16(uint32_t fetchdat)
{
    struct softfloat_status_t status;
    uint16_t sw = fpu_state.swd;
    uint16_t save_reg_hi = 0xffff;                            /* BCD indefinite (high word) */
    uint64_t save_reg_lo = BX_CONST64(0xC000000000000000);    /* BCD indefinite (low qword) */
    floatx80 reg;
    int64_t save_val;
    int sign;
    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_16(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    clear_C1();
    if (IS_TAG_EMPTY(0)) {
        FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0);
        if (!is_IA_masked())
            goto next_ins;
    } else {
        status = i387cw_to_softfloat_status_word(i387_get_control_word());
        reg = FPU_read_regi(0);
        save_val = extF80_to_i64_normal(reg, &status);
        sign = extF80_sign(reg);
        if (sign)
            save_val = -save_val;
        if (save_val > BX_CONST64(999999999999999999))
            softfloat_setFlags(&status, softfloat_flag_invalid); // throw away other flags
        if (!(status.softfloat_exceptionFlags & softfloat_flag_invalid)) {
            save_reg_hi = sign ? 0x8000 : 0;
            save_reg_lo = 0;
            /* Emit the 16 low digits, one nibble each, LSD first... */
            for (int i = 0; i < 16; i++) {
                save_reg_lo += ((uint64_t) (save_val % 10)) << (4 * i);
                save_val /= 10;
            }
            /* ...then digits 17 and 18 into the high word. */
            save_reg_hi += (uint16_t) (save_val % 10);
            save_val /= 10;
            save_reg_hi += (uint16_t) (save_val % 10) << 4;
        }
        /* check for fpu arithmetic exceptions */
        if (FPU_exception(fetchdat, status.softfloat_exceptionFlags, 1)) {
            goto next_ins;
        }
    }
    // store to the memory might generate an exception, in this case original FPU_SW must be kept
    swap_values16u(sw, fpu_state.swd);
    // write packed bcd to memory
    writememq(easeg, cpu_state.eaaddr, save_reg_lo);
    writememw(easeg, cpu_state.eaaddr + 8, save_reg_hi);
    if (cpu_state.abrt)
        return 1;
    fpu_state.swd = sw;
    FPU_pop();
next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fbstp) : (x87_timings.fbstp * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fbstp) : (x87_concurrency.fbstp * cpu_multi));
    return 0;
}
#ifndef FPU_8087
/* 32-bit-addressing form of sf_FBSTP_PACKED_BCD_a16. */
static int
sf_FBSTP_PACKED_BCD_a32(uint32_t fetchdat)
{
    struct softfloat_status_t status;
    uint16_t sw = fpu_state.swd;
    uint16_t save_reg_hi = 0xffff;                            /* BCD indefinite (high word) */
    uint64_t save_reg_lo = BX_CONST64(0xC000000000000000);    /* BCD indefinite (low qword) */
    floatx80 reg;
    int64_t save_val;
    int sign;
    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_32(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    clear_C1();
    if (IS_TAG_EMPTY(0)) {
        FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0);
        if (!is_IA_masked())
            goto next_ins;
    } else {
        status = i387cw_to_softfloat_status_word(i387_get_control_word());
        reg = FPU_read_regi(0);
        save_val = extF80_to_i64_normal(reg, &status);
        sign = extF80_sign(reg);
        if (sign)
            save_val = -save_val;
        if (save_val > BX_CONST64(999999999999999999))
            softfloat_setFlags(&status, softfloat_flag_invalid); // throw away other flags
        if (!(status.softfloat_exceptionFlags & softfloat_flag_invalid)) {
            save_reg_hi = sign ? 0x8000 : 0;
            save_reg_lo = 0;
            /* Emit the 16 low digits, one nibble each, LSD first... */
            for (int i = 0; i < 16; i++) {
                save_reg_lo += ((uint64_t) (save_val % 10)) << (4 * i);
                save_val /= 10;
            }
            /* ...then digits 17 and 18 into the high word. */
            save_reg_hi += (uint16_t) (save_val % 10);
            save_val /= 10;
            save_reg_hi += (uint16_t) (save_val % 10) << 4;
        }
        /* check for fpu arithmetic exceptions */
        if (FPU_exception(fetchdat, status.softfloat_exceptionFlags, 1)) {
            goto next_ins;
        }
    }
    // store to the memory might generate an exception, in this case original FPU_SW must be kept
    swap_values16u(sw, fpu_state.swd);
    // write packed bcd to memory
    writememq(easeg, cpu_state.eaaddr, save_reg_lo);
    writememw(easeg, cpu_state.eaaddr + 8, save_reg_hi);
    if (cpu_state.abrt)
        return 1;
    fpu_state.swd = sw;
    FPU_pop();
next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fbstp) : (x87_timings.fbstp * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fbstp) : (x87_concurrency.fbstp * cpu_multi));
    return 0;
}
#endif
static int
sf_FSTs_a16(uint32_t fetchdat)
{
    /* FST m32fp (16-bit addressing): store ST(0) to memory as single precision. */
    struct softfloat_status_t status;
    uint16_t saved_sw = fpu_state.swd;
    float32  result   = float32_default_nan;

    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_16(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    clear_C1();
    if (!IS_TAG_EMPTY(0)) {
        /* Narrow ST(0) to f32, collecting softfloat exception flags. */
        status = i387cw_to_softfloat_status_word(i387_get_control_word());
        result = extF80_to_f32(FPU_read_regi(0), &status);
        if (FPU_exception(fetchdat, status.softfloat_exceptionFlags, 1))
            goto next_ins;
    } else {
        FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0);
        if (!is_IA_masked())
            goto next_ins;
    }
    /* The store may fault; keep the pre-store status word until it succeeds. */
    swap_values16u(saved_sw, fpu_state.swd);
    seteal(result);
    fpu_state.swd = saved_sw;
next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fst_32) : (x87_timings.fst_32 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fst_32) : (x87_concurrency.fst_32 * cpu_multi));
    return cpu_state.abrt;
}
#ifndef FPU_8087
static int
sf_FSTs_a32(uint32_t fetchdat)
{
    /* FST m32fp (32-bit addressing): store ST(0) to memory as single precision. */
    struct softfloat_status_t status;
    uint16_t saved_sw = fpu_state.swd;
    float32  result   = float32_default_nan;

    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_32(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    clear_C1();
    if (!IS_TAG_EMPTY(0)) {
        /* Narrow ST(0) to f32, collecting softfloat exception flags. */
        status = i387cw_to_softfloat_status_word(i387_get_control_word());
        result = extF80_to_f32(FPU_read_regi(0), &status);
        if (FPU_exception(fetchdat, status.softfloat_exceptionFlags, 1))
            goto next_ins;
    } else {
        FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0);
        if (!is_IA_masked())
            goto next_ins;
    }
    /* The store may fault; keep the pre-store status word until it succeeds. */
    swap_values16u(saved_sw, fpu_state.swd);
    seteal(result);
    fpu_state.swd = saved_sw;
next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fst_32) : (x87_timings.fst_32 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fst_32) : (x87_concurrency.fst_32 * cpu_multi));
    return cpu_state.abrt;
}
#endif
static int
sf_FSTPs_a16(uint32_t fetchdat)
{
    /* FSTP m32fp (16-bit addressing): store ST(0) as single precision, then pop. */
    struct softfloat_status_t status;
    uint16_t saved_sw = fpu_state.swd;
    float32  result   = float32_default_nan;

    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_16(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    clear_C1();
    if (!IS_TAG_EMPTY(0)) {
        /* Narrow ST(0) to f32, collecting softfloat exception flags. */
        status = i387cw_to_softfloat_status_word(i387_get_control_word());
        result = extF80_to_f32(FPU_read_regi(0), &status);
        if (FPU_exception(fetchdat, status.softfloat_exceptionFlags, 1))
            goto next_ins;
    } else {
        FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0);
        if (!is_IA_masked())
            goto next_ins;
    }
    /* The store may fault; keep the pre-store status word until it succeeds. */
    swap_values16u(saved_sw, fpu_state.swd);
    seteal(result);
    if (cpu_state.abrt)
        return 1;
    fpu_state.swd = saved_sw;
    FPU_pop();
next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fst_32) : (x87_timings.fst_32 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fst_32) : (x87_concurrency.fst_32 * cpu_multi));
    return 0;
}
#ifndef FPU_8087
static int
sf_FSTPs_a32(uint32_t fetchdat)
{
    /* FSTP m32fp (32-bit addressing): store ST(0) as single precision, then pop. */
    struct softfloat_status_t status;
    uint16_t saved_sw = fpu_state.swd;
    float32  result   = float32_default_nan;

    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_32(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    clear_C1();
    if (!IS_TAG_EMPTY(0)) {
        /* Narrow ST(0) to f32, collecting softfloat exception flags. */
        status = i387cw_to_softfloat_status_word(i387_get_control_word());
        result = extF80_to_f32(FPU_read_regi(0), &status);
        if (FPU_exception(fetchdat, status.softfloat_exceptionFlags, 1))
            goto next_ins;
    } else {
        FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0);
        if (!is_IA_masked())
            goto next_ins;
    }
    /* The store may fault; keep the pre-store status word until it succeeds. */
    swap_values16u(saved_sw, fpu_state.swd);
    seteal(result);
    if (cpu_state.abrt)
        return 1;
    fpu_state.swd = saved_sw;
    FPU_pop();
next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fst_32) : (x87_timings.fst_32 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fst_32) : (x87_concurrency.fst_32 * cpu_multi));
    return 0;
}
#endif
static int
sf_FSTd_a16(uint32_t fetchdat)
{
    /* FST m64fp (16-bit addressing): store ST(0) to memory as double precision. */
    struct softfloat_status_t status;
    uint16_t saved_sw = fpu_state.swd;
    float64  result   = float64_default_nan;

    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_16(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    clear_C1();
    if (!IS_TAG_EMPTY(0)) {
        /* Narrow ST(0) to f64, collecting softfloat exception flags. */
        status = i387cw_to_softfloat_status_word(i387_get_control_word());
        result = extF80_to_f64(FPU_read_regi(0), &status);
        if (FPU_exception(fetchdat, status.softfloat_exceptionFlags, 1))
            goto next_ins;
    } else {
        FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0);
        if (!is_IA_masked())
            goto next_ins;
    }
    /* The store may fault; keep the pre-store status word until it succeeds. */
    swap_values16u(saved_sw, fpu_state.swd);
    seteaq(result);
    fpu_state.swd = saved_sw;
next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fst_64) : (x87_timings.fst_64 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fst_64) : (x87_concurrency.fst_64 * cpu_multi));
    return cpu_state.abrt;
}
#ifndef FPU_8087
static int
sf_FSTd_a32(uint32_t fetchdat)
{
    /* FST m64fp (32-bit addressing): store ST(0) to memory as double precision. */
    struct softfloat_status_t status;
    uint16_t saved_sw = fpu_state.swd;
    float64  result   = float64_default_nan;

    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_32(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    clear_C1();
    if (!IS_TAG_EMPTY(0)) {
        /* Narrow ST(0) to f64, collecting softfloat exception flags. */
        status = i387cw_to_softfloat_status_word(i387_get_control_word());
        result = extF80_to_f64(FPU_read_regi(0), &status);
        if (FPU_exception(fetchdat, status.softfloat_exceptionFlags, 1))
            goto next_ins;
    } else {
        FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0);
        if (!is_IA_masked())
            goto next_ins;
    }
    /* The store may fault; keep the pre-store status word until it succeeds. */
    swap_values16u(saved_sw, fpu_state.swd);
    seteaq(result);
    fpu_state.swd = saved_sw;
next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fst_64) : (x87_timings.fst_64 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fst_64) : (x87_concurrency.fst_64 * cpu_multi));
    return cpu_state.abrt;
}
#endif
static int
sf_FSTPd_a16(uint32_t fetchdat)
{
    /* FSTP m64fp (16-bit addressing): store ST(0) as double precision, then pop. */
    struct softfloat_status_t status;
    uint16_t saved_sw = fpu_state.swd;
    float64  result   = float64_default_nan;

    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_16(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    clear_C1();
    if (!IS_TAG_EMPTY(0)) {
        /* Narrow ST(0) to f64, collecting softfloat exception flags. */
        status = i387cw_to_softfloat_status_word(i387_get_control_word());
        result = extF80_to_f64(FPU_read_regi(0), &status);
        if (FPU_exception(fetchdat, status.softfloat_exceptionFlags, 1))
            goto next_ins;
    } else {
        FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0);
        if (!is_IA_masked())
            goto next_ins;
    }
    /* The store may fault; keep the pre-store status word until it succeeds. */
    swap_values16u(saved_sw, fpu_state.swd);
    seteaq(result);
    if (cpu_state.abrt)
        return 1;
    fpu_state.swd = saved_sw;
    FPU_pop();
next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fst_64) : (x87_timings.fst_64 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fst_64) : (x87_concurrency.fst_64 * cpu_multi));
    return 0;
}
#ifndef FPU_8087
static int
sf_FSTPd_a32(uint32_t fetchdat)
{
    /* FSTP m64fp (32-bit addressing): store ST(0) as double precision, then pop. */
    struct softfloat_status_t status;
    uint16_t saved_sw = fpu_state.swd;
    float64  result   = float64_default_nan;

    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_32(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    clear_C1();
    if (!IS_TAG_EMPTY(0)) {
        /* Narrow ST(0) to f64, collecting softfloat exception flags. */
        status = i387cw_to_softfloat_status_word(i387_get_control_word());
        result = extF80_to_f64(FPU_read_regi(0), &status);
        if (FPU_exception(fetchdat, status.softfloat_exceptionFlags, 1))
            goto next_ins;
    } else {
        FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0);
        if (!is_IA_masked())
            goto next_ins;
    }
    /* The store may fault; keep the pre-store status word until it succeeds. */
    swap_values16u(saved_sw, fpu_state.swd);
    seteaq(result);
    if (cpu_state.abrt)
        return 1;
    fpu_state.swd = saved_sw;
    FPU_pop();
next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fst_64) : (x87_timings.fst_64 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fst_64) : (x87_concurrency.fst_64 * cpu_multi));
    return 0;
}
#endif
static int
sf_FSTPe_a16(uint32_t fetchdat)
{
    /* FSTP m80fp (16-bit addressing): store ST(0) in extended precision, then pop. */
    const floatx80 floatx80_default_nan = packFloatx80(0, floatx80_default_nan_exp, floatx80_default_nan_fraction);
    floatx80       value;

    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_16(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    if (cpu_state.abrt)
        return 1;
    value = floatx80_default_nan;
    clear_C1();
    if (!IS_TAG_EMPTY(0))
        value = FPU_read_regi(0);
    else {
        FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0);
        if (!is_IA_masked())
            goto next_ins;
    }
    /* 80-bit store: 64-bit significand first, then the 16-bit sign/exponent word. */
    writememq(easeg, cpu_state.eaaddr, value.signif);
    writememw(easeg, cpu_state.eaaddr + 8, value.signExp);
    FPU_pop();
next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fst_80) : (x87_timings.fst_80 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fst_80) : (x87_concurrency.fst_80 * cpu_multi));
    return 0;
}
#ifndef FPU_8087
static int
sf_FSTPe_a32(uint32_t fetchdat)
{
    /* FSTP m80fp (32-bit addressing): store ST(0) in extended precision, then pop. */
    const floatx80 floatx80_default_nan = packFloatx80(0, floatx80_default_nan_exp, floatx80_default_nan_fraction);
    floatx80       value;

    FP_ENTER();
    FPU_check_pending_exceptions();
    fetch_ea_32(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    if (cpu_state.abrt)
        return 1;
    value = floatx80_default_nan;
    clear_C1();
    if (!IS_TAG_EMPTY(0))
        value = FPU_read_regi(0);
    else {
        FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0);
        if (!is_IA_masked())
            goto next_ins;
    }
    /* 80-bit store: 64-bit significand first, then the 16-bit sign/exponent word. */
    writememq(easeg, cpu_state.eaaddr, value.signif);
    writememw(easeg, cpu_state.eaaddr + 8, value.signExp);
    FPU_pop();
next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fst_80) : (x87_timings.fst_80 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fst_80) : (x87_concurrency.fst_80 * cpu_multi));
    return 0;
}
#endif
static int
sf_FST_sti(uint32_t fetchdat)
{
    /* FST st(i): copy ST(0) into ST(i); stack underflow if ST(0) is empty. */
    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++;
    clear_C1();
    if (!IS_TAG_EMPTY(0))
        FPU_save_regi(FPU_read_regi(0), fetchdat & 7);
    else
        FPU_stack_underflow(fetchdat, fetchdat & 7, 0);
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fst) : (x87_timings.fst * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fst) : (x87_concurrency.fst * cpu_multi));
    return 0;
}
/* FSTP st(i): copy ST(0) into ST(i) and pop the register stack. */
static int
sf_FSTP_sti(uint32_t fetchdat)
{
    floatx80 st0_reg;
    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++;
    clear_C1();
    if (IS_TAG_EMPTY(0)) {
        /* NOTE(review): unlike sf_FST_sti, an empty ST(0) here does not raise
           a stack-underflow exception -- it just pops. Confirm this is the
           intended hardware behavior for FSTP st(i). */
        FPU_pop();
    } else {
        st0_reg = FPU_read_regi(0);
        FPU_save_regi(st0_reg, fetchdat & 7);
        FPU_pop();
    }
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fst) : (x87_timings.fst * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fst) : (x87_concurrency.fst * cpu_multi));
    return 0;
}
#ifndef FPU_8087
# ifndef OPS_286_386
/* Template for the FCMOVcc handlers: when the condition computed from the
   integer EFLAGS holds, ST(i) is copied into ST(0). An empty ST(0) or ST(i)
   raises stack underflow instead of moving. */
#  define sf_FCMOV(condition)                                  \
    static int sf_FCMOV##condition(uint32_t fetchdat)          \
    {                                                          \
        FP_ENTER();                                            \
        FPU_check_pending_exceptions();                        \
        cpu_state.pc++;                                        \
        if (IS_TAG_EMPTY(0) || IS_TAG_EMPTY(fetchdat & 7))     \
            FPU_stack_underflow(fetchdat, 0, 0);               \
        else {                                                 \
            if (cond_##condition) {                            \
                FPU_save_regi(FPU_read_regi(fetchdat & 7), 0); \
            }                                                  \
        }                                                      \
        CLOCK_CYCLES_FPU(4);                                   \
        return 0;                                              \
    }
/* FCMOVU/FCMOVNU test PF (set by FUCOM/FCOMI-style compares for unordered). */
#  define cond_U  (PF_SET())
#  define cond_NU (!PF_SET())
// clang-format off
sf_FCMOV(B)
sf_FCMOV(E)
sf_FCMOV(BE)
sf_FCMOV(U)
sf_FCMOV(NB)
sf_FCMOV(NE)
sf_FCMOV(NBE)
sf_FCMOV(NU)
// clang-format on
# endif
#endif
``` | /content/code_sandbox/src/cpu/x87_ops_sf_load_store.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 13,512 |
```objective-c
/*
* 86Box A hypervisor and IBM PC system emulator that specializes in
* running old operating systems and software designed for IBM
* PC systems and compatibles from 1981 through fairly recent
* system designs based on the PCI bus.
*
* This file is part of the 86Box distribution.
*
* Common 386 CPU code.
*
*
*
* Authors: Sarah Walker, <path_to_url
* Miran Grca, <mgrca8@gmail.com>
*
*/
#ifndef _386_COMMON_H_
#define _386_COMMON_H_
#include <stddef.h>
#include <inttypes.h>
#ifdef OPS_286_386
/* OPS_286_386 build: every access goes straight through the *_2386 helper
   functions -- no readlookup2/writelookup2 fast path is used here. */
/* The *_n variants take an extra argument b, which is passed through to the
   no-MMU-translate helpers (presumably pre-translated addresses filled in by
   the do_mmut_* macros below -- confirm against the helper definitions). */
#    define readmemb_n(s, a, b)     readmembl_no_mmut_2386((s) + (a), b)
#    define readmemw_n(s, a, b)     readmemwl_no_mmut_2386((s) + (a), b)
#    define readmeml_n(s, a, b)     readmemll_no_mmut_2386((s) + (a), b)
#    define readmemb(s, a)          readmembl_2386((s) + (a))
#    define readmemw(s, a)          readmemwl_2386((s) + (a))
#    define readmeml(s, a)          readmemll_2386((s) + (a))
#    define readmemq(s, a)          readmemql_2386((s) + (a))
#    define writememb_n(s, a, b, v) writemembl_no_mmut_2386((s) + (a), b, v)
#    define writememw_n(s, a, b, v) writememwl_no_mmut_2386((s) + (a), b, v)
#    define writememl_n(s, a, b, v) writememll_no_mmut_2386((s) + (a), b, v)
#    define writememb(s, a, v)      writemembl_2386((s) + (a), v)
#    define writememw(s, a, v)      writememwl_2386((s) + (a), v)
#    define writememl(s, a, v)      writememll_2386((s) + (a), v)
#    define writememq(s, a, v)      writememql_2386((s) + (a), v)
/* MMU pre-translation for multi-part accesses: size 1/2/4 bytes, last flag
   selects read (0) or write (1). */
#    define do_mmut_rb(s, a, b)     do_mmutranslate_2386((s) + (a), b, 1, 0)
#    define do_mmut_rw(s, a, b)     do_mmutranslate_2386((s) + (a), b, 2, 0)
#    define do_mmut_rl(s, a, b)     do_mmutranslate_2386((s) + (a), b, 4, 0)
#    define do_mmut_rb2(s, a, b)    do_mmutranslate_2386((s) + (a), b, 1, 0)
#    define do_mmut_rw2(s, a, b)    do_mmutranslate_2386((s) + (a), b, 2, 0)
#    define do_mmut_rl2(s, a, b)    do_mmutranslate_2386((s) + (a), b, 4, 0)
#    define do_mmut_wb(s, a, b)     do_mmutranslate_2386((s) + (a), b, 1, 1)
#    define do_mmut_ww(s, a, b)     do_mmutranslate_2386((s) + (a), b, 2, 1)
#    define do_mmut_wl(s, a, b)     do_mmutranslate_2386((s) + (a), b, 4, 1)
#elif defined(USE_DEBUG_REGS_486)
/* USE_DEBUG_REGS_486 build: identical to the default branch below, except
   that the fast path is additionally bypassed whenever any DR7 breakpoint
   enable bit is set ((dr[7] & 0xFF)), so data breakpoints are honoured. */
/* Reads: use the per-page readlookup2 host-pointer table unless the page is
   unmapped (LOOKUP_INV), the segment is unusable (base 0xFFFFFFFF -- see
   SEG_CHECK_*), a debug breakpoint is armed, or the access is misaligned. */
#    define readmemb_n(s, a, b)     ((readlookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF || (dr[7] & 0xFF)) ? readmembl_no_mmut((s) + (a), b) : *(uint8_t *) (readlookup2[(uint32_t) ((s) + (a)) >> 12] + (uintptr_t) ((s) + (a))))
#    define readmemw_n(s, a, b)     ((readlookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF || (dr[7] & 0xFF) || (((s) + (a)) & 1)) ? readmemwl_no_mmut((s) + (a), b) : *(uint16_t *) (readlookup2[(uint32_t) ((s) + (a)) >> 12] + (uint32_t) ((s) + (a))))
#    define readmeml_n(s, a, b)     ((readlookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF || (dr[7] & 0xFF) || (((s) + (a)) & 3)) ? readmemll_no_mmut((s) + (a), b) : *(uint32_t *) (readlookup2[(uint32_t) ((s) + (a)) >> 12] + (uint32_t) ((s) + (a))))
#    define readmemb(s, a)          ((readlookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF || (dr[7] & 0xFF)) ? readmembl((s) + (a)) : *(uint8_t *) (readlookup2[(uint32_t) ((s) + (a)) >> 12] + (uintptr_t) ((s) + (a))))
#    define readmemw(s, a)          ((readlookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF || (dr[7] & 0xFF) || (((s) + (a)) & 1)) ? readmemwl((s) + (a)) : *(uint16_t *) (readlookup2[(uint32_t) ((s) + (a)) >> 12] + (uint32_t) ((s) + (a))))
#    define readmeml(s, a)          ((readlookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF || (dr[7] & 0xFF) || (((s) + (a)) & 3)) ? readmemll((s) + (a)) : *(uint32_t *) (readlookup2[(uint32_t) ((s) + (a)) >> 12] + (uint32_t) ((s) + (a))))
#    define readmemq(s, a)          ((readlookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF || (dr[7] & 0xFF) || (((s) + (a)) & 7)) ? readmemql((s) + (a)) : *(uint64_t *) (readlookup2[(uint32_t) ((s) + (a)) >> 12] + (uintptr_t) ((s) + (a))))
/* Writes: same conditions against writelookup2; fall back to the full
   (MMU-translating) helpers otherwise. */
#    define writememb_n(s, a, b, v)                                                                                                                            \
        if (writelookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF || (dr[7] & 0xFF))                                       \
            writemembl_no_mmut((s) + (a), b, v);                                                                                                               \
        else                                                                                                                                                   \
            *(uint8_t *) (writelookup2[(uint32_t) ((s) + (a)) >> 12] + (uintptr_t) ((s) + (a))) = v
#    define writememw_n(s, a, b, v)                                                                                                                            \
        if (writelookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF || (((s) + (a)) & 1) || (dr[7] & 0xFF))                  \
            writememwl_no_mmut((s) + (a), b, v);                                                                                                               \
        else                                                                                                                                                   \
            *(uint16_t *) (writelookup2[(uint32_t) ((s) + (a)) >> 12] + (uintptr_t) ((s) + (a))) = v
#    define writememl_n(s, a, b, v)                                                                                                                            \
        if (writelookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF || (((s) + (a)) & 3) || (dr[7] & 0xFF))                  \
            writememll_no_mmut((s) + (a), b, v);                                                                                                               \
        else                                                                                                                                                   \
            *(uint32_t *) (writelookup2[(uint32_t) ((s) + (a)) >> 12] + (uintptr_t) ((s) + (a))) = v
#    define writememb(s, a, v)                                                                                                                                 \
        if (writelookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF || (dr[7] & 0xFF))                                       \
            writemembl((s) + (a), v);                                                                                                                          \
        else                                                                                                                                                   \
            *(uint8_t *) (writelookup2[(uint32_t) ((s) + (a)) >> 12] + (uintptr_t) ((s) + (a))) = v
#    define writememw(s, a, v)                                                                                                                                 \
        if (writelookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF || (((s) + (a)) & 1) || (dr[7] & 0xFF))                  \
            writememwl((s) + (a), v);                                                                                                                          \
        else                                                                                                                                                   \
            *(uint16_t *) (writelookup2[(uint32_t) ((s) + (a)) >> 12] + (uintptr_t) ((s) + (a))) = v
#    define writememl(s, a, v)                                                                                                                                 \
        if (writelookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF || (((s) + (a)) & 3) || (dr[7] & 0xFF))                  \
            writememll((s) + (a), v);                                                                                                                          \
        else                                                                                                                                                   \
            *(uint32_t *) (writelookup2[(uint32_t) ((s) + (a)) >> 12] + (uintptr_t) ((s) + (a))) = v
#    define writememq(s, a, v)                                                                                                                                 \
        if (writelookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF || (((s) + (a)) & 7) || (dr[7] & 0xFF))                  \
            writememql((s) + (a), v);                                                                                                                          \
        else                                                                                                                                                   \
            *(uint64_t *) (writelookup2[(uint32_t) ((s) + (a)) >> 12] + (uintptr_t) ((s) + (a))) = v
/* Pre-translate an access with do_mmutranslate (size 1/2/4; read 0 / write 1)
   when the fast path cannot be used; the *2 variants also latch the previous
   readlookup2 entry into the caller-provided old_rl2 local first. */
#    define do_mmut_rb(s, a, b)                                                                                                                                \
        if (readlookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF || (dr[7] & 0xFF))                                        \
            do_mmutranslate((s) + (a), b, 1, 0)
#    define do_mmut_rw(s, a, b)                                                                                                                                \
        if (readlookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF || (((s) + (a)) & 1) || (dr[7] & 0xFF))                   \
            do_mmutranslate((s) + (a), b, 2, 0)
#    define do_mmut_rl(s, a, b)                                                                                                                                \
        if (readlookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF || (((s) + (a)) & 3) || (dr[7] & 0xFF))                   \
            do_mmutranslate((s) + (a), b, 4, 0)
#    define do_mmut_rb2(s, a, b)                                                                                                                               \
        old_rl2 = readlookup2[(uint32_t) ((s) + (a)) >> 12];                                                                                                   \
        if (old_rl2 == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF || (dr[7] & 0xFF))                                                                          \
            do_mmutranslate((s) + (a), b, 1, 0)
#    define do_mmut_rw2(s, a, b)                                                                                                                               \
        old_rl2 = readlookup2[(uint32_t) ((s) + (a)) >> 12];                                                                                                   \
        if (old_rl2 == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF || (((s) + (a)) & 1) || (dr[7] & 0xFF))                                                     \
            do_mmutranslate((s) + (a), b, 2, 0)
#    define do_mmut_rl2(s, a, b)                                                                                                                               \
        old_rl2 = readlookup2[(uint32_t) ((s) + (a)) >> 12];                                                                                                   \
        if (old_rl2 == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF || (((s) + (a)) & 3) || (dr[7] & 0xFF))                                                     \
            do_mmutranslate((s) + (a), b, 4, 0)
#    define do_mmut_wb(s, a, b)                                                                                                                                \
        if (writelookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF || (dr[7] & 0xFF))                                       \
            do_mmutranslate((s) + (a), b, 1, 1)
#    define do_mmut_ww(s, a, b)                                                                                                                                \
        if (writelookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF || (((s) + (a)) & 1) || (dr[7] & 0xFF))                  \
            do_mmutranslate((s) + (a), b, 2, 1)
#    define do_mmut_wl(s, a, b)                                                                                                                                \
        if (writelookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF || (((s) + (a)) & 3) || (dr[7] & 0xFF))                  \
            do_mmutranslate((s) + (a), b, 4, 1)
#else
/* Default build: use the page-granular readlookup2/writelookup2 host-pointer
   tables when the page is mapped, the segment is usable (base != 0xFFFFFFFF,
   see SEG_CHECK_*) and the access is naturally aligned; otherwise fall back
   to the full MMU-translating helpers. NOTE(review): the byte/quad variants
   offset with (uintptr_t) while word/long use (uint32_t) -- confirm this
   asymmetry is intentional. */
#    define readmemb_n(s, a, b)     ((readlookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF) ? readmembl_no_mmut((s) + (a), b) : *(uint8_t *) (readlookup2[(uint32_t) ((s) + (a)) >> 12] + (uintptr_t) ((s) + (a))))
#    define readmemw_n(s, a, b)     ((readlookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF || (((s) + (a)) & 1)) ? readmemwl_no_mmut((s) + (a), b) : *(uint16_t *) (readlookup2[(uint32_t) ((s) + (a)) >> 12] + (uint32_t) ((s) + (a))))
#    define readmeml_n(s, a, b)     ((readlookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF || (((s) + (a)) & 3)) ? readmemll_no_mmut((s) + (a), b) : *(uint32_t *) (readlookup2[(uint32_t) ((s) + (a)) >> 12] + (uint32_t) ((s) + (a))))
#    define readmemb(s, a)          ((readlookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF) ? readmembl((s) + (a)) : *(uint8_t *) (readlookup2[(uint32_t) ((s) + (a)) >> 12] + (uintptr_t) ((s) + (a))))
#    define readmemw(s, a)          ((readlookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF || (((s) + (a)) & 1)) ? readmemwl((s) + (a)) : *(uint16_t *) (readlookup2[(uint32_t) ((s) + (a)) >> 12] + (uint32_t) ((s) + (a))))
#    define readmeml(s, a)          ((readlookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF || (((s) + (a)) & 3)) ? readmemll((s) + (a)) : *(uint32_t *) (readlookup2[(uint32_t) ((s) + (a)) >> 12] + (uint32_t) ((s) + (a))))
#    define readmemq(s, a)          ((readlookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF || (((s) + (a)) & 7)) ? readmemql((s) + (a)) : *(uint64_t *) (readlookup2[(uint32_t) ((s) + (a)) >> 12] + (uintptr_t) ((s) + (a))))
/* Writes: same conditions against writelookup2. */
#    define writememb_n(s, a, b, v)                                                                                            \
        if (writelookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF)                         \
            writemembl_no_mmut((s) + (a), b, v);                                                                               \
        else                                                                                                                   \
            *(uint8_t *) (writelookup2[(uint32_t) ((s) + (a)) >> 12] + (uintptr_t) ((s) + (a))) = v
#    define writememw_n(s, a, b, v)                                                                                            \
        if (writelookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF || (((s) + (a)) & 1))    \
            writememwl_no_mmut((s) + (a), b, v);                                                                               \
        else                                                                                                                   \
            *(uint16_t *) (writelookup2[(uint32_t) ((s) + (a)) >> 12] + (uintptr_t) ((s) + (a))) = v
#    define writememl_n(s, a, b, v)                                                                                            \
        if (writelookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF || (((s) + (a)) & 3))    \
            writememll_no_mmut((s) + (a), b, v);                                                                               \
        else                                                                                                                   \
            *(uint32_t *) (writelookup2[(uint32_t) ((s) + (a)) >> 12] + (uintptr_t) ((s) + (a))) = v
#    define writememb(s, a, v)                                                                                                 \
        if (writelookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF)                         \
            writemembl((s) + (a), v);                                                                                          \
        else                                                                                                                   \
            *(uint8_t *) (writelookup2[(uint32_t) ((s) + (a)) >> 12] + (uintptr_t) ((s) + (a))) = v
#    define writememw(s, a, v)                                                                                                 \
        if (writelookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF || (((s) + (a)) & 1))    \
            writememwl((s) + (a), v);                                                                                          \
        else                                                                                                                   \
            *(uint16_t *) (writelookup2[(uint32_t) ((s) + (a)) >> 12] + (uintptr_t) ((s) + (a))) = v
#    define writememl(s, a, v)                                                                                                 \
        if (writelookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF || (((s) + (a)) & 3))    \
            writememll((s) + (a), v);                                                                                          \
        else                                                                                                                   \
            *(uint32_t *) (writelookup2[(uint32_t) ((s) + (a)) >> 12] + (uintptr_t) ((s) + (a))) = v
#    define writememq(s, a, v)                                                                                                 \
        if (writelookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF || (((s) + (a)) & 7))    \
            writememql((s) + (a), v);                                                                                          \
        else                                                                                                                   \
            *(uint64_t *) (writelookup2[(uint32_t) ((s) + (a)) >> 12] + (uintptr_t) ((s) + (a))) = v
/* Pre-translate an access with do_mmutranslate (size 1/2/4; read 0 / write 1)
   when the fast path cannot be used; the *2 variants also latch the previous
   readlookup2 entry into the caller-provided old_rl2 local first. */
#    define do_mmut_rb(s, a, b)                                                                                                \
        if (readlookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF)                          \
            do_mmutranslate((s) + (a), b, 1, 0)
#    define do_mmut_rw(s, a, b)                                                                                                \
        if (readlookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF || (((s) + (a)) & 1))     \
            do_mmutranslate((s) + (a), b, 2, 0)
#    define do_mmut_rl(s, a, b)                                                                                                \
        if (readlookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF || (((s) + (a)) & 3))     \
            do_mmutranslate((s) + (a), b, 4, 0)
#    define do_mmut_rb2(s, a, b)                                                                                               \
        old_rl2 = readlookup2[(uint32_t) ((s) + (a)) >> 12];                                                                   \
        if (old_rl2 == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF)                                                            \
            do_mmutranslate((s) + (a), b, 1, 0)
#    define do_mmut_rw2(s, a, b)                                                                                               \
        old_rl2 = readlookup2[(uint32_t) ((s) + (a)) >> 12];                                                                   \
        if (old_rl2 == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF || (((s) + (a)) & 1))                                       \
            do_mmutranslate((s) + (a), b, 2, 0)
#    define do_mmut_rl2(s, a, b)                                                                                               \
        old_rl2 = readlookup2[(uint32_t) ((s) + (a)) >> 12];                                                                   \
        if (old_rl2 == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF || (((s) + (a)) & 3))                                       \
            do_mmutranslate((s) + (a), b, 4, 0)
#    define do_mmut_wb(s, a, b)                                                                                                \
        if (writelookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF)                         \
            do_mmutranslate((s) + (a), b, 1, 1)
#    define do_mmut_ww(s, a, b)                                                                                                \
        if (writelookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF || (((s) + (a)) & 1))    \
            do_mmutranslate((s) + (a), b, 2, 1)
#    define do_mmut_wl(s, a, b)                                                                                                \
        if (writelookup2[(uint32_t) ((s) + (a)) >> 12] == (uintptr_t) LOOKUP_INV || (s) == 0xFFFFFFFF || (((s) + (a)) & 3))    \
            do_mmutranslate((s) + (a), b, 4, 1)
#endif
/* Returns non-zero when I/O to the `mask` byte lanes at `port` is denied;
   may set cpu_state.abrt. (Presumably consults the TSS I/O permission
   bitmap -- see the implementation.) */
int checkio(uint32_t port, int mask);

/* Protected/V86-mode I/O permission check. Only applies when in protected
   mode with CPL > IOPL, or in V86 mode; a denied port raises #GP (the
   "expected" variant in V86 mode). */
#define check_io_perm(port, size)                                        \
    if (msw & 1 && ((CPL > IOPL) || (cpu_state.eflags & VM_FLAG))) {     \
        int tempi = checkio(port, (1 << size) - 1);                      \
        if (cpu_state.abrt)                                              \
            return 1;                                                    \
        if (tempi) {                                                     \
            if (cpu_state.eflags & VM_FLAG)                              \
                x86gpf_expected(NULL, 0);                                \
            else                                                         \
                x86gpf(NULL, 0);                                         \
            return 1;                                                    \
        }                                                                \
    }
/* A segment whose cached base is 0xFFFFFFFF is treated as unusable (not
   loaded); any access through it raises #GP and aborts the instruction
   handler with return 1. */
#define SEG_CHECK_READ(seg)                  \
    do {                                     \
        if ((seg)->base == 0xffffffff) {     \
            x86gpf("Segment can't read", 0); \
            return 1;                        \
        }                                    \
    } while (0)
#define SEG_CHECK_WRITE(seg)                  \
    do {                                      \
        if ((seg)->base == 0xffffffff) {      \
            x86gpf("Segment can't write", 0); \
            return 1;                         \
        }                                     \
    } while (0)
#define CHECK_READ(chseg, low, high) \
if ((low < (chseg)->limit_low) || (high > (chseg)->limit_high) || ((msw & 1) && !(cpu_state.eflags & VM_FLAG) && (((chseg)->access & 10) == 8))) { \
x86gpf("Limit check (READ)", 0); \
return 1; \
} \
if (msw & 1 && !(cpu_state.eflags & VM_FLAG) && !((chseg)->access & 0x80)) { \
if ((chseg) == &cpu_state.seg_ss) \
x86ss(NULL, (chseg)->seg & 0xfffc); \
else \
x86np("Read from seg not present", (chseg)->seg & 0xfffc); \
return 1; \
}
#define CHECK_READ_REP(chseg, low, high) \
if ((low < (chseg)->limit_low) || (high > (chseg)->limit_high)) { \
x86gpf("Limit check (READ)", 0); \
break; \
} \
if (msw & 1 && !(cpu_state.eflags & VM_FLAG) && !((chseg)->access & 0x80)) { \
if ((chseg) == &cpu_state.seg_ss) \
x86ss(NULL, (chseg)->seg & 0xfffc); \
else \
x86np("Read from seg not present", (chseg)->seg & 0xfffc); \
break; \
}
#define CHECK_WRITE_COMMON(chseg, low, high) \
if ((low < (chseg)->limit_low) || (high > (chseg)->limit_high) || !((chseg)->access & 2) || ((msw & 1) && !(cpu_state.eflags & VM_FLAG) && ((chseg)->access & 8))) { \
x86gpf("Limit check (WRITE)", 0); \
return 1; \
} \
if (msw & 1 && !(cpu_state.eflags & VM_FLAG) && !((chseg)->access & 0x80)) { \
if ((chseg) == &cpu_state.seg_ss) \
x86ss(NULL, (chseg)->seg & 0xfffc); \
else \
x86np("Write to seg not present", (chseg)->seg & 0xfffc); \
return 1; \
}
#define CHECK_WRITE(chseg, low, high) \
CHECK_WRITE_COMMON(chseg, low, high)
#define CHECK_WRITE_REP(chseg, low, high) \
if ((low < (chseg)->limit_low) || (high > (chseg)->limit_high)) { \
x86gpf("Limit check (WRITE REP)", 0); \
break; \
} \
if (msw & 1 && !(cpu_state.eflags & VM_FLAG) && !((chseg)->access & 0x80)) { \
if ((chseg) == &cpu_state.seg_ss) \
x86ss(NULL, (chseg)->seg & 0xfffc); \
else \
x86np("Write (REP) to seg not present", (chseg)->seg & 0xfffc); \
break; \
}
#define NOTRM \
if (!(msw & 1) || (cpu_state.eflags & VM_FLAG)) { \
x86_int(6); \
return 1; \
}
#ifdef OPS_286_386
/* TODO: Introduce functions to read exec. */
static __inline uint8_t
fastreadb(uint32_t a)
{
    /* Fetch one code byte through the 2386 slow path. read_type is set to 1
       around the access and restored to 4 afterwards (presumably marking it
       as a code fetch for the memory layer). Returns 0 on abort. */
    uint8_t val;

    read_type = 1;
    val = readmembl_2386(a);
    read_type = 4;

    return cpu_state.abrt ? 0 : val;
}
static __inline uint16_t
fastreadw(uint32_t a)
{
    /* Fetch a code word through the 2386 slow path; read_type 1 flags the
       access, restored to 4 afterwards. Returns 0 on abort. */
    uint16_t val;

    read_type = 1;
    val = readmemwl_2386(a);
    read_type = 4;

    return cpu_state.abrt ? 0 : val;
}
static __inline uint32_t
fastreadl(uint32_t a)
{
    /* Fetch a code dword through the 2386 slow path; read_type 1 flags the
       access, restored to 4 afterwards. Returns 0 on abort. */
    uint32_t val;

    read_type = 1;
    val = readmemll_2386(a);
    read_type = 4;

    return cpu_state.abrt ? 0 : val;
}
#else
/* Fetch one code byte via the one-page prefetch cache: pccache holds the page
   number of the cached page, pccache2 a biased host pointer such that
   pccache2[a] addresses the byte for linear address a. */
static __inline uint8_t
fastreadb(uint32_t a)
{
    uint8_t *t;
#    ifdef USE_DEBUG_REGS_486
    /* Report the access to the debug-register checks as a code fetch
       (read_type 1), then restore the default type. */
    read_type = 1;
    mem_debug_check_addr(a, read_type);
    read_type = 4;
#    endif
    if ((a >> 12) == pccache)
#    if (defined __amd64__ || defined _M_X64 || defined __aarch64__ || defined _M_ARM64)
        /* 64-bit hosts: emulate 32-bit wrap-around of the biased pointer --
           low dword from pccache2 + a, high dword from pccache2 itself. */
        return *((uint8_t *) (((uintptr_t) &pccache2[a] & 0x00000000ffffffffULL) | ((uintptr_t) &pccache2[0] & 0xffffffff00000000ULL)));
#    else
        return *((uint8_t *) &pccache2[a]);
#    endif
    /* Cache miss: translate the page (may set cpu_state.abrt) and refill. */
    t = getpccache(a);
    if (cpu_state.abrt)
        return 0;
    pccache = a >> 12;
    pccache2 = t;
#    if (defined __amd64__ || defined _M_X64 || defined __aarch64__ || defined _M_ARM64)
    return *((uint8_t *) (((uintptr_t) &pccache2[a] & 0x00000000ffffffffULL) | ((uintptr_t) &pccache2[0] & 0xffffffff00000000ULL)));
#    else
    return *((uint8_t *) &pccache2[a]);
#    endif
}
/* Fetch a code word via the prefetch page cache; a word straddling a 4 KiB
   page boundary is split into two byte fetches. */
static __inline uint16_t
fastreadw(uint32_t a)
{
    uint8_t *t;
    uint16_t val;
#    ifdef USE_DEBUG_REGS_486
    read_type = 1;
    mem_debug_check_addr(a, read_type);
    mem_debug_check_addr(a + 1, read_type);
    read_type = 4;
#    endif
    /* Page-crossing word: fetch byte by byte (each may refill the cache). */
    if ((a & 0xFFF) > 0xFFE) {
        val = fastreadb(a);
        val |= (fastreadb(a + 1) << 8);
        return val;
    }
    if ((a >> 12) == pccache)
#    if (defined __amd64__ || defined _M_X64 || defined __aarch64__ || defined _M_ARM64)
        /* 64-bit hosts: emulate 32-bit wrap-around of the biased pointer. */
        return *((uint16_t *) (((uintptr_t) &pccache2[a] & 0x00000000ffffffffULL) | ((uintptr_t) &pccache2[0] & 0xffffffff00000000ULL)));
#    else
        return *((uint16_t *) &pccache2[a]);
#    endif
    /* Cache miss: translate the page (may set cpu_state.abrt) and refill. */
    t = getpccache(a);
    if (cpu_state.abrt)
        return 0;
    pccache = a >> 12;
    pccache2 = t;
#    if (defined __amd64__ || defined _M_X64 || defined __aarch64__ || defined _M_ARM64)
    return *((uint16_t *) (((uintptr_t) &pccache2[a] & 0x00000000ffffffffULL) | ((uintptr_t) &pccache2[0] & 0xffffffff00000000ULL)));
#    else
    return *((uint16_t *) &pccache2[a]);
#    endif
}
/* Fetch a code dword via the prefetch page cache; a dword that does not fit
   entirely inside one 4 KiB page is split into two word fetches. */
static __inline uint32_t
fastreadl(uint32_t a)
{
    uint8_t *t;
    uint32_t val;
#    ifdef USE_DEBUG_REGS_486
    int i;
    read_type = 1;
    for (i = 0; i < 4; i++) {
        mem_debug_check_addr(a + i, read_type);
    }
    read_type = 4;
#    endif
    /* (a & 0xFFF) < 0xFFD <=> all four bytes lie within the same page. */
    if ((a & 0xFFF) < 0xFFD) {
        if ((a >> 12) != pccache) {
            /* Cache miss: translate (may set cpu_state.abrt) and refill. */
            t = getpccache(a);
            if (cpu_state.abrt)
                return 0;
            pccache2 = t;
            pccache = a >> 12;
        }
#    if (defined __amd64__ || defined _M_X64 || defined __aarch64__ || defined _M_ARM64)
        /* 64-bit hosts: emulate 32-bit wrap-around of the biased pointer. */
        return *((uint32_t *) (((uintptr_t) &pccache2[a] & 0x00000000ffffffffULL) | ((uintptr_t) &pccache2[0] & 0xffffffff00000000ULL)));
#    else
        return *((uint32_t *) &pccache2[a]);
#    endif
    }
    val = fastreadw(a);
    val |= (fastreadw(a + 2) << 16);
    return val;
}
#endif
/* Return a host pointer to the memory backing linear address a, using the
   same one-page cache as the fastread* helpers. NOTE(review): on a miss this
   neither updates pccache/pccache2 nor checks cpu_state.abrt after
   getpccache() -- confirm all callers tolerate that. */
static __inline void *
get_ram_ptr(uint32_t a)
{
    if ((a >> 12) == pccache)
#if (defined __amd64__ || defined _M_X64 || defined __aarch64__ || defined _M_ARM64)
        /* 64-bit hosts: emulate 32-bit wrap-around of the biased pointer. */
        return (void *) (((uintptr_t) &pccache2[a] & 0x00000000ffffffffULL) | ((uintptr_t) &pccache2[0] & 0xffffffff00000000ULL));
#else
        return &pccache2[a];
#endif
    else {
        uint8_t *t = getpccache(a);
#if (defined __amd64__ || defined _M_X64 || defined __aarch64__ || defined _M_ARM64)
        return (void *) (((uintptr_t) &t[a] & 0x00000000ffffffffULL) | ((uintptr_t) &t[0] & 0xffffffff00000000ULL));
#else
        return &t[a];
#endif
    }
}
extern int opcode_has_modrm[256];
extern int opcode_length[256];
#ifdef OPS_286_386
/* Fetch an opcode word. When the word crosses a page boundary, the second
   byte is fetched only if the first byte's opcode can be longer than one byte
   (opcode_length table) -- presumably so no fault is taken for a byte the
   instruction will not consume. */
static __inline uint16_t
fastreadw_fetch(uint32_t a)
{
    uint16_t ret;
    if ((a & 0xFFF) > 0xFFE) {
        ret = fastreadb(a);
        if (!cpu_state.abrt && (opcode_length[ret & 0xff] > 1))
            ret |= ((uint16_t) fastreadb(a + 1) << 8);
    } else if (cpu_state.abrt)
        /* A previous fetch already aborted: don't issue another access. */
        ret = 0;
    else {
        read_type = 1;
        ret = readmemwl_2386(a);
        read_type = 4;
    }
    return ret;
}
/* Instruction-fetch variant of fastreadl for the 286/386 core.  On a
   16-bit bus, or when the dword would cross a page boundary, the high
   word is fetched only if the opcode needs more than two bytes. */
static __inline uint32_t
fastreadl_fetch(uint32_t a)
{
    uint32_t ret;
    if (cpu_16bitbus || ((a & 0xFFF) > 0xFFC)) {
        ret = fastreadw_fetch(a);
        if (!cpu_state.abrt && (opcode_length[ret & 0xff] > 2))
            ret |= ((uint32_t) fastreadw(a + 2) << 16);
    } else if (cpu_state.abrt)
        ret = 0;
    else {
        /* read_type = 1 marks this access as a code fetch. */
        read_type = 1;
        ret = readmemll_2386(a);
        read_type = 4;
    }
    return ret;
}
#else
/* Instruction-fetch variant of fastreadw (non-286/386 cores).  Uses the
   pccache page cache; when the word straddles a page boundary, the
   second byte is fetched only if the opcode in the low byte is longer
   than one byte (per opcode_length[]). */
static __inline uint16_t
fastreadw_fetch(uint32_t a)
{
    uint8_t *t;
    uint16_t val;
#    ifdef USE_DEBUG_REGS_486
    read_type = 1;
    mem_debug_check_addr(a, read_type);
    mem_debug_check_addr(a + 1, read_type);
    read_type = 4;
#    endif
    if ((a & 0xFFF) > 0xFFE) {
        val = fastreadb(a);
        if (opcode_length[val & 0xff] > 1)
            val |= (fastreadb(a + 1) << 8);
        return val;
    }
    if ((a >> 12) == pccache)
#    if (defined __amd64__ || defined _M_X64 || defined __aarch64__ || defined _M_ARM64)
        /* 64-bit hosts: 32-bit guest address wrap (see fastreadl). */
        return *((uint16_t *) (((uintptr_t) &pccache2[a] & 0x00000000ffffffffULL) | ((uintptr_t) &pccache2[0] & 0xffffffff00000000ULL)));
#    else
        return *((uint16_t *) &pccache2[a]);
#    endif
    /* Page-tag miss: refresh the cached translation, then read. */
    t = getpccache(a);
    if (cpu_state.abrt)
        return 0;
    pccache = a >> 12;
    pccache2 = t;
#    if (defined __amd64__ || defined _M_X64 || defined __aarch64__ || defined _M_ARM64)
    return *((uint16_t *) (((uintptr_t) &pccache2[a] & 0x00000000ffffffffULL) | ((uintptr_t) &pccache2[0] & 0xffffffff00000000ULL)));
#    else
    return *((uint16_t *) &pccache2[a]);
#    endif
}
/* Instruction-fetch variant of fastreadl (non-286/386 cores).  When the
   dword would cross a page boundary, the high word is fetched only if
   the opcode needs more than two bytes (per opcode_length[]). */
static __inline uint32_t
fastreadl_fetch(uint32_t a)
{
    uint8_t *t;
    uint32_t val;
#    ifdef USE_DEBUG_REGS_486
    int i;
    read_type = 1;
    for (i = 0; i < 4; i++) {
        mem_debug_check_addr(a + i, read_type);
    }
    read_type = 4;
#    endif
    if ((a & 0xFFF) < 0xFFD) {
        if ((a >> 12) != pccache) {
            /* Page-tag miss: refresh the cached translation. */
            t = getpccache(a);
            if (cpu_state.abrt)
                return 0;
            pccache2 = t;
            pccache = a >> 12;
        }
#    if (defined __amd64__ || defined _M_X64 || defined __aarch64__ || defined _M_ARM64)
        /* 64-bit hosts: 32-bit guest address wrap (see fastreadl). */
        return *((uint32_t *) (((uintptr_t) &pccache2[a] & 0x00000000ffffffffULL) | ((uintptr_t) &pccache2[0] & 0xffffffff00000000ULL)));
#    else
        return *((uint32_t *) &pccache2[a]);
#    endif
    }
    val = fastreadw_fetch(a);
    if (opcode_length[val & 0xff] > 2)
        val |= (fastreadw(a + 2) << 16);
    return val;
}
#endif
/* Fetch the next opcode byte at CS:PC and advance PC by one. */
static __inline uint8_t
getbyte(void)
{
    const uint32_t fetch_addr = cs + cpu_state.pc;

    cpu_state.pc++;
    return fastreadb(fetch_addr);
}
/* Fetch the next two code bytes at CS:PC and advance PC by two. */
static __inline uint16_t
getword(void)
{
    const uint32_t fetch_addr = cs + cpu_state.pc;

    cpu_state.pc += 2;
    return fastreadw(fetch_addr);
}
/* Fetch the next four code bytes at CS:PC and advance PC by four. */
static __inline uint32_t
getlong(void)
{
    const uint32_t fetch_addr = cs + cpu_state.pc;

    cpu_state.pc += 4;
    return fastreadl(fetch_addr);
}
/* Fetch the next eight code bytes at CS:PC (little-endian: low dword
   first) and advance PC by eight. */
static __inline uint64_t
getquad(void)
{
    const uint32_t fetch_addr = cs + cpu_state.pc;
    uint64_t       result;

    cpu_state.pc += 8;
    result = fastreadl(fetch_addr);
    result |= ((uint64_t) fastreadl(fetch_addr + 4)) << 32;
    return result;
}
#ifdef OPS_286_386
/* Effective-address operand accessors for the 286/386 core.
   cpu_mod == 3 means the operand is a register (selected by cpu_rm);
   otherwise it is memory at easeg:cpu_state.eaaddr. */
static __inline uint8_t
geteab(void)
{
    if (cpu_mod == 3)
        /* Byte registers: rm 0-3 = AL/CL/DL/BL, rm 4-7 = AH/CH/DH/BH. */
        return (cpu_rm & 4) ? cpu_state.regs[cpu_rm & 3].b.h : cpu_state.regs[cpu_rm & 3].b.l;
    return readmemb(easeg, cpu_state.eaaddr);
}
static __inline uint16_t
geteaw(void)
{
    if (cpu_mod == 3)
        return cpu_state.regs[cpu_rm].w;
    return readmemw(easeg, cpu_state.eaaddr);
}
static __inline uint32_t
geteal(void)
{
    if (cpu_mod == 3)
        return cpu_state.regs[cpu_rm].l;
    return readmeml(easeg, cpu_state.eaaddr);
}
/* 64-bit operands are always memory (no 64-bit GPRs on these CPUs). */
static __inline uint64_t
geteaq(void)
{
    return readmemq(easeg, cpu_state.eaaddr);
}
/* _mem variants skip the register case; callers guarantee cpu_mod != 3. */
static __inline uint8_t
geteab_mem(void)
{
    return readmemb(easeg, cpu_state.eaaddr);
}
static __inline uint16_t
geteaw_mem(void)
{
    return readmemw(easeg, cpu_state.eaaddr);
}
static __inline uint32_t
geteal_mem(void)
{
    return readmeml(easeg, cpu_state.eaaddr);
}
/* Run the segment-limit/write-permission check for a quadword store.
   CHECK_WRITE_COMMON may abort; returns 0 when the write may proceed. */
static __inline int
seteaq_cwc(void)
{
    CHECK_WRITE_COMMON(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr);
    return 0;
}
static __inline void
seteaq(uint64_t v)
{
    if (seteaq_cwc())
        return;
    writememql(easeg + cpu_state.eaaddr, v);
}
/* Store to the EA operand: memory stores are permission-checked first,
   register stores go straight to the register file. */
#    define seteab(v)                                                                     \
        if (cpu_mod != 3) {                                                               \
            CHECK_WRITE_COMMON(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr);     \
            writemembl_2386(easeg + cpu_state.eaaddr, v);                                 \
        } else if (cpu_rm & 4)                                                            \
            cpu_state.regs[cpu_rm & 3].b.h = v;                                           \
        else                                                                              \
            cpu_state.regs[cpu_rm].b.l = v
#    define seteaw(v)                                                                     \
        if (cpu_mod != 3) {                                                               \
            CHECK_WRITE_COMMON(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 1); \
            writememwl_2386(easeg + cpu_state.eaaddr, v);                                 \
        } else                                                                            \
            cpu_state.regs[cpu_rm].w = v
#    define seteal(v)                                                                     \
        if (cpu_mod != 3) {                                                               \
            CHECK_WRITE_COMMON(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 3); \
            writememll_2386(easeg + cpu_state.eaaddr, v);                                 \
        } else                                                                            \
            cpu_state.regs[cpu_rm].l = v
/* Memory-only stores; callers guarantee cpu_mod != 3 and have already
   performed any required permission checks. */
#    define seteab_mem(v) writemembl_2386(easeg + cpu_state.eaaddr, v);
#    define seteaw_mem(v) writememwl_2386(easeg + cpu_state.eaaddr, v);
#    define seteal_mem(v) writememll_2386(easeg + cpu_state.eaaddr, v);
#else
/* Effective-address operand accessors (non-286/386 cores).
   cpu_mod == 3 means a register operand; otherwise memory at
   easeg:cpu_state.eaaddr.  eal_r / eal_w are direct host pointers used
   as a fast path when non-NULL - presumably set up during EA decode for
   RAM-backed operands (TODO confirm against the EA decode code). */
static __inline uint8_t
geteab(void)
{
    if (cpu_mod == 3)
        /* Byte registers: rm 0-3 = AL/CL/DL/BL, rm 4-7 = AH/CH/DH/BH. */
        return (cpu_rm & 4) ? cpu_state.regs[cpu_rm & 3].b.h : cpu_state.regs[cpu_rm & 3].b.l;
    if (eal_r)
        return *(uint8_t *) eal_r;
    return readmemb(easeg, cpu_state.eaaddr);
}
static __inline uint16_t
geteaw(void)
{
    if (cpu_mod == 3)
        return cpu_state.regs[cpu_rm].w;
    if (eal_r)
        return *(uint16_t *) eal_r;
    return readmemw(easeg, cpu_state.eaaddr);
}
static __inline uint32_t
geteal(void)
{
    if (cpu_mod == 3)
        return cpu_state.regs[cpu_rm].l;
    if (eal_r)
        return *eal_r;
    return readmeml(easeg, cpu_state.eaaddr);
}
/* 64-bit operands are always memory; no eal_r fast path here. */
static __inline uint64_t
geteaq(void)
{
    return readmemq(easeg, cpu_state.eaaddr);
}
/* _mem variants skip the register case; callers guarantee cpu_mod != 3. */
static __inline uint8_t
geteab_mem(void)
{
    if (eal_r)
        return *(uint8_t *) eal_r;
    return readmemb(easeg, cpu_state.eaaddr);
}
static __inline uint16_t
geteaw_mem(void)
{
    if (eal_r)
        return *(uint16_t *) eal_r;
    return readmemw(easeg, cpu_state.eaaddr);
}
static __inline uint32_t
geteal_mem(void)
{
    if (eal_r)
        return *eal_r;
    return readmeml(easeg, cpu_state.eaaddr);
}
/* Run the segment-limit/write-permission check for a quadword store.
   CHECK_WRITE_COMMON may abort; returns 0 when the write may proceed. */
static __inline int
seteaq_cwc(void)
{
    CHECK_WRITE_COMMON(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr);
    return 0;
}
static __inline void
seteaq(uint64_t v)
{
    if (seteaq_cwc())
        return;
    writememql(easeg + cpu_state.eaaddr, v);
}
/* Store to the EA operand: memory stores are permission-checked first
   and use the eal_w host-pointer fast path when available; register
   stores go straight to the register file. */
#    define seteab(v)                                                                     \
        if (cpu_mod != 3) {                                                               \
            CHECK_WRITE_COMMON(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr);     \
            if (eal_w)                                                                    \
                *(uint8_t *) eal_w = v;                                                   \
            else                                                                          \
                writemembl(easeg + cpu_state.eaaddr, v);                                  \
        } else if (cpu_rm & 4)                                                            \
            cpu_state.regs[cpu_rm & 3].b.h = v;                                           \
        else                                                                              \
            cpu_state.regs[cpu_rm].b.l = v
#    define seteaw(v)                                                                     \
        if (cpu_mod != 3) {                                                               \
            CHECK_WRITE_COMMON(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 1); \
            if (eal_w)                                                                    \
                *(uint16_t *) eal_w = v;                                                  \
            else                                                                          \
                writememwl(easeg + cpu_state.eaaddr, v);                                  \
        } else                                                                            \
            cpu_state.regs[cpu_rm].w = v
#    define seteal(v)                                                                     \
        if (cpu_mod != 3) {                                                               \
            CHECK_WRITE_COMMON(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 3); \
            if (eal_w)                                                                    \
                *eal_w = v;                                                               \
            else                                                                          \
                writememll(easeg + cpu_state.eaaddr, v);                                  \
        } else                                                                            \
            cpu_state.regs[cpu_rm].l = v
/* Memory-only stores; callers guarantee cpu_mod != 3 and have already
   performed any required permission checks. */
#    define seteab_mem(v)                        \
        if (eal_w)                               \
            *(uint8_t *) eal_w = v;              \
        else                                     \
            writemembl(easeg + cpu_state.eaaddr, v);
#    define seteaw_mem(v)                        \
        if (eal_w)                               \
            *(uint16_t *) eal_w = v;             \
        else                                     \
            writememwl(easeg + cpu_state.eaaddr, v);
#    define seteal_mem(v)                        \
        if (eal_w)                               \
            *eal_w = v;                          \
        else                                     \
            writememll(easeg + cpu_state.eaaddr, v);
#endif
/* Immediate-fetch helpers for when the pre-fetched instruction dword
   'fetchdat' already contains the bytes: extract them from fetchdat and
   advance PC without touching memory.  The "2" variants take the
   operand starting one byte in (i.e. after a ModRM byte). */
#define getbytef() \
    ((uint8_t) (fetchdat)); \
    cpu_state.pc++
#define getwordf() \
    ((uint16_t) (fetchdat)); \
    cpu_state.pc += 2
#define getbyte2f() \
    ((uint8_t) (fetchdat >> 8)); \
    cpu_state.pc++
#define getword2f() \
    ((uint16_t) (fetchdat >> 8)); \
    cpu_state.pc += 2
#endif
/* Resume Flag handling. */
extern int rf_flag_no_clear;
int cpu_386_check_instruction_fault(void);
``` | /content/code_sandbox/src/cpu/386_common.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 11,701 |
```objective-c
/* ARPL r/m16, r16 (16-bit addressing).  If the destination selector's
   RPL (bits 1:0) is below the source register's RPL, raise it to the
   source's RPL and set ZF; otherwise clear ZF.  Protected mode only
   (NOTRM).  Returns nonzero on abort. */
static int
opARPL_a16(uint32_t fetchdat)
{
    uint16_t temp_seg;
    NOTRM
    fetch_ea_16(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_WRITE(cpu_state.ea_seg);
    temp_seg = geteaw();
    if (cpu_state.abrt)
        return 1;
    /* Materialise the lazy flags before editing ZF directly. */
    flags_rebuild();
    if ((temp_seg & 3) < (cpu_state.regs[cpu_reg].w & 3)) {
        temp_seg = (temp_seg & 0xfffc) | (cpu_state.regs[cpu_reg].w & 3);
        seteaw(temp_seg);
        if (cpu_state.abrt)
            return 1;
        cpu_state.flags |= Z_FLAG;
    } else
        cpu_state.flags &= ~Z_FLAG;
    CLOCK_CYCLES(is486 ? 9 : 20);
    PREFETCH_RUN(is486 ? 9 : 20, 2, rmdat, 1, 0, 1, 0, 0);
    return 0;
}
/* ARPL r/m16, r16 (32-bit addressing) - see opARPL_a16. */
static int
opARPL_a32(uint32_t fetchdat)
{
    uint16_t temp_seg;
    NOTRM
    fetch_ea_32(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_WRITE(cpu_state.ea_seg);
    temp_seg = geteaw();
    if (cpu_state.abrt)
        return 1;
    /* Materialise the lazy flags before editing ZF directly. */
    flags_rebuild();
    if ((temp_seg & 3) < (cpu_state.regs[cpu_reg].w & 3)) {
        temp_seg = (temp_seg & 0xfffc) | (cpu_state.regs[cpu_reg].w & 3);
        seteaw(temp_seg);
        if (cpu_state.abrt)
            return 1;
        cpu_state.flags |= Z_FLAG;
    } else
        cpu_state.flags &= ~Z_FLAG;
    CLOCK_CYCLES(is486 ? 9 : 20);
    PREFETCH_RUN(is486 ? 9 : 20, 2, rmdat, 1, 0, 1, 0, 1);
    return 0;
}
/* LAR - Load Access Rights.  Reads the access-rights bytes of the
   descriptor selected by the r/m operand; if the selector passes the
   table-limit, descriptor-type and privilege checks, stores the masked
   access rights into the destination register and sets ZF, otherwise
   clears ZF.  The descriptor-type byte (offset +4 in the descriptor) is
   read once for validation and again for the stored result.  Privilege
   check is skipped for conforming code segments (type 0x1c00).
   Protected mode only (NOTRM).  Expanded four ways for 16/32-bit
   operand and address sizes. */
#define opLAR(name, fetch_ea, is32, ea32)                                                                              \
    static int opLAR_##name(uint32_t fetchdat)                                                                         \
    {                                                                                                                  \
        int valid;                                                                                                     \
        uint16_t sel, desc = 0;                                                                                        \
                                                                                                                       \
        NOTRM                                                                                                          \
        fetch_ea(fetchdat);                                                                                            \
        if (cpu_mod != 3)                                                                                              \
            SEG_CHECK_READ(cpu_state.ea_seg);                                                                          \
                                                                                                                       \
        sel = geteaw();                                                                                                \
        if (cpu_state.abrt)                                                                                            \
            return 1;                                                                                                  \
                                                                                                                       \
        flags_rebuild();                                                                                               \
        if (!(sel & 0xfffc)) {                                                                                         \
            cpu_state.flags &= ~Z_FLAG;                                                                                \
            return 0;                                                                                                  \
        } /*Null selector*/                                                                                            \
        valid = (sel & ~7) < ((sel & 4) ? ldt.limit : gdt.limit);                                                      \
        if (valid) {                                                                                                   \
            cpl_override = 1;                                                                                          \
            desc = readmemw(0, ((sel & 4) ? ldt.base : gdt.base) + (sel & ~7) + 4);                                    \
            cpl_override = 0;                                                                                          \
            if (cpu_state.abrt)                                                                                        \
                return 1;                                                                                              \
        }                                                                                                              \
        cpu_state.flags &= ~Z_FLAG;                                                                                    \
        if ((desc & 0x1f00) == 0x000)                                                                                  \
            valid = 0;                                                                                                 \
        if ((desc & 0x1f00) == 0x800)                                                                                  \
            valid = 0;                                                                                                 \
        if ((desc & 0x1f00) == 0xa00)                                                                                  \
            valid = 0;                                                                                                 \
        if ((desc & 0x1f00) == 0xd00)                                                                                  \
            valid = 0;                                                                                                 \
        if ((desc & 0x1c00) < 0x1c00) /*Exclude conforming code segments*/                                             \
        {                                                                                                              \
            int dpl = (desc >> 13) & 3;                                                                                \
            if (dpl < CPL || dpl < (sel & 3))                                                                          \
                valid = 0;                                                                                             \
        }                                                                                                              \
        if (valid) {                                                                                                   \
            cpu_state.flags |= Z_FLAG;                                                                                 \
            cpl_override = 1;                                                                                          \
            if (is32)                                                                                                  \
                cpu_state.regs[cpu_reg].l = readmeml(0, ((sel & 4) ? ldt.base : gdt.base) + (sel & ~7) + 4) & 0xffff00; \
            else                                                                                                       \
                cpu_state.regs[cpu_reg].w = readmemw(0, ((sel & 4) ? ldt.base : gdt.base) + (sel & ~7) + 4) & 0xff00;  \
            cpl_override = 0;                                                                                          \
        }                                                                                                              \
        CLOCK_CYCLES(11);                                                                                              \
        PREFETCH_RUN(11, 2, rmdat, 2, 0, 0, 0, ea32);                                                                  \
        return cpu_state.abrt;                                                                                         \
    }
opLAR(w_a16, fetch_ea_16, 0, 0)
opLAR(w_a32, fetch_ea_32, 0, 1)
opLAR(l_a16, fetch_ea_16, 1, 0)
opLAR(l_a32, fetch_ea_32, 1, 1)
/* LSL - Load Segment Limit.  If the selector passes the table-limit,
   descriptor-type and privilege checks, stores the descriptor's limit
   (expanded by the granularity bit in the 32-bit form) into the
   destination register and sets ZF, otherwise clears ZF.
   NOTE(review): the local named 'rpl' actually holds the descriptor's
   DPL field (bits 14:13) - the check itself matches opLAR, only the
   name is misleading.  Protected mode only (NOTRM).  Expanded four ways
   for 16/32-bit operand and address sizes. */
#define opLSL(name, fetch_ea, is32, ea32)                                                                              \
    static int opLSL_##name(uint32_t fetchdat)                                                                         \
    {                                                                                                                  \
        int valid;                                                                                                     \
        uint16_t sel, desc = 0;                                                                                        \
                                                                                                                       \
        NOTRM \
        fetch_ea(fetchdat);                                                                                            \
        if (cpu_mod != 3)                                                                                              \
            SEG_CHECK_READ(cpu_state.ea_seg);                                                                          \
                                                                                                                       \
        sel = geteaw();                                                                                                \
        if (cpu_state.abrt)                                                                                            \
            return 1;                                                                                                  \
        flags_rebuild();                                                                                               \
        cpu_state.flags &= ~Z_FLAG;                                                                                    \
        if (!(sel & 0xfffc))                                                                                           \
            return 0; /*Null selector*/                                                                                \
        valid = (sel & ~7) < ((sel & 4) ? ldt.limit : gdt.limit);                                                      \
        if (valid) {                                                                                                   \
            cpl_override = 1;                                                                                          \
            desc = readmemw(0, ((sel & 4) ? ldt.base : gdt.base) + (sel & ~7) + 4);                                    \
            cpl_override = 0;                                                                                          \
            if (cpu_state.abrt)                                                                                        \
                return 1;                                                                                              \
        }                                                                                                              \
        if ((desc & 0x1400) == 0x400)                                                                                  \
            valid = 0; /*Interrupt or trap or call gate*/                                                              \
        if ((desc & 0x1f00) == 0x000)                                                                                  \
            valid = 0; /*Invalid*/                                                                                     \
        if ((desc & 0x1f00) == 0xa00)                                                                                  \
            valid = 0; /*Invalid*/                                                                                     \
        if ((desc & 0x1c00) != 0x1c00) /*Exclude conforming code segments*/                                            \
        {                                                                                                              \
            int rpl = (desc >> 13) & 3;                                                                                \
            if (rpl < CPL || rpl < (sel & 3))                                                                          \
                valid = 0;                                                                                             \
        }                                                                                                              \
        if (valid) {                                                                                                   \
            cpu_state.flags |= Z_FLAG;                                                                                 \
            cpl_override = 1;                                                                                          \
            if (is32) {                                                                                                \
                cpu_state.regs[cpu_reg].l = readmemw(0, ((sel & 4) ? ldt.base : gdt.base) + (sel & ~7));               \
                cpu_state.regs[cpu_reg].l |= (readmemb(0, ((sel & 4) ? ldt.base : gdt.base) + (sel & ~7) + 6) & 0xF) << 16; \
                if (readmemb(0, ((sel & 4) ? ldt.base : gdt.base) + (sel & ~7) + 6) & 0x80) {                          \
                    cpu_state.regs[cpu_reg].l <<= 12;                                                                  \
                    cpu_state.regs[cpu_reg].l |= 0xFFF;                                                                \
                }                                                                                                      \
            } else                                                                                                     \
                cpu_state.regs[cpu_reg].w = readmemw(0, ((sel & 4) ? ldt.base : gdt.base) + (sel & ~7));               \
            cpl_override = 0;                                                                                          \
        }                                                                                                              \
        CLOCK_CYCLES(10);                                                                                              \
        PREFETCH_RUN(10, 2, rmdat, 4, 0, 0, 0, ea32);                                                                  \
        return cpu_state.abrt;                                                                                         \
    }
opLSL(w_a16, fetch_ea_16, 0, 0)
opLSL(w_a32, fetch_ea_32, 0, 1)
opLSL(l_a16, fetch_ea_16, 1, 0)
opLSL(l_a32, fetch_ea_32, 1, 1)
/* Common handler for the 0F 00 opcode group (system segment
   instructions: SLDT / STR / LLDT / LTR / VERR / VERW).  The caller has
   already fetched the effective address; rmdat bits 5:3 select the
   operation.  Returns nonzero on abort. */
static int op0F00_common(uint32_t fetchdat, int ea32)
{
    int dpl;
    int valid;
    int granularity;
    uint32_t addr;
    uint32_t base;
    uint32_t limit;
    uint16_t desc;
    uint16_t sel;
    uint8_t access;
    uint8_t ar_high;
    switch (rmdat & 0x38) {
        case 0x00: /*SLDT*/
            /* Store the current LDT selector. */
            if (cpu_mod != 3)
                SEG_CHECK_WRITE(cpu_state.ea_seg);
            seteaw(ldt.seg);
            CLOCK_CYCLES(4);
            PREFETCH_RUN(4, 2, rmdat, 0, 0, (cpu_mod == 3) ? 0 : 1, 0, ea32);
            break;
        case 0x08: /*STR*/
            /* Store the current task register selector. */
            if (cpu_mod != 3)
                SEG_CHECK_WRITE(cpu_state.ea_seg);
            seteaw(tr.seg);
            CLOCK_CYCLES(4);
            PREFETCH_RUN(4, 2, rmdat, 0, 0, (cpu_mod == 3) ? 0 : 1, 0, ea32);
            break;
        case 0x10: /*LLDT*/
            /* Privileged: #GP(0) if CPL != 0 or V86 mode, in protected
               mode.  NOTE(review): this path returns 1 on #GP while LTR
               below uses break - confirm the difference is intended. */
            if ((CPL || cpu_state.eflags & VM_FLAG) && (cr0 & 1)) {
                x86gpf(NULL, 0);
                return 1;
            }
            if (cpu_mod != 3)
                SEG_CHECK_READ(cpu_state.ea_seg);
            sel = geteaw();
            if (cpu_state.abrt)
                return 1;
            /* Read the descriptor from the GDT.  NOTE(review): no
               limit or type validation of the selector is done here. */
            addr = (sel & ~7) + gdt.base;
            limit = readmemw(0, addr) + ((readmemb(0, addr + 6) & 0xf) << 16);
            base = (readmemw(0, addr + 2)) | (readmemb(0, addr + 4) << 16) | (readmemb(0, addr + 7) << 24);
            access = readmemb(0, addr + 5);
            ar_high = readmemb(0, addr + 6);
            granularity = readmemb(0, addr + 6) & 0x80;
            if (cpu_state.abrt)
                return 1;
            ldt.limit = limit;
            ldt.access = access;
            ldt.ar_high = ar_high;
            if (granularity) {
                /* Page granularity: limit counts 4 KB pages. */
                ldt.limit <<= 12;
                ldt.limit |= 0xfff;
            }
            ldt.base = base;
            ldt.seg = sel;
            CLOCK_CYCLES(20);
            PREFETCH_RUN(20, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 2, 0, 0, ea32);
            break;
        case 0x18: /*LTR*/
            /* Privileged: #GP(0) if CPL != 0 or V86 mode, in protected
               mode. */
            if ((CPL || cpu_state.eflags & VM_FLAG) && (cr0 & 1)) {
                x86gpf(NULL, 0);
                break;
            }
            if (cpu_mod != 3)
                SEG_CHECK_READ(cpu_state.ea_seg);
            sel = geteaw();
            if (cpu_state.abrt)
                return 1;
            addr = (sel & ~7) + gdt.base;
            limit = readmemw(0, addr) + ((readmemb(0, addr + 6) & 0xf) << 16);
            base = (readmemw(0, addr + 2)) | (readmemb(0, addr + 4) << 16) | (readmemb(0, addr + 7) << 24);
            access = readmemb(0, addr + 5);
            ar_high = readmemb(0, addr + 6);
            granularity = readmemb(0, addr + 6) & 0x80;
            if (cpu_state.abrt)
                return 1;
            /* Mark the TSS descriptor busy (bit 1 of the access byte)
               and write it back to the GDT. */
            access |= 2;
            writememb(0, addr + 5, access);
            if (cpu_state.abrt)
                return 1;
            tr.seg = sel;
            tr.limit = limit;
            tr.access = access;
            tr.ar_high = ar_high;
            if (granularity) {
                tr.limit <<= 12;
                tr.limit |= 0xFFF;
            }
            tr.base = base;
            CLOCK_CYCLES(20);
            PREFETCH_RUN(20, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 2, 0, 0, ea32);
            break;
        case 0x20: /*VERR*/
            /* Set ZF if the selector is readable at the current
               privilege level, clear it otherwise. */
            if (cpu_mod != 3)
                SEG_CHECK_READ(cpu_state.ea_seg);
            sel = geteaw();
            if (cpu_state.abrt)
                return 1;
            flags_rebuild();
            cpu_state.flags &= ~Z_FLAG;
            if (!(sel & 0xfffc))
                return 0; /*Null selector*/
            cpl_override = 1;
            valid = (sel & ~7) < ((sel & 4) ? ldt.limit : gdt.limit);
            desc = readmemw(0, ((sel & 4) ? ldt.base : gdt.base) + (sel & ~7) + 4);
            cpl_override = 0;
            if (cpu_state.abrt)
                return 1;
            if (!(desc & 0x1000))
                valid = 0;
            if ((desc & 0xC00) != 0xC00) /*Exclude conforming code segments*/
            {
                dpl = (desc >> 13) & 3; /*Check permissions*/
                if (dpl < CPL || dpl < (sel & 3))
                    valid = 0;
            }
            if ((desc & 0x0800) && !(desc & 0x0200))
                valid = 0; /*Non-readable code*/
            if (valid)
                cpu_state.flags |= Z_FLAG;
            CLOCK_CYCLES(20);
            PREFETCH_RUN(20, 2, rmdat, (cpu_mod == 3) ? 1 : 2, 0, 0, 0, ea32);
            break;
        case 0x28: /*VERW*/
            /* Set ZF if the selector is writable at the current
               privilege level, clear it otherwise. */
            if (cpu_mod != 3)
                SEG_CHECK_READ(cpu_state.ea_seg);
            sel = geteaw();
            if (cpu_state.abrt)
                return 1;
            flags_rebuild();
            cpu_state.flags &= ~Z_FLAG;
            if (!(sel & 0xfffc))
                return 0; /*Null selector*/
            cpl_override = 1;
            valid = (sel & ~7) < ((sel & 4) ? ldt.limit : gdt.limit);
            desc = readmemw(0, ((sel & 4) ? ldt.base : gdt.base) + (sel & ~7) + 4);
            cpl_override = 0;
            if (cpu_state.abrt)
                return 1;
            if (!(desc & 0x1000))
                valid = 0;
            dpl = (desc >> 13) & 3; /*Check permissions*/
            if (dpl < CPL || dpl < (sel & 3))
                valid = 0;
            if (desc & 0x0800)
                valid = 0; /*Code*/
            if (!(desc & 0x0200))
                valid = 0; /*Read-only data*/
            if (valid)
                cpu_state.flags |= Z_FLAG;
            CLOCK_CYCLES(20);
            PREFETCH_RUN(20, 2, rmdat, (cpu_mod == 3) ? 1 : 2, 0, 0, 0, ea32);
            break;
        default:
            /* Unrecognised /reg field: rewind past 0F 00 /r and raise
               #UD. */
            cpu_state.pc -= 3;
            x86illegal();
            break;
    }
    return cpu_state.abrt;
}
/* 0F 00 group, 16-bit addressing: decode the effective address and
   dispatch to the common handler.  Protected mode only (NOTRM). */
static int
op0F00_a16(uint32_t fetchdat)
{
    NOTRM
    fetch_ea_16(fetchdat);
    return op0F00_common(fetchdat, 0);
}
/* 0F 00 group, 32-bit addressing variant. */
static int
op0F00_a32(uint32_t fetchdat)
{
    NOTRM
    fetch_ea_32(fetchdat);
    return op0F00_common(fetchdat, 1);
}
/* Common handler for the 0F 01 opcode group (SGDT / SIDT / LGDT / LIDT
   / SMSW / LMSW / INVLPG).  rmdat bits 5:3 select the operation.
   is32: 32-bit operand size; is286: 286 behaviour (descriptor-table
   bases are stored with the top byte forced to 0xFF).  Returns nonzero
   on abort. */
static int
op0F01_common(uint32_t fetchdat, int is32, int is286, int ea32)
{
    uint32_t base;
    uint16_t limit;
    uint16_t tempw;
    switch (rmdat & 0x38) {
        case 0x00: /*SGDT*/
            /* Store GDT limit (word) followed by 32-bit base. */
            ILLEGAL_ON(cpu_mod == 3);
            if (cpu_mod != 3)
                SEG_CHECK_WRITE(cpu_state.ea_seg);
            seteaw(gdt.limit);
            base = gdt.base; // is32 ? gdt.base : (gdt.base & 0xffffff);
            if (is286)
                base |= 0xff000000;
            writememl(easeg, cpu_state.eaaddr + 2, base);
            CLOCK_CYCLES(7);
            PREFETCH_RUN(7, 2, rmdat, 0, 0, 1, 1, ea32);
            break;
        case 0x08: /*SIDT*/
            /* Store IDT limit and base.  NOTE(review): unlike SGDT,
               this case has no ILLEGAL_ON(cpu_mod == 3) - confirm
               whether that is a deliberate quirk. */
            if (cpu_mod != 3)
                SEG_CHECK_WRITE(cpu_state.ea_seg);
            seteaw(idt.limit);
            base = idt.base;
            if (is286)
                base |= 0xff000000;
            writememl(easeg, cpu_state.eaaddr + 2, base);
            CLOCK_CYCLES(7);
            PREFETCH_RUN(7, 2, rmdat, 0, 0, 1, 1, ea32);
            break;
        case 0x10: /*LGDT*/
            ILLEGAL_ON(cpu_mod == 3);
            /* Privileged in protected mode. */
            if ((CPL || cpu_state.eflags & VM_FLAG) && (cr0 & 1)) {
                x86gpf(NULL, 0);
                break;
            }
            if (cpu_mod != 3)
                SEG_CHECK_READ(cpu_state.ea_seg);
            limit = geteaw();
            base = readmeml(0, easeg + cpu_state.eaaddr + 2);
            if (cpu_state.abrt)
                return 1;
            gdt.limit = limit;
            gdt.base = base;
            /* 16-bit operand size only loads a 24-bit base. */
            if (!is32)
                gdt.base &= 0xffffff;
            CLOCK_CYCLES(11);
            PREFETCH_RUN(11, 2, rmdat, 1, 1, 0, 0, ea32);
            break;
        case 0x18: /*LIDT*/
            if ((CPL || cpu_state.eflags & VM_FLAG) && (cr0 & 1)) {
                x86gpf(NULL, 0);
                break;
            }
            if (cpu_mod != 3)
                SEG_CHECK_READ(cpu_state.ea_seg);
            limit = geteaw();
            base = readmeml(0, easeg + cpu_state.eaaddr + 2);
            if (cpu_state.abrt)
                return 1;
            idt.limit = limit;
            idt.base = base;
            /* 16-bit operand size only loads a 24-bit base. */
            if (!is32)
                idt.base &= 0xffffff;
            CLOCK_CYCLES(11);
            PREFETCH_RUN(11, 2, rmdat, 1, 1, 0, 0, ea32);
            break;
        case 0x20: /*SMSW*/
            /* Store the machine status word; reserved bits are forced
               per CPU family.  NOTE(review): the is32 register path
               still goes through seteaw(), which stores only 16 bits -
               confirm intended. */
            if (cpu_mod != 3)
                SEG_CHECK_WRITE(cpu_state.ea_seg);
            if (is386 && is32 && (cpu_mod == 3)) {
                if (is486 || isibm486)
                    seteaw(cr0);
                else if (is386 && !cpu_16bitbus)
                    seteaw(cr0 | /* 0x7FFFFF00 */ 0x7FFFFFE0);
                else
                    seteaw(cr0 | 0x7FFFFFF0);
            } else {
                if (is486 || isibm486)
                    seteaw(msw);
                else if (is386 && !cpu_16bitbus)
                    seteaw(msw | /* 0xFF00 */ 0xFFE0);
                else
                    seteaw(msw | 0xFFF0);
            }
            CLOCK_CYCLES(2);
            PREFETCH_RUN(2, 2, rmdat, 0, 0, (cpu_mod == 3) ? 0 : 1, 0, ea32);
            break;
        case 0x30: /*LMSW*/
            if ((CPL || cpu_state.eflags & VM_FLAG) && (msw & 1)) {
                x86gpf(NULL, 0);
                break;
            }
            if (cpu_mod != 3)
                SEG_CHECK_READ(cpu_state.ea_seg);
            tempw = geteaw();
            if (cpu_state.abrt)
                return 1;
            /* LMSW can set the PE bit but never clear it. */
            if (msw & 1)
                tempw |= 1;
            if (is386) {
                /* Bit 4 (ET) is fixed on the 386. */
                tempw &= ~0x10;
                tempw |= (msw & 0x10);
            } else
                tempw &= 0xF;
            msw = tempw;
            if (msw & 1)
                cpu_cur_status |= CPU_STATUS_PMODE;
            else
                cpu_cur_status &= ~CPU_STATUS_PMODE;
            PREFETCH_RUN(2, 2, rmdat, 0, 0, (cpu_mod == 3) ? 0 : 1, 0, ea32);
            break;
        case 0x38: /*INVLPG*/
            if (is486 || isibm486) {
                if ((CPL || cpu_state.eflags & VM_FLAG) && (cr0 & 1)) {
                    x86gpf(NULL, 0);
                    break;
                }
                SEG_CHECK_READ(cpu_state.ea_seg);
                /* Flushes the whole translation cache, not just the
                   one page. */
                flushmmucache_nopc();
                CLOCK_CYCLES(12);
                PREFETCH_RUN(12, 2, rmdat, 0, 0, 0, 0, ea32);
                break;
            }
            /* fall through - INVLPG is illegal on pre-486 parts */
        default:
            /* Rewind past 0F 01 /r and raise #UD. */
            cpu_state.pc -= 3;
            x86illegal();
            break;
    }
    return cpu_state.abrt;
}
/* 0F 01 group dispatchers: decode the effective address and call the
   common handler with the operand-size / address-size / CPU-family
   flags (is32, is286, ea32). */
static int
op0F01_w_a16(uint32_t fetchdat)
{
    fetch_ea_16(fetchdat);
    return op0F01_common(fetchdat, 0, 0, 0);
}
static int
op0F01_w_a32(uint32_t fetchdat)
{
    fetch_ea_32(fetchdat);
    return op0F01_common(fetchdat, 0, 0, 1);
}
static int
op0F01_l_a16(uint32_t fetchdat)
{
    fetch_ea_16(fetchdat);
    return op0F01_common(fetchdat, 1, 0, 0);
}
static int
op0F01_l_a32(uint32_t fetchdat)
{
    fetch_ea_32(fetchdat);
    return op0F01_common(fetchdat, 1, 0, 1);
}
/* 286 variant: 16-bit only, stores 0xFF in descriptor base top bytes. */
static int
op0F01_286(uint32_t fetchdat)
{
    fetch_ea_16(fetchdat);
    return op0F01_common(fetchdat, 0, 1, 0);
}
``` | /content/code_sandbox/src/cpu/x86_ops_pmode.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 5,546 |
```c
/*Elements taken into account :
- U/V integer pairing
- FPU/FXCH pairing
- Prefix decode delay (including shadowing)
- FPU latencies
- AGI stalls
Elements not taken into account :
- Branch prediction (beyond most simplistic approximation)
- PMMX decode queue
- MMX latencies
*/
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <wchar.h>
#include <86box/86box.h>
#include "cpu.h"
#include <86box/mem.h>
#include <86box/plat_unused.h>
#include <inttypes.h>
#include "x86.h"
#include "x86_ops.h"
#include "x87_sf.h"
#include "x87.h"
#include "codegen.h"
#include "codegen_ops.h"
#include "codegen_timing_common.h"
/*Instruction has different execution time for 16 and 32 bit data. Does not pair */
#define CYCLES_HAS_MULTI (1 << 28)
#define CYCLES_MULTI(c16, c32) (CYCLES_HAS_MULTI | c16 | (c32 << 8))
/*Instruction lasts given number of cycles. Does not pair*/
/* Note: CYCLES() already ORs in PAIR_NP, so "PAIR_NP | CYCLES(x)" in
   the tables below is redundant but harmless. */
#define CYCLES(c) (c | PAIR_NP)
/* Combined cycle count for a pair of instructions, indexed by
   [U-pipe timing class][V-pipe timing class] (Reg/RM/RMW/Branch).
   NOTE(review): the -1 entries in the Branch row appear to mean "no
   such pairing" - confirm against the consumer of this table. */
static int pair_timings[4][4] = {
    /* Reg RM RMW Branch*/
    /*Reg*/ {1, 2, 3, 2 },
    /*RM*/
    { 2, 2, 3, 3 },
    /*RMW*/
    { 3, 4, 5, 4 },
    /*Branch*/
    { -1, -1, -1, -1}
};
/*Instruction follows either register timing, read-modify, or read-modify-write.
  May be pairable*/
#define CYCLES_REG (0ull << 0)
#define CYCLES_RM (1ull << 0)
#define CYCLES_RMW (2ull << 0)
#define CYCLES_BRANCH (3ull << 0)
/*Instruction has immediate data. Can only be used with PAIR_U/PAIR_V/PAIR_UV*/
#define CYCLES_HASIMM (3ull << 2)
#define CYCLES_IMM8 (1ull << 2)
#define CYCLES_IMM1632 (2ull << 2)
#define CYCLES_MASK ((1ull << 7) - 1)
/*Instruction does not pair*/
#define PAIR_NP (0ull << 29)
/*Instruction pairs in U pipe only*/
#define PAIR_U (1ull << 29)
/*Instruction pairs in V pipe only*/
#define PAIR_V (2ull << 29)
/*Instruction pairs in both U and V pipes*/
#define PAIR_UV (3ull << 29)
/*Instruction pairs in U pipe only and only with FXCH*/
#define PAIR_FX (5ull << 29)
/*Instruction is FXCH and only pairs in V pipe with FX pairable instruction*/
#define PAIR_FXCH (6ull << 29)
#define PAIR_FPU (4ull << 29)
#define PAIR_MASK (7ull << 29)
/*comp_time = cycles until instruction complete
  i_overlap = cycles that overlap with integer
  f_overlap = cycles that overlap with subsequent FPU*/
#define FPU_CYCLES(comp_time, i_overlap, f_overlap) ((uint64_t) comp_time) | ((uint64_t) i_overlap << 41) | ((uint64_t) f_overlap << 49) | PAIR_FPU
#define FPU_COMP_TIME(timing) (timing & 0xff)
#define FPU_I_OVERLAP(timing) ((timing >> 41) & 0xff)
#define FPU_F_OVERLAP(timing) ((timing >> 49) & 0xff)
#define FPU_I_LATENCY(timing) (FPU_COMP_TIME(timing) - FPU_I_OVERLAP(timing))
#define FPU_F_LATENCY(timing) (FPU_I_OVERLAP(timing) - FPU_F_OVERLAP(timing))
#define FPU_RESULT_LATENCY(timing) ((timing >> 41) & 0xff)
#define INVALID 0
/* State for the two-pipe pairing simulation: a decoded instruction
   parked in the U pipe waiting for a potential V-pipe partner. */
static int u_pipe_full;
static uint32_t u_pipe_opcode;
static uint64_t *u_pipe_timings;
static uint32_t u_pipe_op_32;
static uint32_t u_pipe_regmask;
static uint32_t u_pipe_fetchdat;
static int u_pipe_decode_delay_offset;
static uint64_t *u_pipe_deps;
/* Registers written by the previous cycle (for AGI stall detection)
   and the address-generation register mask. */
static uint32_t regmask_modified;
static uint32_t addr_regmask;
/* Outstanding FPU completion latency, overall and per stack register. */
static int fpu_latency;
static int fpu_st_latency[8];
/* Per-opcode timing/pairing classes for the one-byte opcode map,
   indexed by opcode.  Entries combine a PAIR_* pairing class with
   either a CYCLES_* timing class (pairable register / read-modify /
   read-modify-write / branch forms) or a fixed CYCLES(n) count for
   non-pairable instructions.  INVALID marks opcodes handled elsewhere
   (ModRM groups, prefixes, escapes).
   Consistency fix: the 0x8B (MOV reg, r/m) entry previously read bare
   "PAIR_UV" - numerically identical since CYCLES_REG is 0, but now
   spelled "PAIR_UV | CYCLES_REG" like every other register-form entry. */
static uint64_t opcode_timings_p6[256] = {
    // clang-format off
/* ADD ADD ADD ADD*/
/*00*/ PAIR_UV | CYCLES_RMW, PAIR_UV | CYCLES_RMW, PAIR_UV | CYCLES_RM, PAIR_UV | CYCLES_RM,
/* ADD ADD PUSH ES POP ES*/
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_NP | CYCLES(1), PAIR_NP | CYCLES(3),
/* OR OR OR OR*/
 PAIR_UV | CYCLES_RMW, PAIR_UV | CYCLES_RMW, PAIR_UV | CYCLES_RM, PAIR_UV | CYCLES_RM,
/* OR OR PUSH CS */
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_NP | CYCLES(1), INVALID,
/* ADC ADC ADC ADC*/
/*10*/ PAIR_U | CYCLES_RMW, PAIR_U | CYCLES_RMW, PAIR_U | CYCLES_RM, PAIR_U | CYCLES_RM,
/* ADC ADC PUSH SS POP SS*/
 PAIR_U | CYCLES_REG, PAIR_U | CYCLES_REG, PAIR_NP | CYCLES(1), PAIR_NP | CYCLES(3),
/* SBB SBB SBB SBB*/
 PAIR_U | CYCLES_RMW, PAIR_U | CYCLES_RMW, PAIR_U | CYCLES_RM, PAIR_U | CYCLES_RM,
/* SBB SBB PUSH DS POP DS*/
 PAIR_U | CYCLES_REG, PAIR_U | CYCLES_REG, PAIR_NP | CYCLES(1), PAIR_NP | CYCLES(3),
/* AND AND AND AND*/
/*20*/ PAIR_UV | CYCLES_RMW, PAIR_UV | CYCLES_RMW, PAIR_UV | CYCLES_RM, PAIR_UV | CYCLES_RM,
/* AND AND DAA*/
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, INVALID, PAIR_NP | CYCLES(3),
/* SUB SUB SUB SUB*/
 PAIR_UV | CYCLES_RMW, PAIR_UV | CYCLES_RMW, PAIR_UV | CYCLES_RM, PAIR_UV | CYCLES_RM,
/* SUB SUB DAS*/
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, INVALID, PAIR_NP | CYCLES(3),
/* XOR XOR XOR XOR*/
/*30*/ PAIR_UV | CYCLES_RMW, PAIR_UV | CYCLES_RMW, PAIR_UV | CYCLES_RM, PAIR_UV | CYCLES_RM,
/* XOR XOR AAA*/
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, INVALID, PAIR_NP | CYCLES(3),
/* CMP CMP CMP CMP*/
 PAIR_UV | CYCLES_RM, PAIR_UV | CYCLES_RM, PAIR_UV | CYCLES_RM, PAIR_UV | CYCLES_RM,
/* CMP CMP AAS*/
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, INVALID, PAIR_NP | CYCLES(3),
/* INC EAX INC ECX INC EDX INC EBX*/
/*40*/ PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
/* INC ESP INC EBP INC ESI INC EDI*/
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
/* DEC EAX DEC ECX DEC EDX DEC EBX*/
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
/* DEC ESP DEC EBP DEC ESI DEC EDI*/
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
/* PUSH EAX PUSH ECX PUSH EDX PUSH EBX*/
/*50*/ PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
/* PUSH ESP PUSH EBP PUSH ESI PUSH EDI*/
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
/* POP EAX POP ECX POP EDX POP EBX*/
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
/* POP ESP POP EBP POP ESI POP EDI*/
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
/* PUSHA POPA BOUND ARPL*/
/*60*/ PAIR_NP | CYCLES(5), PAIR_NP | CYCLES(5), PAIR_NP | CYCLES(8), PAIR_NP | CYCLES(7),
 INVALID, INVALID, INVALID, INVALID,
/* PUSH imm IMUL PUSH imm IMUL*/
 PAIR_UV | CYCLES_REG, PAIR_NP | CYCLES(10), PAIR_UV | CYCLES_REG, PAIR_NP | CYCLES(10),
/* INSB INSW OUTSB OUTSW*/
 PAIR_NP | CYCLES(9), PAIR_NP | CYCLES(9), PAIR_NP | CYCLES(13), PAIR_NP | CYCLES(13),
/* Jxx*/
/*70*/ PAIR_V | CYCLES_BRANCH, PAIR_V | CYCLES_BRANCH, PAIR_V | CYCLES_BRANCH, PAIR_V | CYCLES_BRANCH,
 PAIR_V | CYCLES_BRANCH, PAIR_V | CYCLES_BRANCH, PAIR_V | CYCLES_BRANCH, PAIR_V | CYCLES_BRANCH,
 PAIR_V | CYCLES_BRANCH, PAIR_V | CYCLES_BRANCH, PAIR_V | CYCLES_BRANCH, PAIR_V | CYCLES_BRANCH,
 PAIR_V | CYCLES_BRANCH, PAIR_V | CYCLES_BRANCH, PAIR_V | CYCLES_BRANCH, PAIR_V | CYCLES_BRANCH,
/*80*/ INVALID, INVALID, INVALID, INVALID,
/* TEST TEST XCHG XCHG*/
 PAIR_UV | CYCLES_RM, PAIR_UV | CYCLES_RM, PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3),
/* MOV MOV MOV MOV*/
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
/* MOV from seg LEA MOV to seg POP*/
 PAIR_NP | CYCLES(1), PAIR_UV | CYCLES_REG, CYCLES(3), PAIR_NP | CYCLES(3),
/* NOP XCHG XCHG XCHG*/
/*90*/ PAIR_UV | CYCLES_REG, PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2),
/* XCHG XCHG XCHG XCHG*/
 PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2),
/* CBW CWD CALL far WAIT*/
 PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(0), PAIR_NP | CYCLES(1),
/* PUSHF POPF SAHF LAHF*/
 PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(4), PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2),
/* MOV MOV MOV MOV*/
/*a0*/ PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
/* MOVSB MOVSW CMPSB CMPSW*/
 PAIR_NP | CYCLES(4), PAIR_NP | CYCLES(4), PAIR_NP | CYCLES(5), PAIR_NP | CYCLES(5),
/* TEST TEST STOSB STOSW*/
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3),
/* LODSB LODSW SCASB SCASW*/
 PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(4), PAIR_NP | CYCLES(4),
/* MOV*/
/*b0*/ PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
/* RET imm RET*/
/*c0*/ INVALID, INVALID, PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(2),
/* LES LDS MOV MOV*/
 PAIR_NP | CYCLES(4), PAIR_NP | CYCLES(4), PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
/* ENTER LEAVE RETF RETF*/
 PAIR_NP | CYCLES(15), PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(0), PAIR_NP | CYCLES(0),
/* INT3 INT INTO IRET*/
 PAIR_NP | CYCLES(5), PAIR_NP | CYCLES(6), PAIR_NP | CYCLES(4), PAIR_NP | CYCLES(0),
/*d0*/ INVALID, INVALID, INVALID, INVALID,
/* AAM AAD SETALC XLAT*/
 PAIR_NP | CYCLES(18), PAIR_NP | CYCLES(10), PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(4),
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
/* LOOPNE LOOPE LOOP JCXZ*/
/*e0*/ PAIR_NP | CYCLES(7), PAIR_NP | CYCLES(7), PAIR_NP | CYCLES(5), PAIR_NP | CYCLES(5),
/* IN AL IN AX OUT_AL OUT_AX*/
 PAIR_NP | CYCLES(7), PAIR_NP | CYCLES(7), PAIR_NP | CYCLES(12), PAIR_NP | CYCLES(12),
/* CALL JMP JMP JMP*/
 PAIR_V | CYCLES_REG, PAIR_V | CYCLES_REG, PAIR_NP | CYCLES(0), PAIR_V | CYCLES_REG,
/* IN AL IN AX OUT_AL OUT_AX*/
 PAIR_NP | CYCLES(7), PAIR_NP | CYCLES(7), PAIR_NP | CYCLES(12), PAIR_NP | CYCLES(12),
/* REPNE REPE*/
/*f0*/ INVALID, INVALID, PAIR_NP | CYCLES(0), PAIR_NP | CYCLES(0),
/* HLT CMC*/
 PAIR_NP | CYCLES(4), PAIR_NP | CYCLES(2), INVALID, INVALID,
/* CLC STC CLI STI*/
 PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(7), PAIR_NP | CYCLES(7),
/* CLD STD INCDEC*/
 PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2), PAIR_UV | CYCLES_RMW, INVALID
    // clang-format on
};
/* Per-opcode pairing class and cycle cost for one-byte opcodes when the
   ModR/M byte selects a register operand (mod == 3).  Indexed by opcode. */
static uint64_t opcode_timings_p6_mod3[256] = {
// clang-format off
/* ADD ADD ADD ADD*/
/*00*/ PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
/* ADD ADD PUSH ES POP ES*/
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_NP | CYCLES(1), PAIR_NP | CYCLES(3),
/* OR OR OR OR*/
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
/* OR OR PUSH CS */
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_NP | CYCLES(1), INVALID,
/* ADC ADC ADC ADC*/
/*10*/ PAIR_U | CYCLES_REG, PAIR_U | CYCLES_REG, PAIR_U | CYCLES_REG, PAIR_U | CYCLES_REG,
/* ADC ADC PUSH SS POP SS*/
 PAIR_U | CYCLES_REG, PAIR_U | CYCLES_REG, PAIR_NP | CYCLES(1), PAIR_NP | CYCLES(3),
/* SBB SBB SBB SBB*/
 PAIR_U | CYCLES_REG, PAIR_U | CYCLES_REG, PAIR_U | CYCLES_REG, PAIR_U | CYCLES_REG,
/* SBB SBB PUSH DS POP DS*/
 PAIR_U | CYCLES_REG, PAIR_U | CYCLES_REG, PAIR_NP | CYCLES(1), PAIR_NP | CYCLES(3),
/* AND AND AND AND*/
/*20*/ PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
/* AND AND DAA*/
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, INVALID, PAIR_NP | CYCLES(3),
/* SUB SUB SUB SUB*/
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
/* SUB SUB DAS*/
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, INVALID, PAIR_NP | CYCLES(3),
/* XOR XOR XOR XOR*/
/*30*/ PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
/* XOR XOR AAA*/
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, INVALID, PAIR_NP | CYCLES(3),
/* CMP CMP CMP CMP*/
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
/* CMP CMP AAS*/
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, INVALID, PAIR_NP | CYCLES(3),
/* INC EAX INC ECX INC EDX INC EBX*/
/*40*/ PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
/* INC ESP INC EBP INC ESI INC EDI*/
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
/* DEC EAX DEC ECX DEC EDX DEC EBX*/
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
/* DEC ESP DEC EBP DEC ESI DEC EDI*/
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
/* PUSH EAX PUSH ECX PUSH EDX PUSH EBX*/
/*50*/ PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
/* PUSH ESP PUSH EBP PUSH ESI PUSH EDI*/
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
/* POP EAX POP ECX POP EDX POP EBX*/
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
/* POP ESP POP EBP POP ESI POP EDI*/
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
/* PUSHA POPA BOUND ARPL*/
/*60*/ PAIR_NP | CYCLES(5), PAIR_NP | CYCLES(5), PAIR_NP | CYCLES(8), PAIR_NP | CYCLES(7),
 INVALID, INVALID, INVALID, INVALID,
/* PUSH imm IMUL PUSH imm IMUL*/
 PAIR_UV | CYCLES_REG, PAIR_NP | CYCLES(10), PAIR_UV | CYCLES_REG, PAIR_NP | CYCLES(10),
/* INSB INSW OUTSB OUTSW*/
 PAIR_NP | CYCLES(9), PAIR_NP | CYCLES(9), PAIR_NP | CYCLES(13), PAIR_NP | CYCLES(13),
/* Jxx*/
/*70*/ PAIR_V | CYCLES_BRANCH, PAIR_V | CYCLES_BRANCH, PAIR_V | CYCLES_BRANCH, PAIR_V | CYCLES_BRANCH,
 PAIR_V | CYCLES_BRANCH, PAIR_V | CYCLES_BRANCH, PAIR_V | CYCLES_BRANCH, PAIR_V | CYCLES_BRANCH,
 PAIR_V | CYCLES_BRANCH, PAIR_V | CYCLES_BRANCH, PAIR_V | CYCLES_BRANCH, PAIR_V | CYCLES_BRANCH,
 PAIR_V | CYCLES_BRANCH, PAIR_V | CYCLES_BRANCH, PAIR_V | CYCLES_BRANCH, PAIR_V | CYCLES_BRANCH,
/*80*/ PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
/* TEST TEST XCHG XCHG*/
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3),
/* MOV MOV MOV MOV*/
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
/* MOV from seg LEA MOV to seg POP*/
 PAIR_NP | CYCLES(1), PAIR_UV | CYCLES_REG, PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3),
/* NOP XCHG XCHG XCHG*/
/*90*/ PAIR_UV | CYCLES_REG, PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2),
/* XCHG XCHG XCHG XCHG*/
 PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2),
/* CBW CWD CALL far WAIT*/
 PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(0), PAIR_NP | CYCLES(1),
/* PUSHF POPF SAHF LAHF*/
 PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(4), PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2),
/* MOV MOV MOV MOV*/
/*a0*/ PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
/* MOVSB MOVSW CMPSB CMPSW*/
 PAIR_NP | CYCLES(4), PAIR_NP | CYCLES(4), PAIR_NP | CYCLES(5), PAIR_NP | CYCLES(5),
/* TEST TEST STOSB STOSW*/
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3),
/* LODSB LODSW SCASB SCASW*/
 PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(4), PAIR_NP | CYCLES(4),
/* MOV*/
/*b0*/ PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
/* RET imm RET*/
/*c0*/ INVALID, INVALID, PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(2),
/* LES LDS MOV MOV*/
 PAIR_NP | CYCLES(4), PAIR_NP | CYCLES(4), PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
/* ENTER LEAVE RETF RETF*/
 PAIR_NP | CYCLES(15), PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(0), PAIR_NP | CYCLES(0),
/* INT3 INT INTO IRET*/
 PAIR_NP | CYCLES(5), PAIR_NP | CYCLES(6), PAIR_NP | CYCLES(4), PAIR_NP | CYCLES(0),
/*d0*/ INVALID, INVALID, INVALID, INVALID,
/* AAM AAD SETALC XLAT*/
 PAIR_NP | CYCLES(18), PAIR_NP | CYCLES(10), PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(4),
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
/* LOOPNE LOOPE LOOP JCXZ*/
/*e0*/ PAIR_NP | CYCLES(7), PAIR_NP | CYCLES(7), PAIR_NP | CYCLES(5), PAIR_NP | CYCLES(5),
/* IN AL IN AX OUT_AL OUT_AX*/
 PAIR_NP | CYCLES(7), PAIR_NP | CYCLES(7), PAIR_NP | CYCLES(12), PAIR_NP | CYCLES(12),
/* CALL JMP JMP JMP*/
 PAIR_V | CYCLES_REG, PAIR_V | CYCLES_REG, PAIR_NP | CYCLES(0), PAIR_V | CYCLES_REG,
/* IN AL IN AX OUT_AL OUT_AX*/
 PAIR_NP | CYCLES(7), PAIR_NP | CYCLES(7), PAIR_NP | CYCLES(12), PAIR_NP | CYCLES(12),
/* REPNE REPE*/
/*f0*/ INVALID, INVALID, PAIR_NP | CYCLES(0), PAIR_NP | CYCLES(0),
/* HLT CMC*/
 PAIR_NP | CYCLES(4), PAIR_NP | CYCLES(2), INVALID, INVALID,
/* CLC STC CLI STI*/
 PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(7), PAIR_NP | CYCLES(7),
/* CLD STD INCDEC*/
 PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2), PAIR_UV | CYCLES_REG, INVALID
// clang-format on
};
/* Pairing class and cycle cost for two-byte (0Fh-prefixed) opcodes whose
   ModR/M byte selects a memory operand.  Indexed by the second opcode byte. */
static uint64_t opcode_timings_p6_0f[256] = {
// clang-format off
/*00*/ PAIR_NP | CYCLES(20), PAIR_NP | CYCLES(11), PAIR_NP | CYCLES(11), PAIR_NP | CYCLES(10),
 INVALID, PAIR_NP | CYCLES(195), PAIR_NP | CYCLES(7), INVALID,
 PAIR_NP | CYCLES(1000), PAIR_NP | CYCLES(10000), INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
/*10*/ INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
/*20*/ PAIR_NP | CYCLES(6), PAIR_NP | CYCLES(6), PAIR_NP | CYCLES(6), PAIR_NP | CYCLES(6),
 PAIR_NP | CYCLES(6), PAIR_NP | CYCLES(6), INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
/* WRMSR (0f 30) / RDTSC (0f 31) / RDMSR (0f 32).  RDTSC carries PAIR_NP
   here to match the mod3 table; the bare CYCLES(1) was an inconsistency. */
/*30*/ PAIR_NP | CYCLES(9), PAIR_NP | CYCLES(1), PAIR_NP | CYCLES(9), INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
/*40*/ INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
/*50*/ INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
/*60*/ PAIR_U | CYCLES_RM, PAIR_U | CYCLES_RM, PAIR_U | CYCLES_RM, PAIR_U | CYCLES_RM,
 PAIR_U | CYCLES_RM, PAIR_U | CYCLES_RM, PAIR_U | CYCLES_RM, PAIR_U | CYCLES_RM,
 PAIR_U | CYCLES_RM, PAIR_U | CYCLES_RM, PAIR_U | CYCLES_RM, PAIR_U | CYCLES_RM,
 INVALID, INVALID, PAIR_U | CYCLES_RM, PAIR_U | CYCLES_RM,
/*70*/ INVALID, PAIR_U | CYCLES_RM, PAIR_U | CYCLES_RM, PAIR_U | CYCLES_RM,
 PAIR_U | CYCLES_RM, PAIR_U | CYCLES_RM, PAIR_U | CYCLES_RM, PAIR_NP | CYCLES(100),
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, PAIR_U | CYCLES_RM, PAIR_U | CYCLES_RM,
/*80*/ PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2),
 PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2),
 PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2),
 PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2),
/*90*/ PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3),
 PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3),
 PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3),
 PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3),
/*a0*/ PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(14), PAIR_NP | CYCLES(8),
 PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(4), INVALID, INVALID,
 PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3), INVALID, PAIR_NP | CYCLES(13),
 PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3), INVALID, PAIR_NP | CYCLES(10),
/*b0*/ PAIR_NP | CYCLES(10), PAIR_NP | CYCLES(10), PAIR_NP | CYCLES(6), PAIR_NP | CYCLES(13),
 PAIR_NP | CYCLES(6), PAIR_NP | CYCLES(6), PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3),
 INVALID, INVALID, PAIR_NP | CYCLES(6), PAIR_NP | CYCLES(13),
 PAIR_NP | CYCLES(7), PAIR_NP | CYCLES(7), PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3),
/*c0*/ PAIR_NP | CYCLES(4), PAIR_NP | CYCLES(4), INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 PAIR_NP | CYCLES(1), PAIR_NP | CYCLES(1), PAIR_NP | CYCLES(1), PAIR_NP | CYCLES(1),
 PAIR_NP | CYCLES(1), PAIR_NP | CYCLES(1), PAIR_NP | CYCLES(1), PAIR_NP | CYCLES(1),
/*d0*/ INVALID, PAIR_U | CYCLES_RM, PAIR_U | CYCLES_RM, PAIR_U | CYCLES_RM,
 INVALID, PAIR_U | CYCLES_RM, INVALID, INVALID,
 PAIR_U | CYCLES_RM, PAIR_U | CYCLES_RM, INVALID, PAIR_U | CYCLES_RM,
 PAIR_U | CYCLES_RM, PAIR_U | CYCLES_RM, INVALID, PAIR_U | CYCLES_RM,
/*e0*/ INVALID, PAIR_U | CYCLES_RM, PAIR_U | CYCLES_RM, INVALID,
 INVALID, PAIR_U | CYCLES_RM, INVALID, INVALID,
 PAIR_U | CYCLES_RM, PAIR_U | CYCLES_RM, INVALID, PAIR_U | CYCLES_RM,
 PAIR_U | CYCLES_RM, PAIR_U | CYCLES_RM, INVALID, PAIR_U | CYCLES_RM,
/*f0*/ INVALID, PAIR_U | CYCLES_RM, PAIR_U | CYCLES_RM, PAIR_U | CYCLES_RM,
 INVALID, PAIR_U | CYCLES_RM, INVALID, INVALID,
 PAIR_U | CYCLES_RM, PAIR_U | CYCLES_RM, PAIR_U | CYCLES_RM, INVALID,
 PAIR_U | CYCLES_RM, PAIR_U | CYCLES_RM, PAIR_U | CYCLES_RM, INVALID,
// clang-format on
};
/* Pairing class and cycle cost for two-byte (0Fh-prefixed) opcodes whose
   ModR/M byte selects a register operand (mod == 3). */
static uint64_t opcode_timings_p6_0f_mod3[256] = {
// clang-format off
/*00*/ PAIR_NP | CYCLES(20), PAIR_NP | CYCLES(11), PAIR_NP | CYCLES(11), PAIR_NP | CYCLES(10),
 INVALID, PAIR_NP | CYCLES(195), PAIR_NP | CYCLES(7), INVALID,
 PAIR_NP | CYCLES(1000), PAIR_NP | CYCLES(10000), INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
/*10*/ INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
/*20*/ PAIR_NP | CYCLES(6), PAIR_NP | CYCLES(6), PAIR_NP | CYCLES(6), PAIR_NP | CYCLES(6),
 PAIR_NP | CYCLES(6), PAIR_NP | CYCLES(6), INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
/*30*/ PAIR_NP | CYCLES(9), PAIR_NP | CYCLES(1), PAIR_NP | CYCLES(9), INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
/*40*/ INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
/*50*/ INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
/*60*/ PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
 INVALID, INVALID, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
/*70*/ INVALID, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_NP | CYCLES(100),
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
/*80*/ PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2),
 PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2),
 PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2),
 PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2),
/*90*/ PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3),
 PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3),
 PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3),
 PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3),
/*a0*/ PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(14), PAIR_NP | CYCLES(8),
 PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(4), INVALID, INVALID,
 PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3), INVALID, PAIR_NP | CYCLES(13),
 PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3), INVALID, PAIR_NP | CYCLES(10),
/*b0*/ PAIR_NP | CYCLES(10), PAIR_NP | CYCLES(10), PAIR_NP | CYCLES(6), PAIR_NP | CYCLES(13),
 PAIR_NP | CYCLES(6), PAIR_NP | CYCLES(6), PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3),
 INVALID, INVALID, PAIR_NP | CYCLES(6), PAIR_NP | CYCLES(13),
 PAIR_NP | CYCLES(7), PAIR_NP | CYCLES(7), PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3),
/*c0*/ PAIR_NP | CYCLES(4), PAIR_NP | CYCLES(4), INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 PAIR_NP | CYCLES(1), PAIR_NP | CYCLES(1), PAIR_NP | CYCLES(1), PAIR_NP | CYCLES(1),
 PAIR_NP | CYCLES(1), PAIR_NP | CYCLES(1), PAIR_NP | CYCLES(1), PAIR_NP | CYCLES(1),
/*d0*/ INVALID, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
 INVALID, PAIR_UV | CYCLES_REG, INVALID, INVALID,
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, INVALID, PAIR_UV | CYCLES_REG,
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, INVALID, PAIR_UV | CYCLES_REG,
/*e0*/ INVALID, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, INVALID,
 INVALID, PAIR_UV | CYCLES_REG, INVALID, INVALID,
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, INVALID, PAIR_UV | CYCLES_REG,
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, INVALID, PAIR_UV | CYCLES_REG,
/*f0*/ INVALID, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
 INVALID, PAIR_UV | CYCLES_REG, INVALID, INVALID,
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, INVALID,
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, INVALID,
// clang-format on
};
/* Shift/rotate group (C0/C1/D0-D3), memory-operand forms; indexed by reg field. */
static uint64_t opcode_timings_p6_shift[8] = {
// clang-format off
 PAIR_U | CYCLES_RMW, PAIR_U | CYCLES_RMW, PAIR_U | CYCLES_RMW, PAIR_U | CYCLES_RMW,
 PAIR_U | CYCLES_RMW, PAIR_U | CYCLES_RMW, PAIR_U | CYCLES_RMW, PAIR_U | CYCLES_RMW,
// clang-format on
};
/* Shift/rotate group (C0/C1/D0-D3), register forms; indexed by reg field. */
static uint64_t opcode_timings_p6_shift_mod3[8] = {
// clang-format off
 PAIR_U | CYCLES_REG, PAIR_U | CYCLES_REG, PAIR_U | CYCLES_REG, PAIR_U | CYCLES_REG,
 PAIR_U | CYCLES_REG, PAIR_U | CYCLES_REG, PAIR_U | CYCLES_REG, PAIR_U | CYCLES_REG,
// clang-format on
};
/* Group 3 (F6, byte-sized), memory-operand forms; indexed by reg field. */
static uint64_t opcode_timings_p6_f6[8] = {
// clang-format off
/* TST NOT NEG*/
 PAIR_UV | CYCLES_RM, INVALID, PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3),
/* MUL IMUL DIV IDIV*/
 PAIR_NP | CYCLES(11), PAIR_NP | CYCLES(11), PAIR_NP | CYCLES(17), PAIR_NP | CYCLES(22)
// clang-format on
};
/* Group 3 (F6, byte-sized), register forms; indexed by reg field. */
static uint64_t opcode_timings_p6_f6_mod3[8] = {
// clang-format off
/* TST NOT NEG*/
 PAIR_UV | CYCLES_REG, INVALID, PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3),
/* MUL IMUL DIV IDIV*/
 PAIR_NP | CYCLES(11), PAIR_NP | CYCLES(11), PAIR_NP | CYCLES(17), PAIR_NP | CYCLES(22)
// clang-format on
};
/* Group 3 (F7, word/dword-sized), memory-operand forms; CYCLES_MULTI encodes
   separate 16-bit and 32-bit operand costs. */
static uint64_t opcode_timings_p6_f7[8] = {
// clang-format off
/* TST NOT NEG*/
 PAIR_UV | CYCLES_RM, INVALID, PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3),
/* MUL IMUL DIV IDIV*/
 PAIR_NP | CYCLES_MULTI(11,10), PAIR_NP | CYCLES_MULTI(11,10), PAIR_NP | CYCLES_MULTI(25,41), PAIR_NP | CYCLES_MULTI(30,46)
// clang-format on
};
/* Group 3 (F7, word/dword-sized), register forms. */
static uint64_t opcode_timings_p6_f7_mod3[8] = {
// clang-format off
/* TST NOT NEG*/
 PAIR_UV | CYCLES_REG, INVALID, PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3),
/* MUL IMUL DIV IDIV*/
 PAIR_NP | CYCLES_MULTI(11,10), PAIR_NP | CYCLES_MULTI(11,10), PAIR_NP | CYCLES_MULTI(25,41), PAIR_NP | CYCLES_MULTI(30,46)
// clang-format on
};
/* Group 5 (FF), memory-operand forms; indexed by reg field. */
static uint64_t opcode_timings_p6_ff[8] = {
// clang-format off
/* INC DEC CALL CALL far*/
 PAIR_UV | CYCLES_RMW, PAIR_UV | CYCLES_RMW, PAIR_NP | CYCLES(4), PAIR_NP | CYCLES(0),
/* JMP JMP far PUSH*/
 PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(0), PAIR_NP | CYCLES(2), INVALID
// clang-format on
};
/* Group 5 (FF), register forms; indexed by reg field. */
static uint64_t opcode_timings_p6_ff_mod3[8] = {
// clang-format off
/* INC DEC CALL CALL far*/
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_NP | CYCLES(4), PAIR_NP | CYCLES(0),
/* JMP JMP far PUSH*/
 PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(0), PAIR_NP | CYCLES(2), INVALID
// clang-format on
};
/* FPU escape D8 (single-precision memory operand); FPU_CYCLES packs
   total/initial/result latencies. */
static uint64_t opcode_timings_p6_d8[8] = {
// clang-format off
/* FADDs FMULs FCOMs FCOMPs*/
 PAIR_FX | FPU_CYCLES(3,2,2), PAIR_FX | FPU_CYCLES(3,2,2), PAIR_FX | FPU_CYCLES(1,0,0), PAIR_FX | FPU_CYCLES(1,0,0),
/* FSUBs FSUBRs FDIVs FDIVRs*/
 PAIR_FX | FPU_CYCLES(3,2,2), PAIR_FX | FPU_CYCLES(3,2,2), PAIR_FX | FPU_CYCLES(39,38,2), PAIR_FX | FPU_CYCLES(39,38,2)
// clang-format on
};
/* FPU escape D8, register (ST(i)) forms. */
static uint64_t opcode_timings_p6_d8_mod3[8] = {
// clang-format off
/* FADD FMUL FCOM FCOMP*/
 PAIR_FX | FPU_CYCLES(3,2,2), PAIR_FX | FPU_CYCLES(3,2,2), PAIR_FX | FPU_CYCLES(1,0,0), PAIR_FX | FPU_CYCLES(1,0,0),
/* FSUB FSUBR FDIV FDIVR*/
 PAIR_FX | FPU_CYCLES(3,2,2), PAIR_FX | FPU_CYCLES(3,2,2), PAIR_FX | FPU_CYCLES(39,38,2), PAIR_FX | FPU_CYCLES(39,38,2)
// clang-format on
};
/* FPU escape D9, memory-operand forms; indexed by reg field. */
static uint64_t opcode_timings_p6_d9[8] = {
// clang-format off
/* FLDs FSTs FSTPs*/
 PAIR_FX | FPU_CYCLES(1,0,0), INVALID, PAIR_NP | FPU_CYCLES(2,0,0), PAIR_NP | FPU_CYCLES(2,0,0),
/* FLDENV FLDCW FSTENV FSTCW*/
 PAIR_NP | FPU_CYCLES(32,0,0), PAIR_NP | FPU_CYCLES(8,0,0), PAIR_NP | FPU_CYCLES(48,0,0), PAIR_NP | FPU_CYCLES(2,0,0)
// clang-format on
};
/* FPU escape D9, register forms; indexed by the low 6 bits of the second byte. */
static uint64_t opcode_timings_p6_d9_mod3[64] = {
// clang-format off
/*FLD*/
 PAIR_FX | FPU_CYCLES(1,0,0), PAIR_FX | FPU_CYCLES(1,0,0), PAIR_FX | FPU_CYCLES(1,0,0), PAIR_FX | FPU_CYCLES(1,0,0),
 PAIR_FX | FPU_CYCLES(1,0,0), PAIR_FX | FPU_CYCLES(1,0,0), PAIR_FX | FPU_CYCLES(1,0,0), PAIR_FX | FPU_CYCLES(1,0,0),
/*FXCH*/
 PAIR_FXCH | CYCLES(0), PAIR_FXCH | CYCLES(0), PAIR_FXCH | CYCLES(0), PAIR_FXCH | CYCLES(0),
 PAIR_FXCH | CYCLES(0), PAIR_FXCH | CYCLES(0), PAIR_FXCH | CYCLES(0), PAIR_FXCH | CYCLES(0),
/*FNOP*/
 PAIR_NP | FPU_CYCLES(3,0,0), INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
/*FSTP*/
 PAIR_NP | FPU_CYCLES(1,0,0), PAIR_NP | FPU_CYCLES(1,0,0), PAIR_NP | FPU_CYCLES(1,0,0), PAIR_NP | FPU_CYCLES(1,0,0),
 PAIR_NP | FPU_CYCLES(1,0,0), PAIR_NP | FPU_CYCLES(1,0,0), PAIR_NP | FPU_CYCLES(1,0,0), PAIR_NP | FPU_CYCLES(1,0,0),
/* opFCHS opFABS*/
 PAIR_FX | FPU_CYCLES(1,0,0), PAIR_FX | FPU_CYCLES(1,0,0), INVALID, INVALID,
/* opFTST opFXAM*/
 PAIR_NP | FPU_CYCLES(1,0,0), PAIR_NP | FPU_CYCLES(21,4,0), INVALID, INVALID,
/* opFLD1 opFLDL2T opFLDL2E opFLDPI*/
 PAIR_NP | FPU_CYCLES(2,0,0), PAIR_NP | FPU_CYCLES(5,2,2), PAIR_NP | FPU_CYCLES(5,2,2), PAIR_NP | FPU_CYCLES(5,2,2),
/* opFLDEG2 opFLDLN2 opFLDZ*/
 PAIR_NP | FPU_CYCLES(5,2,2), PAIR_NP | FPU_CYCLES(5,2,2), PAIR_NP | FPU_CYCLES(2,0,0), INVALID,
/* opF2XM1 opFYL2X opFPTAN opFPATAN*/
 PAIR_NP | FPU_CYCLES(53,2,2), PAIR_NP | FPU_CYCLES(103,2,2), PAIR_NP | FPU_CYCLES(120,36,0), PAIR_NP | FPU_CYCLES(112,2,2),
/* opFDECSTP opFINCSTP,*/
 INVALID, INVALID, PAIR_NP | FPU_CYCLES(2,0,0), PAIR_NP | FPU_CYCLES(2,0,0),
/* opFPREM opFSQRT opFSINCOS*/
 PAIR_NP | FPU_CYCLES(64,2,2), INVALID, PAIR_NP | FPU_CYCLES(70,69,2), PAIR_NP | FPU_CYCLES(89,2,2),
/* opFRNDINT opFSCALE opFSIN opFCOS*/
 PAIR_NP | FPU_CYCLES(9,0,0), PAIR_NP | FPU_CYCLES(20,5,0), PAIR_NP | FPU_CYCLES(65,2,2), PAIR_NP | FPU_CYCLES(65,2,2)
// clang-format on
};
/* FPU escape DA (32-bit integer memory operand); indexed by reg field. */
static uint64_t opcode_timings_p6_da[8] = {
// clang-format off
/* FIADDl FIMULl FICOMl FICOMPl*/
 PAIR_NP | FPU_CYCLES(6,2,2), PAIR_NP | FPU_CYCLES(6,2,2), PAIR_NP | FPU_CYCLES(4,0,0), PAIR_NP | FPU_CYCLES(4,0,0),
/* FISUBl FISUBRl FIDIVl FIDIVRl*/
 PAIR_NP | FPU_CYCLES(6,2,2), PAIR_NP | FPU_CYCLES(6,2,2), PAIR_NP | FPU_CYCLES(42,38,2), PAIR_NP | FPU_CYCLES(42,38,2)
// clang-format on
};
/* FPU escape DA, register forms; only FUCOMPP (reg == 5) is valid here. */
static uint64_t opcode_timings_p6_da_mod3[8] = {
// clang-format off
 INVALID, INVALID, INVALID, INVALID,
/* FCOMPP*/
 INVALID, PAIR_NP | FPU_CYCLES(1,0,0), INVALID, INVALID
// clang-format on
};
/* FPU escape DB, memory-operand forms; indexed by reg field. */
static uint64_t opcode_timings_p6_db[8] = {
// clang-format off
/* FLDil FSTil FSTPil*/
 PAIR_NP | FPU_CYCLES(3,2,2), INVALID, PAIR_NP | FPU_CYCLES(6,0,0), PAIR_NP | FPU_CYCLES(6,0,0),
/* FLDe FSTPe*/
 INVALID, PAIR_NP | FPU_CYCLES(3,0,0), INVALID, PAIR_NP | FPU_CYCLES(3,0,0)
// clang-format on
};
/* FPU escape DB, register forms; indexed by the low 6 bits of the second byte.
   Only the FNOP/FNCLEX/FNINIT cluster around 0x20-0x25 is valid. */
static uint64_t opcode_timings_p6_db_mod3[64] = {
// clang-format off
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
/* opFNOP opFCLEX opFINIT*/
 INVALID, PAIR_NP | FPU_CYCLES(1,0,0), PAIR_NP | FPU_CYCLES(7,0,0), PAIR_NP | FPU_CYCLES(17,0,0),
/* opFNOP opFNOP*/
 PAIR_NP | FPU_CYCLES(1,0,0), PAIR_NP | FPU_CYCLES(1,0,0), INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
// clang-format on
};
/* FPU escape DC (double-precision memory operand); indexed by reg field. */
static uint64_t opcode_timings_p6_dc[8] = {
// clang-format off
/* FADDd FMULd FCOMd FCOMPd*/
 PAIR_FX | FPU_CYCLES(3,2,2), PAIR_FX | FPU_CYCLES(3,2,2), PAIR_FX | FPU_CYCLES(1,0,0), PAIR_FX | FPU_CYCLES(1,0,0),
/* FSUBd FSUBRd FDIVd FDIVRd*/
 PAIR_FX | FPU_CYCLES(3,2,2), PAIR_FX | FPU_CYCLES(3,2,2), PAIR_FX | FPU_CYCLES(39,38,2), PAIR_FX | FPU_CYCLES(39,38,2)
// clang-format on
};
/* FPU escape DC, register (ST(i),ST(0)) forms. */
static uint64_t opcode_timings_p6_dc_mod3[8] = {
// clang-format off
/* opFADDr opFMULr*/
 PAIR_FX | FPU_CYCLES(3,2,2), PAIR_FX | FPU_CYCLES(3,2,2), INVALID, INVALID,
/* opFSUBRr opFSUBr opFDIVRr opFDIVr*/
 PAIR_FX | FPU_CYCLES(3,2,2), PAIR_FX | FPU_CYCLES(3,2,2), PAIR_FX | FPU_CYCLES(39,38,2), PAIR_FX | FPU_CYCLES(39,38,2)
// clang-format on
};
/* FPU escape DD, memory-operand forms; indexed by reg field. */
static uint64_t opcode_timings_p6_dd[8] = {
// clang-format off
/* FLDd FSTd FSTPd*/
 PAIR_FX | FPU_CYCLES(1,0,0), INVALID, PAIR_NP | FPU_CYCLES(2,0,0), PAIR_NP | FPU_CYCLES(2,0,0),
/* FRSTOR FSAVE FSTSW*/
 PAIR_NP | FPU_CYCLES(70,0,0), INVALID, PAIR_NP | FPU_CYCLES(127,0,0), PAIR_NP | FPU_CYCLES(6,0,0)
// clang-format on
};
/* FPU escape DD, register forms; indexed by reg field. */
static uint64_t opcode_timings_p6_dd_mod3[8] = {
// clang-format off
/* FFFREE FST FSTP*/
 PAIR_NP | FPU_CYCLES(2,0,0), INVALID, PAIR_NP | FPU_CYCLES(1,0,0), PAIR_NP | FPU_CYCLES(1,0,0),
/* FUCOM FUCOMP*/
 PAIR_NP | FPU_CYCLES(1,0,0), PAIR_NP | FPU_CYCLES(1,0,0), INVALID, INVALID
// clang-format on
};
/* FPU escape DE (16-bit integer memory operand); indexed by reg field. */
static uint64_t opcode_timings_p6_de[8] = {
// clang-format off
/* FIADDw FIMULw FICOMw FICOMPw*/
 PAIR_NP | FPU_CYCLES(6,2,2), PAIR_NP | FPU_CYCLES(6,2,2), PAIR_NP | FPU_CYCLES(4,0,0), PAIR_NP | FPU_CYCLES(4,0,0),
/* FISUBw FISUBRw FIDIVw FIDIVRw*/
 PAIR_NP | FPU_CYCLES(6,2,2), PAIR_NP | FPU_CYCLES(6,2,2), PAIR_NP | FPU_CYCLES(42,38,2), PAIR_NP | FPU_CYCLES(42,38,2)
// clang-format on
};
/* FPU escape DE, register (pop) forms; indexed by reg field. */
static uint64_t opcode_timings_p6_de_mod3[8] = {
// clang-format off
/* FADDP FMULP FCOMPP*/
 PAIR_FX | FPU_CYCLES(3,2,2), PAIR_FX | FPU_CYCLES(3,2,2), INVALID, PAIR_FX | FPU_CYCLES(1,0,0),
/* FSUBP FSUBRP FDIVP FDIVRP*/
 PAIR_FX | FPU_CYCLES(3,2,2), PAIR_FX | FPU_CYCLES(3,2,2), PAIR_FX | FPU_CYCLES(39,38,2), PAIR_FX | FPU_CYCLES(39,38,2)
// clang-format on
};
/* FPU escape DF, memory-operand forms; indexed by reg field. */
static uint64_t opcode_timings_p6_df[8] = {
// clang-format off
/* FILDiw FISTiw FISTPiw*/
 PAIR_NP | FPU_CYCLES(3,2,2), INVALID, PAIR_NP | FPU_CYCLES(6,0,0), PAIR_NP | FPU_CYCLES(6,0,0),
/* FILDiq FBSTP FISTPiq*/
 INVALID, PAIR_NP | FPU_CYCLES(3,2,2), PAIR_NP | FPU_CYCLES(148,0,0), PAIR_NP | FPU_CYCLES(6,0,0)
// clang-format on
};
/* FPU escape DF, register forms; only FSTSW AX (reg == 4) is valid. */
static uint64_t opcode_timings_p6_df_mod3[8] = {
// clang-format off
 INVALID, INVALID, INVALID, INVALID,
/* FSTSW AX*/
 PAIR_NP | FPU_CYCLES(6,0,0), INVALID, INVALID, INVALID
// clang-format on
};
/* Group 1 (81: ALU r/m,imm16/32), memory forms; reg 7 (CMP) is read-only. */
static uint64_t opcode_timings_p6_81[8] = {
// clang-format off
 PAIR_UV | CYCLES_RMW | CYCLES_IMM1632, PAIR_UV | CYCLES_RMW | CYCLES_IMM1632, PAIR_UV | CYCLES_RMW | CYCLES_IMM1632, PAIR_UV | CYCLES_RMW | CYCLES_IMM1632,
 PAIR_UV | CYCLES_RMW | CYCLES_IMM1632, PAIR_UV | CYCLES_RMW | CYCLES_IMM1632, PAIR_UV | CYCLES_RMW | CYCLES_IMM1632, PAIR_UV | CYCLES_RM | CYCLES_IMM1632
// clang-format on
};
/* Group 1 (81), register forms. */
static uint64_t opcode_timings_p6_81_mod3[8] = {
// clang-format off
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG
// clang-format on
};
/* Group 1 (80/82/83: ALU r/m,imm8), memory forms; reg 7 (CMP) is read-only. */
static uint64_t opcode_timings_p6_8x[8] = {
// clang-format off
 PAIR_UV | CYCLES_RMW | CYCLES_IMM8, PAIR_UV | CYCLES_RMW | CYCLES_IMM8, PAIR_UV | CYCLES_RMW | CYCLES_IMM8, PAIR_UV | CYCLES_RMW | CYCLES_IMM8,
 PAIR_UV | CYCLES_RMW | CYCLES_IMM8, PAIR_UV | CYCLES_RMW | CYCLES_IMM8, PAIR_UV | CYCLES_RMW | CYCLES_IMM8, PAIR_UV | CYCLES_RM | CYCLES_IMM8
// clang-format on
};
/* Group 1 (80/82/83), register forms. */
static uint64_t opcode_timings_p6_8x_mod3[8] = {
// clang-format off
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG,
 PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG, PAIR_UV | CYCLES_REG
// clang-format on
};
/* Per-block decoder-delay accounting, reset in codegen_timing_pentium_block_start(). */
static int decode_delay;        /* carry-over delay from the previous instruction */
static int decode_delay_offset; /* extra decode clocks accumulated from prefixes */
/* Prefix state for the instruction currently being timed,
   reset in codegen_timing_pentium_start(). */
static uint8_t last_prefix;     /* most recent prefix byte seen (0 if none) */
static int prefixes;            /* number of prefix bytes on this instruction */
/* Decode a packed timing word into an instruction cycle count.
   The check order matters: FPU latency, multi-cycle (16/32-bit) encodings and
   the FX/FXCH pairing classes must be peeled off before the low bits are
   reinterpreted as a CYCLES_REG/RM/RMW/BRANCH class code. */
static inline int
COUNT(uint64_t timings, uint64_t deps, int op_32)
{
    /* Real FPU op (not an FXCH rename): cost is its integer-unit latency. */
    if ((timings & PAIR_FPU) && !(deps & FPU_FXCH))
        return FPU_I_LATENCY(timings);
    /* Separate 16- and 32-bit costs packed into the low two bytes;
       op_32 bit 8 selects the 32-bit operand-size cost. */
    if (timings & CYCLES_HAS_MULTI) {
        if (op_32 & 0x100)
            return ((uintptr_t) timings >> 8) & 0xff;
        return (uintptr_t) timings & 0xff;
    }
    /* No pairing class, or FX/FXCH class: low word is a literal cycle count. */
    if (!(timings & PAIR_MASK))
        return timings & 0xffff;
    if ((timings & PAIR_MASK) == PAIR_FX)
        return timings & 0xffff;
    if ((timings & PAIR_MASK) == PAIR_FXCH)
        return timings & 0xffff;
    /* Pairable integer op: reduce to the 2-bit cycle-class code. */
    if ((timings & PAIR_UV) && !(timings & PAIR_FPU))
        timings &= 3;
    switch (timings & CYCLES_MASK) {
        case CYCLES_REG:
            return 1;
        case CYCLES_RM:
            return 2;
        case CYCLES_RMW:
            return 3;
        case CYCLES_BRANCH:
            /* Pentium MMX has a shorter taken-branch cost than classic Pentium. */
            return cpu_has_feature(CPU_FEATURE_MMX) ? 1 : 2;
    }

    /* Unreachable for well-formed tables. */
    fatal("Illegal COUNT %016" PRIu64 "\n", timings);
    return timings;
}
static int
codegen_fpu_latencies(uint64_t deps, int reg)
{
int latency = fpu_latency;
if ((deps & FPU_RW_ST0) && fpu_st_latency[0] && fpu_st_latency[0] > latency)
latency = fpu_st_latency[0];
if ((deps & FPU_RW_ST1) && fpu_st_latency[1] && fpu_st_latency[1] > latency)
latency = fpu_st_latency[1];
if ((deps & FPU_RW_STREG) && fpu_st_latency[reg] && fpu_st_latency[reg] > latency)
latency = fpu_st_latency[reg];
return latency;
}
/* Subtract `count` from `latency`, clamping at zero.  Wrapped in
   do { } while (0) so the two statements behave as one — the bare form
   would silently misbehave inside an unbraced if/for body. */
#define SUB_AND_CLAMP(latency, count) \
    do {                              \
        latency -= count;             \
        if (latency < 0)              \
            latency = 0;              \
    } while (0)
/* Advance the FPU latency model by `count` clocks: age the unit latency and
   every pending per-stack-register result latency, clamping each at zero.
   Replaces eight copy-pasted SUB_AND_CLAMP lines with a loop; the loop body
   is braced so the multi-statement macro expands safely. */
static void
codegen_fpu_latency_clock(int count)
{
    SUB_AND_CLAMP(fpu_latency, count);
    for (int c = 0; c < 8; c++) {
        SUB_AND_CLAMP(fpu_st_latency[c], count);
    }
}
/* Return 1 if the ModR/M (and optional SIB) encoding in `fetchdat` carries a
   displacement.  op_32 bit 9 set selects 32-bit addressing mode. */
static inline int
codegen_timing_has_displacement(uint32_t fetchdat, int op_32)
{
    int mod = fetchdat & 0xc0;

    /* mod=01 (disp8) and mod=10 (disp16/32) always carry a displacement. */
    if (mod == 0x40 || mod == 0x80)
        return 1;

    if (op_32 & 0x200) {
        /* 32-bit addressing: rm=100 with mod!=11 means a SIB byte follows. */
        if ((fetchdat & 7) == 4 && mod != 0xc0)
            return ((fetchdat & 0x700) == 0x500); /* SIB base=EBP, mod=00 → disp32 */
        return ((fetchdat & 0xc7) == 0x05);       /* mod=00, rm=101 → disp32 */
    }

    /* 16-bit addressing: mod=00, rm=110 → disp16. */
    return ((fetchdat & 0xc7) == 0x06);
}
/*The instruction is only of interest here if it's longer than 7 bytes, as that's the
limit on Pentium MMX parallel decoding*/
/* Estimate the encoded byte length of the instruction: prefixes, opcode,
   ModR/M, optional SIB, displacement and immediate.  Non-memory timing
   classes are treated as prefix-only (the caller only cares about the
   7-byte Pentium MMX parallel-decode limit). */
static inline int
codegen_timing_instr_length(uint64_t timing, uint32_t fetchdat, int op_32)
{
    int length = prefixes;
    int mod;
    int has_sib;

    if ((timing & CYCLES_MASK) != CYCLES_RM && (timing & CYCLES_MASK) != CYCLES_RMW)
        return length;

    length += 2; /* opcode + ModR/M */

    if ((timing & CYCLES_HASIMM) == CYCLES_IMM8)
        length++;
    if ((timing & CYCLES_HASIMM) == CYCLES_IMM1632)
        length += (op_32 & 0x100) ? 4 : 2;

    mod = fetchdat & 0xc0;
    if (op_32 & 0x200) {
        /* 32-bit addressing: rm=100 with mod!=11 adds a SIB byte. */
        has_sib = ((fetchdat & 7) == 4) && (mod != 0xc0);
        if (has_sib)
            length++;
        if (mod == 0x40)
            length++;      /* disp8 */
        else if (mod == 0x80)
            length += 4;   /* disp32 */
        else if (has_sib ? ((fetchdat & 0x700) == 0x500) : ((fetchdat & 0xc7) == 0x05))
            length += 4;   /* mod=00 forced disp32 */
    } else {
        if (mod == 0x40)
            length++;      /* disp8 */
        else if (mod == 0x80)
            length += 2;   /* disp16 */
        else if ((fetchdat & 0xc7) == 0x06)
            length += 2;   /* mod=00 rm=110 → disp16 */
    }

    return length;
}
/* Reset pairing and decoder-delay state at the start of a translated block. */
void
codegen_timing_pentium_block_start(void)
{
    u_pipe_full = 0;
    decode_delay = 0;
    decode_delay_offset = 0;
}
/* Clear per-instruction prefix state before timing the next instruction. */
void
codegen_timing_pentium_start(void)
{
    prefixes = 0;
    last_prefix = 0;
}
/* Account for one prefix byte.  FPU escape bytes always decode for free; on
   Pentium MMX 0fh is free and the size overrides (66h/67h) cost two clocks;
   on classic Pentium 0fh is free only before conditional jumps.  Any other
   prefix adds one decode clock, which may be hidden behind earlier execution. */
void
codegen_timing_pentium_prefix(uint8_t prefix, uint32_t fetchdat)
{
    prefixes++;
    last_prefix = prefix;

    if ((prefix & 0xf8) == 0xd8)
        return; /* FPU escape bytes d8-df: no decode cost */

    if (cpu_has_feature(CPU_FEATURE_MMX)) {
        if (prefix == 0x0f)
            return; /* 0fh prefix is 'free' on Pentium MMX */
        if (prefix == 0x66 || prefix == 0x67) {
            decode_delay_offset += 2; /* size overrides cost 2 clocks on MMX */
            return;
        }
    } else if (prefix == 0x0f && (fetchdat & 0xf0) == 0x80) {
        return; /* 0fh is 'free' when it introduces a conditional jump */
    }

    /* Everything else takes one decode clock (possibly shadowed by
       execution of previous instructions). */
    decode_delay_offset++;
}
/* Return nonzero if this instruction's address calculation depends on a
   register modified by the previous cycle (an AGI stall). */
static int
check_agi(uint64_t *deps, uint8_t opcode, uint32_t fetchdat, int op_32)
{
    uint32_t regmask = get_addr_regmask(deps[opcode], fetchdat, op_32);

    /* Implicit ESP users (PUSH, POP, CALL, ...) never AGI-stall one another,
       but an explicit write to ESP upstream still stalls them. */
    if ((regmask & REGMASK_IMPL_ESP) && (regmask_modified & (1 << REG_ESP)) && !(regmask_modified & REGMASK_IMPL_ESP))
        regmask |= (1 << REG_ESP);

    return (regmask_modified & regmask) & ~REGMASK_IMPL_ESP;
}
/* Charge one instruction against the block's cycle budget and update the FPU
   latency pipeline model.  Statement order is significant: latencies are aged
   before the new cost is added, and the stack-register latency array is
   shifted for pops before any new result latency is recorded. */
static void
codegen_instruction(uint64_t *timings, uint64_t *deps, uint8_t opcode, uint32_t fetchdat, int decode_delay_offset, int op_32, int exec_delay)
{
    int instr_cycles;
    int latency = 0; /* NOTE(review): assigned below but never read afterwards */

    /* A real FPU op (not FXCH) must first wait out any pending latency on
       the unit or the stack registers it touches. */
    if ((timings[opcode] & PAIR_FPU) && !(deps[opcode] & FPU_FXCH))
        instr_cycles = latency = codegen_fpu_latencies(deps[opcode], fetchdat & 7);
    else {
#if 0
        if (timings[opcode] & FPU_WRITE_ST0)
            fatal("FPU_WRITE_ST0\n");
        if (timings[opcode] & FPU_WRITE_ST1)
            fatal("FPU_WRITE_ST1\n");
        if (timings[opcode] & FPU_WRITE_STREG)
            fatal("FPU_WRITE_STREG\n");*/
#endif
        instr_cycles = 0;
    }

    /* Age the FPU latency model by the time this instruction occupies,
       including any decode delay that isn't hidden. */
    if ((decode_delay + decode_delay_offset) > 0)
        codegen_fpu_latency_clock(decode_delay + decode_delay_offset + instr_cycles);
    else
        codegen_fpu_latency_clock(instr_cycles);

    instr_cycles += COUNT(timings[opcode], deps[opcode], op_32);
    instr_cycles += exec_delay;

    /* Bill the block; decode delay only counts when positive (negative
       values mean decode was fully shadowed by execution). */
    if ((decode_delay + decode_delay_offset) > 0)
        codegen_block_cycles += instr_cycles + decode_delay + decode_delay_offset;
    else
        codegen_block_cycles += instr_cycles;

    /* Next instruction's decode overlaps this one's execution. */
    decode_delay = (-instr_cycles) + 1;

    /* Shift pending result latencies down for stack pops... */
    if (deps[opcode] & FPU_POP) {
        for (uint8_t c = 0; c < 7; c++)
            fpu_st_latency[c] = fpu_st_latency[c + 1];
        fpu_st_latency[7] = 0;
    }
    if (deps[opcode] & FPU_POP2) {
        for (uint8_t c = 0; c < 6; c++)
            fpu_st_latency[c] = fpu_st_latency[c + 2];
        fpu_st_latency[6] = fpu_st_latency[7] = 0;
    }

    if ((timings[opcode] & PAIR_FPU) && !(deps[opcode] & FPU_FXCH)) {
        /* The FPU stays busy until this op's unit latency expires. */
        fpu_latency = FPU_F_LATENCY(timings[opcode]);
    }

    /* ...and up for a push, before recording the new result's latency. */
    if (deps[opcode] & FPU_PUSH) {
        for (uint8_t c = 0; c < 7; c++)
            fpu_st_latency[c + 1] = fpu_st_latency[c];
        fpu_st_latency[0] = 0;
    }
    if (deps[opcode] & FPU_WRITE_ST0) {
#if 0
        if (fpu_st_latency[0])
            fatal("Bad latency ST0\n");*/
#endif
        fpu_st_latency[0] = FPU_RESULT_LATENCY(timings[opcode]);
    }
    if (deps[opcode] & FPU_WRITE_ST1) {
#if 0
        if (fpu_st_latency[1])
            fatal("Bad latency ST1\n");*/
#endif
        fpu_st_latency[1] = FPU_RESULT_LATENCY(timings[opcode]);
    }
    if (deps[opcode] & FPU_WRITE_STREG) {
        int reg = fetchdat & 7;
        /* A pop renumbers the destination register down by one. */
        if (deps[opcode] & FPU_POP)
            reg--;
        if (reg >= 0 && !(reg == 0 && (deps[opcode] & FPU_WRITE_ST0)) && !(reg == 1 && (deps[opcode] & FPU_WRITE_ST1))) {
            fpu_st_latency[reg] = FPU_RESULT_LATENCY(timings[opcode]);
        }
    }
}
/* Model one opcode's timing on the Pentium's dual (U/V) pipelines.
 *
 * First selects the timing/dependency tables for the opcode (honouring the
 * last decoded prefix and the group opcodes whose real operation lives in the
 * ModRM /reg field), then applies the Pentium pairing rules:
 *   - an instruction parked in the U pipe may pair with this one in the V
 *     pipe if both are pairable, have no register dependency, and no decode
 *     delay is pending;
 *   - FXCH pairs only with a preceding PAIR_FX FPU instruction (and is then
 *     effectively free - only the modelled stack latencies are swapped);
 *   - otherwise the parked instruction executes alone first.
 * An address-generation interlock (check_agi) costs one extra cycle.
 */
void
codegen_timing_pentium_opcode(uint8_t opcode, uint32_t fetchdat, int op_32, UNUSED(uint32_t op_pc))
{
    uint64_t *timings;
    uint64_t *deps;
    int mod3 = ((fetchdat & 0xc0) == 0xc0); /* register-form ModRM? */
    int bit8 = !(opcode & 1);               /* even opcodes are the 8-bit forms */
    int agi_stall = 0;

    /* Select the timing/dependency tables; for FPU escapes and ALU groups
       the table index becomes the /reg field (or the full ModRM for the
       register forms of D9/DB). */
    switch (last_prefix) {
        case 0x0f:
            timings = mod3 ? opcode_timings_p6_0f_mod3 : opcode_timings_p6_0f;
            deps    = mod3 ? opcode_deps_0f_mod3 : opcode_deps_0f;
            break;
        case 0xd8:
            timings = mod3 ? opcode_timings_p6_d8_mod3 : opcode_timings_p6_d8;
            deps    = mod3 ? opcode_deps_d8_mod3 : opcode_deps_d8;
            opcode  = (opcode >> 3) & 7;
            break;
        case 0xd9:
            timings = mod3 ? opcode_timings_p6_d9_mod3 : opcode_timings_p6_d9;
            deps    = mod3 ? opcode_deps_d9_mod3 : opcode_deps_d9;
            opcode  = mod3 ? opcode & 0x3f : (opcode >> 3) & 7;
            break;
        case 0xda:
            timings = mod3 ? opcode_timings_p6_da_mod3 : opcode_timings_p6_da;
            deps    = mod3 ? opcode_deps_da_mod3 : opcode_deps_da;
            opcode  = (opcode >> 3) & 7;
            break;
        case 0xdb:
            timings = mod3 ? opcode_timings_p6_db_mod3 : opcode_timings_p6_db;
            deps    = mod3 ? opcode_deps_db_mod3 : opcode_deps_db;
            opcode  = mod3 ? opcode & 0x3f : (opcode >> 3) & 7;
            break;
        case 0xdc:
            timings = mod3 ? opcode_timings_p6_dc_mod3 : opcode_timings_p6_dc;
            deps    = mod3 ? opcode_deps_dc_mod3 : opcode_deps_dc;
            opcode  = (opcode >> 3) & 7;
            break;
        case 0xdd:
            timings = mod3 ? opcode_timings_p6_dd_mod3 : opcode_timings_p6_dd;
            deps    = mod3 ? opcode_deps_dd_mod3 : opcode_deps_dd;
            opcode  = (opcode >> 3) & 7;
            break;
        case 0xde:
            timings = mod3 ? opcode_timings_p6_de_mod3 : opcode_timings_p6_de;
            deps    = mod3 ? opcode_deps_de_mod3 : opcode_deps_de;
            opcode  = (opcode >> 3) & 7;
            break;
        case 0xdf:
            timings = mod3 ? opcode_timings_p6_df_mod3 : opcode_timings_p6_df;
            deps    = mod3 ? opcode_deps_df_mod3 : opcode_deps_df;
            opcode  = (opcode >> 3) & 7;
            break;
        default:
            switch (opcode) {
                case 0x80:
                case 0x82:
                case 0x83:
                    timings = mod3 ? opcode_timings_p6_8x_mod3 : opcode_timings_p6_8x;
                    deps    = mod3 ? opcode_deps_8x_mod3 : opcode_deps_8x;
                    opcode  = (fetchdat >> 3) & 7;
                    break;
                case 0x81:
                    timings = mod3 ? opcode_timings_p6_81_mod3 : opcode_timings_p6_81;
                    deps    = mod3 ? opcode_deps_81_mod3 : opcode_deps_81;
                    opcode  = (fetchdat >> 3) & 7;
                    break;
                case 0xc0:
                case 0xc1:
                case 0xd0:
                case 0xd1:
                    timings = mod3 ? opcode_timings_p6_shift_mod3 : opcode_timings_p6_shift;
                    deps    = mod3 ? opcode_deps_shift_mod3 : opcode_deps_shift;
                    opcode  = (fetchdat >> 3) & 7;
                    break;
                case 0xd2:
                case 0xd3:
                    /* Shift by CL: same timings, but CL becomes a source dependency. */
                    timings = mod3 ? opcode_timings_p6_shift_mod3 : opcode_timings_p6_shift;
                    deps    = mod3 ? opcode_deps_shift_cl_mod3 : opcode_deps_shift_cl;
                    opcode  = (fetchdat >> 3) & 7;
                    break;
                case 0xf6:
                    timings = mod3 ? opcode_timings_p6_f6_mod3 : opcode_timings_p6_f6;
                    deps    = mod3 ? opcode_deps_f6_mod3 : opcode_deps_f6;
                    opcode  = (fetchdat >> 3) & 7;
                    break;
                case 0xf7:
                    timings = mod3 ? opcode_timings_p6_f7_mod3 : opcode_timings_p6_f7;
                    deps    = mod3 ? opcode_deps_f7_mod3 : opcode_deps_f7;
                    opcode  = (fetchdat >> 3) & 7;
                    break;
                case 0xff:
                    timings = mod3 ? opcode_timings_p6_ff_mod3 : opcode_timings_p6_ff;
                    deps    = mod3 ? opcode_deps_ff_mod3 : opcode_deps_ff;
                    opcode  = (fetchdat >> 3) & 7;
                    break;
                default:
                    timings = mod3 ? opcode_timings_p6_mod3 : opcode_timings_p6;
                    deps    = mod3 ? opcode_deps_mod3 : opcode_deps;
                    break;
            }
    }
    if (u_pipe_full) {
        /* An instruction is waiting in the U pipe - try to pair with it. */
        uint8_t regmask = get_srcdep_mask(deps[opcode], fetchdat, bit8, u_pipe_op_32);
        /* FX-class FPU instructions pair only with a following FXCH, and
           FXCH pairs only after an FX-class instruction. */
        if ((u_pipe_timings[u_pipe_opcode] & PAIR_MASK) == PAIR_FX && (timings[opcode] & PAIR_MASK) != PAIR_FXCH)
            goto nopair;
        if ((timings[opcode] & PAIR_MASK) == PAIR_FXCH && (u_pipe_timings[u_pipe_opcode] & PAIR_MASK) != PAIR_FX)
            goto nopair;
        if ((u_pipe_timings[u_pipe_opcode] & PAIR_MASK) == PAIR_FX && (timings[opcode] & PAIR_MASK) == PAIR_FXCH) {
            int temp;
            /* FX + FXCH pair: only the FX instruction's time is charged;
               the FXCH just swaps the modelled ST(0)/ST(reg) latencies. */
            if (check_agi(u_pipe_deps, u_pipe_opcode, u_pipe_fetchdat, u_pipe_op_32))
                agi_stall = 1;
            codegen_instruction(u_pipe_timings, u_pipe_deps, u_pipe_opcode, u_pipe_fetchdat, u_pipe_decode_delay_offset, u_pipe_op_32, agi_stall);
            temp                          = fpu_st_latency[fetchdat & 7];
            fpu_st_latency[fetchdat & 7]  = fpu_st_latency[0];
            fpu_st_latency[0]             = temp;
            u_pipe_full        = 0;
            decode_delay_offset = 0;
            regmask_modified   = u_pipe_regmask;
            addr_regmask       = 0;
            return;
        }
        if ((timings[opcode] & PAIR_V) && !(u_pipe_regmask & regmask) && (decode_delay + decode_delay_offset + u_pipe_decode_delay_offset) <= 0) {
            int has_displacement;
            if (timings[opcode] & CYCLES_HASIMM)
                has_displacement = codegen_timing_has_displacement(fetchdat, op_32);
            else
                has_displacement = 0;
            /* MMX-capable Pentiums can pair displacement+immediate forms but
               not instructions longer than 7 bytes. */
            if (!has_displacement && (!cpu_has_feature(CPU_FEATURE_MMX) || codegen_timing_instr_length(timings[opcode], fetchdat, op_32) <= 7)) {
                int t1 = u_pipe_timings[u_pipe_opcode] & CYCLES_MASK;
                int t2 = timings[opcode] & CYCLES_MASK;
                int t_pair;
                uint64_t temp_timing;
                uint64_t temp_deps = 0;
                if (!(u_pipe_timings[u_pipe_opcode] & PAIR_FPU))
                    t1 &= 3;
                if (!(timings[opcode] & PAIR_FPU))
                    t2 &= 3;
                if (t1 < 0 || t2 < 0 || t1 > CYCLES_BRANCH || t2 > CYCLES_BRANCH)
                    fatal("Pair out of range\n");
                t_pair = pair_timings[t1][t2];
                if (t_pair < 1)
                    fatal("Illegal pair timings : t1=%i t2=%i u_opcode=%02x v_opcode=%02x\n", t1, t2, u_pipe_opcode, opcode);
                /*Instruction can pair with previous*/
                temp_timing = t_pair;
                if (check_agi(deps, opcode, fetchdat, op_32) || check_agi(u_pipe_deps, u_pipe_opcode, u_pipe_fetchdat, u_pipe_op_32))
                    agi_stall = 1;
                /* Charge the combined pair time via a synthetic one-entry table. */
                codegen_instruction(&temp_timing, &temp_deps, 0, 0, 0, 0, agi_stall);
                u_pipe_full        = 0;
                decode_delay_offset = 0;
                regmask_modified   = get_dstdep_mask(deps[opcode], fetchdat, bit8) | u_pipe_regmask;
                addr_regmask       = 0;
                return;
            }
        }
nopair:
        /*Instruction can not pair with previous*/
        /*Run previous now*/
        if (check_agi(u_pipe_deps, u_pipe_opcode, u_pipe_fetchdat, u_pipe_op_32))
            agi_stall = 1;
        codegen_instruction(u_pipe_timings, u_pipe_deps, u_pipe_opcode, u_pipe_fetchdat, u_pipe_decode_delay_offset, u_pipe_op_32, agi_stall);
        u_pipe_full      = 0;
        regmask_modified = u_pipe_regmask;
        addr_regmask     = 0;
    }
    if ((timings[opcode] & PAIR_U) && (decode_delay + decode_delay_offset) <= 0) {
        int has_displacement;
        if (timings[opcode] & CYCLES_HASIMM)
            has_displacement = codegen_timing_has_displacement(fetchdat, op_32);
        else
            has_displacement = 0;
        if ((!has_displacement || cpu_has_feature(CPU_FEATURE_MMX)) && (!cpu_has_feature(CPU_FEATURE_MMX) || codegen_timing_instr_length(timings[opcode], fetchdat, op_32) <= 7)) {
            /*Instruction might pair with next*/
            u_pipe_full    = 1;
            u_pipe_opcode  = opcode;
            u_pipe_timings = timings;
            u_pipe_op_32   = op_32;
            u_pipe_regmask = get_dstdep_mask(deps[opcode], fetchdat, bit8);
            u_pipe_fetchdat = fetchdat;
            u_pipe_decode_delay_offset = decode_delay_offset;
            u_pipe_deps    = deps;
            decode_delay_offset = 0;
            return;
        }
    }
    /*Instruction can not pair and must run now*/
    if (check_agi(deps, opcode, fetchdat, op_32))
        agi_stall = 1;
    codegen_instruction(timings, deps, opcode, fetchdat, decode_delay_offset, op_32, agi_stall);
    decode_delay_offset = 0;
    regmask_modified = get_dstdep_mask(deps[opcode], fetchdat, bit8);
    addr_regmask     = 0;
}
/* End-of-block hook: if an instruction is still parked in the U pipe
   waiting for a pairing partner, charge its cycles now (plus a one-cycle
   AGI stall if applicable) and clear the pipe. */
void
codegen_timing_pentium_block_end(void)
{
    if (!u_pipe_full)
        return;

    /* Flush the pending U-pipe instruction. */
    if (check_agi(u_pipe_deps, u_pipe_opcode, u_pipe_fetchdat, u_pipe_op_32))
        codegen_block_cycles++;
    codegen_block_cycles += COUNT(u_pipe_timings[u_pipe_opcode], u_pipe_deps[u_pipe_opcode], u_pipe_op_32) + decode_delay + decode_delay_offset;
    u_pipe_full = 0;
}
/* Pentium timing model vtable - callbacks invoked by the dynarec timing
   framework. Ordering follows the codegen_timing_t declaration:
   start, prefix, opcode, block_start, block_end, plus one unused slot
   (NOTE(review): last member's role not visible here - see codegen.h). */
codegen_timing_t codegen_timing_pentium = {
    codegen_timing_pentium_start,
    codegen_timing_pentium_prefix,
    codegen_timing_pentium_opcode,
    codegen_timing_pentium_block_start,
    codegen_timing_pentium_block_end,
    NULL
};
``` | /content/code_sandbox/src/cpu/codegen_timing_pentium.c | c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 22,620 |
```objective-c
#define BIAS80 16383 /* exponent bias of the 80-bit x87 extended-precision format */
#define BIAS64 1023  /* exponent bias of the 64-bit IEEE double-precision format */
/* In-place conversion buffer for an 80-bit x87 value: 'begin' holds the
   sign + 15-bit exponent word, 'eind' the 64-bit significand, which is
   also reused to build/read the IEEE double bit pattern via the union. */
typedef struct {
    int16_t begin; /* sign bit (0x8000) and biased exponent (0x7fff) */
    union {
        double   d;  /* significand reinterpreted as a double's bit pattern */
        uint64_t ll; /* raw 64-bit significand (explicit integer bit at bit 63) */
    } eind;
} x87_conv_t;
/* Convert the 80-bit value in *test to an IEEE double (lossy).
 *
 * The 64-bit significand is shifted right by 11 and masked to 52 bits,
 * which drops the explicit integer bit; bit 0x400 (the most significant
 * discarded bit) rounds the result up. The exponent is re-biased from
 * BIAS80 to BIAS64 with its magnitude clamped to 0x3ff; an all-ones
 * 80-bit exponent maps to the double Inf/NaN exponent 0x7ff, an all-zero
 * one to 0. Clobbers test->eind with the assembled double bit pattern.
 */
static __inline double
x87_from80(x87_conv_t *test)
{
    int64_t exp64;
    int64_t blah;
    int64_t exp64final;
    int64_t mant64;
    int64_t sign;
    exp64 = (((test->begin & 0x7fff) - BIAS80));
    blah = ((exp64 > 0) ? exp64 : -exp64) & 0x3ff; /* clamp exponent magnitude */
    exp64final = ((exp64 > 0) ? blah : -blah) + BIAS64;
    mant64 = (test->eind.ll >> 11) & (0xfffffffffffffLL); /* 52-bit mantissa, integer bit dropped */
    sign = (test->begin & 0x8000) ? 1 : 0;
    if ((test->begin & 0x7fff) == 0x7fff) /* Inf/NaN */
        exp64final = 0x7ff;
    if ((test->begin & 0x7fff) == 0) /* zero/denormal */
        exp64final = 0;
    if (test->eind.ll & 0x400) /* round up on the highest discarded bit */
        mant64++;
    /* NOTE(review): (sign << 63) left-shifts a signed 1 into the sign bit,
       which is formally UB in C - works on the supported compilers. */
    test->eind.ll = (sign << 63) | (exp64final << 52) | mant64;
    return test->eind.d;
}
/* Convert an IEEE double to 80-bit extended precision in *test.
 *
 * The 52-bit mantissa is shifted left by 11 into the 64-bit significand
 * and the explicit integer bit (bit 63) is set for Inf/NaN and for any
 * nonzero finite value; the exponent is re-biased from BIAS64 to BIAS80.
 * Zero is passed through with exponent/integer bit untouched.
 */
static __inline void
x87_to80(double d, x87_conv_t *test)
{
    int64_t sign80;
    int64_t exp80;
    int64_t exp80final;
    int64_t mant80;
    int64_t mant80final;
    test->eind.d = d;
    sign80 = (test->eind.ll & (0x8000000000000000LL)) ? 1 : 0;
    exp80 = test->eind.ll & (0x7ff0000000000000LL);
    exp80final = (exp80 >> 52);
    mant80 = test->eind.ll & (0x000fffffffffffffLL);
    mant80final = (mant80 << 11);
    if (exp80final == 0x7ff) /*Infinity / Nan*/
    {
        exp80final = 0x7fff;
        mant80final |= (0x8000000000000000LL); /* explicit integer bit */
    } else if (d != 0) { /* Zero is a special case */
        /* Elvira wants the 8 and tcalc doesn't */
        mant80final |= (0x8000000000000000LL);
        /* Ca-cyber doesn't like this when result is zero. */
        exp80final += (BIAS80 - BIAS64);
    }
    test->begin = (((int16_t) sign80) << 15) | (int16_t) exp80final;
    test->eind.ll = mant80final;
}
``` | /content/code_sandbox/src/cpu/x87_ops_conv.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 657 |
```objective-c
/* MOVS family (A4/A5): copy a byte/word/dword from ea_seg:(E)SI to
   ES:(E)DI, then step both index registers by the element size in the
   direction given by EFLAGS.DF. Both the source read and destination
   write are pre-translated (do_mmut_*) so a page fault on either side
   aborts before any memory is modified. Returns 1 on abort, 0 on success. */

/* A4 MOVSB, 16-bit addressing. */
static int
opMOVSB_a16(uint32_t fetchdat)
{
    uint8_t temp;
    addr64 = addr64_2 = 0x00000000;
    SEG_CHECK_READ(cpu_state.ea_seg);
    SEG_CHECK_WRITE(&cpu_state.seg_es);
    CHECK_READ(cpu_state.ea_seg, SI, SI);
    CHECK_WRITE(&cpu_state.seg_es, DI, DI);
    high_page = 0;
    /* Translate both addresses before touching memory. */
    do_mmut_rb(cpu_state.ea_seg->base, SI, &addr64);
    if (cpu_state.abrt)
        return 1;
    do_mmut_wb(es, DI, &addr64_2);
    if (cpu_state.abrt)
        return 1;
    temp = readmemb_n(cpu_state.ea_seg->base, SI, addr64);
    if (cpu_state.abrt)
        return 1;
    writememb_n(es, DI, addr64_2, temp);
    if (cpu_state.abrt)
        return 1;
    if (cpu_state.flags & D_FLAG) {
        DI--;
        SI--;
    } else {
        DI++;
        SI++;
    }
    CLOCK_CYCLES(7);
    PREFETCH_RUN(7, 1, -1, 1, 0, 1, 0, 0);
    return 0;
}

/* A4 MOVSB, 32-bit addressing. */
static int
opMOVSB_a32(uint32_t fetchdat)
{
    uint8_t temp;
    addr64 = addr64_2 = 0x00000000;
    SEG_CHECK_READ(cpu_state.ea_seg);
    SEG_CHECK_WRITE(&cpu_state.seg_es);
    CHECK_READ(cpu_state.ea_seg, ESI, ESI);
    CHECK_WRITE(&cpu_state.seg_es, EDI, EDI);
    high_page = 0;
    do_mmut_rb(cpu_state.ea_seg->base, ESI, &addr64);
    if (cpu_state.abrt)
        return 1;
    do_mmut_wb(es, EDI, &addr64_2);
    if (cpu_state.abrt)
        return 1;
    temp = readmemb_n(cpu_state.ea_seg->base, ESI, addr64);
    if (cpu_state.abrt)
        return 1;
    writememb_n(es, EDI, addr64_2, temp);
    if (cpu_state.abrt)
        return 1;
    if (cpu_state.flags & D_FLAG) {
        EDI--;
        ESI--;
    } else {
        EDI++;
        ESI++;
    }
    CLOCK_CYCLES(7);
    PREFETCH_RUN(7, 1, -1, 1, 0, 1, 0, 1);
    return 0;
}

/* A5 MOVSW, 16-bit addressing (per-byte translations in addr64a[]). */
static int
opMOVSW_a16(uint32_t fetchdat)
{
    uint16_t temp;
    addr64a[0] = addr64a[1] = 0x00000000;
    addr64a_2[0] = addr64a_2[1] = 0x00000000;
    SEG_CHECK_READ(cpu_state.ea_seg);
    SEG_CHECK_WRITE(&cpu_state.seg_es);
    CHECK_READ(cpu_state.ea_seg, SI, SI + 1UL);
    CHECK_WRITE(&cpu_state.seg_es, DI, DI + 1UL);
    high_page = 0;
    do_mmut_rw(cpu_state.ea_seg->base, SI, addr64a);
    if (cpu_state.abrt)
        return 1;
    do_mmut_ww(es, DI, addr64a_2);
    if (cpu_state.abrt)
        return 1;
    temp = readmemw_n(cpu_state.ea_seg->base, SI, addr64a);
    if (cpu_state.abrt)
        return 1;
    writememw_n(es, DI, addr64a_2, temp);
    if (cpu_state.abrt)
        return 1;
    if (cpu_state.flags & D_FLAG) {
        DI -= 2;
        SI -= 2;
    } else {
        DI += 2;
        SI += 2;
    }
    CLOCK_CYCLES(7);
    PREFETCH_RUN(7, 1, -1, 1, 0, 1, 0, 0);
    return 0;
}

/* A5 MOVSW, 32-bit addressing. */
static int
opMOVSW_a32(uint32_t fetchdat)
{
    uint16_t temp;
    addr64a[0] = addr64a[1] = 0x00000000;
    addr64a_2[0] = addr64a_2[1] = 0x00000000;
    SEG_CHECK_READ(cpu_state.ea_seg);
    SEG_CHECK_WRITE(&cpu_state.seg_es);
    CHECK_READ(cpu_state.ea_seg, ESI, ESI + 1UL);
    CHECK_WRITE(&cpu_state.seg_es, EDI, EDI + 1UL);
    high_page = 0;
    do_mmut_rw(cpu_state.ea_seg->base, ESI, addr64a);
    if (cpu_state.abrt)
        return 1;
    do_mmut_ww(es, EDI, addr64a_2);
    if (cpu_state.abrt)
        return 1;
    temp = readmemw_n(cpu_state.ea_seg->base, ESI, addr64a);
    if (cpu_state.abrt)
        return 1;
    writememw_n(es, EDI, addr64a_2, temp);
    if (cpu_state.abrt)
        return 1;
    if (cpu_state.flags & D_FLAG) {
        EDI -= 2;
        ESI -= 2;
    } else {
        EDI += 2;
        ESI += 2;
    }
    CLOCK_CYCLES(7);
    PREFETCH_RUN(7, 1, -1, 1, 0, 1, 0, 1);
    return 0;
}

/* A5 MOVSD, 16-bit addressing. */
static int
opMOVSL_a16(uint32_t fetchdat)
{
    uint32_t temp;
    addr64a[0] = addr64a[1] = addr64a[2] = addr64a[3] = 0x00000000;
    addr64a_2[0] = addr64a_2[1] = addr64a_2[2] = addr64a_2[3] = 0x00000000;
    SEG_CHECK_READ(cpu_state.ea_seg);
    SEG_CHECK_WRITE(&cpu_state.seg_es);
    CHECK_READ(cpu_state.ea_seg, SI, SI + 3UL);
    CHECK_WRITE(&cpu_state.seg_es, DI, DI + 3UL);
    high_page = 0;
    do_mmut_rl(cpu_state.ea_seg->base, SI, addr64a);
    if (cpu_state.abrt)
        return 1;
    do_mmut_wl(es, DI, addr64a_2);
    if (cpu_state.abrt)
        return 1;
    temp = readmeml_n(cpu_state.ea_seg->base, SI, addr64a);
    if (cpu_state.abrt)
        return 1;
    writememl_n(es, DI, addr64a_2, temp);
    if (cpu_state.abrt)
        return 1;
    if (cpu_state.flags & D_FLAG) {
        DI -= 4;
        SI -= 4;
    } else {
        DI += 4;
        SI += 4;
    }
    CLOCK_CYCLES(7);
    PREFETCH_RUN(7, 1, -1, 0, 1, 0, 1, 0);
    return 0;
}

/* A5 MOVSD, 32-bit addressing. */
static int
opMOVSL_a32(uint32_t fetchdat)
{
    uint32_t temp;
    addr64a[0] = addr64a[1] = addr64a[2] = addr64a[3] = 0x00000000;
    addr64a_2[0] = addr64a_2[1] = addr64a_2[2] = addr64a_2[3] = 0x00000000;
    SEG_CHECK_READ(cpu_state.ea_seg);
    SEG_CHECK_WRITE(&cpu_state.seg_es);
    CHECK_READ(cpu_state.ea_seg, ESI, ESI + 3UL);
    CHECK_WRITE(&cpu_state.seg_es, EDI, EDI + 3UL);
    high_page = 0;
    do_mmut_rl(cpu_state.ea_seg->base, ESI, addr64a);
    if (cpu_state.abrt)
        return 1;
    do_mmut_wl(es, EDI, addr64a_2);
    if (cpu_state.abrt)
        return 1;
    temp = readmeml_n(cpu_state.ea_seg->base, ESI, addr64a);
    if (cpu_state.abrt)
        return 1;
    writememl_n(es, EDI, addr64a_2, temp);
    if (cpu_state.abrt)
        return 1;
    if (cpu_state.flags & D_FLAG) {
        EDI -= 4;
        ESI -= 4;
    } else {
        EDI += 4;
        ESI += 4;
    }
    CLOCK_CYCLES(7);
    PREFETCH_RUN(7, 1, -1, 0, 1, 0, 1, 1);
    return 0;
}
/* CMPS family (A6/A7): compare the element at ea_seg:(E)SI with the one at
   ES:(E)DI (setsub* computes src - dst and sets the arithmetic flags),
   then step both index registers by EFLAGS.DF. Both reads are
   pre-translated so a fault on either aborts the whole instruction.
   NOTE(review): the uncached/readlookup2 save-restore around the second
   read appears to keep the fast read-lookup entry coherent when the ES:DI
   page was marked uncached by do_mmut_rb2 - confirm against mem.c. */

/* A6 CMPSB, 16-bit addressing. */
static int
opCMPSB_a16(uint32_t fetchdat)
{
    uint8_t src;
    uint8_t dst;
    addr64 = addr64_2 = 0x00000000;
    SEG_CHECK_READ(cpu_state.ea_seg);
    SEG_CHECK_READ(&cpu_state.seg_es);
    CHECK_READ(cpu_state.ea_seg, SI, SI);
    CHECK_READ(&cpu_state.seg_es, DI, DI);
    high_page = uncached = 0;
    do_mmut_rb(cpu_state.ea_seg->base, SI, &addr64);
    if (cpu_state.abrt)
        return 1;
    do_mmut_rb2(es, DI, &addr64_2);
    if (cpu_state.abrt)
        return 1;
    src = readmemb_n(cpu_state.ea_seg->base, SI, addr64);
    if (cpu_state.abrt)
        return 1;
    if (uncached)
        readlookup2[(uint32_t) (es + DI) >> 12] = old_rl2;
    dst = readmemb_n(es, DI, addr64_2);
    if (cpu_state.abrt)
        return 1;
    if (uncached)
        readlookup2[(uint32_t) (es + DI) >> 12] = (uintptr_t) LOOKUP_INV;
    setsub8(src, dst);
    if (cpu_state.flags & D_FLAG) {
        DI--;
        SI--;
    } else {
        DI++;
        SI++;
    }
    CLOCK_CYCLES((is486) ? 8 : 10);
    PREFETCH_RUN((is486) ? 8 : 10, 1, -1, 2, 0, 0, 0, 0);
    return 0;
}

/* A6 CMPSB, 32-bit addressing. */
static int
opCMPSB_a32(uint32_t fetchdat)
{
    uint8_t src;
    uint8_t dst;
    addr64 = addr64_2 = 0x00000000;
    SEG_CHECK_READ(cpu_state.ea_seg);
    SEG_CHECK_READ(&cpu_state.seg_es);
    CHECK_READ(cpu_state.ea_seg, ESI, ESI);
    CHECK_READ(&cpu_state.seg_es, EDI, EDI);
    high_page = uncached = 0;
    do_mmut_rb(cpu_state.ea_seg->base, ESI, &addr64);
    if (cpu_state.abrt)
        return 1;
    do_mmut_rb2(es, EDI, &addr64_2);
    if (cpu_state.abrt)
        return 1;
    src = readmemb_n(cpu_state.ea_seg->base, ESI, addr64);
    if (cpu_state.abrt)
        return 1;
    if (uncached)
        readlookup2[(uint32_t) (es + EDI) >> 12] = old_rl2;
    dst = readmemb_n(es, EDI, addr64_2);
    if (cpu_state.abrt)
        return 1;
    if (uncached)
        readlookup2[(uint32_t) (es + EDI) >> 12] = (uintptr_t) LOOKUP_INV;
    setsub8(src, dst);
    if (cpu_state.flags & D_FLAG) {
        EDI--;
        ESI--;
    } else {
        EDI++;
        ESI++;
    }
    CLOCK_CYCLES((is486) ? 8 : 10);
    PREFETCH_RUN((is486) ? 8 : 10, 1, -1, 2, 0, 0, 0, 1);
    return 0;
}

/* A7 CMPSW, 16-bit addressing. */
static int
opCMPSW_a16(uint32_t fetchdat)
{
    uint16_t src;
    uint16_t dst;
    addr64a[0] = addr64a[1] = 0x00000000;
    addr64a_2[0] = addr64a_2[1] = 0x00000000;
    SEG_CHECK_READ(cpu_state.ea_seg);
    SEG_CHECK_READ(&cpu_state.seg_es);
    CHECK_READ(cpu_state.ea_seg, SI, SI + 1UL);
    CHECK_READ(&cpu_state.seg_es, DI, DI + 1UL);
    high_page = uncached = 0;
    do_mmut_rw(cpu_state.ea_seg->base, SI, addr64a);
    if (cpu_state.abrt)
        return 1;
    do_mmut_rw2(es, DI, addr64a_2);
    if (cpu_state.abrt)
        return 1;
    src = readmemw_n(cpu_state.ea_seg->base, SI, addr64a);
    if (cpu_state.abrt)
        return 1;
    if (uncached)
        readlookup2[(uint32_t) (es + DI) >> 12] = old_rl2;
    dst = readmemw_n(es, DI, addr64a_2);
    if (cpu_state.abrt)
        return 1;
    if (uncached)
        readlookup2[(uint32_t) (es + DI) >> 12] = (uintptr_t) LOOKUP_INV;
    setsub16(src, dst);
    if (cpu_state.flags & D_FLAG) {
        DI -= 2;
        SI -= 2;
    } else {
        DI += 2;
        SI += 2;
    }
    CLOCK_CYCLES((is486) ? 8 : 10);
    PREFETCH_RUN((is486) ? 8 : 10, 1, -1, 2, 0, 0, 0, 0);
    return 0;
}

/* A7 CMPSW, 32-bit addressing. */
static int
opCMPSW_a32(uint32_t fetchdat)
{
    uint16_t src;
    uint16_t dst;
    addr64a[0] = addr64a[1] = 0x00000000;
    addr64a_2[0] = addr64a_2[1] = 0x00000000;
    SEG_CHECK_READ(cpu_state.ea_seg);
    SEG_CHECK_READ(&cpu_state.seg_es);
    CHECK_READ(cpu_state.ea_seg, ESI, ESI + 1UL);
    CHECK_READ(&cpu_state.seg_es, EDI, EDI + 1UL);
    high_page = uncached = 0;
    do_mmut_rw(cpu_state.ea_seg->base, ESI, addr64a);
    if (cpu_state.abrt)
        return 1;
    do_mmut_rw2(es, EDI, addr64a_2);
    if (cpu_state.abrt)
        return 1;
    src = readmemw_n(cpu_state.ea_seg->base, ESI, addr64a);
    if (cpu_state.abrt)
        return 1;
    if (uncached)
        readlookup2[(uint32_t) (es + EDI) >> 12] = old_rl2;
    dst = readmemw_n(es, EDI, addr64a_2);
    if (cpu_state.abrt)
        return 1;
    if (uncached)
        readlookup2[(uint32_t) (es + EDI) >> 12] = (uintptr_t) LOOKUP_INV;
    setsub16(src, dst);
    if (cpu_state.flags & D_FLAG) {
        EDI -= 2;
        ESI -= 2;
    } else {
        EDI += 2;
        ESI += 2;
    }
    CLOCK_CYCLES((is486) ? 8 : 10);
    PREFETCH_RUN((is486) ? 8 : 10, 1, -1, 2, 0, 0, 0, 1);
    return 0;
}

/* A7 CMPSD, 16-bit addressing. */
static int
opCMPSL_a16(uint32_t fetchdat)
{
    uint32_t src;
    uint32_t dst;
    addr64a[0] = addr64a[1] = addr64a[2] = addr64a[3] = 0x00000000;
    addr64a_2[0] = addr64a_2[1] = addr64a_2[2] = addr64a_2[3] = 0x00000000;
    SEG_CHECK_READ(cpu_state.ea_seg);
    SEG_CHECK_READ(&cpu_state.seg_es);
    CHECK_READ(cpu_state.ea_seg, SI, SI + 3UL);
    CHECK_READ(&cpu_state.seg_es, DI, DI + 3UL);
    high_page = uncached = 0;
    do_mmut_rl(cpu_state.ea_seg->base, SI, addr64a);
    if (cpu_state.abrt)
        return 1;
    do_mmut_rl2(es, DI, addr64a_2);
    if (cpu_state.abrt)
        return 1;
    src = readmeml_n(cpu_state.ea_seg->base, SI, addr64a);
    if (cpu_state.abrt)
        return 1;
    if (uncached)
        readlookup2[(uint32_t) (es + DI) >> 12] = old_rl2;
    dst = readmeml_n(es, DI, addr64a_2);
    if (cpu_state.abrt)
        return 1;
    if (uncached)
        readlookup2[(uint32_t) (es + DI) >> 12] = (uintptr_t) LOOKUP_INV;
    setsub32(src, dst);
    if (cpu_state.flags & D_FLAG) {
        DI -= 4;
        SI -= 4;
    } else {
        DI += 4;
        SI += 4;
    }
    CLOCK_CYCLES((is486) ? 8 : 10);
    PREFETCH_RUN((is486) ? 8 : 10, 1, -1, 0, 2, 0, 0, 0);
    return 0;
}

/* A7 CMPSD, 32-bit addressing. */
static int
opCMPSL_a32(uint32_t fetchdat)
{
    uint32_t src;
    uint32_t dst;
    addr64a[0] = addr64a[1] = addr64a[2] = addr64a[3] = 0x00000000;
    addr64a_2[0] = addr64a_2[1] = addr64a_2[2] = addr64a_2[3] = 0x00000000;
    SEG_CHECK_READ(cpu_state.ea_seg);
    SEG_CHECK_READ(&cpu_state.seg_es);
    CHECK_READ(cpu_state.ea_seg, ESI, ESI + 3UL);
    CHECK_READ(&cpu_state.seg_es, EDI, EDI + 3UL);
    high_page = uncached = 0;
    do_mmut_rl(cpu_state.ea_seg->base, ESI, addr64a);
    if (cpu_state.abrt)
        return 1;
    do_mmut_rl2(es, EDI, addr64a_2);
    if (cpu_state.abrt)
        return 1;
    src = readmeml_n(cpu_state.ea_seg->base, ESI, addr64a);
    if (cpu_state.abrt)
        return 1;
    if (uncached)
        readlookup2[(uint32_t) (es + EDI) >> 12] = old_rl2;
    dst = readmeml_n(es, EDI, addr64a_2);
    if (cpu_state.abrt)
        return 1;
    if (uncached)
        readlookup2[(uint32_t) (es + EDI) >> 12] = (uintptr_t) LOOKUP_INV;
    setsub32(src, dst);
    if (cpu_state.flags & D_FLAG) {
        EDI -= 4;
        ESI -= 4;
    } else {
        EDI += 4;
        ESI += 4;
    }
    CLOCK_CYCLES((is486) ? 8 : 10);
    PREFETCH_RUN((is486) ? 8 : 10, 1, -1, 0, 2, 0, 0, 1);
    return 0;
}
/* STOS family (AA/AB): store AL/AX/EAX at ES:(E)DI and step (E)DI by the
   element size in the direction given by EFLAGS.DF. Single access, so no
   pre-translation is needed. Returns 1 on abort, 0 on success. */

/* AA STOSB, 16-bit addressing. */
static int
opSTOSB_a16(uint32_t fetchdat)
{
    SEG_CHECK_WRITE(&cpu_state.seg_es);
    CHECK_WRITE(&cpu_state.seg_es, DI, DI);
    writememb(es, DI, AL);
    if (cpu_state.abrt)
        return 1;
    if (cpu_state.flags & D_FLAG)
        DI--;
    else
        DI++;
    CLOCK_CYCLES(4);
    PREFETCH_RUN(4, 1, -1, 0, 0, 1, 0, 0);
    return 0;
}

/* AA STOSB, 32-bit addressing. */
static int
opSTOSB_a32(uint32_t fetchdat)
{
    SEG_CHECK_WRITE(&cpu_state.seg_es);
    CHECK_WRITE(&cpu_state.seg_es, EDI, EDI);
    writememb(es, EDI, AL);
    if (cpu_state.abrt)
        return 1;
    if (cpu_state.flags & D_FLAG)
        EDI--;
    else
        EDI++;
    CLOCK_CYCLES(4);
    PREFETCH_RUN(4, 1, -1, 0, 0, 1, 0, 1);
    return 0;
}

/* AB STOSW, 16-bit addressing. */
static int
opSTOSW_a16(uint32_t fetchdat)
{
    SEG_CHECK_WRITE(&cpu_state.seg_es);
    CHECK_WRITE(&cpu_state.seg_es, DI, DI + 1UL);
    writememw(es, DI, AX);
    if (cpu_state.abrt)
        return 1;
    if (cpu_state.flags & D_FLAG)
        DI -= 2;
    else
        DI += 2;
    CLOCK_CYCLES(4);
    PREFETCH_RUN(4, 1, -1, 0, 0, 1, 0, 0);
    return 0;
}

/* AB STOSW, 32-bit addressing. */
static int
opSTOSW_a32(uint32_t fetchdat)
{
    SEG_CHECK_WRITE(&cpu_state.seg_es);
    CHECK_WRITE(&cpu_state.seg_es, EDI, EDI + 1UL);
    writememw(es, EDI, AX);
    if (cpu_state.abrt)
        return 1;
    if (cpu_state.flags & D_FLAG)
        EDI -= 2;
    else
        EDI += 2;
    CLOCK_CYCLES(4);
    PREFETCH_RUN(4, 1, -1, 0, 0, 1, 0, 1);
    return 0;
}

/* AB STOSD, 16-bit addressing. */
static int
opSTOSL_a16(uint32_t fetchdat)
{
    SEG_CHECK_WRITE(&cpu_state.seg_es);
    CHECK_WRITE(&cpu_state.seg_es, DI, DI + 3UL);
    writememl(es, DI, EAX);
    if (cpu_state.abrt)
        return 1;
    if (cpu_state.flags & D_FLAG)
        DI -= 4;
    else
        DI += 4;
    CLOCK_CYCLES(4);
    PREFETCH_RUN(4, 1, -1, 0, 0, 0, 1, 0);
    return 0;
}

/* AB STOSD, 32-bit addressing. */
static int
opSTOSL_a32(uint32_t fetchdat)
{
    SEG_CHECK_WRITE(&cpu_state.seg_es);
    CHECK_WRITE(&cpu_state.seg_es, EDI, EDI + 3UL);
    writememl(es, EDI, EAX);
    if (cpu_state.abrt)
        return 1;
    if (cpu_state.flags & D_FLAG)
        EDI -= 4;
    else
        EDI += 4;
    CLOCK_CYCLES(4);
    PREFETCH_RUN(4, 1, -1, 0, 0, 0, 1, 1);
    return 0;
}
/* LODS family (AC/AD): load AL/AX/EAX from ea_seg:(E)SI and step (E)SI by
   the element size in the direction given by EFLAGS.DF. The accumulator is
   only written after the read is known not to have aborted. */

/* AC LODSB, 16-bit addressing. */
static int
opLODSB_a16(uint32_t fetchdat)
{
    uint8_t temp;
    SEG_CHECK_READ(cpu_state.ea_seg);
    CHECK_READ(cpu_state.ea_seg, SI, SI);
    temp = readmemb(cpu_state.ea_seg->base, SI);
    if (cpu_state.abrt)
        return 1;
    AL = temp;
    if (cpu_state.flags & D_FLAG)
        SI--;
    else
        SI++;
    CLOCK_CYCLES(5);
    PREFETCH_RUN(5, 1, -1, 1, 0, 0, 0, 0);
    return 0;
}

/* AC LODSB, 32-bit addressing. */
static int
opLODSB_a32(uint32_t fetchdat)
{
    uint8_t temp;
    SEG_CHECK_READ(cpu_state.ea_seg);
    CHECK_READ(cpu_state.ea_seg, ESI, ESI);
    temp = readmemb(cpu_state.ea_seg->base, ESI);
    if (cpu_state.abrt)
        return 1;
    AL = temp;
    if (cpu_state.flags & D_FLAG)
        ESI--;
    else
        ESI++;
    CLOCK_CYCLES(5);
    PREFETCH_RUN(5, 1, -1, 1, 0, 0, 0, 1);
    return 0;
}

/* AD LODSW, 16-bit addressing. */
static int
opLODSW_a16(uint32_t fetchdat)
{
    uint16_t temp;
    SEG_CHECK_READ(cpu_state.ea_seg);
    CHECK_READ(cpu_state.ea_seg, SI, SI + 1UL);
    temp = readmemw(cpu_state.ea_seg->base, SI);
    if (cpu_state.abrt)
        return 1;
    AX = temp;
    if (cpu_state.flags & D_FLAG)
        SI -= 2;
    else
        SI += 2;
    CLOCK_CYCLES(5);
    PREFETCH_RUN(5, 1, -1, 1, 0, 0, 0, 0);
    return 0;
}

/* AD LODSW, 32-bit addressing. */
static int
opLODSW_a32(uint32_t fetchdat)
{
    uint16_t temp;
    SEG_CHECK_READ(cpu_state.ea_seg);
    CHECK_READ(cpu_state.ea_seg, ESI, ESI + 1UL);
    temp = readmemw(cpu_state.ea_seg->base, ESI);
    if (cpu_state.abrt)
        return 1;
    AX = temp;
    if (cpu_state.flags & D_FLAG)
        ESI -= 2;
    else
        ESI += 2;
    CLOCK_CYCLES(5);
    PREFETCH_RUN(5, 1, -1, 1, 0, 0, 0, 1);
    return 0;
}

/* AD LODSD, 16-bit addressing. */
static int
opLODSL_a16(uint32_t fetchdat)
{
    uint32_t temp;
    SEG_CHECK_READ(cpu_state.ea_seg);
    CHECK_READ(cpu_state.ea_seg, SI, SI + 3UL);
    temp = readmeml(cpu_state.ea_seg->base, SI);
    if (cpu_state.abrt)
        return 1;
    EAX = temp;
    if (cpu_state.flags & D_FLAG)
        SI -= 4;
    else
        SI += 4;
    CLOCK_CYCLES(5);
    PREFETCH_RUN(5, 1, -1, 0, 1, 0, 0, 0);
    return 0;
}

/* AD LODSD, 32-bit addressing. */
static int
opLODSL_a32(uint32_t fetchdat)
{
    uint32_t temp;
    SEG_CHECK_READ(cpu_state.ea_seg);
    CHECK_READ(cpu_state.ea_seg, ESI, ESI + 3UL);
    temp = readmeml(cpu_state.ea_seg->base, ESI);
    if (cpu_state.abrt)
        return 1;
    EAX = temp;
    if (cpu_state.flags & D_FLAG)
        ESI -= 4;
    else
        ESI += 4;
    CLOCK_CYCLES(5);
    PREFETCH_RUN(5, 1, -1, 0, 1, 0, 0, 1);
    return 0;
}
/* SCAS family (AE/AF): compare AL/AX/EAX against the element at ES:(E)DI
   (setsub* computes accumulator - memory and sets the arithmetic flags),
   then step (E)DI by the element size in the direction of EFLAGS.DF. */

/* AE SCASB, 16-bit addressing. */
static int
opSCASB_a16(uint32_t fetchdat)
{
    uint8_t temp;
    SEG_CHECK_READ(&cpu_state.seg_es);
    CHECK_READ(&cpu_state.seg_es, DI, DI);
    temp = readmemb(es, DI);
    if (cpu_state.abrt)
        return 1;
    setsub8(AL, temp);
    if (cpu_state.flags & D_FLAG)
        DI--;
    else
        DI++;
    CLOCK_CYCLES(7);
    PREFETCH_RUN(7, 1, -1, 1, 0, 0, 0, 0);
    return 0;
}

/* AE SCASB, 32-bit addressing. */
static int
opSCASB_a32(uint32_t fetchdat)
{
    uint8_t temp;
    SEG_CHECK_READ(&cpu_state.seg_es);
    CHECK_READ(&cpu_state.seg_es, EDI, EDI);
    temp = readmemb(es, EDI);
    if (cpu_state.abrt)
        return 1;
    setsub8(AL, temp);
    if (cpu_state.flags & D_FLAG)
        EDI--;
    else
        EDI++;
    CLOCK_CYCLES(7);
    PREFETCH_RUN(7, 1, -1, 1, 0, 0, 0, 1);
    return 0;
}

/* AF SCASW, 16-bit addressing. */
static int
opSCASW_a16(uint32_t fetchdat)
{
    uint16_t temp;
    SEG_CHECK_READ(&cpu_state.seg_es);
    CHECK_READ(&cpu_state.seg_es, DI, DI + 1UL);
    temp = readmemw(es, DI);
    if (cpu_state.abrt)
        return 1;
    setsub16(AX, temp);
    if (cpu_state.flags & D_FLAG)
        DI -= 2;
    else
        DI += 2;
    CLOCK_CYCLES(7);
    PREFETCH_RUN(7, 1, -1, 1, 0, 0, 0, 0);
    return 0;
}

/* AF SCASW, 32-bit addressing. */
static int
opSCASW_a32(uint32_t fetchdat)
{
    uint16_t temp;
    SEG_CHECK_READ(&cpu_state.seg_es);
    CHECK_READ(&cpu_state.seg_es, EDI, EDI + 1UL);
    temp = readmemw(es, EDI);
    if (cpu_state.abrt)
        return 1;
    setsub16(AX, temp);
    if (cpu_state.flags & D_FLAG)
        EDI -= 2;
    else
        EDI += 2;
    CLOCK_CYCLES(7);
    PREFETCH_RUN(7, 1, -1, 1, 0, 0, 0, 1);
    return 0;
}

/* AF SCASD, 16-bit addressing. */
static int
opSCASL_a16(uint32_t fetchdat)
{
    uint32_t temp;
    SEG_CHECK_READ(&cpu_state.seg_es);
    CHECK_READ(&cpu_state.seg_es, DI, DI + 3UL);
    temp = readmeml(es, DI);
    if (cpu_state.abrt)
        return 1;
    setsub32(EAX, temp);
    if (cpu_state.flags & D_FLAG)
        DI -= 4;
    else
        DI += 4;
    CLOCK_CYCLES(7);
    PREFETCH_RUN(7, 1, -1, 0, 1, 0, 0, 0);
    return 0;
}

/* AF SCASD, 32-bit addressing. */
static int
opSCASL_a32(uint32_t fetchdat)
{
    uint32_t temp;
    SEG_CHECK_READ(&cpu_state.seg_es);
    CHECK_READ(&cpu_state.seg_es, EDI, EDI + 3UL);
    temp = readmeml(es, EDI);
    if (cpu_state.abrt)
        return 1;
    setsub32(EAX, temp);
    if (cpu_state.flags & D_FLAG)
        EDI -= 4;
    else
        EDI += 4;
    CLOCK_CYCLES(7);
    PREFETCH_RUN(7, 1, -1, 0, 1, 0, 0, 1);
    return 0;
}
/* INS family (6C/6D): read from I/O port DX into ES:(E)DI, then step (E)DI
   by EFLAGS.DF. check_io_perm validates the TSS I/O permission bitmap,
   and the destination write is pre-translated (do_mmut_w*) before the port
   is actually read, so a page fault cannot occur after the I/O side effect. */

/* 6C INSB, 16-bit addressing. */
static int
opINSB_a16(uint32_t fetchdat)
{
    uint8_t temp;
    addr64 = 0x00000000;
    SEG_CHECK_WRITE(&cpu_state.seg_es);
    check_io_perm(DX, 1);
    CHECK_WRITE(&cpu_state.seg_es, DI, DI);
    high_page = 0;
    do_mmut_wb(es, DI, &addr64);
    if (cpu_state.abrt)
        return 1;
    temp = inb(DX);
    writememb_n(es, DI, addr64, temp);
    if (cpu_state.abrt)
        return 1;
    if (cpu_state.flags & D_FLAG)
        DI--;
    else
        DI++;
    CLOCK_CYCLES(15);
    PREFETCH_RUN(15, 1, -1, 1, 0, 1, 0, 0);
    return 0;
}

/* 6C INSB, 32-bit addressing. */
static int
opINSB_a32(uint32_t fetchdat)
{
    uint8_t temp;
    addr64 = 0x00000000;
    SEG_CHECK_WRITE(&cpu_state.seg_es);
    check_io_perm(DX, 1);
    high_page = 0;
    CHECK_WRITE(&cpu_state.seg_es, EDI, EDI);
    do_mmut_wb(es, EDI, &addr64);
    if (cpu_state.abrt)
        return 1;
    temp = inb(DX);
    writememb_n(es, EDI, addr64, temp);
    if (cpu_state.abrt)
        return 1;
    if (cpu_state.flags & D_FLAG)
        EDI--;
    else
        EDI++;
    CLOCK_CYCLES(15);
    PREFETCH_RUN(15, 1, -1, 1, 0, 1, 0, 1);
    return 0;
}

/* 6D INSW, 16-bit addressing. */
static int
opINSW_a16(uint32_t fetchdat)
{
    uint16_t temp;
    addr64a[0] = addr64a[1] = 0x00000000;
    SEG_CHECK_WRITE(&cpu_state.seg_es);
    check_io_perm(DX, 2);
    CHECK_WRITE(&cpu_state.seg_es, DI, DI + 1UL);
    high_page = 0;
    do_mmut_ww(es, DI, addr64a);
    if (cpu_state.abrt)
        return 1;
    temp = inw(DX);
    writememw_n(es, DI, addr64a, temp);
    if (cpu_state.abrt)
        return 1;
    if (cpu_state.flags & D_FLAG)
        DI -= 2;
    else
        DI += 2;
    CLOCK_CYCLES(15);
    PREFETCH_RUN(15, 1, -1, 1, 0, 1, 0, 0);
    return 0;
}

/* 6D INSW, 32-bit addressing. */
static int
opINSW_a32(uint32_t fetchdat)
{
    uint16_t temp;
    addr64a[0] = addr64a[1] = 0x00000000;
    SEG_CHECK_WRITE(&cpu_state.seg_es);
    high_page = 0;
    check_io_perm(DX, 2);
    CHECK_WRITE(&cpu_state.seg_es, EDI, EDI + 1UL);
    do_mmut_ww(es, EDI, addr64a);
    if (cpu_state.abrt)
        return 1;
    temp = inw(DX);
    writememw_n(es, EDI, addr64a, temp);
    if (cpu_state.abrt)
        return 1;
    if (cpu_state.flags & D_FLAG)
        EDI -= 2;
    else
        EDI += 2;
    CLOCK_CYCLES(15);
    PREFETCH_RUN(15, 1, -1, 1, 0, 1, 0, 1);
    return 0;
}

/* 6D INSD, 16-bit addressing. */
static int
opINSL_a16(uint32_t fetchdat)
{
    uint32_t temp;
    addr64a[0] = addr64a[1] = addr64a[2] = addr64a[3] = 0x00000000;
    SEG_CHECK_WRITE(&cpu_state.seg_es);
    check_io_perm(DX, 4);
    CHECK_WRITE(&cpu_state.seg_es, DI, DI + 3UL);
    high_page = 0;
    do_mmut_wl(es, DI, addr64a);
    if (cpu_state.abrt)
        return 1;
    temp = inl(DX);
    writememl_n(es, DI, addr64a, temp);
    if (cpu_state.abrt)
        return 1;
    if (cpu_state.flags & D_FLAG)
        DI -= 4;
    else
        DI += 4;
    CLOCK_CYCLES(15);
    PREFETCH_RUN(15, 1, -1, 0, 1, 0, 1, 0);
    return 0;
}
/* 6D INSD, 32-bit addressing: read a dword from I/O port DX into ES:EDI,
   then step EDI by EFLAGS.DF. The destination write is pre-translated
   before the port read so a page fault cannot occur after the I/O side
   effect. Returns 1 on abort, 0 on success. */
static int
opINSL_a32(uint32_t fetchdat)
{
    uint32_t temp;
    addr64a[0] = addr64a[1] = addr64a[2] = addr64a[3] = 0x00000000;
    SEG_CHECK_WRITE(&cpu_state.seg_es);
    check_io_perm(DX, 4);
    CHECK_WRITE(&cpu_state.seg_es, EDI, EDI + 3UL);
    high_page = 0;
    /* Fix: translate with the full 32-bit EDI (this handler previously
       passed the 16-bit DI here, translating the wrong page whenever
       EDI > 0xFFFF while the actual write below used EDI). */
    do_mmut_wl(es, EDI, addr64a);
    if (cpu_state.abrt)
        return 1;
    temp = inl(DX);
    writememl_n(es, EDI, addr64a, temp);
    if (cpu_state.abrt)
        return 1;
    if (cpu_state.flags & D_FLAG)
        EDI -= 4;
    else
        EDI += 4;
    CLOCK_CYCLES(15);
    PREFETCH_RUN(15, 1, -1, 0, 1, 0, 1, 1);
    return 0;
}
/* OUTS family (6E/6F): write the element at ea_seg:(E)SI to I/O port DX,
   then step (E)SI by EFLAGS.DF. The memory read happens (and may abort)
   before the I/O permission check and the port write.
   NOTE(review): the dword handlers pass EDX to outl() while the byte/word
   ones pass DX - presumably outl() truncates the port to 16 bits; confirm. */

/* 6E OUTSB, 16-bit addressing. */
static int
opOUTSB_a16(uint32_t fetchdat)
{
    uint8_t temp;
    SEG_CHECK_READ(cpu_state.ea_seg);
    CHECK_READ(cpu_state.ea_seg, SI, SI);
    temp = readmemb(cpu_state.ea_seg->base, SI);
    if (cpu_state.abrt)
        return 1;
    check_io_perm(DX, 1);
    if (cpu_state.flags & D_FLAG)
        SI--;
    else
        SI++;
    outb(DX, temp);
    CLOCK_CYCLES(14);
    PREFETCH_RUN(14, 1, -1, 1, 0, 1, 0, 0);
    return 0;
}

/* 6E OUTSB, 32-bit addressing. */
static int
opOUTSB_a32(uint32_t fetchdat)
{
    uint8_t temp;
    SEG_CHECK_READ(cpu_state.ea_seg);
    CHECK_READ(cpu_state.ea_seg, ESI, ESI);
    temp = readmemb(cpu_state.ea_seg->base, ESI);
    if (cpu_state.abrt)
        return 1;
    check_io_perm(DX, 1);
    if (cpu_state.flags & D_FLAG)
        ESI--;
    else
        ESI++;
    outb(DX, temp);
    CLOCK_CYCLES(14);
    PREFETCH_RUN(14, 1, -1, 1, 0, 1, 0, 1);
    return 0;
}

/* 6F OUTSW, 16-bit addressing. */
static int
opOUTSW_a16(uint32_t fetchdat)
{
    uint16_t temp;
    SEG_CHECK_READ(cpu_state.ea_seg);
    CHECK_READ(cpu_state.ea_seg, SI, SI + 1UL);
    temp = readmemw(cpu_state.ea_seg->base, SI);
    if (cpu_state.abrt)
        return 1;
    check_io_perm(DX, 2);
    if (cpu_state.flags & D_FLAG)
        SI -= 2;
    else
        SI += 2;
    outw(DX, temp);
    CLOCK_CYCLES(14);
    PREFETCH_RUN(14, 1, -1, 1, 0, 1, 0, 0);
    return 0;
}

/* 6F OUTSW, 32-bit addressing. */
static int
opOUTSW_a32(uint32_t fetchdat)
{
    uint16_t temp;
    SEG_CHECK_READ(cpu_state.ea_seg);
    CHECK_READ(cpu_state.ea_seg, ESI, ESI + 1UL);
    temp = readmemw(cpu_state.ea_seg->base, ESI);
    if (cpu_state.abrt)
        return 1;
    check_io_perm(DX, 2);
    if (cpu_state.flags & D_FLAG)
        ESI -= 2;
    else
        ESI += 2;
    outw(DX, temp);
    CLOCK_CYCLES(14);
    PREFETCH_RUN(14, 1, -1, 1, 0, 1, 0, 1);
    return 0;
}

/* 6F OUTSD, 16-bit addressing. */
static int
opOUTSL_a16(uint32_t fetchdat)
{
    uint32_t temp;
    SEG_CHECK_READ(cpu_state.ea_seg);
    CHECK_READ(cpu_state.ea_seg, SI, SI + 3UL);
    temp = readmeml(cpu_state.ea_seg->base, SI);
    if (cpu_state.abrt)
        return 1;
    check_io_perm(DX, 4);
    if (cpu_state.flags & D_FLAG)
        SI -= 4;
    else
        SI += 4;
    outl(EDX, temp);
    CLOCK_CYCLES(14);
    PREFETCH_RUN(14, 1, -1, 0, 1, 0, 1, 0);
    return 0;
}

/* 6F OUTSD, 32-bit addressing. */
static int
opOUTSL_a32(uint32_t fetchdat)
{
    uint32_t temp;
    SEG_CHECK_READ(cpu_state.ea_seg);
    CHECK_READ(cpu_state.ea_seg, ESI, ESI + 3UL);
    temp = readmeml(cpu_state.ea_seg->base, ESI);
    if (cpu_state.abrt)
        return 1;
    check_io_perm(DX, 4);
    if (cpu_state.flags & D_FLAG)
        ESI -= 4;
    else
        ESI += 4;
    outl(EDX, temp);
    CLOCK_CYCLES(14);
    PREFETCH_RUN(14, 1, -1, 0, 1, 0, 1, 1);
    return 0;
}
``` | /content/code_sandbox/src/cpu/x86_ops_string.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 9,526 |
```objective-c
/* Generator for the softfloat-based FCOM/FCOMP memory-operand handlers.
 * Expands to sf_FCOM<name>_a<a_size> and sf_FCOMP<name>_a<a_size>, which
 * compare ST(0) against a memory operand:
 *   - optype/load_var/rw  : C type of the operand and how it is fetched;
 *   - use_var             : expression converting it to floatx80;
 *   - is_nan              : expression detecting an unordered compare, which
 *                           raises the invalid-operation flag;
 *   - cycle_postfix       : suffix selecting the x87_timings/concurrency field.
 * An empty ST(0) signals stack underflow and sets C0|C2|C3 (unordered).
 * FCOMP additionally pops: on underflow only if the invalid-operation
 * exception is masked, otherwise only when FPU_exception() reports no
 * unmasked exception. */
#define cmp_FPU(name, optype, a_size, load_var, rw, use_var, is_nan, cycle_postfix) \
    static int sf_FCOM##name##_a##a_size(uint32_t fetchdat) \
    { \
        floatx80 a; \
        int rc; \
        struct softfloat_status_t status; \
        optype temp; \
        FP_ENTER(); \
        FPU_check_pending_exceptions(); \
        fetch_ea_##a_size(fetchdat); \
        SEG_CHECK_READ(cpu_state.ea_seg); \
        load_var = rw; \
        if (cpu_state.abrt) \
            return 1; \
        clear_C1(); \
        if (IS_TAG_EMPTY(0)) { \
            FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0); \
            setcc(FPU_SW_C0 | FPU_SW_C2 | FPU_SW_C3); \
            goto next_ins; \
        } \
        status = i387cw_to_softfloat_status_word(i387_get_control_word()); \
        a = FPU_read_regi(0); \
        if (is_nan) { \
            rc = softfloat_relation_unordered; \
            softfloat_raiseFlags(&status, softfloat_flag_invalid); \
        } else { \
            rc = extF80_compare_normal(a, use_var, &status); \
        } \
        setcc(FPU_status_word_flags_fpu_compare(rc)); \
        FPU_exception(fetchdat, status.softfloat_exceptionFlags, 0); \
        \
next_ins: \
        CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fcom##cycle_postfix) : ((x87_timings.fcom##cycle_postfix) * cpu_multi)); \
        CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fcom##cycle_postfix) : ((x87_concurrency.fcom##cycle_postfix) * cpu_multi)); \
        return 0; \
    } \
    static int sf_FCOMP##name##_a##a_size(uint32_t fetchdat) \
    { \
        floatx80 a; \
        int rc; \
        struct softfloat_status_t status; \
        optype temp; \
        FP_ENTER(); \
        FPU_check_pending_exceptions(); \
        fetch_ea_##a_size(fetchdat); \
        SEG_CHECK_READ(cpu_state.ea_seg); \
        load_var = rw; \
        if (cpu_state.abrt) \
            return 1; \
        clear_C1(); \
        if (IS_TAG_EMPTY(0)) { \
            FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0); \
            setcc(FPU_SW_C0 | FPU_SW_C2 | FPU_SW_C3); \
            if (is_IA_masked()) \
                FPU_pop(); \
            \
            goto next_ins; \
        } \
        status = i387cw_to_softfloat_status_word(i387_get_control_word()); \
        a = FPU_read_regi(0); \
        if (is_nan) { \
            rc = softfloat_relation_unordered; \
            softfloat_raiseFlags(&status, softfloat_flag_invalid); \
        } else { \
            rc = extF80_compare_normal(a, use_var, &status); \
        } \
        setcc(FPU_status_word_flags_fpu_compare(rc)); \
        if (!FPU_exception(fetchdat, status.softfloat_exceptionFlags, 0)) \
            FPU_pop(); \
        \
next_ins: \
        CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fcom##cycle_postfix) : ((x87_timings.fcom##cycle_postfix) * cpu_multi)); \
        CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fcom##cycle_postfix) : ((x87_concurrency.fcom##cycle_postfix) * cpu_multi)); \
        return 0; \
    }
// clang-format off
/* Instantiate FCOM/FCOMP for each memory operand type; the 32-bit address
   size variants are absent on the 8087 (no 32-bit addressing). */
/* float32 operand (FCOM/FCOMP m32fp) */
cmp_FPU(s, float32, 16, temp, geteal(), f32_to_extF80(temp, &status), extF80_isNaN(a) || extF80_isUnsupported(a) || f32_isNaN(temp), _32)
#ifndef FPU_8087
cmp_FPU(s, float32, 32, temp, geteal(), f32_to_extF80(temp, &status), extF80_isNaN(a) || extF80_isUnsupported(a) || f32_isNaN(temp), _32)
#endif
/* float64 operand (FCOM/FCOMP m64fp) */
cmp_FPU(d, float64, 16, temp, geteaq(), f64_to_extF80(temp, &status), extF80_isNaN(a) || extF80_isUnsupported(a) || f64_isNaN(temp), _64)
#ifndef FPU_8087
cmp_FPU(d, float64, 32, temp, geteaq(), f64_to_extF80(temp, &status), extF80_isNaN(a) || extF80_isUnsupported(a) || f64_isNaN(temp), _64)
#endif
/* int16 operand (FICOM/FICOMP m16int) - integers can never be NaN */
cmp_FPU(iw, int16_t, 16, temp, (int16_t)geteaw(), i32_to_extF80((int32_t)temp), 0, _i16)
#ifndef FPU_8087
cmp_FPU(iw, int16_t, 32, temp, (int16_t)geteaw(), i32_to_extF80((int32_t)temp), 0, _i16)
#endif
/* int32 operand (FICOM/FICOMP m32int) */
cmp_FPU(il, int32_t, 16, temp, (int32_t)geteal(), i32_to_extF80(temp), 0, _i32)
#ifndef FPU_8087
cmp_FPU(il, int32_t, 32, temp, (int32_t)geteal(), i32_to_extF80(temp), 0, _i32)
#endif
// clang-format on
/* FCOM ST(0),ST(i): ordered compare, result in C0/C2/C3.
   Empty operand -> stack underflow; masked response reports "unordered". */
static int
sf_FCOM_sti(uint32_t fetchdat)
{
    floatx80 a;
    floatx80 b;
    struct softfloat_status_t status;
    int rc;
    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++; /* consume the ModR/M byte */
    clear_C1();
    if (IS_TAG_EMPTY(0) || IS_TAG_EMPTY(fetchdat & 7)) {
        FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0);
        setcc(FPU_SW_C0 | FPU_SW_C2 | FPU_SW_C3); /* "unordered" */
        goto next_ins;
    }
    status = i387cw_to_softfloat_status_word(i387_get_control_word());
    a = FPU_read_regi(0);
    b = FPU_read_regi(fetchdat & 7); /* low 3 ModR/M bits select ST(i) */
    rc = extF80_compare_normal(a, b, &status);
    setcc(FPU_status_word_flags_fpu_compare(rc));
    FPU_exception(fetchdat, status.softfloat_exceptionFlags, 0);
next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fcom) : (x87_timings.fcom * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fcom) : (x87_concurrency.fcom * cpu_multi));
    return 0;
}
/* FCOMP ST(0),ST(i): as FCOM but pops the stack afterwards.
   On masked stack underflow the pop still happens. */
static int
sf_FCOMP_sti(uint32_t fetchdat)
{
    floatx80 a;
    floatx80 b;
    struct softfloat_status_t status;
    int rc;
    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++;
    clear_C1();
    if (IS_TAG_EMPTY(0) || IS_TAG_EMPTY(fetchdat & 7)) {
        FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0);
        setcc(FPU_SW_C0 | FPU_SW_C2 | FPU_SW_C3);
        if (is_IA_masked()) {
            FPU_pop(); /* masked response: pop anyway */
        }
        goto next_ins;
    }
    status = i387cw_to_softfloat_status_word(i387_get_control_word());
    a = FPU_read_regi(0);
    b = FPU_read_regi(fetchdat & 7);
    rc = extF80_compare_normal(a, b, &status);
    setcc(FPU_status_word_flags_fpu_compare(rc));
    if (!FPU_exception(fetchdat, status.softfloat_exceptionFlags, 0)) {
        FPU_pop(); /* pop only when no unmasked exception was raised */
    }
next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fcom) : (x87_timings.fcom * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fcom) : (x87_concurrency.fcom * cpu_multi));
    return 0;
}
/* FCOMPP: compare ST(0) with ST(1), then pop the stack twice. */
static int
sf_FCOMPP(uint32_t fetchdat)
{
    floatx80 a;
    floatx80 b;
    struct softfloat_status_t status;
    int rc;
    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++;
    clear_C1();
    if (IS_TAG_EMPTY(0) || IS_TAG_EMPTY(1)) {
        FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0);
        setcc(FPU_SW_C0 | FPU_SW_C2 | FPU_SW_C3);
        if (is_IA_masked()) {
            FPU_pop(); /* masked response still performs both pops */
            FPU_pop();
        }
        goto next_ins;
    }
    status = i387cw_to_softfloat_status_word(i387_get_control_word());
    a = FPU_read_regi(0);
    b = FPU_read_regi(1);
    rc = extF80_compare_normal(a, b, &status);
    setcc(FPU_status_word_flags_fpu_compare(rc));
    if (!FPU_exception(fetchdat, status.softfloat_exceptionFlags, 0)) {
        FPU_pop();
        FPU_pop();
    }
next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fcom) : (x87_timings.fcom * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fcom) : (x87_concurrency.fcom * cpu_multi));
    return 0;
}
#ifndef FPU_8087
/* FUCOMPP: unordered (quiet) compare of ST(0) with ST(1), pop twice.
   Unlike FCOMPP, a QNaN operand does not raise invalid-operation. */
static int
sf_FUCOMPP(uint32_t fetchdat)
{
    floatx80 a;
    floatx80 b;
    struct softfloat_status_t status;
    int rc;
    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++;
    clear_C1();
    if (IS_TAG_EMPTY(0) || IS_TAG_EMPTY(1)) {
        FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0);
        setcc(FPU_SW_C0 | FPU_SW_C2 | FPU_SW_C3);
        if (is_IA_masked()) {
            FPU_pop();
            FPU_pop();
        }
        goto next_ins;
    }
    status = i387cw_to_softfloat_status_word(i387_get_control_word());
    a = FPU_read_regi(0);
    b = FPU_read_regi(1);
    rc = extF80_compare_quiet(a, b, &status); /* quiet: QNaNs don't fault */
    setcc(FPU_status_word_flags_fpu_compare(rc));
    if (!FPU_exception(fetchdat, status.softfloat_exceptionFlags, 0)) {
        FPU_pop();
        FPU_pop();
    }
next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fucom) : (x87_timings.fucom * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fucom) : (x87_concurrency.fucom * cpu_multi));
    return 0;
}
#ifndef OPS_286_386
/* FCOMI ST(0),ST(i): ordered compare whose result goes to the EFLAGS
   ZF/PF/CF bits instead of the FPU condition codes (P6+ instruction). */
static int
sf_FCOMI_st0_stj(uint32_t fetchdat)
{
    floatx80 a;
    floatx80 b;
    struct softfloat_status_t status;
    int rc;
    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++;
    flags_rebuild(); /* materialize lazy flags before editing them */
    clear_C1();
    if (IS_TAG_EMPTY(0) || IS_TAG_EMPTY(fetchdat & 7)) {
        FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0);
        cpu_state.flags |= (Z_FLAG | P_FLAG | C_FLAG); /* "unordered" in EFLAGS */
        goto next_ins;
    }
    status = i387cw_to_softfloat_status_word(i387_get_control_word());
    a = FPU_read_regi(0);
    b = FPU_read_regi(fetchdat & 7);
    rc = extF80_compare_normal(a, b, &status);
    FPU_write_eflags_fpu_compare(rc);
    FPU_exception(fetchdat, status.softfloat_exceptionFlags, 0);
next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fcom) : (x87_timings.fcom * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fcom) : (x87_concurrency.fcom * cpu_multi));
    return 0;
}
/* FCOMIP ST(0),ST(i): as FCOMI but pops the stack afterwards. */
static int
sf_FCOMIP_st0_stj(uint32_t fetchdat)
{
    floatx80 a;
    floatx80 b;
    struct softfloat_status_t status;
    int rc;
    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++;
    flags_rebuild();
    clear_C1();
    if (IS_TAG_EMPTY(0) || IS_TAG_EMPTY(fetchdat & 7)) {
        FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0);
        cpu_state.flags |= (Z_FLAG | P_FLAG | C_FLAG);
        if (is_IA_masked()) {
            FPU_pop(); /* masked response: pop anyway */
        }
        goto next_ins;
    }
    status = i387cw_to_softfloat_status_word(i387_get_control_word());
    a = FPU_read_regi(0);
    b = FPU_read_regi(fetchdat & 7);
    rc = extF80_compare_normal(a, b, &status);
    FPU_write_eflags_fpu_compare(rc);
    if (!FPU_exception(fetchdat, status.softfloat_exceptionFlags, 0)) {
        FPU_pop();
    }
next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fcom) : (x87_timings.fcom * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fcom) : (x87_concurrency.fcom * cpu_multi));
    return 0;
}
#endif
/* FUCOM ST(0),ST(i): quiet compare into C0/C2/C3 (QNaNs don't fault). */
static int
sf_FUCOM_sti(uint32_t fetchdat)
{
    floatx80 a;
    floatx80 b;
    struct softfloat_status_t status;
    int rc;
    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++;
    clear_C1();
    if (IS_TAG_EMPTY(0) || IS_TAG_EMPTY(fetchdat & 7)) {
        FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0);
        setcc(FPU_SW_C0 | FPU_SW_C2 | FPU_SW_C3);
        goto next_ins;
    }
    status = i387cw_to_softfloat_status_word(i387_get_control_word());
    a = FPU_read_regi(0);
    b = FPU_read_regi(fetchdat & 7);
    rc = extF80_compare_quiet(a, b, &status);
    setcc(FPU_status_word_flags_fpu_compare(rc));
    FPU_exception(fetchdat, status.softfloat_exceptionFlags, 0);
next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fucom) : (x87_timings.fucom * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fucom) : (x87_concurrency.fucom * cpu_multi));
    return 0;
}
/* FUCOMP ST(0),ST(i): quiet compare, then pop the stack. */
static int
sf_FUCOMP_sti(uint32_t fetchdat)
{
    floatx80 a;
    floatx80 b;
    struct softfloat_status_t status;
    int rc;
    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++;
    clear_C1();
    if (IS_TAG_EMPTY(0) || IS_TAG_EMPTY(fetchdat & 7)) {
        FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0);
        setcc(FPU_SW_C0 | FPU_SW_C2 | FPU_SW_C3);
        if (is_IA_masked())
            FPU_pop(); /* masked response: pop anyway */
        goto next_ins;
    }
    status = i387cw_to_softfloat_status_word(i387_get_control_word());
    a = FPU_read_regi(0);
    b = FPU_read_regi(fetchdat & 7);
    rc = extF80_compare_quiet(a, b, &status);
    setcc(FPU_status_word_flags_fpu_compare(rc));
    if (!FPU_exception(fetchdat, status.softfloat_exceptionFlags, 0))
        FPU_pop();
next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fucom) : (x87_timings.fucom * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fucom) : (x87_concurrency.fucom * cpu_multi));
    return 0;
}
# ifndef OPS_286_386
/* FUCOMI ST(0),ST(i): quiet compare with the result written to EFLAGS. */
static int
sf_FUCOMI_st0_stj(uint32_t fetchdat)
{
    floatx80 a;
    floatx80 b;
    struct softfloat_status_t status;
    int rc;
    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++;
    flags_rebuild(); /* materialize lazy flags before editing them */
    clear_C1();
    if (IS_TAG_EMPTY(0) || IS_TAG_EMPTY(fetchdat & 7)) {
        FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0);
        cpu_state.flags |= (Z_FLAG | P_FLAG | C_FLAG); /* "unordered" */
        goto next_ins;
    }
    status = i387cw_to_softfloat_status_word(i387_get_control_word());
    a = FPU_read_regi(0);
    b = FPU_read_regi(fetchdat & 7);
    rc = extF80_compare_quiet(a, b, &status);
    FPU_write_eflags_fpu_compare(rc);
    FPU_exception(fetchdat, status.softfloat_exceptionFlags, 0);
next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fucom) : (x87_timings.fucom * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fucom) : (x87_concurrency.fucom * cpu_multi));
    return 0;
}
/* FUCOMIP ST(0),ST(i): as FUCOMI but pops the stack afterwards. */
static int
sf_FUCOMIP_st0_stj(uint32_t fetchdat)
{
    floatx80 a;
    floatx80 b;
    struct softfloat_status_t status;
    int rc;
    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++;
    flags_rebuild();
    clear_C1();
    if (IS_TAG_EMPTY(0) || IS_TAG_EMPTY(fetchdat & 7)) {
        FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0);
        cpu_state.flags |= (Z_FLAG | P_FLAG | C_FLAG);
        if (is_IA_masked())
            FPU_pop(); /* masked response: pop anyway */
        goto next_ins;
    }
    status = i387cw_to_softfloat_status_word(i387_get_control_word());
    a = FPU_read_regi(0);
    b = FPU_read_regi(fetchdat & 7);
    rc = extF80_compare_quiet(a, b, &status);
    FPU_write_eflags_fpu_compare(rc);
    if (!FPU_exception(fetchdat, status.softfloat_exceptionFlags, 0))
        FPU_pop();
next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fucom) : (x87_timings.fucom * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fucom) : (x87_concurrency.fucom * cpu_multi));
    return 0;
}
#endif
#endif
/* FTST: ordered compare of ST(0) against +0.0, result in C0/C2/C3.
   Uses the "normal" compare, so a QNaN in ST(0) raises invalid-operation. */
static int
sf_FTST(uint32_t fetchdat)
{
    const floatx80 Const_Z = packFloatx80(0, 0x0000, 0); /* +0.0 */
    struct softfloat_status_t status;
    int rc;
    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++;
    clear_C1();
    if (IS_TAG_EMPTY(0)) {
        FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0);
        setcc(FPU_SW_C0 | FPU_SW_C2 | FPU_SW_C3); /* "unordered" */
    } else {
        status = i387cw_to_softfloat_status_word(i387_get_control_word());
        rc = extF80_compare_normal(FPU_read_regi(0), Const_Z, &status);
        setcc(FPU_status_word_flags_fpu_compare(rc));
        FPU_exception(fetchdat, status.softfloat_exceptionFlags, 0);
    }
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.ftst) : (x87_timings.ftst * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.ftst) : (x87_concurrency.ftst * cpu_multi));
    return 0;
}
/* FXAM: classify the value in ST(0) into the C3/C2/C0 condition codes
   (empty, zero, NaN, infinity, denormal, or normal) and mirror the sign
   of ST(0) into C1. Never raises an exception. */
static int
sf_FXAM(uint32_t fetchdat)
{
    floatx80 reg;
    int sign;
    softfloat_class_t aClass;
    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++;
    reg = FPU_read_regi(0);
    sign = extF80_sign(reg);
    /*
     * Examine the contents of the ST(0) register and sets the condition
     * code flags C0, C2 and C3 in the FPU status word to indicate the
     * class of value or number in the register.
     */
    if (IS_TAG_EMPTY(0)) {
        setcc(FPU_SW_C0 | FPU_SW_C1 | FPU_SW_C3);
    } else {
        aClass = extF80_class(reg);
        switch (aClass) {
            case softfloat_zero:
                setcc(FPU_SW_C1 | FPU_SW_C3);
                break;
            case softfloat_SNaN:
            case softfloat_QNaN:
                /* unsupported (pseudo-NaN etc.) encodings are reported
                   as "unsupported", genuine NaNs as NaN */
                if (extF80_isUnsupported(reg))
                    setcc(FPU_SW_C1);
                else
                    setcc(FPU_SW_C0 | FPU_SW_C1);
                break;
            case softfloat_negative_inf:
            case softfloat_positive_inf:
                setcc(FPU_SW_C0 | FPU_SW_C1 | FPU_SW_C2);
                break;
            case softfloat_denormal:
                setcc(FPU_SW_C1 | FPU_SW_C2 | FPU_SW_C3);
                break;
            case softfloat_normalized:
                setcc(FPU_SW_C1 | FPU_SW_C2);
                break;
            default:
                /* All enumerators are covered above; keep -Wswitch quiet
                   and leave the condition codes untouched if the
                   classifier ever grows a new class. */
                break;
        }
    }
    /*
     * The C1 flag is set to the sign of the value in ST(0), regardless
     * of whether the register is empty or full.
     */
    if (!sign)
        clear_C1();
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fxam) : (x87_timings.fxam * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fxam) : (x87_concurrency.fxam * cpu_multi));
    return 0;
}
``` | /content/code_sandbox/src/cpu/x87_ops_sf_compare.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 5,462 |
```objective-c
/* FXCH ST(0),ST(i): exchange the two registers. If either is empty a
   stack-underflow exception is raised; the masked response substitutes
   the default QNaN for the empty side(s) and still performs the swap. */
static int
sf_FXCH_sti(uint32_t fetchdat)
{
    const floatx80 floatx80_default_nan = packFloatx80(0, floatx80_default_nan_exp, floatx80_default_nan_fraction);
    floatx80 st0_reg;
    floatx80 sti_reg;
    int st0_tag;
    int sti_tag;
    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++;
    st0_tag = FPU_gettagi(0);
    sti_tag = FPU_gettagi(fetchdat & 7);
    st0_reg = FPU_read_regi(0);
    sti_reg = FPU_read_regi(fetchdat & 7);
    clear_C1();
    if ((st0_tag == X87_TAG_EMPTY) || (sti_tag == X87_TAG_EMPTY)) {
        FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0);
        if (is_IA_masked()) {
            /* Masked response */
            if (st0_tag == X87_TAG_EMPTY)
                st0_reg = floatx80_default_nan;
            if (sti_tag == X87_TAG_EMPTY)
                sti_reg = floatx80_default_nan;
        } else
            goto next_ins; /* unmasked: leave both registers untouched */
    }
    FPU_save_regi(st0_reg, fetchdat & 7);
    FPU_save_regi(sti_reg, 0);
next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fxch) : (x87_timings.fxch * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fxch) : (x87_concurrency.fxch * cpu_multi));
    return 0;
}
/* FCHS: flip the sign of ST(0). Empty ST(0) raises stack underflow. */
static int
sf_FCHS(uint32_t fetchdat)
{
    floatx80 negated;
    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++;
    if (!IS_TAG_EMPTY(0)) {
        clear_C1();
        negated = floatx80_chs(FPU_read_regi(0));
        FPU_save_regi(negated, 0);
    } else
        FPU_stack_underflow(fetchdat, 0, 0);
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fchs) : (x87_timings.fchs * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fchs) : (x87_concurrency.fchs * cpu_multi));
    return 0;
}
/* FABS: clear the sign of ST(0). Empty ST(0) raises stack underflow. */
static int
sf_FABS(uint32_t fetchdat)
{
    floatx80 absolute;
    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++;
    if (!IS_TAG_EMPTY(0)) {
        clear_C1();
        absolute = floatx80_abs(FPU_read_regi(0));
        FPU_save_regi(absolute, 0);
    } else
        FPU_stack_underflow(fetchdat, 0, 0);
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fabs) : (x87_timings.fabs * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fabs) : (x87_concurrency.fabs * cpu_multi));
    return 0;
}
/* FDECSTP: rotate the top-of-stack pointer down by one; tags and register
   contents are untouched. */
static int
sf_FDECSTP(uint32_t fetchdat)
{
    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++;
    clear_C1();
    /* Decrement modulo 8; adding 7 avoids a transiently negative value. */
    fpu_state.tos = (fpu_state.tos + 7) & 7;
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fincdecstp) : (x87_timings.fincdecstp * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fincdecstp) : (x87_concurrency.fincdecstp * cpu_multi));
    return 0;
}
/* FINCSTP: rotate the top-of-stack pointer up by one; tags and register
   contents are untouched. */
static int
sf_FINCSTP(uint32_t fetchdat)
{
    FP_ENTER();
    FPU_check_pending_exceptions();
    clear_C1();
    cpu_state.pc++;
    /* Increment modulo 8. */
    fpu_state.tos = (fpu_state.tos + 1) & 7;
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fincdecstp) : (x87_timings.fincdecstp * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fincdecstp) : (x87_concurrency.fincdecstp * cpu_multi));
    return 0;
}
/* FFREE ST(i): mark the register empty without moving the stack pointer
   or disturbing its contents. */
static int
sf_FFREE_sti(uint32_t fetchdat)
{
    FP_ENTER();
    FPU_check_pending_exceptions();
    clear_C1();
    cpu_state.pc++;
    FPU_settagi(X87_TAG_EMPTY, fetchdat & 7);
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.ffree) : (x87_timings.ffree * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.ffree) : (x87_concurrency.ffree * cpu_multi));
    return 0;
}
/* FFREEP ST(i) (undocumented): mark ST(i) empty, then pop the stack. */
static int
sf_FFREEP_sti(uint32_t fetchdat)
{
    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++;
    clear_C1();
    FPU_settagi(X87_TAG_EMPTY, fetchdat & 7);
    /* Only pop once the tag update is known to have completed. */
    if (cpu_state.abrt)
        return 1;
    FPU_pop();
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.ffree) : (x87_timings.ffree * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.ffree) : (x87_concurrency.ffree * cpu_multi));
    return 0;
}
``` | /content/code_sandbox/src/cpu/x87_ops_sf_misc.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 1,376 |
```objective-c
/* A fast way to find out whether x is one of RC_DOWN or RC_CHOP
   (and not one of RC_RND or RC_UP).
   Works because the RC_DOWN bit of the rounding-control field is set in
   exactly the round-down and chop (truncate) encodings, so masking the
   control word with FPU_CW_RC and then FPU_RC_DOWN is non-zero only for
   those two modes. Used to bias rounded constants downward.
*/
#define DOWN_OR_CHOP() (fpu_state.cwd & FPU_CW_RC & FPU_RC_DOWN)
/* FLDL2T: push log2(10). Unlike the other constants (which are biased
   downward under round-down/chop), l2t is biased upward only when the
   rounding control is round-up - matching documented x87 behaviour. */
static int
sf_FLDL2T(uint32_t fetchdat)
{
    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++;
    clear_C1();
    if (!IS_TAG_EMPTY(-1)) /* slot above TOS occupied -> overflow */
        FPU_stack_overflow(fetchdat);
    else {
        FPU_push();
        FPU_save_regi(FPU_round_const(Const_L2T, (fpu_state.cwd & FPU_CW_RC) == FPU_RC_UP), 0);
    }
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fld_const) : (x87_timings.fld_const * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fld_const) : (x87_concurrency.fld_const * cpu_multi));
    return 0;
}
/* FLDL2E: push log2(e), biased downward when rounding is down/chop. */
static int
sf_FLDL2E(uint32_t fetchdat)
{
    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++;
    clear_C1();
    if (IS_TAG_EMPTY(-1)) {
        /* A free slot exists above the top of stack: push the constant. */
        FPU_push();
        FPU_save_regi(FPU_round_const(Const_L2E, DOWN_OR_CHOP() ? -1 : 0), 0);
    } else
        FPU_stack_overflow(fetchdat);
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fld_const) : (x87_timings.fld_const * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fld_const) : (x87_concurrency.fld_const * cpu_multi));
    return 0;
}
/* FLDPI: push pi, biased downward when rounding is down/chop. */
static int
sf_FLDPI(uint32_t fetchdat)
{
    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++;
    clear_C1();
    if (IS_TAG_EMPTY(-1)) {
        /* Free slot above the top of stack: push the constant. */
        FPU_push();
        FPU_save_regi(FPU_round_const(Const_PI, DOWN_OR_CHOP() ? -1 : 0), 0);
    } else
        FPU_stack_overflow(fetchdat);
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fld_const) : (x87_timings.fld_const * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fld_const) : (x87_concurrency.fld_const * cpu_multi));
    return 0;
}
/* FLDLG2: push log10(2) (Const_LG2), biased downward under down/chop
   rounding. NOTE(review): the function name sf_FLDEG2 does not match the
   instruction mnemonic FLDLG2; it is kept as-is because opcode tables
   elsewhere reference it. */
static int
sf_FLDEG2(uint32_t fetchdat)
{
    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++;
    clear_C1();
    if (!IS_TAG_EMPTY(-1)) /* slot above TOS occupied -> overflow */
        FPU_stack_overflow(fetchdat);
    else {
        FPU_push();
        FPU_save_regi(FPU_round_const(Const_LG2, DOWN_OR_CHOP() ? -1 : 0), 0);
    }
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fld_const) : (x87_timings.fld_const * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fld_const) : (x87_concurrency.fld_const * cpu_multi));
    return 0;
}
/* FLDLN2: push ln(2), biased downward when rounding is down/chop. */
static int
sf_FLDLN2(uint32_t fetchdat)
{
    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++;
    clear_C1();
    if (IS_TAG_EMPTY(-1)) {
        /* Free slot above the top of stack: push the constant. */
        FPU_push();
        FPU_save_regi(FPU_round_const(Const_LN2, DOWN_OR_CHOP() ? -1 : 0), 0);
    } else
        FPU_stack_overflow(fetchdat);
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fld_const) : (x87_timings.fld_const * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fld_const) : (x87_concurrency.fld_const * cpu_multi));
    return 0;
}
/* FLD1: push +1.0 (exact, so no rounding bias is needed). */
static int
sf_FLD1(uint32_t fetchdat)
{
    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++;
    clear_C1();
    if (IS_TAG_EMPTY(-1)) {
        /* Free slot above the top of stack: push the constant. */
        FPU_push();
        FPU_save_regi(Const_1, 0);
    } else
        FPU_stack_overflow(fetchdat);
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fld_z1) : (x87_timings.fld_z1 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fld_z1) : (x87_concurrency.fld_z1 * cpu_multi));
    return 0;
}
/* FLDZ: push +0.0 (exact, so no rounding bias is needed). */
static int
sf_FLDZ(uint32_t fetchdat)
{
    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++;
    clear_C1();
    if (IS_TAG_EMPTY(-1)) {
        /* Free slot above the top of stack: push the constant. */
        FPU_push();
        FPU_save_regi(Const_Z, 0);
    } else
        FPU_stack_overflow(fetchdat);
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fld_z1) : (x87_timings.fld_z1 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fld_z1) : (x87_concurrency.fld_z1 * cpu_multi));
    return 0;
}
``` | /content/code_sandbox/src/cpu/x87_ops_sf_const.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 1,301 |
```c
/*
* 86Box A hypervisor and IBM PC system emulator that specializes in
* running old operating systems and software designed for IBM
* PC systems and compatibles from 1981 through fairly recent
* system designs based on the PCI bus.
*
* This file is part of the 86Box distribution.
*
* x86 CPU segment emulation for the 286/386 interpreter.
*
*
*
 * Authors: Sarah Walker, <https://pcem-emulator.co.uk/>
* Miran Grca, <mgrca8@gmail.com>
*
*/
/* Compile x86seg.c a second time with OPS_286_386 defined: the macro
   switches the included source to its 286/386-interpreter variants. */
#ifndef OPS_286_386
#    define OPS_286_386
#endif
#include "x86seg.c"
``` | /content/code_sandbox/src/cpu/x86seg_2386.c | c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 141 |
```objective-c
#include "codegen_ops.h"
/*Instruction has input dependency on register in REG field*/
#define SRCDEP_REG (1ULL << 0)
/*Instruction has input dependency on register in R/M field*/
#define SRCDEP_RM (1ULL << 1)
/*Instruction modifies register in REG field*/
#define DSTDEP_REG (1ULL << 2)
/*Instruction modifies register in R/M field*/
#define DSTDEP_RM (1ULL << 3)
#define SRCDEP_SHIFT 4
#define DSTDEP_SHIFT 12
/*Instruction has input dependency on given register*/
#define SRCDEP_EAX (1ULL << 4)
#define SRCDEP_ECX (1ULL << 5)
#define SRCDEP_EDX (1ULL << 6)
#define SRCDEP_EBX (1ULL << 7)
#define SRCDEP_ESP (1ULL << 8)
#define SRCDEP_EBP (1ULL << 9)
#define SRCDEP_ESI (1ULL << 10)
#define SRCDEP_EDI (1ULL << 11)
/*Instruction modifies given register*/
#define DSTDEP_EAX (1ULL << 12)
#define DSTDEP_ECX (1ULL << 13)
#define DSTDEP_EDX (1ULL << 14)
#define DSTDEP_EBX (1ULL << 15)
#define DSTDEP_ESP (1ULL << 16)
#define DSTDEP_EBP (1ULL << 17)
#define DSTDEP_ESI (1ULL << 18)
#define DSTDEP_EDI (1ULL << 19)
/*Instruction has ModR/M byte*/
#define MODRM (1ULL << 20)
/*Instruction implicitly uses ESP*/
#define IMPL_ESP (1ULL << 21)
/*Instruction is MMX shift or pack/unpack instruction*/
#define MMX_SHIFTPACK (1ULL << 22)
/*Instruction is MMX multiply instruction*/
#define MMX_MULTIPLY (1ULL << 23)
/*Instruction pops the FPU stack*/
#define FPU_POP (1ULL << 24)
/*Instruction pops the FPU stack twice*/
#define FPU_POP2 (1ULL << 25)
/*Instruction pushes onto the FPU stack*/
#define FPU_PUSH (1ULL << 26)
/*Instruction writes to ST(0)*/
#define FPU_WRITE_ST0 (1ULL << 27)
/*Instruction reads from ST(0)*/
#define FPU_READ_ST0 (1ULL << 28)
/*Instruction reads from and writes to ST(0)*/
#define FPU_RW_ST0 (3ULL << 27)
/*Instruction reads from ST(1)*/
#define FPU_READ_ST1 (1ULL << 29)
/*Instruction writes to ST(1)*/
#define FPU_WRITE_ST1 (1ULL << 30)
/*Instruction reads from and writes to ST(1)*/
#define FPU_RW_ST1 (3ULL << 29)
/*Instruction reads from ST(reg)*/
#define FPU_READ_STREG (1ULL << 31)
/*Instruction writes to ST(reg)*/
#define FPU_WRITE_STREG (1ULL << 32)
/*Instruction reads from and writes to ST(reg)*/
#define FPU_RW_STREG (3ULL << 31)
/*Instruction is FXCH (gets special-cased by the timing model)*/
#define FPU_FXCH (1ULL << 33)
/*Instruction carries an 8-bit immediate*/
#define HAS_IMM8 (1ULL << 34)
/*Instruction carries a 16/32-bit immediate*/
#define HAS_IMM1632 (1ULL << 35)

/*Pseudo-register bits used inside dependency masks (above the 8 GPR bits).
  SHIFTPACK and MULTIPLY deliberately share bit 9 - presumably because an
  instruction is never both; confirm before reusing the bit.*/
#define REGMASK_IMPL_ESP (1 << 8)
#define REGMASK_SHIFTPACK (1 << 9)
#define REGMASK_MULTIPLY (1 << 9)
extern uint64_t opcode_deps[256];
extern uint64_t opcode_deps_mod3[256];
extern uint64_t opcode_deps_0f[256];
extern uint64_t opcode_deps_0f_mod3[256];
extern uint64_t opcode_deps_0f0f[256];
extern uint64_t opcode_deps_0f0f_mod3[256];
extern uint64_t opcode_deps_shift[8];
extern uint64_t opcode_deps_shift_mod3[8];
extern uint64_t opcode_deps_shift_cl[8];
extern uint64_t opcode_deps_shift_cl_mod3[8];
extern uint64_t opcode_deps_f6[8];
extern uint64_t opcode_deps_f6_mod3[8];
extern uint64_t opcode_deps_f7[8];
extern uint64_t opcode_deps_f7_mod3[8];
extern uint64_t opcode_deps_ff[8];
extern uint64_t opcode_deps_ff_mod3[8];
extern uint64_t opcode_deps_d8[8];
extern uint64_t opcode_deps_d8_mod3[8];
extern uint64_t opcode_deps_d9[8];
extern uint64_t opcode_deps_d9_mod3[64];
extern uint64_t opcode_deps_da[8];
extern uint64_t opcode_deps_da_mod3[8];
extern uint64_t opcode_deps_db[8];
extern uint64_t opcode_deps_db_mod3[64];
extern uint64_t opcode_deps_dc[8];
extern uint64_t opcode_deps_dc_mod3[8];
extern uint64_t opcode_deps_dd[8];
extern uint64_t opcode_deps_dd_mod3[8];
extern uint64_t opcode_deps_de[8];
extern uint64_t opcode_deps_de_mod3[8];
extern uint64_t opcode_deps_df[8];
extern uint64_t opcode_deps_df_mod3[8];
extern uint64_t opcode_deps_81[8];
extern uint64_t opcode_deps_81_mod3[8];
extern uint64_t opcode_deps_8x[8];
extern uint64_t opcode_deps_8x_mod3[8];
/* Build a bitmask of the registers participating in an instruction's
   effective-address calculation (bits 0-7 = EAX..EDI order), for the
   timing model's dependency tracking.
     data     - opcode dependency flags (MODRM, IMPL_ESP, ...)
     fetchdat - bytes following the opcode; ModR/M in bits 0-7, SIB in 8-15
     op_32    - size flags; bit 9 set selects 32-bit addressing */
static inline uint32_t
get_addr_regmask(uint64_t data, uint32_t fetchdat, int op_32)
{
    uint32_t addr_regmask = 0;

    if (data & MODRM) {
        uint8_t modrm = fetchdat & 0xff;

        if ((modrm & 0xc0) != 0xc0) { /* memory operands only */
            if (op_32 & 0x200) {
                /* 32-bit addressing */
                if ((modrm & 0x7) == 4) {
                    /* SIB byte follows */
                    uint8_t sib = (fetchdat >> 8) & 0xff;

                    /* The mod!=3 test was already made above, so only the
                       base field needs checking here. NOTE(review): base==5
                       with mod!=0 actually uses EBP as base; the model
                       treats it as register-free - presumably an accepted
                       timing approximation, confirm. */
                    if ((sib & 7) != 5) {
                        addr_regmask = 1 << (sib & 7); /* base register */
                        if ((sib & 0x38) != 0x20)      /* index 4 = none */
                            addr_regmask |= 1 << ((sib >> 3) & 7);
                    }
                } else if ((modrm & 0xc7) != 5) { /* mod=00 r/m=101 is disp32 */
                    addr_regmask = 1 << (modrm & 7);
                }
            } else {
                /* 16-bit addressing; mod=00 r/m=110 is a bare disp16 */
                if ((modrm & 0xc7) != 0x06) {
                    switch (modrm & 7) {
                        case 0:
                            addr_regmask = REG_BX | REG_SI;
                            break;
                        case 1:
                            addr_regmask = REG_BX | REG_DI;
                            break;
                        case 2:
                            addr_regmask = REG_BP | REG_SI;
                            break;
                        case 3:
                            addr_regmask = REG_BP | REG_DI;
                            break;
                        case 4:
                            addr_regmask = REG_SI;
                            break;
                        case 5:
                            addr_regmask = REG_DI;
                            break;
                        case 6:
                            addr_regmask = REG_BP;
                            break;
                        case 7:
                            addr_regmask = REG_BX;
                            break;
                    }
                }
            }
        }
    }
    if (data & IMPL_ESP)
        addr_regmask |= REGMASK_IMPL_ESP; /* implicit stack-pointer use */
    return addr_regmask;
}
/* Collect the set of registers (and pseudo-register bits) an instruction
   reads: fixed dependencies encoded in `data`, the ModR/M reg/rm fields
   when flagged, MMX unit bits, and any address-calculation registers. */
static inline uint32_t
get_srcdep_mask(uint64_t data, uint32_t fetchdat, int bit8, int op_32)
{
    uint32_t deps = (data >> SRCDEP_SHIFT) & 0xff; /* fixed register deps */
    int      field;

    if (data & SRCDEP_REG) {
        field = (fetchdat >> 3) & 7;
        /* 8-bit ops alias AH..BH onto AL..BL for dependency purposes */
        deps |= 1 << (bit8 ? (field & 3) : field);
    }
    if (data & SRCDEP_RM) {
        field = fetchdat & 7;
        deps |= 1 << (bit8 ? (field & 3) : field);
    }
    if (data & MMX_SHIFTPACK)
        deps |= REGMASK_SHIFTPACK;
    if (data & MMX_MULTIPLY)
        deps |= REGMASK_MULTIPLY;

    return deps | get_addr_regmask(data, fetchdat, op_32);
}
/* Collect the set of registers (and pseudo-register bits) an instruction
   writes: fixed dependencies encoded in `data`, the ModR/M reg/rm fields
   when flagged, MMX unit bits, and the stack pointer for implicit pushes
   and pops. */
static inline uint32_t
get_dstdep_mask(uint64_t data, uint32_t fetchdat, int bit8)
{
    uint32_t deps = (data >> DSTDEP_SHIFT) & 0xff; /* fixed register deps */
    int      field;

    if (data & DSTDEP_REG) {
        field = (fetchdat >> 3) & 7;
        /* 8-bit ops alias AH..BH onto AL..BL for dependency purposes */
        deps |= 1 << (bit8 ? (field & 3) : field);
    }
    if (data & DSTDEP_RM) {
        field = fetchdat & 7;
        deps |= 1 << (bit8 ? (field & 3) : field);
    }
    if (data & MMX_SHIFTPACK)
        deps |= REGMASK_SHIFTPACK;
    if (data & MMX_MULTIPLY)
        deps |= REGMASK_MULTIPLY;
    if (data & IMPL_ESP)
        deps |= REGMASK_IMPL_ESP | (1 << REG_ESP);

    return deps;
}
``` | /content/code_sandbox/src/cpu/codegen_timing_common.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 2,050 |
```objective-c
/* MOV r/m16,Sreg (16-bit addressing): store the selected segment selector.
   NOTE(review): reg field 6 (0x30) has no case, so the destination is
   left unwritten for that encoding - real CPUs raise #UD; confirm
   intended leniency. */
static int
opMOV_w_seg_a16(uint32_t fetchdat)
{
    fetch_ea_16(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_WRITE(cpu_state.ea_seg);
    switch (rmdat & 0x38) { /* reg field selects the segment register */
        case 0x00: /*ES*/
            seteaw(ES);
            break;
        case 0x08: /*CS*/
            seteaw(CS);
            break;
        case 0x18: /*DS*/
            seteaw(DS);
            break;
        case 0x10: /*SS*/
            seteaw(SS);
            break;
        case 0x20: /*FS*/
            seteaw(FS);
            break;
        case 0x28: /*GS*/
            seteaw(GS);
            break;
    }
    CLOCK_CYCLES((cpu_mod == 3) ? 2 : 3);
    PREFETCH_RUN((cpu_mod == 3) ? 2 : 3, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, 0, 0);
    return cpu_state.abrt;
}
/* MOV r/m16,Sreg (32-bit addressing): store the selected segment selector.
   NOTE(review): as in the a16 variant, reg field 6 silently writes
   nothing (real CPUs raise #UD). */
static int
opMOV_w_seg_a32(uint32_t fetchdat)
{
    fetch_ea_32(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_WRITE(cpu_state.ea_seg);
    switch (rmdat & 0x38) { /* reg field selects the segment register */
        case 0x00: /*ES*/
            seteaw(ES);
            break;
        case 0x08: /*CS*/
            seteaw(CS);
            break;
        case 0x18: /*DS*/
            seteaw(DS);
            break;
        case 0x10: /*SS*/
            seteaw(SS);
            break;
        case 0x20: /*FS*/
            seteaw(FS);
            break;
        case 0x28: /*GS*/
            seteaw(GS);
            break;
    }
    CLOCK_CYCLES((cpu_mod == 3) ? 2 : 3);
    PREFETCH_RUN((cpu_mod == 3) ? 2 : 3, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, 0, 1);
    return cpu_state.abrt;
}
/* MOV r/m,Sreg with 32-bit operand size (16-bit addressing): a register
   destination takes the full 32-bit write (selector zero-extended into
   the dword register); a memory destination still stores only 16 bits. */
static int
opMOV_l_seg_a16(uint32_t fetchdat)
{
    fetch_ea_16(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_WRITE(cpu_state.ea_seg);
    switch (rmdat & 0x38) {
        case 0x00: /*ES*/
            if (cpu_mod == 3)
                cpu_state.regs[cpu_rm].l = ES;
            else
                seteaw(ES); /* memory form writes a word only */
            break;
        case 0x08: /*CS*/
            if (cpu_mod == 3)
                cpu_state.regs[cpu_rm].l = CS;
            else
                seteaw(CS);
            break;
        case 0x18: /*DS*/
            if (cpu_mod == 3)
                cpu_state.regs[cpu_rm].l = DS;
            else
                seteaw(DS);
            break;
        case 0x10: /*SS*/
            if (cpu_mod == 3)
                cpu_state.regs[cpu_rm].l = SS;
            else
                seteaw(SS);
            break;
        case 0x20: /*FS*/
            if (cpu_mod == 3)
                cpu_state.regs[cpu_rm].l = FS;
            else
                seteaw(FS);
            break;
        case 0x28: /*GS*/
            if (cpu_mod == 3)
                cpu_state.regs[cpu_rm].l = GS;
            else
                seteaw(GS);
            break;
    }
    CLOCK_CYCLES((cpu_mod == 3) ? 2 : 3);
    PREFETCH_RUN((cpu_mod == 3) ? 2 : 3, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, 0, 0);
    return cpu_state.abrt;
}
/* MOV r/m,Sreg with 32-bit operand size (32-bit addressing): register
   destinations take the full 32-bit write, memory destinations a word. */
static int
opMOV_l_seg_a32(uint32_t fetchdat)
{
    fetch_ea_32(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_WRITE(cpu_state.ea_seg);
    switch (rmdat & 0x38) {
        case 0x00: /*ES*/
            if (cpu_mod == 3)
                cpu_state.regs[cpu_rm].l = ES;
            else
                seteaw(ES); /* memory form writes a word only */
            break;
        case 0x08: /*CS*/
            if (cpu_mod == 3)
                cpu_state.regs[cpu_rm].l = CS;
            else
                seteaw(CS);
            break;
        case 0x18: /*DS*/
            if (cpu_mod == 3)
                cpu_state.regs[cpu_rm].l = DS;
            else
                seteaw(DS);
            break;
        case 0x10: /*SS*/
            if (cpu_mod == 3)
                cpu_state.regs[cpu_rm].l = SS;
            else
                seteaw(SS);
            break;
        case 0x20: /*FS*/
            if (cpu_mod == 3)
                cpu_state.regs[cpu_rm].l = FS;
            else
                seteaw(FS);
            break;
        case 0x28: /*GS*/
            if (cpu_mod == 3)
                cpu_state.regs[cpu_rm].l = GS;
            else
                seteaw(GS);
            break;
    }
    CLOCK_CYCLES((cpu_mod == 3) ? 2 : 3);
    PREFETCH_RUN((cpu_mod == 3) ? 2 : 3, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, 0, 1);
    return cpu_state.abrt;
}
/* MOV Sreg,r/m16 (16-bit addressing): load a segment register from a word
   operand. After a load into SS, the following instruction is fetched and
   dispatched inline so nothing (e.g. an interrupt) can intervene between
   the SS and subsequent SP update. NOTE(review): CS (reg=1) and reg=6 have
   no case and are silently ignored; real CPUs raise #UD - confirm. */
static int
opMOV_seg_w_a16(uint32_t fetchdat)
{
    uint16_t new_seg;
    fetch_ea_16(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_READ(cpu_state.ea_seg);
    new_seg = geteaw();
    if (cpu_state.abrt)
        return 1;
    switch (rmdat & 0x38) {
        case 0x00: /*ES*/
            op_loadseg(new_seg, &cpu_state.seg_es);
            break;
        case 0x18: /*DS*/
            op_loadseg(new_seg, &cpu_state.seg_ds);
            break;
        case 0x10: /*SS*/
            op_loadseg(new_seg, &cpu_state.seg_ss);
            if (cpu_state.abrt)
                return 1;
            /* Execute the next instruction immediately with default
               segment/prefix state, emulating the post-SS-load interrupt
               shadow. */
            cpu_state.oldpc = cpu_state.pc;
            cpu_state.op32 = use32;
            cpu_state.ssegs = 0;
            cpu_state.ea_seg = &cpu_state.seg_ds;
            fetchdat = fastreadl(cs + cpu_state.pc);
            cpu_state.pc++;
            if (cpu_state.abrt)
                return 1;
#ifdef OPS_286_386
            x86_2386_opcodes[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
#else
            x86_opcodes[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
#endif
            return 1;
        case 0x20: /*FS*/
            op_loadseg(new_seg, &cpu_state.seg_fs);
            break;
        case 0x28: /*GS*/
            op_loadseg(new_seg, &cpu_state.seg_gs);
            break;
    }
    CLOCK_CYCLES((cpu_mod == 3) ? 2 : 5);
    PREFETCH_RUN((cpu_mod == 3) ? 2 : 5, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, 0, 0);
    return cpu_state.abrt;
}
/* MOV Sreg,r/m16 (32-bit addressing): see opMOV_seg_w_a16; identical
   apart from the effective-address fetch and prefetch accounting. */
static int
opMOV_seg_w_a32(uint32_t fetchdat)
{
    uint16_t new_seg;
    fetch_ea_32(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_READ(cpu_state.ea_seg);
    new_seg = geteaw();
    if (cpu_state.abrt)
        return 1;
    switch (rmdat & 0x38) {
        case 0x00: /*ES*/
            op_loadseg(new_seg, &cpu_state.seg_es);
            break;
        case 0x18: /*DS*/
            op_loadseg(new_seg, &cpu_state.seg_ds);
            break;
        case 0x10: /*SS*/
            op_loadseg(new_seg, &cpu_state.seg_ss);
            if (cpu_state.abrt)
                return 1;
            /* Execute the next instruction immediately, emulating the
               post-SS-load interrupt shadow. */
            cpu_state.oldpc = cpu_state.pc;
            cpu_state.op32 = use32;
            cpu_state.ssegs = 0;
            cpu_state.ea_seg = &cpu_state.seg_ds;
            fetchdat = fastreadl(cs + cpu_state.pc);
            cpu_state.pc++;
            if (cpu_state.abrt)
                return 1;
#ifdef OPS_286_386
            x86_2386_opcodes[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
#else
            x86_opcodes[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
#endif
            return 1;
        case 0x20: /*FS*/
            op_loadseg(new_seg, &cpu_state.seg_fs);
            break;
        case 0x28: /*GS*/
            op_loadseg(new_seg, &cpu_state.seg_gs);
            break;
    }
    CLOCK_CYCLES((cpu_mod == 3) ? 2 : 5);
    PREFETCH_RUN((cpu_mod == 3) ? 2 : 5, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, 0, 1);
    return cpu_state.abrt;
}
static int
opLDS_w_a16(uint32_t fetchdat)
{
uint16_t addr;
uint16_t seg;
fetch_ea_16(fetchdat);
ILLEGAL_ON(cpu_mod == 3);
SEG_CHECK_READ(cpu_state.ea_seg);
CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 3);
addr = readmemw(easeg, cpu_state.eaaddr);
seg = readmemw(easeg, cpu_state.eaaddr + 2);
if (cpu_state.abrt)
return 1;
op_loadseg(seg, &cpu_state.seg_ds);
if (cpu_state.abrt)
return 1;
cpu_state.regs[cpu_reg].w = addr;
CLOCK_CYCLES(7);
PREFETCH_RUN(7, 2, rmdat, 2, 0, 0, 0, 0);
return 0;
}
static int
opLDS_w_a32(uint32_t fetchdat)
{
uint16_t addr;
uint16_t seg;
fetch_ea_32(fetchdat);
ILLEGAL_ON(cpu_mod == 3);
SEG_CHECK_READ(cpu_state.ea_seg);
CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 3);
addr = readmemw(easeg, cpu_state.eaaddr);
seg = readmemw(easeg, cpu_state.eaaddr + 2);
if (cpu_state.abrt)
return 1;
op_loadseg(seg, &cpu_state.seg_ds);
if (cpu_state.abrt)
return 1;
cpu_state.regs[cpu_reg].w = addr;
CLOCK_CYCLES(7);
PREFETCH_RUN(7, 2, rmdat, 2, 0, 0, 0, 1);
return 0;
}
static int
opLDS_l_a16(uint32_t fetchdat)
{
uint32_t addr;
uint16_t seg;
fetch_ea_16(fetchdat);
ILLEGAL_ON(cpu_mod == 3);
SEG_CHECK_READ(cpu_state.ea_seg);
CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 5);
addr = readmeml(easeg, cpu_state.eaaddr);
seg = readmemw(easeg, cpu_state.eaaddr + 4);
if (cpu_state.abrt)
return 1;
op_loadseg(seg, &cpu_state.seg_ds);
if (cpu_state.abrt)
return 1;
cpu_state.regs[cpu_reg].l = addr;
CLOCK_CYCLES(7);
PREFETCH_RUN(7, 2, rmdat, 1, 1, 0, 0, 0);
return 0;
}
static int
opLDS_l_a32(uint32_t fetchdat)
{
uint32_t addr;
uint16_t seg;
fetch_ea_32(fetchdat);
ILLEGAL_ON(cpu_mod == 3);
SEG_CHECK_READ(cpu_state.ea_seg);
CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 5);
addr = readmeml(easeg, cpu_state.eaaddr);
seg = readmemw(easeg, cpu_state.eaaddr + 4);
if (cpu_state.abrt)
return 1;
op_loadseg(seg, &cpu_state.seg_ds);
if (cpu_state.abrt)
return 1;
cpu_state.regs[cpu_reg].l = addr;
CLOCK_CYCLES(7);
PREFETCH_RUN(7, 2, rmdat, 1, 1, 0, 0, 1);
return 0;
}
/* 0F B2h: LSS r16, m16:16 (16-bit addressing).
   Loads SS and a 16-bit general register from a four-byte far pointer
   in memory; the register-direct form is illegal.
   NOTE(review): unlike the LDS handlers this returns 1 even on success —
   presumably so the main loop skips interrupt checks after the SS load
   (mov-to-SS interrupt shadow); confirm against the execution loop. */
static int
opLSS_w_a16(uint32_t fetchdat)
{
    uint16_t addr;
    uint16_t seg;

    fetch_ea_16(fetchdat);
    ILLEGAL_ON(cpu_mod == 3);
    SEG_CHECK_READ(cpu_state.ea_seg);
    CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 3);
    /* Read offset then selector before loading the segment, so a fault
       on either read leaves SS untouched. */
    addr = readmemw(easeg, cpu_state.eaaddr);
    seg = readmemw(easeg, cpu_state.eaaddr + 2);
    if (cpu_state.abrt)
        return 1;
    op_loadseg(seg, &cpu_state.seg_ss);
    if (cpu_state.abrt)
        return 1;
    cpu_state.regs[cpu_reg].w = addr;
    CLOCK_CYCLES(7);
    PREFETCH_RUN(7, 2, rmdat, 2, 0, 0, 0, 0);
    return 1;
}
/* 0F B2h: LSS r16, m16:16 (32-bit addressing).
   Loads SS and a 16-bit general register from a four-byte far pointer
   in memory; the register-direct form is illegal.
   NOTE(review): unlike the LDS handlers this returns 1 even on success —
   presumably so the main loop skips interrupt checks after the SS load
   (mov-to-SS interrupt shadow); confirm against the execution loop. */
static int
opLSS_w_a32(uint32_t fetchdat)
{
    uint16_t addr;
    uint16_t seg;

    fetch_ea_32(fetchdat);
    ILLEGAL_ON(cpu_mod == 3);
    SEG_CHECK_READ(cpu_state.ea_seg);
    CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 3);
    /* Read offset then selector before loading the segment, so a fault
       on either read leaves SS untouched. */
    addr = readmemw(easeg, cpu_state.eaaddr);
    seg = readmemw(easeg, cpu_state.eaaddr + 2);
    if (cpu_state.abrt)
        return 1;
    op_loadseg(seg, &cpu_state.seg_ss);
    if (cpu_state.abrt)
        return 1;
    cpu_state.regs[cpu_reg].w = addr;
    CLOCK_CYCLES(7);
    PREFETCH_RUN(7, 2, rmdat, 2, 0, 0, 0, 1);
    return 1;
}
/* 0F B2h: LSS r32, m16:32 (16-bit addressing).
   Loads SS and a 32-bit general register from a six-byte far pointer
   in memory; the register-direct form is illegal.
   NOTE(review): returns 1 even on success — presumably the mov-to-SS
   interrupt shadow; confirm against the execution loop. */
static int
opLSS_l_a16(uint32_t fetchdat)
{
    uint32_t addr;
    uint16_t seg;

    fetch_ea_16(fetchdat);
    ILLEGAL_ON(cpu_mod == 3);
    SEG_CHECK_READ(cpu_state.ea_seg);
    CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 5);
    /* Read offset then selector before loading the segment, so a fault
       on either read leaves SS untouched. */
    addr = readmeml(easeg, cpu_state.eaaddr);
    seg = readmemw(easeg, cpu_state.eaaddr + 4);
    if (cpu_state.abrt)
        return 1;
    op_loadseg(seg, &cpu_state.seg_ss);
    if (cpu_state.abrt)
        return 1;
    cpu_state.regs[cpu_reg].l = addr;
    CLOCK_CYCLES(7);
    /* One 16-bit and one 32-bit read: was (…, 2, 0, …), which counted two
       word reads and was inconsistent with opLDS_l_a16 and the opLsel _l
       variants, which pass (…, 1, 1, …) for the identical access pattern. */
    PREFETCH_RUN(7, 2, rmdat, 1, 1, 0, 0, 0);
    return 1;
}
/* 0F B2h: LSS r32, m16:32 (32-bit addressing).
   Loads SS and a 32-bit general register from a six-byte far pointer
   in memory; the register-direct form is illegal.
   NOTE(review): returns 1 even on success — presumably the mov-to-SS
   interrupt shadow; confirm against the execution loop. */
static int
opLSS_l_a32(uint32_t fetchdat)
{
    uint32_t addr;
    uint16_t seg;

    fetch_ea_32(fetchdat);
    ILLEGAL_ON(cpu_mod == 3);
    SEG_CHECK_READ(cpu_state.ea_seg);
    CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 5);
    /* Read offset then selector before loading the segment, so a fault
       on either read leaves SS untouched. */
    addr = readmeml(easeg, cpu_state.eaaddr);
    seg = readmemw(easeg, cpu_state.eaaddr + 4);
    if (cpu_state.abrt)
        return 1;
    op_loadseg(seg, &cpu_state.seg_ss);
    if (cpu_state.abrt)
        return 1;
    cpu_state.regs[cpu_reg].l = addr;
    CLOCK_CYCLES(7);
    /* One 16-bit and one 32-bit read: was (…, 2, 0, …), which counted two
       word reads and was inconsistent with opLDS_l_a32 and the opLsel _l
       variants, which pass (…, 1, 1, …) for the identical access pattern. */
    PREFETCH_RUN(7, 2, rmdat, 1, 1, 0, 0, 1);
    return 1;
}
#define opLsel(name, sel) \
static int opL##name##_w_a16(uint32_t fetchdat) \
{ \
uint16_t addr, seg; \
\
fetch_ea_16(fetchdat); \
SEG_CHECK_READ(cpu_state.ea_seg); \
ILLEGAL_ON(cpu_mod == 3); \
CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 3); \
addr = readmemw(easeg, cpu_state.eaaddr); \
seg = readmemw(easeg, cpu_state.eaaddr + 2); \
if (cpu_state.abrt) \
return 1; \
op_loadseg(seg, &sel); \
if (cpu_state.abrt) \
return 1; \
cpu_state.regs[cpu_reg].w = addr; \
\
CLOCK_CYCLES(7); \
PREFETCH_RUN(7, 2, rmdat, 2, 0, 0, 0, 0); \
return 0; \
} \
\
static int opL##name##_w_a32(uint32_t fetchdat) \
{ \
uint16_t addr, seg; \
\
fetch_ea_32(fetchdat); \
SEG_CHECK_READ(cpu_state.ea_seg); \
ILLEGAL_ON(cpu_mod == 3); \
CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 3); \
addr = readmemw(easeg, cpu_state.eaaddr); \
seg = readmemw(easeg, cpu_state.eaaddr + 2); \
if (cpu_state.abrt) \
return 1; \
op_loadseg(seg, &sel); \
if (cpu_state.abrt) \
return 1; \
cpu_state.regs[cpu_reg].w = addr; \
\
CLOCK_CYCLES(7); \
PREFETCH_RUN(7, 2, rmdat, 2, 0, 0, 0, 1); \
return 0; \
} \
\
static int opL##name##_l_a16(uint32_t fetchdat) \
{ \
uint32_t addr; \
uint16_t seg; \
\
fetch_ea_16(fetchdat); \
SEG_CHECK_READ(cpu_state.ea_seg); \
ILLEGAL_ON(cpu_mod == 3); \
CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 5); \
addr = readmeml(easeg, cpu_state.eaaddr); \
seg = readmemw(easeg, cpu_state.eaaddr + 4); \
if (cpu_state.abrt) \
return 1; \
op_loadseg(seg, &sel); \
if (cpu_state.abrt) \
return 1; \
cpu_state.regs[cpu_reg].l = addr; \
\
CLOCK_CYCLES(7); \
PREFETCH_RUN(7, 2, rmdat, 1, 1, 0, 0, 0); \
return 0; \
} \
\
static int opL##name##_l_a32(uint32_t fetchdat) \
{ \
uint32_t addr; \
uint16_t seg; \
\
fetch_ea_32(fetchdat); \
SEG_CHECK_READ(cpu_state.ea_seg); \
ILLEGAL_ON(cpu_mod == 3); \
CHECK_READ(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 5); \
addr = readmeml(easeg, cpu_state.eaaddr); \
seg = readmemw(easeg, cpu_state.eaaddr + 4); \
if (cpu_state.abrt) \
return 1; \
op_loadseg(seg, &sel); \
if (cpu_state.abrt) \
return 1; \
cpu_state.regs[cpu_reg].l = addr; \
\
CLOCK_CYCLES(7); \
PREFETCH_RUN(7, 2, rmdat, 1, 1, 0, 0, 1); \
return 0; \
}
// clang-format off
/* Instantiate the LES, LFS and LGS handlers (w/l operand size, a16/a32). */
opLsel(ES, cpu_state.seg_es)
opLsel(FS, cpu_state.seg_fs)
opLsel(GS, cpu_state.seg_gs)
// clang-format on
``` | /content/code_sandbox/src/cpu/x86_ops_mov_seg.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 4,980 |
```c
/*
* 86Box A hypervisor and IBM PC system emulator that specializes in
* running old operating systems and software designed for IBM
* PC systems and compatibles from 1981 through fairly recent
* system designs based on the PCI bus.
*
* This file is part of the 86Box distribution.
*
* CPU type handler.
*
* Authors: Sarah Walker, <path_to_url
* leilei,
* Miran Grca, <mgrca8@gmail.com>
* Fred N. van Kempen, <decwiz@yahoo.com>
*
*/
#include <inttypes.h>
#include <math.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <wchar.h>
#define HAVE_STDARG_H
#include <86box/86box.h>
#include "cpu.h"
#include "x86.h"
#include "x87_sf.h"
#include <86box/device.h>
#include <86box/machine.h>
#include <86box/io.h>
#include "x86_ops.h"
#include "x86seg_common.h"
#include <86box/mem.h>
#include <86box/nmi.h>
#include <86box/pic.h>
#include <86box/pci.h>
#include <86box/timer.h>
#include <86box/gdbstub.h>
#include <86box/plat_fallthrough.h>
#include <86box/plat_unused.h>
#ifdef USE_DYNAREC
# include "codegen.h"
#endif /* USE_DYNAREC */
#include "x87_timings.h"
#define CCR1_USE_SMI (1 << 1)
#define CCR1_SMAC (1 << 2)
#define CCR1_SM3 (1 << 7)
#define CCR3_SMI_LOCK (1 << 0)
#define CCR3_NMI_EN (1 << 1)
enum {
CPUID_FPU = (1 << 0), /* On-chip Floating Point Unit */
CPUID_VME = (1 << 1), /* Virtual 8086 mode extensions */
CPUID_DE = (1 << 2), /* Debugging extensions */
CPUID_PSE = (1 << 3), /* Page Size Extension */
CPUID_TSC = (1 << 4), /* Time Stamp Counter */
CPUID_MSR = (1 << 5), /* Model-specific registers */
CPUID_PAE = (1 << 6), /* Physical Address Extension */
CPUID_MCE = (1 << 7), /* Machine Check Exception */
CPUID_CMPXCHG8B = (1 << 8), /* CMPXCHG8B instruction */
CPUID_APIC = (1 << 9), /* On-chip APIC */
CPUID_AMDPGE = (1 << 9), /* Global Page Enable (AMD K5 Model 0 only) */
CPUID_AMDSEP = (1 << 10), /* SYSCALL and SYSRET instructions (AMD K6 only) */
CPUID_SEP = (1 << 11), /* SYSENTER and SYSEXIT instructions (SYSCALL and SYSRET if EAX=80000001h) */
CPUID_MTRR = (1 << 12), /* Memory type range registers */
CPUID_PGE = (1 << 13), /* Page Global Enable */
CPUID_MCA = (1 << 14), /* Machine Check Architecture */
CPUID_CMOV = (1 << 15), /* Conditional move instructions */
CPUID_PAT = (1 << 16), /* Page Attribute Table */
CPUID_MMX = (1 << 23), /* MMX technology */
CPUID_FXSR = (1 << 24) /* FXSAVE and FXRSTOR instructions */
};
/* Additional flags returned by CPUID function 0x80000001 */
#define CPUID_3DNOWE (1UL << 30UL) /* Extended 3DNow! instructions */
#define CPUID_3DNOW (1UL << 31UL) /* 3DNow! instructions */
/* Remove the Debugging Extensions CPUID flag if not compiled
with debug register support for 486 and later CPUs. */
#ifndef USE_DEBUG_REGS_486
# define CPUID_DE 0
#endif
/* Make sure this is as low as possible. */
cpu_state_t cpu_state;
fpu_state_t fpu_state;
/* Place this immediately after. */
uint32_t abrt_error;
#ifdef USE_DYNAREC
const OpFn *x86_dynarec_opcodes;
const OpFn *x86_dynarec_opcodes_0f;
const OpFn *x86_dynarec_opcodes_d8_a16;
const OpFn *x86_dynarec_opcodes_d8_a32;
const OpFn *x86_dynarec_opcodes_d9_a16;
const OpFn *x86_dynarec_opcodes_d9_a32;
const OpFn *x86_dynarec_opcodes_da_a16;
const OpFn *x86_dynarec_opcodes_da_a32;
const OpFn *x86_dynarec_opcodes_db_a16;
const OpFn *x86_dynarec_opcodes_db_a32;
const OpFn *x86_dynarec_opcodes_dc_a16;
const OpFn *x86_dynarec_opcodes_dc_a32;
const OpFn *x86_dynarec_opcodes_dd_a16;
const OpFn *x86_dynarec_opcodes_dd_a32;
const OpFn *x86_dynarec_opcodes_de_a16;
const OpFn *x86_dynarec_opcodes_de_a32;
const OpFn *x86_dynarec_opcodes_df_a16;
const OpFn *x86_dynarec_opcodes_df_a32;
const OpFn *x86_dynarec_opcodes_REPE;
const OpFn *x86_dynarec_opcodes_REPNE;
const OpFn *x86_dynarec_opcodes_3DNOW;
#endif /* USE_DYNAREC */
const OpFn *x86_opcodes;
const OpFn *x86_opcodes_0f;
const OpFn *x86_opcodes_d8_a16;
const OpFn *x86_opcodes_d8_a32;
const OpFn *x86_opcodes_d9_a16;
const OpFn *x86_opcodes_d9_a32;
const OpFn *x86_opcodes_da_a16;
const OpFn *x86_opcodes_da_a32;
const OpFn *x86_opcodes_db_a16;
const OpFn *x86_opcodes_db_a32;
const OpFn *x86_opcodes_dc_a16;
const OpFn *x86_opcodes_dc_a32;
const OpFn *x86_opcodes_dd_a16;
const OpFn *x86_opcodes_dd_a32;
const OpFn *x86_opcodes_de_a16;
const OpFn *x86_opcodes_de_a32;
const OpFn *x86_opcodes_df_a16;
const OpFn *x86_opcodes_df_a32;
const OpFn *x86_opcodes_REPE;
const OpFn *x86_opcodes_REPNE;
const OpFn *x86_opcodes_3DNOW;
const OpFn *x86_2386_opcodes;
const OpFn *x86_2386_opcodes_0f;
const OpFn *x86_2386_opcodes_d8_a16;
const OpFn *x86_2386_opcodes_d8_a32;
const OpFn *x86_2386_opcodes_d9_a16;
const OpFn *x86_2386_opcodes_d9_a32;
const OpFn *x86_2386_opcodes_da_a16;
const OpFn *x86_2386_opcodes_da_a32;
const OpFn *x86_2386_opcodes_db_a16;
const OpFn *x86_2386_opcodes_db_a32;
const OpFn *x86_2386_opcodes_dc_a16;
const OpFn *x86_2386_opcodes_dc_a32;
const OpFn *x86_2386_opcodes_dd_a16;
const OpFn *x86_2386_opcodes_dd_a32;
const OpFn *x86_2386_opcodes_de_a16;
const OpFn *x86_2386_opcodes_de_a32;
const OpFn *x86_2386_opcodes_df_a16;
const OpFn *x86_2386_opcodes_df_a32;
const OpFn *x86_2386_opcodes_REPE;
const OpFn *x86_2386_opcodes_REPNE;
uint16_t cpu_fast_off_count;
uint16_t cpu_fast_off_val;
uint16_t temp_seg_data[4] = { 0, 0, 0, 0 };
int isa_cycles;
int cpu_inited;
int cpu_cycles_read;
int cpu_cycles_read_l;
int cpu_cycles_write;
int cpu_cycles_write_l;
int cpu_prefetch_cycles;
int cpu_prefetch_width;
int cpu_mem_prefetch_cycles;
int cpu_rom_prefetch_cycles;
int cpu_waitstates;
int cpu_cache_int_enabled;
int cpu_cache_ext_enabled;
int cpu_isa_speed;
int cpu_pci_speed;
int cpu_isa_pci_div;
int cpu_agp_speed;
int cpu_alt_reset;
int cpu_override;
int cpu_effective;
int cpu_multi;
int cpu_16bitbus;
int cpu_64bitbus;
int cpu_cyrix_alignment;
int cpu_cpurst_on_sr;
int cpu_use_exec = 0;
int cpu_override_interpreter;
int CPUID;
int is186;
int is_nec;
int is286;
int is386;
int is6117;
int is486 = 1;
int cpu_isintel;
int cpu_iscyrix;
int hascache;
int isibm486;
int israpidcad;
int is_vpc;
int is_am486;
int is_am486dxl;
int is_pentium;
int is_k5;
int is_k6;
int is_p6;
int is_cxsmm;
int hasfpu;
int timing_rr;
int timing_mr;
int timing_mrl;
int timing_rm;
int timing_rml;
int timing_mm;
int timing_mml;
int timing_bt;
int timing_bnt;
int timing_int;
int timing_int_rm;
int timing_int_v86;
int timing_int_pm;
int timing_int_pm_outer;
int timing_iret_rm;
int timing_iret_v86;
int timing_iret_pm;
int timing_iret_pm_outer;
int timing_call_rm;
int timing_call_pm;
int timing_call_pm_gate;
int timing_call_pm_gate_inner;
int timing_retf_rm;
int timing_retf_pm;
int timing_retf_pm_outer;
int timing_jmp_rm;
int timing_jmp_pm;
int timing_jmp_pm_gate;
int timing_misaligned;
uint32_t cpu_features;
uint32_t cpu_fast_off_flags;
uint32_t _tr[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
uint32_t cache_index = 0;
uint8_t _cache[2048];
uint64_t cpu_CR4_mask;
uint64_t tsc = 0;
double cpu_dmulti;
double cpu_busspeed;
msr_t msr;
cyrix_t cyrix;
cpu_family_t *cpu_f;
CPU *cpu_s;
uint8_t do_translate = 0;
uint8_t do_translate2 = 0;
void (*cpu_exec)(int32_t cycs);
static uint8_t ccr0;
static uint8_t ccr1;
static uint8_t ccr2;
static uint8_t ccr3;
static uint8_t ccr4;
static uint8_t ccr5;
static uint8_t ccr6;
static int cyrix_addr;
static void cpu_write(uint16_t addr, uint8_t val, void *priv);
static uint8_t cpu_read(uint16_t addr, void *priv);
#ifdef ENABLE_CPU_LOG
int cpu_do_log = ENABLE_CPU_LOG;
/* Log a formatted message through pclog_ex() when CPU logging is enabled. */
void
cpu_log(const char *fmt, ...)
{
    va_list ap;

    if (!cpu_do_log)
        return;

    va_start(ap, fmt);
    pclog_ex(fmt, ap);
    va_end(ap);
}
#else
# define cpu_log(fmt, ...)
#endif
/* Test the given feature bit(s) against the global cpu_features mask.
   Returns the masked bits (non-zero if present), not a normalized 0/1. */
int
cpu_has_feature(int feature)
{
    return cpu_features & feature;
}
/* Apply the settings of 'new_cpu' at runtime: temporarily swaps the global
   'cpu' index, re-runs cpu_set() and pc_speed_changed(), then restores the
   original index. No-op if 'new_cpu' is already the effective CPU. */
void
cpu_dynamic_switch(int new_cpu)
{
    int c;

    if (cpu_effective == new_cpu)
        return;

    /* Save the configured CPU index so the user's selection survives. */
    c = cpu;
    cpu = new_cpu;
    cpu_set();
    pc_speed_changed();
    cpu = c;
}
/* Load EDX with the CPU's power-on identification value and, when the
   softfloat FPU implementation is in use, reset the FPU state as well. */
void
cpu_set_edx(void)
{
    EDX = cpu_s->edx_reset;
    if (fpu_softfloat)
        SF_FPU_reset();
}
/* Look up a CPU family by its internal (configuration) name.
   Returns a pointer into the global cpu_families table, or NULL when no
   family matches. */
cpu_family_t *
cpu_get_family(const char *internal_name)
{
    for (int i = 0; cpu_families[i].package; i++) {
        if (strcmp(cpu_families[i].internal_name, internal_name) == 0)
            return (cpu_family_t *) &cpu_families[i];
    }

    return NULL;
}
/* Decide whether CPU number 'cpu' of 'cpu_family' may be used on 'machine'.
   Considers the cpu_override setting, package compatibility, the machine's
   CPU blocklist, bus speed, voltage and multiplier ranges.
   Returns 1 if eligible, 0 if not.
   NOTE: the parameter 'cpu' and the local 'cpu_s' shadow the globals of
   the same names. */
uint8_t
cpu_is_eligible(const cpu_family_t *cpu_family, int cpu, int machine)
{
    const machine_t *machine_s = &machines[machine];
    const CPU *cpu_s = &cpu_family->cpus[cpu];
    uint32_t packages;
    uint32_t bus_speed;
    uint8_t i;
    double multi;

    /* Full override: accept anything. */
    if (cpu_override > 1)
        return 1;

    /* Add implicit CPU package compatibility. */
    packages = machine_s->cpu.package;
    if (packages & CPU_PKG_SOCKET3)
        packages |= CPU_PKG_SOCKET1;
    else if (packages & CPU_PKG_SLOT1)
        packages |= CPU_PKG_SOCKET370 | CPU_PKG_SOCKET8;

    /* Package type. */
    if (!(cpu_family->package & packages))
        return 0;

    /* Partial override: accept anything that fits the package. */
    if (cpu_override)
        return 1;

    /* Check CPU blocklist. */
    if (machine_s->cpu.block) {
        i = 0;
        while (machine_s->cpu.block[i]) {
            if (machine_s->cpu.block[i++] == cpu_s->cpu_type)
                return 0;
        }
    }

    bus_speed = cpu_s->rspeed / cpu_s->multi;

    /* Minimum bus speed with ~0.84 MHz (for 8086) tolerance. */
    if (machine_s->cpu.min_bus && (bus_speed < (machine_s->cpu.min_bus - 840907)))
        return 0;

    /* Maximum bus speed with ~0.84 MHz (for 8086) tolerance. */
    if (machine_s->cpu.max_bus && (bus_speed > (machine_s->cpu.max_bus + 840907)))
        return 0;

    /* Minimum voltage with 0.1V tolerance. */
    if (machine_s->cpu.min_voltage && (cpu_s->voltage < (machine_s->cpu.min_voltage - 100)))
        return 0;

    /* Maximum voltage with 0.1V tolerance. */
    if (machine_s->cpu.max_voltage && (cpu_s->voltage > (machine_s->cpu.max_voltage + 100)))
        return 0;

    /* Account for CPUs which use a different internal multiplier than
       specified by jumpers: map the internal multiplier back to the
       jumper-selected one before the range checks below. */
    multi = cpu_s->multi;

    /* Don't care about multiplier compatibility on fixed multiplier CPUs. */
    if (cpu_s->cpu_flags & CPU_FIXED_MULTIPLIER)
        return 1;
    else if (cpu_family->package & CPU_PKG_SOCKET5_7) {
        if ((multi == 1.5) && (cpu_s->cpu_type == CPU_5K86) && (machine_s->cpu.min_multi > 1.5)) /* K5 5k86 */
            multi = 2.0;
        else if (multi == 1.75) /* K5 5k86 */
            multi = 2.5;
        else if (multi == 2.0) {
            if (cpu_s->cpu_type == CPU_5K86) /* K5 5k86 */
                multi = 3.0;
            /* K6-2+ / K6-3+ */
            else if ((cpu_s->cpu_type == CPU_K6_2P) || (cpu_s->cpu_type == CPU_K6_3P))
                multi = 2.5;
            else if (((cpu_s->cpu_type == CPU_WINCHIP) || (cpu_s->cpu_type == CPU_WINCHIP2)) && (machine_s->cpu.min_multi > 2.0)) /* WinChip (2) */
                multi = 2.5;
        } else if (multi == (7.0 / 3.0)) /* WinChip 2A - 2.33x */
            multi = 5.0;
        else if (multi == (8.0 / 3.0)) /* WinChip 2A - 2.66x */
            multi = 5.5;
        else if ((multi == 3.0) && (cpu_s->cpu_type == CPU_Cx6x86 || cpu_s->cpu_type == CPU_Cx6x86L)) /* 6x86(L) */
            multi = 1.5;
        else if (multi == (10.0 / 3.0)) /* WinChip 2A - 3.33x */
            multi = 2.0;
        else if (multi == 3.5) /* standard set by the Pentium MMX */
            multi = 1.5;
        else if (multi == 4.0) {
            /* WinChip (2) */
            if ((cpu_s->cpu_type == CPU_WINCHIP) || (cpu_s->cpu_type == CPU_WINCHIP2)) {
                if (machine_s->cpu.min_multi >= 1.5)
                    multi = 1.5;
                else if (machine_s->cpu.min_multi >= 3.5)
                    multi = 3.5;
                else if (machine_s->cpu.min_multi >= 4.5)
                    multi = 4.5;
            } else if ((cpu_s->cpu_type == CPU_Cx6x86) || (cpu_s->cpu_type == CPU_Cx6x86L)) /* 6x86(L) */
                multi = 3.0;
        } else if ((multi == 5.0) && ((cpu_s->cpu_type == CPU_WINCHIP) || (cpu_s->cpu_type == CPU_WINCHIP2)) && (machine_s->cpu.min_multi > 5.0)) /* WinChip (2) */
            multi = 5.5;
        else if (multi == 6.0) /* K6-2(+) / K6-3(+) */
            multi = 2.0;
    }

    /* Minimum multiplier. */
    if (multi < machine_s->cpu.min_multi)
        return 0;

    /* Maximum multiplier. */
    if (machine_s->cpu.max_multi && (multi > machine_s->cpu.max_multi))
        return 0;

    return 1;
}
/* Return 1 if at least one CPU in 'cpu_family' is eligible for 'machine',
   0 otherwise. */
uint8_t
cpu_family_is_eligible(const cpu_family_t *cpu_family, int machine)
{
    for (int i = 0; cpu_family->cpus[i].cpu_type; i++) {
        if (cpu_is_eligible(cpu_family, i, machine))
            return 1;
    }

    return 0;
}
/* Reset the softfloat FPU state: control word 0x0040, cleared status word,
   top-of-stack 0, tag word 0x5555 (all registers empty), cleared opcode and
   instruction/data pointers, and zeroed st() registers.
   Does nothing when no FPU is fitted. */
void
SF_FPU_reset(void)
{
    if (fpu_type != FPU_NONE) {
        fpu_state.cwd = 0x0040;
        fpu_state.swd = 0;
        fpu_state.tos = 0;
        fpu_state.tag = 0x5555;
        fpu_state.foo = 0;
        fpu_state.fip = 0;
        fpu_state.fcs = 0;
        fpu_state.fds = 0;
        fpu_state.fdp = 0;
        memset(fpu_state.st_space, 0, sizeof(floatx80) * 8);
    }
}
void
cpu_set(void)
{
cpu_inited = 1;
cpu_effective = cpu;
cpu_s = (CPU *) &cpu_f->cpus[cpu_effective];
#ifdef USE_ACYCS
acycs = 0;
#endif /* USE_ACYCS */
soft_reset_pci = 0;
cpu_init = 0;
cpu_alt_reset = 0;
unmask_a20_in_smm = 0;
CPUID = cpu_s->cpuid_model;
is8086 = (cpu_s->cpu_type > CPU_8088) && (cpu_s->cpu_type != CPU_V20) && (cpu_s->cpu_type != CPU_188);
is_nec = (cpu_s->cpu_type == CPU_V20) || (cpu_s->cpu_type == CPU_V30);
is186 = (cpu_s->cpu_type == CPU_186) || (cpu_s->cpu_type == CPU_188) || (cpu_s->cpu_type == CPU_V20) || (cpu_s->cpu_type == CPU_V30);
is286 = (cpu_s->cpu_type >= CPU_286);
is386 = (cpu_s->cpu_type >= CPU_386SX);
israpidcad = (cpu_s->cpu_type == CPU_RAPIDCAD);
isibm486 = (cpu_s->cpu_type == CPU_IBM386SLC) || (cpu_s->cpu_type == CPU_IBM486SLC) || (cpu_s->cpu_type == CPU_IBM486BL);
is486 = (cpu_s->cpu_type >= CPU_RAPIDCAD);
is_am486 = (cpu_s->cpu_type == CPU_ENH_Am486DX);
is_am486dxl = (cpu_s->cpu_type == CPU_Am486DXL);
is6117 = !strcmp(cpu_f->manufacturer, "ALi");
cpu_isintel = !strcmp(cpu_f->manufacturer, "Intel");
cpu_iscyrix = !strcmp(cpu_f->manufacturer, "Cyrix") || !strcmp(cpu_f->manufacturer, "ST");
/* SL-Enhanced Intel 486s have the same SMM save state table layout as Pentiums,
and the WinChip datasheet claims those are Pentium-compatible as well. AMD Am486DXL/DXL2 also has compatible SMM, or would if not for it's different SMBase*/
is_pentium = (cpu_isintel && (cpu_s->cpu_type >= CPU_i486SX_SLENH) && (cpu_s->cpu_type < CPU_PENTIUMPRO)) || !strcmp(cpu_f->manufacturer, "IDT") || (cpu_s->cpu_type == CPU_Am486DXL);
is_k5 = !strcmp(cpu_f->manufacturer, "AMD") && (cpu_s->cpu_type > CPU_ENH_Am486DX) && (cpu_s->cpu_type < CPU_K6);
is_k6 = (cpu_s->cpu_type >= CPU_K6) && !strcmp(cpu_f->manufacturer, "AMD");
/* The Samuel 2 datasheet claims it's Celeron-compatible. */
is_p6 = (cpu_isintel && (cpu_s->cpu_type >= CPU_PENTIUMPRO)) || !strcmp(cpu_f->manufacturer, "VIA");
is_cxsmm = (!strcmp(cpu_f->manufacturer, "Cyrix") || !strcmp(cpu_f->manufacturer, "ST")) && (cpu_s->cpu_type >= CPU_Cx486S);
cpu_isintel = cpu_isintel || !strcmp(cpu_f->manufacturer, "AMD");
hasfpu = (fpu_type != FPU_NONE);
hascache = (cpu_s->cpu_type >= CPU_486SLC) || (cpu_s->cpu_type == CPU_IBM386SLC) || (cpu_s->cpu_type == CPU_IBM486SLC) || (cpu_s->cpu_type == CPU_IBM486BL);
cpu_16bitbus = (cpu_s->cpu_type == CPU_286) || (cpu_s->cpu_type == CPU_386SX) || (cpu_s->cpu_type == CPU_486SLC) || (cpu_s->cpu_type == CPU_IBM386SLC) || (cpu_s->cpu_type == CPU_IBM486SLC);
cpu_64bitbus = (cpu_s->cpu_type >= CPU_WINCHIP);
if (cpu_s->multi)
cpu_busspeed = cpu_s->rspeed / cpu_s->multi;
else
cpu_busspeed = cpu_s->rspeed;
cpu_multi = (int) ceil(cpu_s->multi);
cpu_dmulti = cpu_s->multi;
ccr0 = ccr1 = ccr2 = ccr3 = ccr4 = ccr5 = ccr6 = 0;
cpu_update_waitstates();
isa_cycles = cpu_s->atclk_div;
if (cpu_s->rspeed <= 8000000)
cpu_rom_prefetch_cycles = cpu_mem_prefetch_cycles;
else
cpu_rom_prefetch_cycles = cpu_s->rspeed / 1000000;
cpu_set_isa_pci_div(0);
cpu_set_pci_speed(0);
cpu_set_agp_speed(0);
io_handler(cpu_iscyrix, 0x0022, 0x0002, cpu_read, NULL, NULL, cpu_write, NULL, NULL, NULL);
io_handler(hasfpu, 0x00f0, 0x000f, cpu_read, NULL, NULL, cpu_write, NULL, NULL, NULL);
io_handler(hasfpu, 0xf007, 0x0001, cpu_read, NULL, NULL, cpu_write, NULL, NULL, NULL);
#ifdef USE_DYNAREC
x86_setopcodes(ops_386, ops_386_0f, dynarec_ops_386, dynarec_ops_386_0f);
#else
x86_setopcodes(ops_386, ops_386_0f);
#endif /* USE_DYNAREC */
x86_setopcodes_2386(ops_2386_386, ops_2386_386_0f);
x86_opcodes_REPE = ops_REPE;
x86_opcodes_REPNE = ops_REPNE;
x86_2386_opcodes_REPE = ops_2386_REPE;
x86_2386_opcodes_REPNE = ops_2386_REPNE;
x86_opcodes_3DNOW = ops_3DNOW;
#ifdef USE_DYNAREC
x86_dynarec_opcodes_REPE = dynarec_ops_REPE;
x86_dynarec_opcodes_REPNE = dynarec_ops_REPNE;
x86_dynarec_opcodes_3DNOW = dynarec_ops_3DNOW;
#endif /* USE_DYNAREC */
if (hasfpu) {
#ifdef USE_DYNAREC
if (fpu_softfloat) {
x86_dynarec_opcodes_d8_a16 = dynarec_ops_sf_fpu_d8_a16;
x86_dynarec_opcodes_d8_a32 = dynarec_ops_sf_fpu_d8_a32;
x86_dynarec_opcodes_d9_a16 = dynarec_ops_sf_fpu_d9_a16;
x86_dynarec_opcodes_d9_a32 = dynarec_ops_sf_fpu_d9_a32;
x86_dynarec_opcodes_da_a16 = dynarec_ops_sf_fpu_da_a16;
x86_dynarec_opcodes_da_a32 = dynarec_ops_sf_fpu_da_a32;
x86_dynarec_opcodes_db_a16 = dynarec_ops_sf_fpu_db_a16;
x86_dynarec_opcodes_db_a32 = dynarec_ops_sf_fpu_db_a32;
x86_dynarec_opcodes_dc_a16 = dynarec_ops_sf_fpu_dc_a16;
x86_dynarec_opcodes_dc_a32 = dynarec_ops_sf_fpu_dc_a32;
x86_dynarec_opcodes_dd_a16 = dynarec_ops_sf_fpu_dd_a16;
x86_dynarec_opcodes_dd_a32 = dynarec_ops_sf_fpu_dd_a32;
x86_dynarec_opcodes_de_a16 = dynarec_ops_sf_fpu_de_a16;
x86_dynarec_opcodes_de_a32 = dynarec_ops_sf_fpu_de_a32;
x86_dynarec_opcodes_df_a16 = dynarec_ops_sf_fpu_df_a16;
x86_dynarec_opcodes_df_a32 = dynarec_ops_sf_fpu_df_a32;
} else {
x86_dynarec_opcodes_d8_a16 = dynarec_ops_fpu_d8_a16;
x86_dynarec_opcodes_d8_a32 = dynarec_ops_fpu_d8_a32;
x86_dynarec_opcodes_d9_a16 = dynarec_ops_fpu_d9_a16;
x86_dynarec_opcodes_d9_a32 = dynarec_ops_fpu_d9_a32;
x86_dynarec_opcodes_da_a16 = dynarec_ops_fpu_da_a16;
x86_dynarec_opcodes_da_a32 = dynarec_ops_fpu_da_a32;
x86_dynarec_opcodes_db_a16 = dynarec_ops_fpu_db_a16;
x86_dynarec_opcodes_db_a32 = dynarec_ops_fpu_db_a32;
x86_dynarec_opcodes_dc_a16 = dynarec_ops_fpu_dc_a16;
x86_dynarec_opcodes_dc_a32 = dynarec_ops_fpu_dc_a32;
x86_dynarec_opcodes_dd_a16 = dynarec_ops_fpu_dd_a16;
x86_dynarec_opcodes_dd_a32 = dynarec_ops_fpu_dd_a32;
x86_dynarec_opcodes_de_a16 = dynarec_ops_fpu_de_a16;
x86_dynarec_opcodes_de_a32 = dynarec_ops_fpu_de_a32;
x86_dynarec_opcodes_df_a16 = dynarec_ops_fpu_df_a16;
x86_dynarec_opcodes_df_a32 = dynarec_ops_fpu_df_a32;
}
#endif /* USE_DYNAREC */
if (fpu_softfloat) {
x86_opcodes_d8_a16 = ops_sf_fpu_d8_a16;
x86_opcodes_d8_a32 = ops_sf_fpu_d8_a32;
x86_opcodes_d9_a16 = ops_sf_fpu_d9_a16;
x86_opcodes_d9_a32 = ops_sf_fpu_d9_a32;
x86_opcodes_da_a16 = ops_sf_fpu_da_a16;
x86_opcodes_da_a32 = ops_sf_fpu_da_a32;
x86_opcodes_db_a16 = ops_sf_fpu_db_a16;
x86_opcodes_db_a32 = ops_sf_fpu_db_a32;
x86_opcodes_dc_a16 = ops_sf_fpu_dc_a16;
x86_opcodes_dc_a32 = ops_sf_fpu_dc_a32;
x86_opcodes_dd_a16 = ops_sf_fpu_dd_a16;
x86_opcodes_dd_a32 = ops_sf_fpu_dd_a32;
x86_opcodes_de_a16 = ops_sf_fpu_de_a16;
x86_opcodes_de_a32 = ops_sf_fpu_de_a32;
x86_opcodes_df_a16 = ops_sf_fpu_df_a16;
x86_opcodes_df_a32 = ops_sf_fpu_df_a32;
x86_2386_opcodes_d8_a16 = ops_2386_sf_fpu_d8_a16;
x86_2386_opcodes_d8_a32 = ops_2386_sf_fpu_d8_a32;
x86_2386_opcodes_d9_a16 = ops_2386_sf_fpu_d9_a16;
x86_2386_opcodes_d9_a32 = ops_2386_sf_fpu_d9_a32;
x86_2386_opcodes_da_a16 = ops_2386_sf_fpu_da_a16;
x86_2386_opcodes_da_a32 = ops_2386_sf_fpu_da_a32;
x86_2386_opcodes_db_a16 = ops_2386_sf_fpu_db_a16;
x86_2386_opcodes_db_a32 = ops_2386_sf_fpu_db_a32;
x86_2386_opcodes_dc_a16 = ops_2386_sf_fpu_dc_a16;
x86_2386_opcodes_dc_a32 = ops_2386_sf_fpu_dc_a32;
x86_2386_opcodes_dd_a16 = ops_2386_sf_fpu_dd_a16;
x86_2386_opcodes_dd_a32 = ops_2386_sf_fpu_dd_a32;
x86_2386_opcodes_de_a16 = ops_2386_sf_fpu_de_a16;
x86_2386_opcodes_de_a32 = ops_2386_sf_fpu_de_a32;
x86_2386_opcodes_df_a16 = ops_2386_sf_fpu_df_a16;
x86_2386_opcodes_df_a32 = ops_2386_sf_fpu_df_a32;
} else {
x86_opcodes_d8_a16 = ops_fpu_d8_a16;
x86_opcodes_d8_a32 = ops_fpu_d8_a32;
x86_opcodes_d9_a16 = ops_fpu_d9_a16;
x86_opcodes_d9_a32 = ops_fpu_d9_a32;
x86_opcodes_da_a16 = ops_fpu_da_a16;
x86_opcodes_da_a32 = ops_fpu_da_a32;
x86_opcodes_db_a16 = ops_fpu_db_a16;
x86_opcodes_db_a32 = ops_fpu_db_a32;
x86_opcodes_dc_a16 = ops_fpu_dc_a16;
x86_opcodes_dc_a32 = ops_fpu_dc_a32;
x86_opcodes_dd_a16 = ops_fpu_dd_a16;
x86_opcodes_dd_a32 = ops_fpu_dd_a32;
x86_opcodes_de_a16 = ops_fpu_de_a16;
x86_opcodes_de_a32 = ops_fpu_de_a32;
x86_opcodes_df_a16 = ops_fpu_df_a16;
x86_opcodes_df_a32 = ops_fpu_df_a32;
x86_2386_opcodes_d8_a16 = ops_2386_fpu_d8_a16;
x86_2386_opcodes_d8_a32 = ops_2386_fpu_d8_a32;
x86_2386_opcodes_d9_a16 = ops_2386_fpu_d9_a16;
x86_2386_opcodes_d9_a32 = ops_2386_fpu_d9_a32;
x86_2386_opcodes_da_a16 = ops_2386_fpu_da_a16;
x86_2386_opcodes_da_a32 = ops_2386_fpu_da_a32;
x86_2386_opcodes_db_a16 = ops_2386_fpu_db_a16;
x86_2386_opcodes_db_a32 = ops_2386_fpu_db_a32;
x86_2386_opcodes_dc_a16 = ops_2386_fpu_dc_a16;
x86_2386_opcodes_dc_a32 = ops_2386_fpu_dc_a32;
x86_2386_opcodes_dd_a16 = ops_2386_fpu_dd_a16;
x86_2386_opcodes_dd_a32 = ops_2386_fpu_dd_a32;
x86_2386_opcodes_de_a16 = ops_2386_fpu_de_a16;
x86_2386_opcodes_de_a32 = ops_2386_fpu_de_a32;
x86_2386_opcodes_df_a16 = ops_2386_fpu_df_a16;
x86_2386_opcodes_df_a32 = ops_2386_fpu_df_a32;
}
} else {
#ifdef USE_DYNAREC
x86_dynarec_opcodes_d8_a16 = dynarec_ops_nofpu_a16;
x86_dynarec_opcodes_d8_a32 = dynarec_ops_nofpu_a32;
x86_dynarec_opcodes_d9_a16 = dynarec_ops_nofpu_a16;
x86_dynarec_opcodes_d9_a32 = dynarec_ops_nofpu_a32;
x86_dynarec_opcodes_da_a16 = dynarec_ops_nofpu_a16;
x86_dynarec_opcodes_da_a32 = dynarec_ops_nofpu_a32;
x86_dynarec_opcodes_db_a16 = dynarec_ops_nofpu_a16;
x86_dynarec_opcodes_db_a32 = dynarec_ops_nofpu_a32;
x86_dynarec_opcodes_dc_a16 = dynarec_ops_nofpu_a16;
x86_dynarec_opcodes_dc_a32 = dynarec_ops_nofpu_a32;
x86_dynarec_opcodes_dd_a16 = dynarec_ops_nofpu_a16;
x86_dynarec_opcodes_dd_a32 = dynarec_ops_nofpu_a32;
x86_dynarec_opcodes_de_a16 = dynarec_ops_nofpu_a16;
x86_dynarec_opcodes_de_a32 = dynarec_ops_nofpu_a32;
x86_dynarec_opcodes_df_a16 = dynarec_ops_nofpu_a16;
x86_dynarec_opcodes_df_a32 = dynarec_ops_nofpu_a32;
#endif /* USE_DYNAREC */
x86_opcodes_d8_a16 = ops_nofpu_a16;
x86_opcodes_d8_a32 = ops_nofpu_a32;
x86_opcodes_d9_a16 = ops_nofpu_a16;
x86_opcodes_d9_a32 = ops_nofpu_a32;
x86_opcodes_da_a16 = ops_nofpu_a16;
x86_opcodes_da_a32 = ops_nofpu_a32;
x86_opcodes_db_a16 = ops_nofpu_a16;
x86_opcodes_db_a32 = ops_nofpu_a32;
x86_opcodes_dc_a16 = ops_nofpu_a16;
x86_opcodes_dc_a32 = ops_nofpu_a32;
x86_opcodes_dd_a16 = ops_nofpu_a16;
x86_opcodes_dd_a32 = ops_nofpu_a32;
x86_opcodes_de_a16 = ops_nofpu_a16;
x86_opcodes_de_a32 = ops_nofpu_a32;
x86_opcodes_df_a16 = ops_nofpu_a16;
x86_opcodes_df_a32 = ops_nofpu_a32;
x86_2386_opcodes_d8_a16 = ops_2386_nofpu_a16;
x86_2386_opcodes_d8_a32 = ops_2386_nofpu_a32;
x86_2386_opcodes_d9_a16 = ops_2386_nofpu_a16;
x86_2386_opcodes_d9_a32 = ops_2386_nofpu_a32;
x86_2386_opcodes_da_a16 = ops_2386_nofpu_a16;
x86_2386_opcodes_da_a32 = ops_2386_nofpu_a32;
x86_2386_opcodes_db_a16 = ops_2386_nofpu_a16;
x86_2386_opcodes_db_a32 = ops_2386_nofpu_a32;
x86_2386_opcodes_dc_a16 = ops_2386_nofpu_a16;
x86_2386_opcodes_dc_a32 = ops_2386_nofpu_a32;
x86_2386_opcodes_dd_a16 = ops_2386_nofpu_a16;
x86_2386_opcodes_dd_a32 = ops_2386_nofpu_a32;
x86_2386_opcodes_de_a16 = ops_2386_nofpu_a16;
x86_2386_opcodes_de_a32 = ops_2386_nofpu_a32;
x86_2386_opcodes_df_a16 = ops_2386_nofpu_a16;
x86_2386_opcodes_df_a32 = ops_2386_nofpu_a32;
}
#ifdef USE_DYNAREC
codegen_timing_set(&codegen_timing_486);
#endif /* USE_DYNAREC */
memset(&msr, 0, sizeof(msr));
timing_misaligned = 0;
cpu_cyrix_alignment = 0;
cpu_cpurst_on_sr = 0;
cpu_CR4_mask = 0;
switch (cpu_s->cpu_type) {
case CPU_8088:
case CPU_8086:
break;
case CPU_V20:
case CPU_V30:
case CPU_186:
case CPU_188:
#ifdef USE_DYNAREC
x86_setopcodes(ops_186, ops_186_0f, dynarec_ops_186, dynarec_ops_186_0f);
#else
x86_setopcodes(ops_186, ops_186_0f);
#endif /* USE_DYNAREC */
x86_setopcodes_2386(ops_2386_186, ops_2386_186_0f);
break;
case CPU_286:
#ifdef USE_DYNAREC
x86_setopcodes(ops_286, ops_286_0f, dynarec_ops_286, dynarec_ops_286_0f);
#else
x86_setopcodes(ops_286, ops_286_0f);
#endif /* USE_DYNAREC */
x86_setopcodes_2386(ops_2386_286, ops_2386_286_0f);
if (fpu_type == FPU_287) {
#ifdef USE_DYNAREC
if (fpu_softfloat) {
x86_dynarec_opcodes_d9_a16 = dynarec_ops_sf_fpu_287_d9_a16;
x86_dynarec_opcodes_d9_a32 = dynarec_ops_sf_fpu_287_d9_a32;
x86_dynarec_opcodes_da_a16 = dynarec_ops_sf_fpu_287_da_a16;
x86_dynarec_opcodes_da_a32 = dynarec_ops_sf_fpu_287_da_a32;
x86_dynarec_opcodes_db_a16 = dynarec_ops_sf_fpu_287_db_a16;
x86_dynarec_opcodes_db_a32 = dynarec_ops_sf_fpu_287_db_a32;
x86_dynarec_opcodes_dc_a16 = dynarec_ops_sf_fpu_287_dc_a16;
x86_dynarec_opcodes_dc_a32 = dynarec_ops_sf_fpu_287_dc_a32;
x86_dynarec_opcodes_dd_a16 = dynarec_ops_sf_fpu_287_dd_a16;
x86_dynarec_opcodes_dd_a32 = dynarec_ops_sf_fpu_287_dd_a32;
x86_dynarec_opcodes_de_a16 = dynarec_ops_sf_fpu_287_de_a16;
x86_dynarec_opcodes_de_a32 = dynarec_ops_sf_fpu_287_de_a32;
x86_dynarec_opcodes_df_a16 = dynarec_ops_sf_fpu_287_df_a16;
x86_dynarec_opcodes_df_a32 = dynarec_ops_sf_fpu_287_df_a32;
} else {
x86_dynarec_opcodes_d9_a16 = dynarec_ops_fpu_287_d9_a16;
x86_dynarec_opcodes_d9_a32 = dynarec_ops_fpu_287_d9_a32;
x86_dynarec_opcodes_da_a16 = dynarec_ops_fpu_287_da_a16;
x86_dynarec_opcodes_da_a32 = dynarec_ops_fpu_287_da_a32;
x86_dynarec_opcodes_db_a16 = dynarec_ops_fpu_287_db_a16;
x86_dynarec_opcodes_db_a32 = dynarec_ops_fpu_287_db_a32;
x86_dynarec_opcodes_dc_a16 = dynarec_ops_fpu_287_dc_a16;
x86_dynarec_opcodes_dc_a32 = dynarec_ops_fpu_287_dc_a32;
x86_dynarec_opcodes_dd_a16 = dynarec_ops_fpu_287_dd_a16;
x86_dynarec_opcodes_dd_a32 = dynarec_ops_fpu_287_dd_a32;
x86_dynarec_opcodes_de_a16 = dynarec_ops_fpu_287_de_a16;
x86_dynarec_opcodes_de_a32 = dynarec_ops_fpu_287_de_a32;
x86_dynarec_opcodes_df_a16 = dynarec_ops_fpu_287_df_a16;
x86_dynarec_opcodes_df_a32 = dynarec_ops_fpu_287_df_a32;
}
#endif /* USE_DYNAREC */
if (fpu_softfloat) {
x86_opcodes_d9_a16 = ops_sf_fpu_287_d9_a16;
x86_opcodes_d9_a32 = ops_sf_fpu_287_d9_a32;
x86_opcodes_da_a16 = ops_sf_fpu_287_da_a16;
x86_opcodes_da_a32 = ops_sf_fpu_287_da_a32;
x86_opcodes_db_a16 = ops_sf_fpu_287_db_a16;
x86_opcodes_db_a32 = ops_sf_fpu_287_db_a32;
x86_opcodes_dc_a16 = ops_sf_fpu_287_dc_a16;
x86_opcodes_dc_a32 = ops_sf_fpu_287_dc_a32;
x86_opcodes_dd_a16 = ops_sf_fpu_287_dd_a16;
x86_opcodes_dd_a32 = ops_sf_fpu_287_dd_a32;
x86_opcodes_de_a16 = ops_sf_fpu_287_de_a16;
x86_opcodes_de_a32 = ops_sf_fpu_287_de_a32;
x86_opcodes_df_a16 = ops_sf_fpu_287_df_a16;
x86_opcodes_df_a32 = ops_sf_fpu_287_df_a32;
x86_2386_opcodes_d9_a16 = ops_2386_sf_fpu_287_d9_a16;
x86_2386_opcodes_d9_a32 = ops_2386_sf_fpu_287_d9_a32;
x86_2386_opcodes_da_a16 = ops_2386_sf_fpu_287_da_a16;
x86_2386_opcodes_da_a32 = ops_2386_sf_fpu_287_da_a32;
x86_2386_opcodes_db_a16 = ops_2386_sf_fpu_287_db_a16;
x86_2386_opcodes_db_a32 = ops_2386_sf_fpu_287_db_a32;
x86_2386_opcodes_dc_a16 = ops_2386_sf_fpu_287_dc_a16;
x86_2386_opcodes_dc_a32 = ops_2386_sf_fpu_287_dc_a32;
x86_2386_opcodes_dd_a16 = ops_2386_sf_fpu_287_dd_a16;
x86_2386_opcodes_dd_a32 = ops_2386_sf_fpu_287_dd_a32;
x86_2386_opcodes_de_a16 = ops_2386_sf_fpu_287_de_a16;
x86_2386_opcodes_de_a32 = ops_2386_sf_fpu_287_de_a32;
x86_2386_opcodes_df_a16 = ops_2386_sf_fpu_287_df_a16;
x86_2386_opcodes_df_a32 = ops_2386_sf_fpu_287_df_a32;
} else {
x86_opcodes_d9_a16 = ops_fpu_287_d9_a16;
x86_opcodes_d9_a32 = ops_fpu_287_d9_a32;
x86_opcodes_da_a16 = ops_fpu_287_da_a16;
x86_opcodes_da_a32 = ops_fpu_287_da_a32;
x86_opcodes_db_a16 = ops_fpu_287_db_a16;
x86_opcodes_db_a32 = ops_fpu_287_db_a32;
x86_opcodes_dc_a16 = ops_fpu_287_dc_a16;
x86_opcodes_dc_a32 = ops_fpu_287_dc_a32;
x86_opcodes_dd_a16 = ops_fpu_287_dd_a16;
x86_opcodes_dd_a32 = ops_fpu_287_dd_a32;
x86_opcodes_de_a16 = ops_fpu_287_de_a16;
x86_opcodes_de_a32 = ops_fpu_287_de_a32;
x86_opcodes_df_a16 = ops_fpu_287_df_a16;
x86_opcodes_df_a32 = ops_fpu_287_df_a32;
x86_2386_opcodes_d9_a16 = ops_2386_fpu_287_d9_a16;
x86_2386_opcodes_d9_a32 = ops_2386_fpu_287_d9_a32;
x86_2386_opcodes_da_a16 = ops_2386_fpu_287_da_a16;
x86_2386_opcodes_da_a32 = ops_2386_fpu_287_da_a32;
x86_2386_opcodes_db_a16 = ops_2386_fpu_287_db_a16;
x86_2386_opcodes_db_a32 = ops_2386_fpu_287_db_a32;
x86_2386_opcodes_dc_a16 = ops_2386_fpu_287_dc_a16;
x86_2386_opcodes_dc_a32 = ops_2386_fpu_287_dc_a32;
x86_2386_opcodes_dd_a16 = ops_2386_fpu_287_dd_a16;
x86_2386_opcodes_dd_a32 = ops_2386_fpu_287_dd_a32;
x86_2386_opcodes_de_a16 = ops_2386_fpu_287_de_a16;
x86_2386_opcodes_de_a32 = ops_2386_fpu_287_de_a32;
x86_2386_opcodes_df_a16 = ops_2386_fpu_287_df_a16;
x86_2386_opcodes_df_a32 = ops_2386_fpu_287_df_a32;
}
}
timing_rr = 2; /* register dest - register src */
timing_rm = 7; /* register dest - memory src */
timing_mr = 7; /* memory dest - register src */
timing_mm = 7; /* memory dest - memory src */
timing_rml = 9; /* register dest - memory src long */
timing_mrl = 11; /* memory dest - register src long */
timing_mml = 11; /* memory dest - memory src */
timing_bt = 4; /* branch taken */
timing_bnt = 3; /* branch not taken */
timing_int = 0;
timing_int_rm = 23;
timing_int_v86 = 0;
timing_int_pm = 40;
timing_int_pm_outer = 78;
timing_iret_rm = 17;
timing_iret_v86 = 0;
timing_iret_pm = 31;
timing_iret_pm_outer = 55;
timing_call_rm = 13;
timing_call_pm = 26;
timing_call_pm_gate = 52;
timing_call_pm_gate_inner = 82;
timing_retf_rm = 15;
timing_retf_pm = 25;
timing_retf_pm_outer = 55;
timing_jmp_rm = 11;
timing_jmp_pm = 23;
timing_jmp_pm_gate = 38;
break;
case CPU_IBM486SLC:
case CPU_IBM386SLC:
case CPU_IBM486BL:
#ifdef USE_DYNAREC
x86_setopcodes(ops_386, ops_ibm486_0f, dynarec_ops_386, dynarec_ops_ibm486_0f);
#else
x86_setopcodes(ops_386, ops_ibm486_0f);
#endif /* USE_DYNAREC */
x86_setopcodes_2386(ops_2386_386, ops_2386_ibm486_0f);
cpu_features = CPU_FEATURE_MSR;
fallthrough;
case CPU_386SX:
case CPU_386DX:
/* In case we get Deskpro 386 emulation */
if (fpu_type == FPU_287) {
#ifdef USE_DYNAREC
if (fpu_softfloat) {
x86_dynarec_opcodes_d9_a16 = dynarec_ops_sf_fpu_287_d9_a16;
x86_dynarec_opcodes_d9_a32 = dynarec_ops_sf_fpu_287_d9_a32;
x86_dynarec_opcodes_da_a16 = dynarec_ops_sf_fpu_287_da_a16;
x86_dynarec_opcodes_da_a32 = dynarec_ops_sf_fpu_287_da_a32;
x86_dynarec_opcodes_db_a16 = dynarec_ops_sf_fpu_287_db_a16;
x86_dynarec_opcodes_db_a32 = dynarec_ops_sf_fpu_287_db_a32;
x86_dynarec_opcodes_dc_a16 = dynarec_ops_sf_fpu_287_dc_a16;
x86_dynarec_opcodes_dc_a32 = dynarec_ops_sf_fpu_287_dc_a32;
x86_dynarec_opcodes_dd_a16 = dynarec_ops_sf_fpu_287_dd_a16;
x86_dynarec_opcodes_dd_a32 = dynarec_ops_sf_fpu_287_dd_a32;
x86_dynarec_opcodes_de_a16 = dynarec_ops_sf_fpu_287_de_a16;
x86_dynarec_opcodes_de_a32 = dynarec_ops_sf_fpu_287_de_a32;
x86_dynarec_opcodes_df_a16 = dynarec_ops_sf_fpu_287_df_a16;
x86_dynarec_opcodes_df_a32 = dynarec_ops_sf_fpu_287_df_a32;
} else {
x86_dynarec_opcodes_d9_a16 = dynarec_ops_fpu_287_d9_a16;
x86_dynarec_opcodes_d9_a32 = dynarec_ops_fpu_287_d9_a32;
x86_dynarec_opcodes_da_a16 = dynarec_ops_fpu_287_da_a16;
x86_dynarec_opcodes_da_a32 = dynarec_ops_fpu_287_da_a32;
x86_dynarec_opcodes_db_a16 = dynarec_ops_fpu_287_db_a16;
x86_dynarec_opcodes_db_a32 = dynarec_ops_fpu_287_db_a32;
x86_dynarec_opcodes_dc_a16 = dynarec_ops_fpu_287_dc_a16;
x86_dynarec_opcodes_dc_a32 = dynarec_ops_fpu_287_dc_a32;
x86_dynarec_opcodes_dd_a16 = dynarec_ops_fpu_287_dd_a16;
x86_dynarec_opcodes_dd_a32 = dynarec_ops_fpu_287_dd_a32;
x86_dynarec_opcodes_de_a16 = dynarec_ops_fpu_287_de_a16;
x86_dynarec_opcodes_de_a32 = dynarec_ops_fpu_287_de_a32;
x86_dynarec_opcodes_df_a16 = dynarec_ops_fpu_287_df_a16;
x86_dynarec_opcodes_df_a32 = dynarec_ops_fpu_287_df_a32;
}
#endif /* USE_DYNAREC */
if (fpu_softfloat) {
x86_opcodes_d9_a16 = ops_sf_fpu_287_d9_a16;
x86_opcodes_d9_a32 = ops_sf_fpu_287_d9_a32;
x86_opcodes_da_a16 = ops_sf_fpu_287_da_a16;
x86_opcodes_da_a32 = ops_sf_fpu_287_da_a32;
x86_opcodes_db_a16 = ops_sf_fpu_287_db_a16;
x86_opcodes_db_a32 = ops_sf_fpu_287_db_a32;
x86_opcodes_dc_a16 = ops_sf_fpu_287_dc_a16;
x86_opcodes_dc_a32 = ops_sf_fpu_287_dc_a32;
x86_opcodes_dd_a16 = ops_sf_fpu_287_dd_a16;
x86_opcodes_dd_a32 = ops_sf_fpu_287_dd_a32;
x86_opcodes_de_a16 = ops_sf_fpu_287_de_a16;
x86_opcodes_de_a32 = ops_sf_fpu_287_de_a32;
x86_opcodes_df_a16 = ops_sf_fpu_287_df_a16;
x86_opcodes_df_a32 = ops_sf_fpu_287_df_a32;
x86_2386_opcodes_d9_a16 = ops_2386_sf_fpu_287_d9_a16;
x86_2386_opcodes_d9_a32 = ops_2386_sf_fpu_287_d9_a32;
x86_2386_opcodes_da_a16 = ops_2386_sf_fpu_287_da_a16;
x86_2386_opcodes_da_a32 = ops_2386_sf_fpu_287_da_a32;
x86_2386_opcodes_db_a16 = ops_2386_sf_fpu_287_db_a16;
x86_2386_opcodes_db_a32 = ops_2386_sf_fpu_287_db_a32;
x86_2386_opcodes_dc_a16 = ops_2386_sf_fpu_287_dc_a16;
x86_2386_opcodes_dc_a32 = ops_2386_sf_fpu_287_dc_a32;
x86_2386_opcodes_dd_a16 = ops_2386_sf_fpu_287_dd_a16;
x86_2386_opcodes_dd_a32 = ops_2386_sf_fpu_287_dd_a32;
x86_2386_opcodes_de_a16 = ops_2386_sf_fpu_287_de_a16;
x86_2386_opcodes_de_a32 = ops_2386_sf_fpu_287_de_a32;
x86_2386_opcodes_df_a16 = ops_2386_sf_fpu_287_df_a16;
x86_2386_opcodes_df_a32 = ops_2386_sf_fpu_287_df_a32;
} else {
x86_opcodes_d9_a16 = ops_fpu_287_d9_a16;
x86_opcodes_d9_a32 = ops_fpu_287_d9_a32;
x86_opcodes_da_a16 = ops_fpu_287_da_a16;
x86_opcodes_da_a32 = ops_fpu_287_da_a32;
x86_opcodes_db_a16 = ops_fpu_287_db_a16;
x86_opcodes_db_a32 = ops_fpu_287_db_a32;
x86_opcodes_dc_a16 = ops_fpu_287_dc_a16;
x86_opcodes_dc_a32 = ops_fpu_287_dc_a32;
x86_opcodes_dd_a16 = ops_fpu_287_dd_a16;
x86_opcodes_dd_a32 = ops_fpu_287_dd_a32;
x86_opcodes_de_a16 = ops_fpu_287_de_a16;
x86_opcodes_de_a32 = ops_fpu_287_de_a32;
x86_opcodes_df_a16 = ops_fpu_287_df_a16;
x86_opcodes_df_a32 = ops_fpu_287_df_a32;
x86_2386_opcodes_d9_a16 = ops_2386_fpu_287_d9_a16;
x86_2386_opcodes_d9_a32 = ops_2386_fpu_287_d9_a32;
x86_2386_opcodes_da_a16 = ops_2386_fpu_287_da_a16;
x86_2386_opcodes_da_a32 = ops_2386_fpu_287_da_a32;
x86_2386_opcodes_db_a16 = ops_2386_fpu_287_db_a16;
x86_2386_opcodes_db_a32 = ops_2386_fpu_287_db_a32;
x86_2386_opcodes_dc_a16 = ops_2386_fpu_287_dc_a16;
x86_2386_opcodes_dc_a32 = ops_2386_fpu_287_dc_a32;
x86_2386_opcodes_dd_a16 = ops_2386_fpu_287_dd_a16;
x86_2386_opcodes_dd_a32 = ops_2386_fpu_287_dd_a32;
x86_2386_opcodes_de_a16 = ops_2386_fpu_287_de_a16;
x86_2386_opcodes_de_a32 = ops_2386_fpu_287_de_a32;
x86_2386_opcodes_df_a16 = ops_2386_fpu_287_df_a16;
x86_2386_opcodes_df_a32 = ops_2386_fpu_287_df_a32;
}
}
timing_rr = 2; /* register dest - register src */
timing_rm = 6; /* register dest - memory src */
timing_mr = 7; /* memory dest - register src */
timing_mm = 6; /* memory dest - memory src */
if (cpu_s->cpu_type >= CPU_386DX) {
timing_rml = 6; /* register dest - memory src long */
timing_mrl = 7; /* memory dest - register src long */
timing_mml = 6; /* memory dest - memory src */
} else {
timing_rml = 8; /* register dest - memory src long */
timing_mrl = 11; /* memory dest - register src long */
timing_mml = 10; /* memory dest - memory src */
}
timing_bt = 4; /* branch taken */
timing_bnt = 3; /* branch not taken */
timing_int = 0;
timing_int_rm = 37;
timing_int_v86 = 59;
timing_int_pm = 99;
timing_int_pm_outer = 119;
timing_iret_rm = 22;
timing_iret_v86 = 60;
timing_iret_pm = 38;
timing_iret_pm_outer = 82;
timing_call_rm = 17;
timing_call_pm = 34;
timing_call_pm_gate = 52;
timing_call_pm_gate_inner = 86;
timing_retf_rm = 18;
timing_retf_pm = 32;
timing_retf_pm_outer = 68;
timing_jmp_rm = 12;
timing_jmp_pm = 27;
timing_jmp_pm_gate = 45;
break;
case CPU_486SLC:
#ifdef USE_DYNAREC
x86_setopcodes(ops_386, ops_486_0f, dynarec_ops_386, dynarec_ops_486_0f);
#else
x86_setopcodes(ops_386, ops_486_0f);
#endif /* USE_DYNAREC */
x86_setopcodes_2386(ops_2386_386, ops_2386_486_0f);
timing_rr = 1; /* register dest - register src */
timing_rm = 3; /* register dest - memory src */
timing_mr = 5; /* memory dest - register src */
timing_mm = 3;
timing_rml = 5; /* register dest - memory src long */
timing_mrl = 7; /* memory dest - register src long */
timing_mml = 7;
timing_bt = 5; /* branch taken */
timing_bnt = 1; /* branch not taken */
timing_int = 4; /* unknown */
timing_int_rm = 14;
timing_int_v86 = 82;
timing_int_pm = 49;
timing_int_pm_outer = 77;
timing_iret_rm = 14;
timing_iret_v86 = 66;
timing_iret_pm = 31;
timing_iret_pm_outer = 66;
timing_call_rm = 12;
timing_call_pm = 30;
timing_call_pm_gate = 41;
timing_call_pm_gate_inner = 83;
timing_retf_rm = 13;
timing_retf_pm = 26;
timing_retf_pm_outer = 61;
timing_jmp_rm = 9;
timing_jmp_pm = 26;
timing_jmp_pm_gate = 37;
timing_misaligned = 3;
break;
case CPU_486DLC:
#ifdef USE_DYNAREC
x86_setopcodes(ops_386, ops_486_0f, dynarec_ops_386, dynarec_ops_486_0f);
#else
x86_setopcodes(ops_386, ops_486_0f);
#endif /* USE_DYNAREC */
x86_setopcodes_2386(ops_2386_386, ops_2386_486_0f);
timing_rr = 1; /* register dest - register src */
timing_rm = 3; /* register dest - memory src */
timing_mr = 3; /* memory dest - register src */
timing_mm = 3;
timing_rml = 3; /* register dest - memory src long */
timing_mrl = 3; /* memory dest - register src long */
timing_mml = 3;
timing_bt = 5; /* branch taken */
timing_bnt = 1; /* branch not taken */
timing_int = 4; /* unknown */
timing_int_rm = 14;
timing_int_v86 = 82;
timing_int_pm = 49;
timing_int_pm_outer = 77;
timing_iret_rm = 14;
timing_iret_v86 = 66;
timing_iret_pm = 31;
timing_iret_pm_outer = 66;
timing_call_rm = 12;
timing_call_pm = 30;
timing_call_pm_gate = 41;
timing_call_pm_gate_inner = 83;
timing_retf_rm = 13;
timing_retf_pm = 26;
timing_retf_pm_outer = 61;
timing_jmp_rm = 9;
timing_jmp_pm = 26;
timing_jmp_pm_gate = 37;
timing_misaligned = 3;
break;
case CPU_i486SX_SLENH:
case CPU_i486DX_SLENH:
cpu_features = CPU_FEATURE_CR4 | CPU_FEATURE_VME;
cpu_CR4_mask = CR4_VME | CR4_PVI | CR4_VME;
fallthrough;
case CPU_RAPIDCAD:
case CPU_i486SX:
case CPU_i486DX:
case CPU_Am486SX:
case CPU_Am486DX:
case CPU_Am486DXL:
case CPU_ENH_Am486DX:
/*AMD timing identical to Intel*/
#ifdef USE_DYNAREC
x86_setopcodes(ops_386, ops_486_0f, dynarec_ops_386, dynarec_ops_486_0f);
#else
x86_setopcodes(ops_386, ops_486_0f);
#endif /* USE_DYNAREC */
x86_setopcodes_2386(ops_2386_386, ops_2386_486_0f);
timing_rr = 1; /* register dest - register src */
timing_rm = 2; /* register dest - memory src */
timing_mr = 3; /* memory dest - register src */
timing_mm = 3;
timing_rml = 2; /* register dest - memory src long */
timing_mrl = 3; /* memory dest - register src long */
timing_mml = 3;
timing_bt = 2; /* branch taken */
timing_bnt = 1; /* branch not taken */
timing_int = 4;
timing_int_rm = 26;
timing_int_v86 = 82;
timing_int_pm = 44;
timing_int_pm_outer = 71;
timing_iret_rm = 15;
timing_iret_v86 = 36; /* unknown */
timing_iret_pm = 20;
timing_iret_pm_outer = 36;
timing_call_rm = 18;
timing_call_pm = 20;
timing_call_pm_gate = 35;
timing_call_pm_gate_inner = 69;
timing_retf_rm = 13;
timing_retf_pm = 17;
timing_retf_pm_outer = 35;
timing_jmp_rm = 17;
timing_jmp_pm = 19;
timing_jmp_pm_gate = 32;
timing_misaligned = 3;
break;
case CPU_Cx486S:
case CPU_Cx486DX:
case CPU_STPC:
#ifdef USE_DYNAREC
if (cpu_s->cpu_type == CPU_STPC)
x86_setopcodes(ops_386, ops_stpc_0f, dynarec_ops_386, dynarec_ops_stpc_0f);
else
x86_setopcodes(ops_386, ops_c486_0f, dynarec_ops_386, dynarec_ops_c486_0f);
#else
if (cpu_s->cpu_type == CPU_STPC)
x86_setopcodes(ops_386, ops_stpc_0f);
else
x86_setopcodes(ops_386, ops_c486_0f);
#endif /* USE_DYNAREC */
timing_rr = 1; /* register dest - register src */
timing_rm = 3; /* register dest - memory src */
timing_mr = 3; /* memory dest - register src */
timing_mm = 3;
timing_rml = 3; /* register dest - memory src long */
timing_mrl = 3; /* memory dest - register src long */
timing_mml = 3;
timing_bt = 3; /* branch taken */
timing_bnt = 1; /* branch not taken */
timing_int = 4;
timing_int_rm = 14;
timing_int_v86 = 82;
timing_int_pm = 49;
timing_int_pm_outer = 77;
timing_iret_rm = 14;
timing_iret_v86 = 66; /* unknown */
timing_iret_pm = 31;
timing_iret_pm_outer = 66;
timing_call_rm = 12;
timing_call_pm = 30;
timing_call_pm_gate = 41;
timing_call_pm_gate_inner = 83;
timing_retf_rm = 13;
timing_retf_pm = 26;
timing_retf_pm_outer = 61;
timing_jmp_rm = 9;
timing_jmp_pm = 26;
timing_jmp_pm_gate = 37;
timing_misaligned = 3;
if (cpu_s->cpu_type == CPU_STPC)
cpu_features = CPU_FEATURE_RDTSC;
break;
case CPU_Cx5x86:
#ifdef USE_DYNAREC
x86_setopcodes(ops_386, ops_c486_0f, dynarec_ops_386, dynarec_ops_c486_0f);
#else
x86_setopcodes(ops_386, ops_c486_0f);
#endif /* USE_DYNAREC */
timing_rr = 1; /* register dest - register src */
timing_rm = 1; /* register dest - memory src */
timing_mr = 2; /* memory dest - register src */
timing_mm = 2;
timing_rml = 1; /* register dest - memory src long */
timing_mrl = 2; /* memory dest - register src long */
timing_mml = 2;
timing_bt = 4; /* branch taken */
timing_bnt = 1; /* branch not taken */
timing_int = 0;
timing_int_rm = 9;
timing_int_v86 = 82; /* unknown */
timing_int_pm = 21;
timing_int_pm_outer = 32;
timing_iret_rm = 7;
timing_iret_v86 = 26; /* unknown */
timing_iret_pm = 10;
timing_iret_pm_outer = 26;
timing_call_rm = 4;
timing_call_pm = 15;
timing_call_pm_gate = 26;
timing_call_pm_gate_inner = 35;
timing_retf_rm = 4;
timing_retf_pm = 7;
timing_retf_pm_outer = 23;
timing_jmp_rm = 5;
timing_jmp_pm = 7;
timing_jmp_pm_gate = 17;
timing_misaligned = 2;
cpu_cyrix_alignment = 1;
break;
case CPU_WINCHIP:
case CPU_WINCHIP2:
#ifdef USE_DYNAREC
if (cpu_s->cpu_type == CPU_WINCHIP2)
x86_setopcodes(ops_386, ops_winchip2_0f, dynarec_ops_386, dynarec_ops_winchip2_0f);
else
x86_setopcodes(ops_386, ops_winchip_0f, dynarec_ops_386, dynarec_ops_winchip_0f);
#else
if (cpu_s->cpu_type == CPU_WINCHIP2)
x86_setopcodes(ops_386, ops_winchip2_0f);
else
x86_setopcodes(ops_386, ops_winchip_0f);
#endif /* USE_DYNAREC */
timing_rr = 1; /* register dest - register src */
timing_rm = 2; /* register dest - memory src */
timing_mr = 2; /* memory dest - register src */
timing_mm = 3;
timing_rml = 2; /* register dest - memory src long */
timing_mrl = 2; /* memory dest - register src long */
timing_mml = 3;
timing_bt = 2; /* branch taken */
timing_bnt = 1; /* branch not taken */
/*unknown*/
timing_int_rm = 26;
timing_int_v86 = 82;
timing_int_pm = 44;
timing_int_pm_outer = 71;
timing_iret_rm = 7;
timing_iret_v86 = 26;
timing_iret_pm = 10;
timing_iret_pm_outer = 26;
timing_call_rm = 4;
timing_call_pm = 15;
timing_call_pm_gate = 26;
timing_call_pm_gate_inner = 35;
timing_retf_rm = 4;
timing_retf_pm = 7;
timing_retf_pm_outer = 23;
timing_jmp_rm = 5;
timing_jmp_pm = 7;
timing_jmp_pm_gate = 17;
timing_misaligned = 2;
cpu_cyrix_alignment = 1;
cpu_features = CPU_FEATURE_RDTSC | CPU_FEATURE_MMX | CPU_FEATURE_MSR | CPU_FEATURE_CR4;
if (cpu_s->cpu_type == CPU_WINCHIP2)
cpu_features |= CPU_FEATURE_3DNOW;
msr.fcr = (1 << 8) | (1 << 9) | (1 << 12) | (1 << 16) | (1 << 19) | (1 << 21);
if (cpu_s->cpu_type == CPU_WINCHIP2)
msr.fcr |= (1 << 18) | (1 << 20);
cpu_CR4_mask = CR4_TSD | CR4_DE | CR4_MCE | CR4_PCE;
#ifdef USE_DYNAREC
if (cpu_s->cpu_type == CPU_WINCHIP2)
codegen_timing_set(&codegen_timing_winchip2);
else
codegen_timing_set(&codegen_timing_winchip);
#endif /* USE_DYNAREC */
break;
case CPU_P24T:
case CPU_PENTIUM:
case CPU_PENTIUMMMX:
#ifdef USE_DYNAREC
if (cpu_s->cpu_type == CPU_PENTIUMMMX)
x86_setopcodes(ops_386, ops_pentiummmx_0f, dynarec_ops_386, dynarec_ops_pentiummmx_0f);
else
x86_setopcodes(ops_386, ops_pentium_0f, dynarec_ops_386, dynarec_ops_pentium_0f);
#else
if (cpu_s->cpu_type == CPU_PENTIUMMMX)
x86_setopcodes(ops_386, ops_pentiummmx_0f);
else
x86_setopcodes(ops_386, ops_pentium_0f);
#endif /* USE_DYNAREC */
timing_rr = 1; /* register dest - register src */
timing_rm = 2; /* register dest - memory src */
timing_mr = 3; /* memory dest - register src */
timing_mm = 3;
timing_rml = 2; /* register dest - memory src long */
timing_mrl = 3; /* memory dest - register src long */
timing_mml = 3;
timing_bt = 0; /* branch taken */
if (cpu_s->cpu_type == CPU_PENTIUMMMX)
timing_bnt = 1; /* branch not taken */
else
timing_bnt = 2; /* branch not taken */
timing_int = 6;
timing_int_rm = 11;
timing_int_v86 = 54;
timing_int_pm = 25;
timing_int_pm_outer = 42;
timing_iret_rm = 7;
timing_iret_v86 = 27; /* unknown */
timing_iret_pm = 10;
timing_iret_pm_outer = 27;
timing_call_rm = 4;
timing_call_pm = 4;
timing_call_pm_gate = 22;
timing_call_pm_gate_inner = 44;
timing_retf_rm = 4;
timing_retf_pm = 4;
timing_retf_pm_outer = 23;
timing_jmp_rm = 3;
timing_jmp_pm = 3;
timing_jmp_pm_gate = 18;
timing_misaligned = 3;
cpu_features = CPU_FEATURE_RDTSC | CPU_FEATURE_MSR | CPU_FEATURE_CR4 | CPU_FEATURE_VME;
if (cpu_s->cpu_type == CPU_PENTIUMMMX)
cpu_features |= CPU_FEATURE_MMX;
cpu_CR4_mask = CR4_VME | CR4_PVI | CR4_TSD | CR4_DE | CR4_PSE | CR4_MCE | CR4_PCE;
#ifdef USE_DYNAREC
codegen_timing_set(&codegen_timing_pentium);
#endif /* USE_DYNAREC */
break;
#ifdef USE_CYRIX_6X86
case CPU_Cx6x86:
case CPU_Cx6x86L:
case CPU_CxGX1:
case CPU_Cx6x86MX:
if (cpu_s->cpu_type == CPU_Cx6x86MX) {
# ifdef USE_DYNAREC
if (fpu_softfloat) {
x86_dynarec_opcodes_da_a16 = dynarec_ops_sf_fpu_686_da_a16;
x86_dynarec_opcodes_da_a32 = dynarec_ops_sf_fpu_686_da_a32;
x86_dynarec_opcodes_db_a16 = dynarec_ops_sf_fpu_686_db_a16;
x86_dynarec_opcodes_db_a32 = dynarec_ops_sf_fpu_686_db_a32;
x86_dynarec_opcodes_df_a16 = dynarec_ops_sf_fpu_686_df_a16;
x86_dynarec_opcodes_df_a32 = dynarec_ops_sf_fpu_686_df_a32;
} else {
x86_dynarec_opcodes_da_a16 = dynarec_ops_fpu_686_da_a16;
x86_dynarec_opcodes_da_a32 = dynarec_ops_fpu_686_da_a32;
x86_dynarec_opcodes_db_a16 = dynarec_ops_fpu_686_db_a16;
x86_dynarec_opcodes_db_a32 = dynarec_ops_fpu_686_db_a32;
x86_dynarec_opcodes_df_a16 = dynarec_ops_fpu_686_df_a16;
x86_dynarec_opcodes_df_a32 = dynarec_ops_fpu_686_df_a32;
}
# endif /* USE_DYNAREC */
if (fpu_softfloat) {
x86_opcodes_da_a16 = ops_sf_fpu_686_da_a16;
x86_opcodes_da_a32 = ops_sf_fpu_686_da_a32;
x86_opcodes_db_a16 = ops_sf_fpu_686_db_a16;
x86_opcodes_db_a32 = ops_sf_fpu_686_db_a32;
x86_opcodes_df_a16 = ops_sf_fpu_686_df_a16;
x86_opcodes_df_a32 = ops_sf_fpu_686_df_a32;
} else {
x86_opcodes_da_a16 = ops_fpu_686_da_a16;
x86_opcodes_da_a32 = ops_fpu_686_da_a32;
x86_opcodes_db_a16 = ops_fpu_686_db_a16;
x86_opcodes_db_a32 = ops_fpu_686_db_a32;
x86_opcodes_df_a16 = ops_fpu_686_df_a16;
x86_opcodes_df_a32 = ops_fpu_686_df_a32;
}
}
# ifdef USE_DYNAREC
if (cpu_s->cpu_type == CPU_Cx6x86MX)
x86_setopcodes(ops_386, ops_c6x86mx_0f, dynarec_ops_386, dynarec_ops_c6x86mx_0f);
else if (cpu_s->cpu_type == CPU_Cx6x86L)
x86_setopcodes(ops_386, ops_pentium_0f, dynarec_ops_386, dynarec_ops_pentium_0f);
else
x86_setopcodes(ops_386, ops_c6x86mx_0f, dynarec_ops_386, dynarec_ops_c6x86mx_0f);
# if 0
x86_setopcodes(ops_386, ops_c6x86_0f, dynarec_ops_386, dynarec_ops_c6x86_0f);
# endif
# else
if (cpu_s->cpu_type == CPU_Cx6x86MX)
x86_setopcodes(ops_386, ops_c6x86mx_0f);
else if (cpu_s->cpu_type == CPU_Cx6x86L)
x86_setopcodes(ops_386, ops_pentium_0f);
else
x86_setopcodes(ops_386, ops_c6x86mx_0f);
# if 0
x86_setopcodes(ops_386, ops_c6x86_0f);
# endif
# endif /* USE_DYNAREC */
timing_rr = 1; /* register dest - register src */
timing_rm = 1; /* register dest - memory src */
timing_mr = 2; /* memory dest - register src */
timing_mm = 2;
timing_rml = 1; /* register dest - memory src long */
timing_mrl = 2; /* memory dest - register src long */
timing_mml = 2;
if (cpu_s->cpu_type == CPU_CxGX1) {
timing_bt = 4; /* branch taken */
timing_bnt = 1; /* branch not taken */
} else {
timing_bt = 0; /* branch taken */
timing_bnt = 2; /* branch not taken */
}
/* Make the CxGX1 share the timings with most other Cyrix C6x86's due to the real
ones still being unknown. */
timing_int_rm = 9;
timing_int_v86 = 46;
timing_int_pm = 21;
timing_int_pm_outer = 32;
timing_iret_rm = 7;
timing_iret_v86 = 26;
timing_iret_pm = 10;
timing_iret_pm_outer = 26;
timing_call_rm = 3;
timing_call_pm = 4;
timing_call_pm_gate = 15;
timing_call_pm_gate_inner = 26;
timing_retf_rm = 4;
timing_retf_pm = 4;
timing_retf_pm_outer = 23;
timing_jmp_rm = 1;
timing_jmp_pm = 4;
timing_jmp_pm_gate = 14;
timing_misaligned = 2;
cpu_cyrix_alignment = 1;
cpu_features = CPU_FEATURE_RDTSC;
if (cpu_s->cpu_type >= CPU_CxGX1)
cpu_features |= CPU_FEATURE_MSR | CPU_FEATURE_CR4;
if (cpu_s->cpu_type == CPU_Cx6x86MX)
cpu_features |= CPU_FEATURE_MMX;
if (cpu_s->cpu_type >= CPU_CxGX1)
cpu_CR4_mask = CR4_TSD | CR4_DE | CR4_PCE;
# ifdef USE_DYNAREC
codegen_timing_set(&codegen_timing_686);
# endif /* USE_DYNAREC */
if ((cpu_s->cpu_type == CPU_Cx6x86L) || (cpu_s->cpu_type == CPU_Cx6x86MX))
ccr4 = 0x80;
else if (CPU_Cx6x86)
CPUID = 0; /* Disabled on powerup by default */
break;
#endif /* USE_CYRIX_6X86 */
#ifdef USE_AMD_K5
case CPU_K5:
case CPU_5K86:
#endif /* USE_AMD_K5 */
case CPU_K6:
case CPU_K6_2:
case CPU_K6_2C:
case CPU_K6_3:
case CPU_K6_2P:
case CPU_K6_3P:
#ifdef USE_DYNAREC
if (cpu_s->cpu_type >= CPU_K6_2)
x86_setopcodes(ops_386, ops_k62_0f, dynarec_ops_386, dynarec_ops_k62_0f);
# ifdef USE_AMD_K5
else if (cpu_s->cpu_type == CPU_K6)
x86_setopcodes(ops_386, ops_k6_0f, dynarec_ops_386, dynarec_ops_k6_0f);
else
x86_setopcodes(ops_386, ops_pentiummmx_0f, dynarec_ops_386, dynarec_ops_pentiummmx_0f);
# else
else
x86_setopcodes(ops_386, ops_k6_0f, dynarec_ops_386, dynarec_ops_k6_0f);
# endif /* USE_AMD_K5 */
#else
if (cpu_s->cpu_type >= CPU_K6_2)
x86_setopcodes(ops_386, ops_k62_0f);
# ifdef USE_AMD_K5
else if (cpu_s->cpu_type == CPU_K6)
x86_setopcodes(ops_386, ops_k6_0f);
else
x86_setopcodes(ops_386, ops_pentiummmx_0f);
# else
else
x86_setopcodes(ops_386, ops_k6_0f);
# endif /* USE_AMD_K5 */
#endif /* USE_DYNAREC */
if ((cpu_s->cpu_type == CPU_K6_2P) || (cpu_s->cpu_type == CPU_K6_3P)) {
x86_opcodes_3DNOW = ops_3DNOWE;
#ifdef USE_DYNAREC
x86_dynarec_opcodes_3DNOW = dynarec_ops_3DNOWE;
#endif /* USE_DYNAREC */
}
timing_rr = 1; /* register dest - register src */
timing_rm = 2; /* register dest - memory src */
timing_mr = 3; /* memory dest - register src */
timing_mm = 3;
timing_rml = 2; /* register dest - memory src long */
timing_mrl = 3; /* memory dest - register src long */
timing_mml = 3;
timing_bt = 0; /* branch taken */
timing_bnt = 1; /* branch not taken */
timing_int = 6;
timing_int_rm = 11;
timing_int_v86 = 54;
timing_int_pm = 25;
timing_int_pm_outer = 42;
timing_iret_rm = 7;
timing_iret_v86 = 27; /* unknown */
timing_iret_pm = 10;
timing_iret_pm_outer = 27;
timing_call_rm = 4;
timing_call_pm = 4;
timing_call_pm_gate = 22;
timing_call_pm_gate_inner = 44;
timing_retf_rm = 4;
timing_retf_pm = 4;
timing_retf_pm_outer = 23;
timing_jmp_rm = 3;
timing_jmp_pm = 3;
timing_jmp_pm_gate = 18;
timing_misaligned = 3;
cpu_features = CPU_FEATURE_RDTSC | CPU_FEATURE_MSR | CPU_FEATURE_CR4 | CPU_FEATURE_VME | CPU_FEATURE_MMX;
if (cpu_s->cpu_type >= CPU_K6_2)
cpu_features |= CPU_FEATURE_3DNOW;
if ((cpu_s->cpu_type == CPU_K6_2P) || (cpu_s->cpu_type == CPU_K6_3P))
cpu_features |= CPU_FEATURE_3DNOWE;
#ifdef USE_AMD_K5
cpu_CR4_mask = CR4_TSD | CR4_DE | CR4_MCE;
if (cpu_s->cpu_type >= CPU_K6) {
cpu_CR4_mask |= (CR4_VME | CR4_PVI | CR4_PSE);
if (cpu_s->cpu_type <= CPU_K6)
cpu_CR4_mask |= CR4_PCE;
else if (cpu_s->cpu_type >= CPU_K6_2C)
cpu_CR4_mask |= CR4_PGE;
} else
cpu_CR4_mask |= CR4_PGE;
#else
cpu_CR4_mask = CR4_VME | CR4_PVI | CR4_TSD | CR4_DE | CR4_PSE | CR4_MCE;
if (cpu_s->cpu_type == CPU_K6)
cpu_CR4_mask |= CR4_PCE;
else if (cpu_s->cpu_type >= CPU_K6_2C)
cpu_CR4_mask |= CR4_PGE;
#endif /* USE_AMD_K5 */
#ifdef USE_DYNAREC
codegen_timing_set(&codegen_timing_k6);
#endif /* USE_DYNAREC */
break;
case CPU_PENTIUMPRO:
case CPU_PENTIUM2:
case CPU_PENTIUM2D:
#ifdef USE_DYNAREC
/* TODO: Perhaps merge the three opcode tables with some instructions UD#'ing depending on
CPU type. */
if (cpu_s->cpu_type == CPU_PENTIUM2D)
x86_setopcodes(ops_386, ops_pentium2d_0f, dynarec_ops_386, dynarec_ops_pentium2d_0f);
else if (cpu_s->cpu_type == CPU_PENTIUM2)
x86_setopcodes(ops_386, ops_pentium2_0f, dynarec_ops_386, dynarec_ops_pentium2_0f);
else
x86_setopcodes(ops_386, ops_pentiumpro_0f, dynarec_ops_386, dynarec_ops_pentiumpro_0f);
if (fpu_softfloat) {
x86_dynarec_opcodes_da_a16 = dynarec_ops_sf_fpu_686_da_a16;
x86_dynarec_opcodes_da_a32 = dynarec_ops_sf_fpu_686_da_a32;
x86_dynarec_opcodes_db_a16 = dynarec_ops_sf_fpu_686_db_a16;
x86_dynarec_opcodes_db_a32 = dynarec_ops_sf_fpu_686_db_a32;
x86_dynarec_opcodes_df_a16 = dynarec_ops_sf_fpu_686_df_a16;
x86_dynarec_opcodes_df_a32 = dynarec_ops_sf_fpu_686_df_a32;
} else {
x86_dynarec_opcodes_da_a16 = dynarec_ops_fpu_686_da_a16;
x86_dynarec_opcodes_da_a32 = dynarec_ops_fpu_686_da_a32;
x86_dynarec_opcodes_db_a16 = dynarec_ops_fpu_686_db_a16;
x86_dynarec_opcodes_db_a32 = dynarec_ops_fpu_686_db_a32;
x86_dynarec_opcodes_df_a16 = dynarec_ops_fpu_686_df_a16;
x86_dynarec_opcodes_df_a32 = dynarec_ops_fpu_686_df_a32;
}
#else
if (cpu_s->cpu_type == CPU_PENTIUM2D)
x86_setopcodes(ops_386, ops_pentium2d_0f);
else
x86_setopcodes(ops_386, ops_pentium2_0f);
#endif /* USE_DYNAREC */
if (fpu_softfloat) {
x86_opcodes_da_a16 = ops_sf_fpu_686_da_a16;
x86_opcodes_da_a32 = ops_sf_fpu_686_da_a32;
x86_opcodes_db_a16 = ops_sf_fpu_686_db_a16;
x86_opcodes_db_a32 = ops_sf_fpu_686_db_a32;
x86_opcodes_df_a16 = ops_sf_fpu_686_df_a16;
x86_opcodes_df_a32 = ops_sf_fpu_686_df_a32;
} else {
x86_opcodes_da_a16 = ops_fpu_686_da_a16;
x86_opcodes_da_a32 = ops_fpu_686_da_a32;
x86_opcodes_db_a16 = ops_fpu_686_db_a16;
x86_opcodes_db_a32 = ops_fpu_686_db_a32;
x86_opcodes_df_a16 = ops_fpu_686_df_a16;
x86_opcodes_df_a32 = ops_fpu_686_df_a32;
}
timing_rr = 1; /* register dest - register src */
timing_rm = 2; /* register dest - memory src */
timing_mr = 3; /* memory dest - register src */
timing_mm = 3;
timing_rml = 2; /* register dest - memory src long */
timing_mrl = 3; /* memory dest - register src long */
timing_mml = 3;
timing_bt = 0; /* branch taken */
timing_bnt = 1; /* branch not taken */
timing_int = 6;
timing_int_rm = 11;
timing_int_v86 = 54;
timing_int_pm = 25;
timing_int_pm_outer = 42;
timing_iret_rm = 7;
timing_iret_v86 = 27; /* unknown */
timing_iret_pm = 10;
timing_iret_pm_outer = 27;
timing_call_rm = 4;
timing_call_pm = 4;
timing_call_pm_gate = 22;
timing_call_pm_gate_inner = 44;
timing_retf_rm = 4;
timing_retf_pm = 4;
timing_retf_pm_outer = 23;
timing_jmp_rm = 3;
timing_jmp_pm = 3;
timing_jmp_pm_gate = 18;
timing_misaligned = 3;
cpu_features = CPU_FEATURE_RDTSC | CPU_FEATURE_MSR | CPU_FEATURE_CR4 | CPU_FEATURE_VME;
if (cpu_s->cpu_type >= CPU_PENTIUM2)
cpu_features |= CPU_FEATURE_MMX;
cpu_CR4_mask = CR4_VME | CR4_PVI | CR4_TSD | CR4_DE | CR4_PSE | CR4_MCE | CR4_PAE | CR4_PCE | CR4_PGE;
if (cpu_s->cpu_type == CPU_PENTIUM2D)
cpu_CR4_mask |= CR4_OSFXSR;
#ifdef USE_DYNAREC
codegen_timing_set(&codegen_timing_p6);
#endif /* USE_DYNAREC */
break;
case CPU_CYRIX3S:
#ifdef USE_DYNAREC
x86_setopcodes(ops_386, ops_winchip2_0f, dynarec_ops_386, dynarec_ops_winchip2_0f);
#else
x86_setopcodes(ops_386, ops_winchip2_0f);
#endif /* USE_DYNAREC */
timing_rr = 1; /* register dest - register src */
timing_rm = 2; /* register dest - memory src */
timing_mr = 2; /* memory dest - register src */
timing_mm = 3;
timing_rml = 2; /* register dest - memory src long */
timing_mrl = 2; /* memory dest - register src long */
timing_mml = 3;
timing_bt = 2; /* branch taken */
timing_bnt = 1; /* branch not taken */
timing_int_rm = 26; /* unknown */
timing_int_v86 = 82;
timing_int_pm = 44;
timing_int_pm_outer = 71;
timing_iret_rm = 7;
timing_iret_v86 = 26;
timing_iret_pm = 10;
timing_iret_pm_outer = 26;
timing_call_rm = 4;
timing_call_pm = 15;
timing_call_pm_gate = 26;
timing_call_pm_gate_inner = 35;
timing_retf_rm = 4;
timing_retf_pm = 7;
timing_retf_pm_outer = 23;
timing_jmp_rm = 5;
timing_jmp_pm = 7;
timing_jmp_pm_gate = 17;
timing_misaligned = 2;
cpu_features = CPU_FEATURE_RDTSC | CPU_FEATURE_MMX | CPU_FEATURE_MSR | CPU_FEATURE_CR4 | CPU_FEATURE_3DNOW;
msr.fcr = (1 << 7) | (1 << 8) | (1 << 9) | (1 << 12) | (1 << 16) | (1 << 18) | (1 << 19) | (1 << 20) | (1 << 21);
cpu_CR4_mask = CR4_TSD | CR4_DE | CR4_MCE | CR4_PCE | CR4_PGE;
cpu_cyrix_alignment = 1;
#ifdef USE_DYNAREC
codegen_timing_set(&codegen_timing_winchip);
#endif /* USE_DYNAREC */
break;
default:
fatal("cpu_set : unknown CPU type %" PRIu64 "\n", cpu_s->cpu_type);
}
switch (fpu_type) {
case FPU_NONE:
break;
case FPU_8087:
x87_timings = x87_timings_8087;
break;
case FPU_287:
x87_timings = x87_timings_287;
break;
case FPU_287XL:
case FPU_387:
x87_timings = x87_timings_387;
break;
case FPU_487SX:
default:
x87_timings = x87_timings_486;
x87_concurrency = x87_concurrency_486;
}
cpu_use_exec = 0;
if (is386) {
#if defined(USE_DYNAREC) && !defined(USE_GDBSTUB)
if (cpu_use_dynarec) {
cpu_exec = exec386_dynarec;
cpu_use_exec = 1;
} else
#endif /* defined(USE_DYNAREC) && !defined(USE_GDBSTUB) */
/* Use exec386 for CPU_IBM486SLC because it can reach 100 MHz. */
if ((cpu_s->cpu_type == CPU_IBM486SLC) || (cpu_s->cpu_type == CPU_IBM486BL) ||
cpu_iscyrix || (cpu_s->cpu_type > CPU_486DLC) || cpu_override_interpreter) {
cpu_exec = exec386;
cpu_use_exec = 1;
} else
cpu_exec = exec386_2386;
} else if (cpu_s->cpu_type >= CPU_286)
cpu_exec = exec386_2386;
else
cpu_exec = execx86;
mmx_init();
gdbstub_cpu_init();
}
/* Mark the CPU module as uninitialized so the next cpu_set() performs
   a full (re-)initialization rather than treating it as a live reset. */
void
cpu_close(void)
{
    cpu_inited = 0;
}
/* Set the emulated ISA bus clock.
   speed: explicit clock in Hz, or 0 to derive it from the CPU bus clock
          (capped at the classic 8 MHz ISA rate). */
void
cpu_set_isa_speed(int speed)
{
    if (speed)
        cpu_isa_speed = speed;
    else
        cpu_isa_speed = (cpu_busspeed >= 8000000) ? 8000000 : cpu_busspeed;

    pc_speed_changed();

    cpu_log("cpu_set_isa_speed(%d) = %d\n", speed, cpu_isa_speed);
}
/* Set the emulated PCI bus clock.
   speed: explicit clock in Hz, or 0 to derive it from the CPU bus clock
   using the conventional bus dividers (/1, /2, /3, /4 by bus speed band).
   Also refreshes the ISA clock when it is slaved to PCI, and recomputes
   the PCI burst/non-burst cycle costs. */
void
cpu_set_pci_speed(int speed)
{
    if (speed)
        cpu_pci_speed = speed;
    else {
        int div;

        /* Pick the divider from the bus speed band. */
        if (cpu_busspeed < 42500000)
            div = 1;
        else if (cpu_busspeed < 84000000)
            div = 2;
        else if (cpu_busspeed < 120000000)
            div = 3;
        else
            div = 4;

        cpu_pci_speed = cpu_busspeed / div;
    }

    if (cpu_isa_pci_div)
        cpu_set_isa_pci_div(cpu_isa_pci_div); /* ISA slaved to PCI: propagate. */
    else if (speed)
        pc_speed_changed();

    pci_burst_time    = cpu_s->rspeed / cpu_pci_speed;
    pci_nonburst_time = 4 * pci_burst_time;

    cpu_log("cpu_set_pci_speed(%d) = %d\n", speed, cpu_pci_speed);
}
/* Slave the ISA clock to the PCI clock with the given divider.
   div: PCI-to-ISA divider, or 0 to unslave (ISA falls back to its
   bus-derived default via cpu_set_isa_speed(0)). */
void
cpu_set_isa_pci_div(int div)
{
    cpu_isa_pci_div = div;

    cpu_log("cpu_set_isa_pci_div(%d)\n", cpu_isa_pci_div);

    cpu_set_isa_speed(cpu_isa_pci_div ? (cpu_pci_speed / cpu_isa_pci_div) : 0);
}
/* Set the emulated AGP clock.
   speed: explicit clock in Hz, or 0 to derive it from the CPU bus clock
   (/1, /1.5 or /2 depending on the bus speed band). Recomputes the AGP
   burst/non-burst cycle costs afterwards. */
void
cpu_set_agp_speed(int speed)
{
    if (speed) {
        cpu_agp_speed = speed;
        pc_speed_changed();
    } else {
        /* Derive the AGP clock from the bus clock. */
        if (cpu_busspeed < 84000000)
            cpu_agp_speed = cpu_busspeed;
        else if (cpu_busspeed < 120000000)
            cpu_agp_speed = cpu_busspeed / 1.5;
        else
            cpu_agp_speed = cpu_busspeed / 2;
    }

    agp_burst_time    = cpu_s->rspeed / cpu_agp_speed;
    agp_nonburst_time = 4 * agp_burst_time;

    cpu_log("cpu_set_agp_speed(%d) = %d\n", speed, cpu_agp_speed);
}
/* Format the current CS:PC as "XXXX:XXXX" into bufp (or into an internal
   static buffer when bufp is NULL) and return the buffer used.

   Fix: cpu_state.pc is 32-bit and "%04X" prints a *minimum* of four
   digits, so for pc > 0xFFFF the output is up to 13 characters plus the
   terminator. The old 10-byte static buffer overflowed in that case;
   size it for the worst case instead. Callers passing their own buffer
   must likewise provide at least 16 bytes. */
char *
cpu_current_pc(char *bufp)
{
    static char buff[16];

    if (bufp == NULL)
        bufp = buff;

    sprintf(bufp, "%04X:%04X", CS, cpu_state.pc);

    return bufp;
}
/* Execute the CPUID instruction for the emulated CPU model.
   The requested leaf is taken from EAX; results are returned in
   EAX/EBX/ECX/EDX. Vendor ID strings are stored as little-endian
   dword triples. Unsupported leaves clear all four registers. */
void
cpu_CPUID(void)
{
    switch (cpu_s->cpu_type) {
        case CPU_i486SX_SLENH:
            if (!EAX) {
                EAX = 0x00000001;
                EBX = 0x756e6547; /* GenuineIntel */
                EDX = 0x49656e69;
                ECX = 0x6c65746e;
            } else if (EAX == 1) {
                EAX = CPUID;
                EBX = ECX = 0;
                EDX = CPUID_VME;
            } else
                EAX = EBX = ECX = EDX = 0;
            break;

        case CPU_i486DX_SLENH:
            if (!EAX) {
                EAX = 0x00000001;
                EBX = 0x756e6547; /* GenuineIntel */
                EDX = 0x49656e69;
                ECX = 0x6c65746e;
            } else if (EAX == 1) {
                /* NOTE(review): presumably CR0 bit 29 toggles an alternate
                   stepping report (0x0436 -> 0x0470) - confirm against docs. */
                if ((CPUID == 0x0436) && (cr0 & (1 << 29)))
                    EAX = 0x0470;
                else
                    EAX = CPUID;
                EBX = ECX = 0;
                EDX = CPUID_FPU | CPUID_VME;
            } else
                EAX = EBX = ECX = EDX = 0;
            break;

        case CPU_ENH_Am486DX:
            if (!EAX) {
                EAX = 0x00000001;
                EBX = 0x68747541; /* AuthenticAMD */
                ECX = 0x444D4163;
                EDX = 0x69746E65;
            } else if (EAX == 1) {
                EAX = CPUID;
                EBX = ECX = 0;
                EDX = CPUID_FPU;
            } else
                EAX = EBX = ECX = EDX = 0;
            break;

        case CPU_WINCHIP:
            if (!EAX) {
                EAX = 0x00000001;
                /* FCR2 bit 14 substitutes a custom vendor string from FCR2/FCR3. */
                if (msr.fcr2 & (1 << 14)) {
                    EBX = msr.fcr3 >> 32;
                    ECX = msr.fcr3 & 0xffffffff;
                    EDX = msr.fcr2 >> 32;
                } else {
                    EBX = 0x746e6543; /* CentaurHauls */
                    ECX = 0x736c7561;
                    EDX = 0x48727561;
                }
            } else if (EAX == 1) {
                /* FCR2 bits 4-11 override the family/model nibbles of the signature. */
                EAX = ((msr.fcr2 & 0x0ff0) ? ((msr.fcr2 & 0x0ff0) | (CPUID & 0xf00f)) : CPUID);
                EBX = ECX = 0;
                EDX = CPUID_FPU | CPUID_DE | CPUID_TSC | CPUID_MSR;
                if (cpu_has_feature(CPU_FEATURE_CX8))
                    EDX |= CPUID_CMPXCHG8B;
                if (msr.fcr & (1 << 9))
                    EDX |= CPUID_MMX;
            } else
                EAX = EBX = ECX = EDX = 0;
            break;

        case CPU_WINCHIP2:
            switch (EAX) {
                case 0:
                    EAX = 0x00000001;
                    if (msr.fcr2 & (1 << 14)) {
                        EBX = msr.fcr3 >> 32;
                        ECX = msr.fcr3 & 0xffffffff;
                        EDX = msr.fcr2 >> 32;
                    } else {
                        EBX = 0x746e6543; /* CentaurHauls */
                        ECX = 0x736c7561;
                        EDX = 0x48727561;
                    }
                    break;
                case 1:
                    EAX = ((msr.fcr2 & 0x0ff0) ? ((msr.fcr2 & 0x0ff0) | (CPUID & 0xf00f)) : CPUID);
                    EBX = ECX = 0;
                    EDX = CPUID_FPU | CPUID_DE | CPUID_TSC | CPUID_MSR;
                    if (cpu_has_feature(CPU_FEATURE_CX8))
                        EDX |= CPUID_CMPXCHG8B;
                    if (msr.fcr & (1 << 9))
                        EDX |= CPUID_MMX;
                    break;
                case 0x80000000:
                    EAX = 0x80000005;
                    break;
                case 0x80000001:
                    EAX = CPUID;
                    EDX = CPUID_FPU | CPUID_DE | CPUID_TSC | CPUID_MSR;
                    if (cpu_has_feature(CPU_FEATURE_CX8))
                        EDX |= CPUID_CMPXCHG8B;
                    if (msr.fcr & (1 << 9))
                        EDX |= CPUID_MMX;
                    if (cpu_has_feature(CPU_FEATURE_3DNOW))
                        EDX |= CPUID_3DNOW;
                    break;
                case 0x80000002: /* Processor name string */
                    EAX = 0x20544449; /* IDT WinChip 2-3D */
                    EBX = 0x436e6957;
                    ECX = 0x20706968;
                    EDX = 0x44332d32;
                    break;
                case 0x80000005: /*Cache information*/
                    EBX = 0x08800880; /*TLBs*/
                    ECX = 0x20040120; /*L1 data cache*/
                    EDX = 0x20020120; /*L1 instruction cache*/
                    break;
                default:
                    EAX = EBX = ECX = EDX = 0;
                    break;
            }
            break;

        case CPU_P24T:
        case CPU_PENTIUM:
            if (!EAX) {
                EAX = 0x00000001;
                EBX = 0x756e6547; /* GenuineIntel */
                EDX = 0x49656e69;
                ECX = 0x6c65746e;
            } else if (EAX == 1) {
                EAX = CPUID;
                EBX = ECX = 0;
                EDX = CPUID_FPU | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC | CPUID_MSR | CPUID_CMPXCHG8B;
                if (cpu_s->cpu_type != CPU_P24T)
                    EDX |= CPUID_MCE;
            } else
                EAX = EBX = ECX = EDX = 0;
            break;

#ifdef USE_AMD_K5
        case CPU_K5:
            if (!EAX) {
                EAX = 0x00000001;
                EBX = 0x68747541; /* AuthenticAMD */
                EDX = 0x69746E65;
                ECX = 0x444D4163;
            } else if (EAX == 1) {
                EAX = CPUID;
                EBX = ECX = 0;
                EDX = CPUID_FPU | CPUID_DE | CPUID_TSC | CPUID_MSR | CPUID_MCE | CPUID_CMPXCHG8B | CPUID_AMDPGE;
            } else
                EAX = EBX = ECX = EDX = 0;
            break;

        case CPU_5K86:
            switch (EAX) {
                case 0:
                    EAX = 0x00000001;
                    EBX = 0x68747541; /* AuthenticAMD */
                    EDX = 0x69746E65;
                    ECX = 0x444D4163;
                    break;
                case 1:
                    EAX = CPUID;
                    EBX = ECX = 0;
                    EDX = CPUID_FPU | CPUID_DE | CPUID_TSC | CPUID_MSR | CPUID_MCE | CPUID_CMPXCHG8B | CPUID_PGE;
                    break;
                case 0x80000000:
                    EAX = 0x80000005;
                    EBX = ECX = EDX = 0;
                    break;
                case 0x80000001:
                    EAX = CPUID;
                    EBX = ECX = 0;
                    EDX = CPUID_FPU | CPUID_DE | CPUID_TSC | CPUID_MSR | CPUID_MCE | CPUID_CMPXCHG8B | CPUID_PGE;
                    break;
                case 0x80000002: /* Processor name string */
                    EAX = 0x2D444D41; /* AMD-K5(tm) Proce */
                    EBX = 0x7428354B;
                    ECX = 0x5020296D;
                    EDX = 0x65636F72;
                    break;
                case 0x80000003: /* Processor name string */
                    EAX = 0x726F7373; /* ssor */
                    EBX = ECX = EDX = 0;
                    break;
                case 0x80000005: /* Cache information */
                    EAX = 0;
                    EBX = 0x04800000; /* TLBs */
                    ECX = 0x08040120; /* L1 data cache */
                    EDX = 0x10040120; /* L1 instruction cache */
                    break;
                default:
                    EAX = EBX = ECX = EDX = 0;
                    break;
            }
            break;
#endif /* USE_AMD_K5 */

        case CPU_K6:
            switch (EAX) {
                case 0:
                    EAX = 0x00000001;
                    EBX = 0x68747541; /* AuthenticAMD */
                    EDX = 0x69746E65;
                    ECX = 0x444D4163;
                    break;
                case 1:
                    EAX = CPUID;
                    EBX = ECX = 0;
                    EDX = CPUID_FPU | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC | CPUID_MSR | CPUID_MCE | CPUID_CMPXCHG8B | CPUID_MMX;
                    break;
                case 0x80000000:
                    EAX = 0x80000005;
                    EBX = ECX = EDX = 0;
                    break;
                case 0x80000001:
                    EAX = CPUID + 0x100;
                    EBX = ECX = 0;
                    EDX = CPUID_FPU | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC | CPUID_MSR | CPUID_MCE | CPUID_CMPXCHG8B | CPUID_AMDSEP | CPUID_MMX;
                    break;
                case 0x80000002: /* Processor name string */
                    EAX = 0x2D444D41; /* AMD-K6tm w/ mult */
                    EBX = 0x6D74364B;
                    ECX = 0x202F7720;
                    EDX = 0x746C756D;
                    break;
                case 0x80000003: /* Processor name string */
                    EAX = 0x64656D69; /* imedia extension */
                    EBX = 0x65206169;
                    ECX = 0x6E657478;
                    EDX = 0x6E6F6973;
                    break;
                case 0x80000004: /* Processor name string */
                    EAX = 0x73; /* s */
                    EBX = ECX = EDX = 0;
                    break;
                case 0x80000005: /* Cache information */
                    EAX = 0;
                    EBX = 0x02800140; /* TLBs */
                    ECX = 0x20020220; /* L1 data cache */
                    EDX = 0x20020220; /* L1 instruction cache */
                    break;
                case 0x8FFFFFFF: /* Easter egg */
                    EAX = 0x4778654E; /* NexGenerationAMD */
                    EBX = 0x72656E65;
                    ECX = 0x6F697461;
                    EDX = 0x444D416E;
                    break;
                default:
                    EAX = EBX = ECX = EDX = 0;
                    break;
            }
            break;

        case CPU_K6_2:
        case CPU_K6_2C:
            switch (EAX) {
                case 0:
                    EAX = 0x00000001;
                    EBX = 0x68747541; /* AuthenticAMD */
                    ECX = 0x444d4163;
                    EDX = 0x69746e65;
                    break;
                case 1:
                    EAX = CPUID;
                    EBX = ECX = 0;
                    EDX = CPUID_FPU | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC | CPUID_MSR | CPUID_MCE | CPUID_CMPXCHG8B | CPUID_MMX;
                    if (cpu_s->cpu_type == CPU_K6_2C)
                        EDX |= CPUID_PGE;
                    break;
                case 0x80000000:
                    EAX = 0x80000005;
                    EBX = ECX = EDX = 0;
                    break;
                case 0x80000001:
                    EAX = CPUID + 0x100;
                    EBX = ECX = 0;
                    EDX = CPUID_FPU | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC | CPUID_MSR | CPUID_MCE | CPUID_CMPXCHG8B | CPUID_SEP | CPUID_MMX | CPUID_3DNOW;
                    if (cpu_s->cpu_type == CPU_K6_2C)
                        EDX |= CPUID_PGE;
                    break;
                case 0x80000002: /* Processor name string */
                    EAX = 0x2d444d41; /* AMD-K6(tm) 3D pr */
                    EBX = 0x7428364b;
                    ECX = 0x3320296d;
                    EDX = 0x72702044;
                    break;
                case 0x80000003: /* Processor name string */
                    EAX = 0x7365636f; /* ocessor */
                    EBX = 0x00726f73;
                    ECX = 0x00000000;
                    EDX = 0x00000000;
                    break;
                case 0x80000005: /* Cache information */
                    EAX = 0;
                    EBX = 0x02800140; /* TLBs */
                    ECX = 0x20020220; /* L1 data cache */
                    EDX = 0x20020220; /* L1 instruction cache */
                    break;
                default:
                    EAX = EBX = ECX = EDX = 0;
                    break;
            }
            break;

        case CPU_K6_3:
            switch (EAX) {
                case 0:
                    EAX = 0x00000001;
                    EBX = 0x68747541; /* AuthenticAMD */
                    ECX = 0x444d4163;
                    EDX = 0x69746e65;
                    break;
                case 1:
                    EAX = CPUID;
                    EBX = ECX = 0;
                    EDX = CPUID_FPU | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC | CPUID_MSR | CPUID_MCE | CPUID_CMPXCHG8B | CPUID_PGE | CPUID_MMX;
                    break;
                case 0x80000000:
                    EAX = 0x80000006;
                    EBX = ECX = EDX = 0;
                    break;
                case 0x80000001:
                    EAX = CPUID + 0x100;
                    EBX = ECX = 0;
                    EDX = CPUID_FPU | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC | CPUID_MSR | CPUID_MCE | CPUID_CMPXCHG8B | CPUID_SEP | CPUID_PGE | CPUID_MMX | CPUID_3DNOW;
                    break;
                case 0x80000002: /* Processor name string */
                    EAX = 0x2d444d41; /* AMD-K6(tm) 3D+ P */
                    EBX = 0x7428364b;
                    ECX = 0x3320296d;
                    EDX = 0x50202b44;
                    break;
                case 0x80000003: /* Processor name string */
                    EAX = 0x65636f72; /* rocessor */
                    EBX = 0x726f7373;
                    ECX = 0x00000000;
                    EDX = 0x00000000;
                    break;
                case 0x80000005: /* Cache information */
                    EAX = 0;
                    EBX = 0x02800140; /* TLBs */
                    ECX = 0x20020220; /* L1 data cache */
                    EDX = 0x20020220; /* L1 instruction cache */
                    break;
                case 0x80000006: /* L2 Cache information */
                    EAX = EBX = EDX = 0;
                    ECX = 0x01004220;
                    break;
                default:
                    EAX = EBX = ECX = EDX = 0;
                    break;
            }
            break;

        case CPU_K6_2P:
        case CPU_K6_3P:
            switch (EAX) {
                case 0:
                    EAX = 0x00000001;
                    EBX = 0x68747541; /* AuthenticAMD */
                    ECX = 0x444d4163;
                    EDX = 0x69746e65;
                    break;
                case 1:
                    EAX = CPUID;
                    EBX = ECX = 0;
                    EDX = CPUID_FPU | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC | CPUID_MSR | CPUID_MCE | CPUID_CMPXCHG8B | CPUID_PGE | CPUID_MMX;
                    break;
                case 0x80000000:
                    EAX = 0x80000007;
                    EBX = ECX = EDX = 0;
                    break;
                case 0x80000001:
                    EAX = CPUID + 0x100;
                    EBX = ECX = 0;
                    EDX = CPUID_FPU | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC | CPUID_MSR | CPUID_MCE | CPUID_CMPXCHG8B | CPUID_SEP | CPUID_MMX | CPUID_PGE | CPUID_3DNOW | CPUID_3DNOWE;
                    break;
                case 0x80000002: /* Processor name string */
                    EAX = 0x2d444d41; /* AMD-K6(tm)-III P */
                    EBX = 0x7428364b;
                    ECX = 0x492d296d;
                    EDX = 0x50204949;
                    break;
                case 0x80000003: /* Processor name string */
                    EAX = 0x65636f72; /* rocessor */
                    EBX = 0x726f7373;
                    ECX = 0x00000000;
                    EDX = 0x00000000;
                    break;
                case 0x80000005: /* Cache information */
                    EAX = 0;
                    EBX = 0x02800140; /* TLBs */
                    ECX = 0x20020220; /* L1 data cache */
                    EDX = 0x20020220; /* L1 instruction cache */
                    break;
                case 0x80000006: /* L2 Cache information */
                    EAX = EBX = EDX = 0;
                    /* K6-III+ reports a larger L2 than the K6-2+. */
                    if (cpu_s->cpu_type == CPU_K6_3P)
                        ECX = 0x01004220;
                    else
                        ECX = 0x00804220;
                    break;
                case 0x80000007: /* PowerNow information */
                    EAX = EBX = ECX = 0;
                    EDX = 7;
                    break;
                default:
                    EAX = EBX = ECX = EDX = 0;
                    break;
            }
            break;

        case CPU_PENTIUMMMX:
            if (!EAX) {
                EAX = 0x00000001;
                EBX = 0x756e6547; /* GenuineIntel */
                EDX = 0x49656e69;
                ECX = 0x6c65746e;
            } else if (EAX == 1) {
                EAX = CPUID;
                EBX = ECX = 0;
                EDX = CPUID_FPU | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC | CPUID_MSR | CPUID_MCE | CPUID_CMPXCHG8B | CPUID_MMX;
            } else
                EAX = EBX = ECX = EDX = 0;
            break;

#ifdef USE_CYRIX_6X86
        case CPU_Cx6x86:
            if (!EAX) {
                EAX = 0x00000001;
                EBX = 0x69727943; /* CyrixInstead */
                EDX = 0x736e4978;
                ECX = 0x64616574;
            } else if (EAX == 1) {
                EAX = CPUID;
                EBX = ECX = 0;
                EDX = CPUID_FPU;
            } else
                EAX = EBX = ECX = EDX = 0;
            break;

        case CPU_Cx6x86L:
            if (!EAX) {
                EAX = 0x00000001;
                EBX = 0x69727943; /* CyrixInstead */
                EDX = 0x736e4978;
                ECX = 0x64616574;
            } else if (EAX == 1) {
                EAX = CPUID;
                EBX = ECX = 0;
                EDX = CPUID_FPU | CPUID_CMPXCHG8B;
            } else
                EAX = EBX = ECX = EDX = 0;
            break;

        case CPU_CxGX1:
            if (!EAX) {
                EAX = 0x00000001;
                EBX = 0x69727943; /* CyrixInstead */
                EDX = 0x736e4978;
                ECX = 0x64616574;
            } else if (EAX == 1) {
                EAX = CPUID;
                EBX = ECX = 0;
                EDX = CPUID_FPU | CPUID_DE | CPUID_TSC | CPUID_MSR | CPUID_CMPXCHG8B;
            } else
                EAX = EBX = ECX = EDX = 0;
            break;

        case CPU_Cx6x86MX:
            if (!EAX) {
                EAX = 0x00000001;
                EBX = 0x69727943; /* CyrixInstead */
                EDX = 0x736e4978;
                ECX = 0x64616574;
            } else if (EAX == 1) {
                EAX = CPUID;
                EBX = ECX = 0;
                EDX = CPUID_FPU | CPUID_DE | CPUID_TSC | CPUID_MSR | CPUID_CMPXCHG8B | CPUID_CMOV | CPUID_MMX;
            } else
                EAX = EBX = ECX = EDX = 0;
            break;
#endif /* USE_CYRIX_6X86 */

        case CPU_PENTIUMPRO:
            if (!EAX) {
                EAX = 0x00000002;
                EBX = 0x756e6547; /* GenuineIntel */
                EDX = 0x49656e69;
                ECX = 0x6c65746e;
            } else if (EAX == 1) {
                EAX = CPUID;
                EBX = ECX = 0;
                EDX = CPUID_FPU | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC | CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CMPXCHG8B | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_SEP | CPUID_CMOV;
            } else if (EAX == 2) {
                EAX = 0x03020101; /* Instruction TLB: 4 KB pages, 4-way set associative, 32 entries
                                     Instruction TLB: 4 MB pages, fully associative, 2 entries
                                     Data TLB: 4 KB pages, 4-way set associative, 64 entries */
                EBX = ECX = 0;
                EDX = 0x06040a42; /* 2nd-level cache: 256 KB, 4-way set associative, 32-byte line size
                                     1st-level data cache: 8 KB, 2-way set associative, 32-byte line size
                                     Data TLB: 4 MB pages, 4-way set associative, 8 entries
                                     1st-level instruction cache: 8 KB, 4-way set associative, 32-byte line size */
            } else
                EAX = EBX = ECX = EDX = 0;
            break;

        case CPU_PENTIUM2:
            if (!EAX) {
                EAX = 0x00000002;
                EBX = 0x756e6547; /* GenuineIntel */
                EDX = 0x49656e69;
                ECX = 0x6c65746e;
            } else if (EAX == 1) {
                EAX = CPUID;
                EBX = ECX = 0;
                EDX = CPUID_FPU | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC | CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CMPXCHG8B | CPUID_MMX | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_SEP | CPUID_CMOV;
            } else if (EAX == 2) {
                EAX = 0x03020101; /* Instruction TLB: 4 KB pages, 4-way set associative, 32 entries
                                     Instruction TLB: 4 MB pages, fully associative, 2 entries
                                     Data TLB: 4 KB pages, 4-way set associative, 64 entries */
                EBX = ECX = 0;
                EDX = 0x0c040843; /* 2nd-level cache: 512 KB, 4-way set associative, 32-byte line size
                                     1st-level data cache: 16 KB, 4-way set associative, 32-byte line size
                                     Data TLB: 4 MB pages, 4-way set associative, 8 entries
                                     1st-level instruction cache: 16 KB, 4-way set associative, 32-byte line size */
            } else
                EAX = EBX = ECX = EDX = 0;
            break;

        case CPU_PENTIUM2D:
            if (!EAX) {
                EAX = 0x00000002;
                EBX = 0x756e6547; /* GenuineIntel */
                EDX = 0x49656e69;
                ECX = 0x6c65746e;
            } else if (EAX == 1) {
                EAX = CPUID;
                EBX = ECX = 0;
                EDX = CPUID_FPU | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC | CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CMPXCHG8B | CPUID_MMX | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_SEP | CPUID_FXSR | CPUID_CMOV;
            } else if (EAX == 2) {
                EAX = 0x03020101; /* Instruction TLB: 4 KB pages, 4-way set associative, 32 entries
                                     Instruction TLB: 4 MB pages, fully associative, 2 entries
                                     Data TLB: 4 KB pages, 4-way set associative, 64 entries */
                EBX = ECX = 0;
                /* Leaf 2 cache descriptors vary by package/variant. */
                if (cpu_f->package == CPU_PKG_SLOT2) /* Pentium II Xeon Drake */
                    EDX = 0x0c040844; /* 2nd-level cache: 1 MB, 4-way set associative, 32-byte line size
                                         1st-level data cache: 16 KB, 4-way set associative, 32-byte line size
                                         Data TLB: 4 MB pages, 4-way set associative, 8 entries
                                         1st-level instruction cache: 16 KB, 4-way set associative, 32-byte line size */
                else if (!strncmp(cpu_f->internal_name, "celeron", 7)) { /* Celeron */
                    if (CPUID >= 0x660)   /* Mendocino */
                        EDX = 0x0c040841; /* 2nd-level cache: 128 KB, 4-way set associative, 32-byte line size */
                    else                  /* Covington */
                        EDX = 0x0c040840; /* No 2nd-level cache */
                } else                    /* Pentium II Deschutes and OverDrive */
                    EDX = 0x0c040843;     /* 2nd-level cache: 512 KB, 4-way set associative, 32-byte line size */
            } else
                EAX = EBX = ECX = EDX = 0;
            break;

        case CPU_CYRIX3S:
            switch (EAX) {
                case 0:
                    EAX = 0x00000001;
                    if (msr.fcr2 & (1 << 14)) {
                        EBX = msr.fcr3 >> 32;
                        ECX = msr.fcr3 & 0xffffffff;
                        EDX = msr.fcr2 >> 32;
                    } else {
                        EBX = 0x746e6543; /* CentaurHauls */
                        ECX = 0x736c7561;
                        EDX = 0x48727561;
                    }
                    break;
                case 1:
                    EAX = ((msr.fcr2 & 0x0ff0) ? ((msr.fcr2 & 0x0ff0) | (CPUID & 0xf00f)) : CPUID);
                    EBX = ECX = 0;
                    EDX = CPUID_FPU | CPUID_DE | CPUID_TSC | CPUID_MSR | CPUID_MCE | CPUID_MMX | CPUID_MTRR;
                    if (cpu_has_feature(CPU_FEATURE_CX8))
                        EDX |= CPUID_CMPXCHG8B;
                    if (msr.fcr & (1 << 7))
                        EDX |= CPUID_PGE;
                    break;
                case 0x80000000:
                    EAX = 0x80000005;
                    break;
                case 0x80000001:
                    EAX = CPUID;
                    EDX = CPUID_FPU | CPUID_DE | CPUID_TSC | CPUID_MSR | CPUID_MCE | CPUID_MMX | CPUID_MTRR | CPUID_3DNOW;
                    if (cpu_has_feature(CPU_FEATURE_CX8))
                        EDX |= CPUID_CMPXCHG8B;
                    if (msr.fcr & (1 << 7))
                        EDX |= CPUID_PGE;
                    break;
                case 0x80000002: /* Processor name string */
                    EAX = 0x20414956; /* VIA Samuel */
                    EBX = 0x756d6153;
                    ECX = 0x00006c65;
                    EDX = 0x00000000;
                    break;
                case 0x80000005: /* Cache information */
                    EBX = 0x08800880; /* TLBs */
                    ECX = 0x40040120; /* L1 data cache */
                    EDX = 0x40020120; /* L1 instruction cache */
                    break;
                default:
                    EAX = EBX = ECX = EDX = 0;
                    break;
            }
            break;
    }
}
/* Reset vendor-specific MSR state for the selected CPU model.
   Clears the whole `msr` structure, then seeds the model-specific
   registers whose reset value is non-zero. */
void
cpu_ven_reset(void)
{
    memset(&msr, 0, sizeof(msr));

    switch (cpu_s->cpu_type) {
        case CPU_WINCHIP:
        case CPU_WINCHIP2:
            /* Default Feature Control Register bits; WinChip 2 sets two
               additional FCR bits and one extra MCR control bit. */
            msr.fcr = (1 << 8) | (1 << 9) | (1 << 12) | (1 << 16) | (1 << 19) | (1 << 21);
            msr.mcr_ctrl = 0xf8000000;
            if (cpu_s->cpu_type == CPU_WINCHIP2) {
                msr.fcr |= (1 << 18) | (1 << 20);
                msr.mcr_ctrl |= (1 << 17);
            }
            break;

        case CPU_K6_2P:
        case CPU_K6_3P:
        case CPU_K6_3:
        case CPU_K6_2C:
            /* Processor State Observability Register reset value differs
               between K6-III-class and earlier parts. */
            msr.amd_psor = (cpu_s->cpu_type >= CPU_K6_3) ? 0x008cULL : 0x018cULL;
            fallthrough;
        case CPU_K6_2:
#ifdef USE_AMD_K5
        case CPU_K5:
        case CPU_5K86:
#endif /* USE_AMD_K5 */
        case CPU_K6:
            /* Extended Feature Enable Register: bit 1 set from K6-2C up. */
            msr.amd_efer = (cpu_s->cpu_type >= CPU_K6_2C) ? 2ULL : 0ULL;
            break;

        case CPU_PENTIUMPRO:
        case CPU_PENTIUM2:
        case CPU_PENTIUM2D:
            /* MTRRcap reset value. */
            msr.mtrr_cap = 0x00000508ULL;
            break;

        case CPU_CYRIX3S:
            msr.fcr = (1 << 7) | (1 << 8) | (1 << 9) | (1 << 12) | (1 << 16) | (1 << 18) | (1 << 19) |
                      (1 << 20) | (1 << 21);
            break;
    }
}
void
cpu_RDMSR(void)
{
switch (cpu_s->cpu_type) {
case CPU_IBM386SLC:
case CPU_IBM486SLC:
case CPU_IBM486BL:
EAX = EDX = 0;
switch (ECX) {
/* Processor Operation Register */
case 0x1000:
EAX = msr.ibm_por & ((cpu_s->cpu_type > CPU_IBM386SLC) ? 0xffeff : 0xfeff);
break;
/* Cache Region Control Register */
case 0x1001:
EAX = msr.ibm_crcr & 0xffffffff;
EDX = (msr.ibm_crcr >> 32) & 0x0000ffff;
break;
/* Processor Operation Register */
case 0x1002:
if ((cpu_s->cpu_type > CPU_IBM386SLC) && cpu_s->multi)
EAX = msr.ibm_por2 & 0x3f000000;
break;
/* Processor Control Register */
case 0x1004:
if (cpu_s->cpu_type > CPU_IBM486SLC)
EAX = msr.ibm_pcr & 0x00d6001a;
break;
}
break;
case CPU_WINCHIP:
case CPU_WINCHIP2:
EAX = EDX = 0;
switch (ECX) {
/* Pentium Processor Parity Reversal Register */
case 0x02:
EAX = msr.tr1;
break;
/* Pentium Processor New Feature Control */
case 0x0e:
EAX = msr.tr12;
break;
/* Time Stamp Counter */
case 0x10:
EAX = tsc & 0xffffffff;
EDX = tsc >> 32;
break;
/* Performance Monitor - Control and Event Select */
case 0x11:
EAX = msr.cesr;
break;
/* Performance Monitor - Event Counter 0 */
case 0x12:
EAX = msr.pmc[0] & 0xffffffff;
EDX = msr.pmc[0] >> 32;
break;
/* Performance Monitor - Event Counter 1 */
case 0x13:
EAX = msr.pmc[1] & 0xffffffff;
EDX = msr.pmc[1] >> 32;
break;
/* Feature Control Register */
case 0x107:
EAX = msr.fcr;
break;
/* Feature Control Register 2 */
case 0x108:
EAX = msr.fcr2 & 0xffffffff;
EDX = msr.fcr2 >> 32;
break;
/* Feature Control Register 4 */
case 0x10a:
EAX = cpu_multi & 3;
break;
/* Memory Configuration Register Control */
case 0x120:
EAX = msr.mcr_ctrl;
break;
/* Unknown */
case 0x131:
case 0x142 ... 0x145:
case 0x147:
case 0x150:
case 0x151:
break;
}
break;
case CPU_CYRIX3S:
EAX = EDX = 0;
switch (ECX) {
/* Machine Check Exception Address */
case 0x00:
/* Machine Check Exception Type */
case 0x01:
break;
/* Time Stamp Counter */
case 0x10:
EAX = tsc & 0xffffffff;
EDX = tsc >> 32;
break;
/* EBL_CR_POWERON - Processor Hard Power-On Configuration */
case 0x2a:
EAX = 0xc4000000;
EDX = 0;
if (cpu_dmulti == 3)
EAX |= ((0 << 25) | (0 << 24) | (0 << 23) | (1 << 22));
else if (cpu_dmulti == 3.5)
EAX |= ((0 << 25) | (1 << 24) | (0 << 23) | (1 << 22));
else if (cpu_dmulti == 4)
EAX |= ((0 << 25) | (0 << 24) | (1 << 23) | (0 << 22));
else if (cpu_dmulti == 4.5)
EAX |= ((0 << 25) | (1 << 24) | (1 << 23) | (0 << 22));
else if (cpu_dmulti == 5)
EAX |= 0;
else if (cpu_dmulti == 5.5)
EAX |= ((0 << 25) | (1 << 24) | (0 << 23) | (0 << 22));
else if (cpu_dmulti == 6)
EAX |= ((1 << 25) | (0 << 24) | (1 << 23) | (1 << 22));
else if (cpu_dmulti == 6.5)
EAX |= ((1 << 25) | (1 << 24) | (1 << 23) | (1 << 22));
else if (cpu_dmulti == 7)
EAX |= ((1 << 25) | (0 << 24) | (0 << 23) | (1 << 22));
else
EAX |= ((0 << 25) | (0 << 24) | (0 << 23) | (1 << 22));
if (cpu_busspeed >= 84000000)
EAX |= (1 << 19);
break;
/* PERFCTR0 - Performance Counter Register 0 - aliased to TSC */
case 0xc1:
EAX = tsc & 0xffffffff;
EDX = (tsc >> 32) & 0xff;
break;
/* PERFCTR1 - Performance Counter Register 1 */
case 0xc2:
EAX = msr.perfctr[1] & 0xffffffff;
EDX = msr.perfctr[1] >> 32;
break;
/* BBL_CR_CTL3 - L2 Cache Control Register 3 */
case 0x11e:
EAX = 0x800000; /* L2 cache disabled */
break;
/* EVNTSEL0 - Performance Counter Event Select 0 - hardcoded */
case 0x186:
EAX = 0x470079;
break;
/* EVNTSEL1 - Performance Counter Event Select 1 */
case 0x187:
EAX = msr.evntsel[1] & 0xffffffff;
EDX = msr.evntsel[1] >> 32;
break;
/* Feature Control Register */
case 0x1107:
EAX = msr.fcr;
break;
/* Feature Control Register 2 */
case 0x1108:
EAX = msr.fcr2 & 0xffffffff;
EDX = msr.fcr2 >> 32;
break;
/* ECX & 0: MTRRphysBase0 ... MTRRphysBase7
ECX & 1: MTRRphysMask0 ... MTRRphysMask7 */
case 0x200 ... 0x20f:
if (ECX & 1) {
EAX = msr.mtrr_physmask[(ECX - 0x200) >> 1] & 0xffffffff;
EDX = msr.mtrr_physmask[(ECX - 0x200) >> 1] >> 32;
} else {
EAX = msr.mtrr_physbase[(ECX - 0x200) >> 1] & 0xffffffff;
EDX = msr.mtrr_physbase[(ECX - 0x200) >> 1] >> 32;
}
break;
/* MTRRfix64K_00000 */
case 0x250:
EAX = msr.mtrr_fix64k_8000 & 0xffffffff;
EDX = msr.mtrr_fix64k_8000 >> 32;
break;
/* MTRRfix16K_80000 */
case 0x258:
EAX = msr.mtrr_fix16k_8000 & 0xffffffff;
EDX = msr.mtrr_fix16k_8000 >> 32;
break;
/* MTRRfix16K_A0000 */
case 0x259:
EAX = msr.mtrr_fix16k_a000 & 0xffffffff;
EDX = msr.mtrr_fix16k_a000 >> 32;
break;
/* MTRRfix4K_C0000 ... MTRRfix4K_F8000 */
case 0x268 ... 0x26f:
EAX = msr.mtrr_fix4k[ECX - 0x268] & 0xffffffff;
EDX = msr.mtrr_fix4k[ECX - 0x268] >> 32;
break;
/* MTRRdefType */
case 0x2ff:
EAX = msr.mtrr_deftype & 0xffffffff;
EDX = msr.mtrr_deftype >> 32;
break;
}
break;
#ifdef USE_AMD_K5
case CPU_K5:
case CPU_5K86:
#endif /* USE_AMD_K5 */
case CPU_K6:
case CPU_K6_2:
case CPU_K6_2C:
case CPU_K6_3:
case CPU_K6_2P:
case CPU_K6_3P:
EAX = 0;
/* EDX is left unchanged when reading this MSR! */
if (ECX != 0x82)
EDX = 0;
switch (ECX) {
/* Machine Check Address Register */
case 0x00000000:
EAX = msr.mcar & 0xffffffff;
EDX = msr.mcar >> 32;
break;
/* Machine Check Type Register */
case 0x00000001:
EAX = msr.mctr & 0xffffffff;
EDX = msr.mctr >> 32;
break;
/* Test Register 12 */
case 0x0000000e:
EAX = msr.tr12;
break;
/* Time Stamp Counter */
case 0x00000010:
EAX = tsc & 0xffffffff;
EDX = tsc >> 32;
break;
/* Array Access Register */
case 0x00000082:
if (cpu_s->cpu_type > CPU_5K86)
goto amd_k_invalid_rdmsr;
EAX = msr.amd_aar & 0xffffffff;
/* EDX is left unchanged! */
break;
/* Hardware Configuration Register */
case 0x00000083:
EAX = msr.amd_hwcr & 0xffffffff;
EDX = msr.amd_hwcr >> 32;
break;
/* Write Allocate Top-of-Memory and Control Register */
case 0x00000085:
if (cpu_s->cpu_type != CPU_5K86)
goto amd_k_invalid_rdmsr;
EAX = msr.amd_watmcr & 0xffffffff;
EDX = msr.amd_watmcr >> 32;
break;
/* Write Allocate Programmable Memory Range Register */
case 0x00000086:
if (cpu_s->cpu_type != CPU_5K86)
goto amd_k_invalid_rdmsr;
EAX = msr.amd_wapmrr & 0xffffffff;
EDX = msr.amd_wapmrr >> 32;
break;
/* Extended Feature Enable Register */
case 0xc0000080:
EAX = msr.amd_efer & 0xffffffff;
EDX = msr.amd_efer >> 32;
break;
/* SYSCALL Target Address Register */
case 0xc0000081:
if (cpu_s->cpu_type < CPU_K6_2)
goto amd_k_invalid_rdmsr;
EAX = msr.amd_star & 0xffffffff;
EDX = msr.amd_star >> 32;
break;
/* Write-Handling Control Register */
case 0xc0000082:
EAX = msr.amd_whcr & 0xffffffff;
EDX = msr.amd_whcr >> 32;
break;
/* UC/WC Cacheability Control Register */
case 0xc0000085:
if (cpu_s->cpu_type < CPU_K6_2C)
goto amd_k_invalid_rdmsr;
EAX = msr.amd_uwccr & 0xffffffff;
EDX = msr.amd_uwccr >> 32;
break;
/* Enhanced Power Management Register */
case 0xc0000086:
if (cpu_s->cpu_type < CPU_K6_2P)
goto amd_k_invalid_rdmsr;
EAX = msr.amd_epmr & 0xffffffff;
EDX = msr.amd_epmr >> 32;
break;
/* Processor State Observability Register */
case 0xc0000087:
if (cpu_s->cpu_type < CPU_K6_2C)
goto amd_k_invalid_rdmsr;
EAX = msr.amd_psor & 0xffffffff;
EDX = msr.amd_psor >> 32;
break;
/* Page Flush/Invalidate Register */
case 0xc0000088:
if (cpu_s->cpu_type < CPU_K6_2C)
goto amd_k_invalid_rdmsr;
EAX = msr.amd_pfir & 0xffffffff;
EDX = msr.amd_pfir >> 32;
break;
/* Level-2 Cache Array Access Register */
case 0xc0000089:
if (cpu_s->cpu_type < CPU_K6_3)
goto amd_k_invalid_rdmsr;
EAX = msr.amd_l2aar & 0xffffffff;
EDX = msr.amd_l2aar >> 32;
break;
default:
amd_k_invalid_rdmsr:
x86gpf(NULL, 0);
break;
}
break;
case CPU_P24T:
case CPU_PENTIUM:
case CPU_PENTIUMMMX:
EAX = EDX = 0;
/* Filter out the upper 27 bits when ECX value is over 0x80000000, as per:
Ralf Brown, Pentium Model-Specific Registers and What They Reveal.
path_to_url~ralf/papers/highmsr.html
But leave the bit 31 intact to be able to handle both low and high
MSRs in a single switch block. */
switch (ECX & (ECX > 0x7fffffff ? 0x8000001f : 0x7fffffff)) {
/* Machine Check Exception Address */
case 0x00000000:
case 0x80000000:
EAX = msr.mcar & 0xffffffff;
EDX = msr.mcar >> 32;
break;
/* Machine Check Exception Type */
case 0x00000001:
case 0x80000001:
EAX = msr.mctr & 0xffffffff;
EDX = msr.mctr >> 32;
msr.mctr &= ~0x1; /* clear the machine check pending bit */
break;
/* TR1 - Parity Reversal Test Register */
case 0x00000002:
case 0x80000002:
EAX = msr.tr1;
break;
/* TR2 - Instruction Cache End Bit */
case 0x00000004:
case 0x80000004:
if (cpu_s->cpu_type == CPU_PENTIUMMMX)
goto pentium_invalid_rdmsr;
EAX = msr.tr2;
break;
/* TR3 - Cache Test Data */
case 0x00000005:
case 0x80000005:
EAX = msr.tr3;
break;
/* TR4 - Cache Test Tag */
case 0x00000006:
case 0x80000006:
EAX = msr.tr4;
break;
/* TR5 - Cache Test Control */
case 0x00000007:
case 0x80000007:
EAX = msr.tr5;
break;
/* TR6 - TLB Test Command */
case 0x00000008:
case 0x80000008:
EAX = msr.tr6;
break;
/* TR7 - TLB Test Data */
case 0x00000009:
case 0x80000009:
EAX = msr.tr7;
break;
/* TR9 - Branch Target Buffer Tag */
case 0x0000000b:
case 0x8000000b:
EAX = msr.tr9;
break;
/* TR10 - Branch Target Buffer Target */
case 0x0000000c:
case 0x8000000c:
EAX = msr.tr10;
break;
/* TR11 - Branch Target Buffer Control */
case 0x0000000d:
case 0x8000000d:
EAX = msr.tr11;
break;
/* TR12 - New Feature Control */
case 0x0000000e:
case 0x8000000e:
EAX = msr.tr12;
break;
/* Time Stamp Counter */
case 0x00000010:
case 0x80000010:
EAX = tsc & 0xffffffff;
EDX = tsc >> 32;
break;
/* Performance Monitor - Control and Event Select */
case 0x00000011:
case 0x80000011:
EAX = msr.cesr;
break;
/* Performance Monitor - Event Counter 0 */
case 0x00000012:
case 0x80000012:
EAX = msr.pmc[0] & 0xffffffff;
EDX = msr.pmc[0] >> 32;
break;
/* Performance Monitor - Event Counter 1 */
case 0x00000013:
case 0x80000013:
EAX = msr.pmc[1] & 0xffffffff;
EDX = msr.pmc[1] >> 32;
break;
/* Unknown */
case 0x00000014:
case 0x80000014:
if ((CPUID & 0xfff) <= 0x520)
goto pentium_invalid_rdmsr;
break;
/* Unknown, possibly paging-related; initial value is 0004h,
becomes 0008h once paging is enabled */
case 0x80000018:
EAX = ((cr0 & (1 << 31)) ? 0x00000008 : 0x00000004);
break;
/* Floating point - last prefetched opcode
bits 10-8: low three bits of first byte of FP instruction
bits 7-0: second byte of floating-point instruction */
case 0x80000019:
EAX = 0;
break;
/* Floating point - last executed non-control opcode */
case 0x8000001a:
EAX = 0;
break;
/* Floating point - last non-control exception opcode - part
of FSTENV/FSAVE'd environment */
case 0x8000001b:
EAX = msr.fp_last_xcpt;
break;
/* Unknown */
case 0x8000001c:
EAX = 0x00000004;
break;
/* Probe Mode Control */
case 0x8000001d:
EAX = msr.probe_ctl;
break;
/* Unknown, possibly scratchpad register */
case 0x8000001e:
EAX = msr.ecx8000001e;
break;
/* Unknown, possibly scratchpad register */
case 0x8000001f:
EAX = msr.ecx8000001f;
break;
/* Reserved/Unimplemented */
case 0x80000003:
case 0x8000000a:
case 0x8000000f:
case 0x80000015 ... 0x80000017:
EAX = (ECX & 0x1f) * 2;
break;
default:
pentium_invalid_rdmsr:
cpu_log("RDMSR: Invalid MSR: %08X\n", ECX);
x86gpf(NULL, 0);
break;
}
cpu_log("RDMSR: ECX = %08X, val = %08X%08X\n", ECX, EDX, EAX);
break;
#ifdef USE_CYRIX_6X86
case CPU_Cx6x86:
case CPU_Cx6x86L:
case CPU_CxGX1:
case CPU_Cx6x86MX:
switch (ECX) {
/* Test Data */
case 0x03:
EAX = msr.tr3;
break;
/* Test Address */
case 0x04:
EAX = msr.tr4;
break;
/* Test Command/Status */
case 0x05:
EAX = msr.tr5;
break;
/* Time Stamp Counter */
case 0x10:
EAX = tsc & 0xffffffff;
EDX = tsc >> 32;
break;
/* Performance Monitor - Control and Event Select */
case 0x11:
EAX = msr.cesr;
break;
/* Performance Monitor - Event Counter 0 */
case 0x12:
EAX = msr.pmc[0] & 0xffffffff;
EDX = msr.pmc[0] >> 32;
break;
/* Performance Monitor - Event Counter 1 */
case 0x13:
EAX = msr.pmc[1] & 0xffffffff;
EDX = msr.pmc[1] >> 32;
break;
}
cpu_log("RDMSR: ECX = %08X, val = %08X%08X\n", ECX, EDX, EAX);
break;
#endif /* USE_CYRIX_6X86 */
case CPU_PENTIUMPRO:
case CPU_PENTIUM2:
case CPU_PENTIUM2D:
EAX = EDX = 0;
/* Per RichardG's probing of a real Deschutes using my RDMSR tool,
we have discovered that the top 18 bits are filtered out. */
switch (ECX & 0x00003fff) {
/* Machine Check Exception Address */
case 0x00:
/* Machine Check Exception Type */
case 0x01:
break;
/* Time Stamp Counter */
case 0x10:
EAX = tsc & 0xffffffff;
EDX = tsc >> 32;
break;
/* IA32_PLATFORM_ID - Platform ID */
case 0x17:
if (cpu_s->cpu_type < CPU_PENTIUM2D)
goto i686_invalid_rdmsr;
if (cpu_f->package == CPU_PKG_SLOT2)
EDX |= (1 << 19);
else if (cpu_f->package == CPU_PKG_SOCKET370)
EDX |= (1 << 20);
break;
/* Unknown */
case 0x18:
break;
/* IA32_APIC_BASE - APIC Base Address */
case 0x1B:
EAX = msr.apic_base & 0xffffffff;
EDX = msr.apic_base >> 32;
cpu_log("APIC_BASE read : %08X%08X\n", EDX, EAX);
break;
/* Unknown (undocumented?) MSR used by the Hyper-V BIOS */
case 0x20:
EAX = msr.ecx20 & 0xffffffff;
EDX = msr.ecx20 >> 32;
break;
/* Unknown */
case 0x21:
if (cpu_s->cpu_type == CPU_PENTIUMPRO)
goto i686_invalid_rdmsr;
break;
/* EBL_CR_POWERON - Processor Hard Power-On Configuration */
case 0x2a:
EAX = 0xc4000000;
EDX = 0;
if (cpu_dmulti == 2.5)
EAX |= ((0 << 25) | (1 << 24) | (1 << 23) | (1 << 22));
else if (cpu_dmulti == 3)
EAX |= ((0 << 25) | (0 << 24) | (0 << 23) | (1 << 22));
else if (cpu_dmulti == 3.5)
EAX |= ((0 << 25) | (1 << 24) | (0 << 23) | (1 << 22));
else if (cpu_dmulti == 4)
EAX |= ((0 << 25) | (0 << 24) | (1 << 23) | (0 << 22));
else if (cpu_dmulti == 4.5)
EAX |= ((0 << 25) | (1 << 24) | (1 << 23) | (0 << 22));
else if (cpu_dmulti == 5)
EAX |= 0;
else if (cpu_dmulti == 5.5)
EAX |= ((0 << 25) | (1 << 24) | (0 << 23) | (0 << 22));
else if (cpu_dmulti == 6)
EAX |= ((1 << 25) | (0 << 24) | (1 << 23) | (1 << 22));
else if (cpu_dmulti == 6.5)
EAX |= ((1 << 25) | (1 << 24) | (1 << 23) | (1 << 22));
else if (cpu_dmulti == 7)
EAX |= ((1 << 25) | (0 << 24) | (0 << 23) | (1 << 22));
else if (cpu_dmulti == 7.5)
EAX |= ((1 << 25) | (1 << 24) | (0 << 23) | (1 << 22));
else if (cpu_dmulti == 8)
EAX |= ((1 << 25) | (0 << 24) | (1 << 23) | (0 << 22));
else
EAX |= ((0 << 25) | (1 << 24) | (1 << 23) | (1 << 22));
if (cpu_s->cpu_type != CPU_PENTIUMPRO) {
if (cpu_busspeed >= 84000000)
EAX |= (1 << 19);
}
break;
/* Unknown */
case 0x32:
if (cpu_s->cpu_type == CPU_PENTIUMPRO)
goto i686_invalid_rdmsr;
break;
/* TEST_CTL - Test Control Register */
case 0x33:
EAX = msr.test_ctl;
break;
/* Unknown */
case 0x34:
case 0x3a:
case 0x3b:
case 0x50 ... 0x54:
break;
/* BIOS_UPDT_TRIG - BIOS Update Trigger */
case 0x79:
EAX = msr.bios_updt & 0xffffffff;
EDX = msr.bios_updt >> 32;
break;
/* BBL_CR_D0 ... BBL_CR_D3 - Chunk 0..3 Data Register
8Bh: BIOS_SIGN - BIOS Update Signature */
case 0x88 ... 0x8b:
EAX = msr.bbl_cr_dx[ECX - 0x88] & 0xffffffff;
EDX = msr.bbl_cr_dx[ECX - 0x88] >> 32;
break;
/* Unknown */
case 0xae:
break;
/* PERFCTR0 - Performance Counter Register 0 */
case 0xc1:
/* PERFCTR1 - Performance Counter Register 1 */
case 0xc2:
EAX = msr.perfctr[ECX - 0xC1] & 0xffffffff;
EDX = msr.perfctr[ECX - 0xC1] >> 32;
break;
/* MTRRcap */
case 0xfe:
EAX = msr.mtrr_cap & 0xffffffff;
EDX = msr.mtrr_cap >> 32;
break;
/* BBL_CR_ADDR - L2 Cache Address Register */
case 0x116:
EAX = msr.bbl_cr_addr & 0xffffffff;
EDX = msr.bbl_cr_addr >> 32;
break;
/* BBL_CR_DECC - L2 Cache Date ECC Register */
case 0x118:
EAX = msr.bbl_cr_decc & 0xffffffff;
EDX = msr.bbl_cr_decc >> 32;
break;
/* BBL_CR_CTL - L2 Cache Control Register */
case 0x119:
EAX = msr.bbl_cr_ctl & 0xffffffff;
EDX = msr.bbl_cr_ctl >> 32;
break;
/* BBL_CR_TRIG - L2 Cache Trigger Register */
case 0x11a:
EAX = msr.bbl_cr_trig & 0xffffffff;
EDX = msr.bbl_cr_trig >> 32;
break;
/* BBL_CR_BUSY - L2 Cache Busy Register */
case 0x11b:
EAX = msr.bbl_cr_busy & 0xffffffff;
EDX = msr.bbl_cr_busy >> 32;
break;
/* BBL_CR_CTL3 - L2 Cache Control Register 3 */
case 0x11e:
EAX = msr.bbl_cr_ctl3 & 0xffffffff;
EDX = msr.bbl_cr_ctl3 >> 32;
break;
/* Unknown */
case 0x131:
case 0x14e ... 0x151:
case 0x154:
case 0x15b:
case 0x15f:
break;
/* SYSENTER_CS - SYSENTER target CS */
case 0x174:
if (cpu_s->cpu_type == CPU_PENTIUMPRO)
goto i686_invalid_rdmsr;
EAX &= 0xffff0000;
EAX |= msr.sysenter_cs;
EDX = 0x00000000;
break;
/* SYSENTER_ESP - SYSENTER target ESP */
case 0x175:
if (cpu_s->cpu_type == CPU_PENTIUMPRO)
goto i686_invalid_rdmsr;
EAX = msr.sysenter_esp;
EDX = 0x00000000;
break;
/* SYSENTER_EIP - SYSENTER target EIP */
case 0x176:
if (cpu_s->cpu_type == CPU_PENTIUMPRO)
goto i686_invalid_rdmsr;
EAX = msr.sysenter_eip;
EDX = 0x00000000;
break;
/* MCG_CAP - Machine Check Global Capability */
case 0x179:
EAX = 0x00000105;
EDX = 0x00000000;
break;
/* MCG_STATUS - Machine Check Global Status */
case 0x17a:
break;
/* MCG_CTL - Machine Check Global Control */
case 0x17b:
EAX = msr.mcg_ctl & 0xffffffff;
EDX = msr.mcg_ctl >> 32;
break;
/* EVNTSEL0 - Performance Counter Event Select 0 */
case 0x186:
/* EVNTSEL1 - Performance Counter Event Select 1 */
case 0x187:
EAX = msr.evntsel[ECX - 0x186] & 0xffffffff;
EDX = msr.evntsel[ECX - 0x186] >> 32;
break;
/* Unknown */
case 0x1d3:
break;
/* DEBUGCTLMSR - Debugging Control Register */
case 0x1d9:
EAX = msr.debug_ctl;
break;
/* LASTBRANCHFROMIP - address from which a branch was last taken */
case 0x1db:
/* LASTBRANCHTOIP - destination address of the last taken branch instruction */
case 0x1dc:
/* LASTINTFROMIP - address at which an interrupt last occurred */
case 0x1dd:
/* LASTINTTOIP - address to which the last interrupt caused a branch */
case 0x1de:
break;
/* ROB_CR_BKUPTMPDR6 */
case 0x1e0:
EAX = msr.rob_cr_bkuptmpdr6;
break;
/* ECX & 0: MTRRphysBase0 ... MTRRphysBase7
ECX & 1: MTRRphysMask0 ... MTRRphysMask7 */
case 0x200 ... 0x20f:
if (ECX & 1) {
EAX = msr.mtrr_physmask[(ECX - 0x200) >> 1] & 0xffffffff;
EDX = msr.mtrr_physmask[(ECX - 0x200) >> 1] >> 32;
} else {
EAX = msr.mtrr_physbase[(ECX - 0x200) >> 1] & 0xffffffff;
EDX = msr.mtrr_physbase[(ECX - 0x200) >> 1] >> 32;
}
break;
/* MTRRfix64K_00000 */
case 0x250:
EAX = msr.mtrr_fix64k_8000 & 0xffffffff;
EDX = msr.mtrr_fix64k_8000 >> 32;
break;
/* MTRRfix16K_80000 */
case 0x258:
EAX = msr.mtrr_fix16k_8000 & 0xffffffff;
EDX = msr.mtrr_fix16k_8000 >> 32;
break;
/* MTRRfix16K_A0000 */
case 0x259:
EAX = msr.mtrr_fix16k_a000 & 0xffffffff;
EDX = msr.mtrr_fix16k_a000 >> 32;
break;
/* MTRRfix4K_C0000 ... MTRRfix4K_F8000 */
case 0x268 ... 0x26f:
EAX = msr.mtrr_fix4k[ECX - 0x268] & 0xffffffff;
EDX = msr.mtrr_fix4k[ECX - 0x268] >> 32;
break;
/* Page Attribute Table */
case 0x277:
if (cpu_s->cpu_type < CPU_PENTIUM2D)
goto i686_invalid_rdmsr;
EAX = msr.pat & 0xffffffff;
EDX = msr.pat >> 32;
break;
/* Unknown */
case 0x280:
if (cpu_s->cpu_type == CPU_PENTIUMPRO)
goto i686_invalid_rdmsr;
break;
/* MTRRdefType */
case 0x2ff:
EAX = msr.mtrr_deftype & 0xffffffff;
EDX = msr.mtrr_deftype >> 32;
break;
/* MC0_CTL - Machine Check 0 Control */
case 0x400:
/* MC1_CTL - Machine Check 1 Control */
case 0x404:
/* MC2_CTL - Machine Check 2 Control */
case 0x408:
/* MC4_CTL - Machine Check 4 Control */
case 0x40c:
/* MC3_CTL - Machine Check 3 Control */
case 0x410:
EAX = msr.mca_ctl[(ECX - 0x400) >> 2] & 0xffffffff;
EDX = msr.mca_ctl[(ECX - 0x400) >> 2] >> 32;
break;
/* MC0_STATUS - Machine Check 0 Status */
case 0x401:
/* MC0_ADDR - Machine Check 0 Address */
case 0x402:
/* MC1_STATUS - Machine Check 1 Status */
case 0x405:
/* MC1_ADDR - Machine Check 1 Address */
case 0x406:
/* MC2_STATUS - Machine Check 2 Status */
case 0x409:
/* MC2_ADDR - Machine Check 2 Address */
case 0x40a:
/* MC4_STATUS - Machine Check 4 Status */
case 0x40d:
/* MC4_ADDR - Machine Check 4 Address */
case 0x40e:
/* MC3_STATUS - Machine Check 3 Status */
case 0x411:
/* MC3_ADDR - Machine Check 3 Address */
case 0x412:
break;
/* Unknown */
case 0x570:
EAX = msr.ecx570 & 0xffffffff;
EDX = msr.ecx570 >> 32;
break;
/* Unknown, possibly debug registers? */
case 0x1000 ... 0x1007:
/* Unknown, possibly control registers? */
case 0x2000:
case 0x2002 ... 0x2004:
break;
default:
i686_invalid_rdmsr:
cpu_log("RDMSR: Invalid MSR: %08X\n", ECX);
x86gpf(NULL, 0);
break;
}
break;
}
cpu_log("RDMSR %08X %08X%08X\n", ECX, EDX, EAX);
}
/*
 * Emulate the WRMSR instruction: store EDX:EAX into the model-specific
 * register selected by ECX. The available MSR set depends on the emulated
 * CPU model; writes to unknown MSRs raise #GP(0) on models known to do so
 * (AMD K5/K6, Pentium, P6) and are silently ignored on the others.
 *
 * Fixes in this revision:
 *  - Cyrix 6x86 test registers (03h/04h/05h) were missing break statements,
 *    so any TR3 write fell through and clobbered TR4, TR5 and reset the TSC.
 *  - Pentium MSRs 8000001Bh and 8000001Dh had reversed assignments (they
 *    read the MSR into EAX instead of storing EAX into the MSR).
 */
void
cpu_WRMSR(void)
{
    uint64_t temp;

    cpu_log("WRMSR %08X %08X%08X\n", ECX, EDX, EAX);

    switch (cpu_s->cpu_type) {
        case CPU_IBM386SLC:
        case CPU_IBM486SLC:
        case CPU_IBM486BL:
            switch (ECX) {
                /* Processor Operation Register */
                case 0x1000:
                    msr.ibm_por = EAX & ((cpu_s->cpu_type > CPU_IBM386SLC) ? 0xffeff : 0xfeff);
                    /* Bit 7 toggles the internal cache. */
                    cpu_cache_int_enabled = (EAX & (1 << 7));
                    break;

                /* Cache Region Control Register */
                case 0x1001:
                    msr.ibm_crcr = EAX | ((uint64_t) (EDX & 0x0000ffff) << 32);
                    break;

                /* Processor Operation Register */
                case 0x1002:
                    if ((cpu_s->cpu_type > CPU_IBM386SLC) && cpu_s->multi)
                        msr.ibm_por2 = EAX & 0x3f000000;
                    break;

                /* Processor Control Register */
                case 0x1004:
                    if (cpu_s->cpu_type > CPU_IBM486SLC)
                        msr.ibm_pcr = EAX & 0x00d6001a;
                    break;
            }
            break;

        case CPU_WINCHIP:
        case CPU_WINCHIP2:
            switch (ECX) {
                /* Pentium Processor Parity Reversal Register */
                case 0x02:
                    msr.tr1 = EAX & 2;
                    break;

                /* Pentium Processor New Feature Control */
                case 0x0e:
                    msr.tr12 = EAX & 0x248;
                    break;

                /* Time Stamp Counter */
                case 0x10:
                    timer_set_new_tsc(EAX | ((uint64_t) EDX << 32));
                    break;

                /* Performance Monitor - Control and Event Select */
                case 0x11:
                    msr.cesr = EAX & 0xff00ff;
                    break;

                /* Performance Monitor - Event Counter 0 */
                case 0x12:
                    msr.pmc[0] = EAX | ((uint64_t) EDX << 32);
                    break;

                /* Performance Monitor - Event Counter 1 */
                case 0x13:
                    msr.pmc[1] = EAX | ((uint64_t) EDX << 32);
                    break;

                /* Feature Control Register - gates MMX/CX8/3DNow! features
                   and can hide CPUID entirely (bit 29). */
                case 0x107:
                    msr.fcr = EAX;
                    if (EAX & (1 << 9))
                        cpu_features |= CPU_FEATURE_MMX;
                    else
                        cpu_features &= ~CPU_FEATURE_MMX;
                    if (EAX & (1 << 1))
                        cpu_features |= CPU_FEATURE_CX8;
                    else
                        cpu_features &= ~CPU_FEATURE_CX8;
                    if ((EAX & (1 << 20)) && cpu_s->cpu_type >= CPU_WINCHIP2)
                        cpu_features |= CPU_FEATURE_3DNOW;
                    else
                        cpu_features &= ~CPU_FEATURE_3DNOW;
                    if (EAX & (1 << 29))
                        CPUID = 0;
                    else
                        CPUID = cpu_s->cpuid_model;
                    break;

                /* Feature Control Register 2 */
                case 0x108:
                    msr.fcr2 = EAX | ((uint64_t) EDX << 32);
                    break;

                /* Feature Control Register 3 */
                case 0x109:
                    msr.fcr3 = EAX | ((uint64_t) EDX << 32);
                    break;

                /* Memory Configuration Register 0..7 */
                case 0x110 ... 0x117:
                    temp = ECX - 0x110;
                    if (cpu_s->cpu_type == CPU_WINCHIP2) {
                        /* WinChip 2 mirrors the per-MCR enable state into
                           MCR_CTRL bits 9..16. */
                        if (EAX & 0x1f)
                            msr.mcr_ctrl |= (1 << (temp + 9));
                        else
                            msr.mcr_ctrl &= ~(1 << (temp + 9));
                    }
                    msr.mcr[temp] = EAX | ((uint64_t) EDX << 32);
                    break;

                /* Memory Configuration Register Control */
                case 0x120:
                    msr.mcr_ctrl = EAX & ((cpu_s->cpu_type == CPU_WINCHIP2) ? 0x1df : 0x1f);
                    break;

                /* Unknown */
                case 0x131:
                case 0x142 ... 0x145:
                case 0x147:
                case 0x150:
                case 0x151:
                    break;
            }
            break;

        case CPU_CYRIX3S:
            switch (ECX) {
                /* Machine Check Exception Address */
                case 0x00:
                /* Machine Check Exception Type */
                case 0x01:
                    break;

                /* Time Stamp Counter */
                case 0x10:
                    timer_set_new_tsc(EAX | ((uint64_t) EDX << 32));
                    break;

                /* PERFCTR0 - Performance Counter Register 0 - aliased to TSC */
                case 0xc1:
                    break;

                /* PERFCTR0 - Performance Counter Register 1 */
                case 0xc2:
                    msr.perfctr[1] = EAX | ((uint64_t) EDX << 32);
                    break;

                /* BBL_CR_CTL3 - L2 Cache Control Register 3 */
                case 0x11e:
                /* EVNTSEL0 - Performance Counter Event Select 0 - hardcoded */
                case 0x186:
                    break;

                /* EVNTSEL1 - Performance Counter Event Select 1 */
                case 0x187:
                    msr.evntsel[1] = EAX | ((uint64_t) EDX << 32);
                    break;

                /* Feature Control Register */
                case 0x1107:
                    msr.fcr = EAX;
                    if (EAX & (1 << 1))
                        cpu_features |= CPU_FEATURE_CX8;
                    else
                        cpu_features &= ~CPU_FEATURE_CX8;
                    if (EAX & (1 << 7))
                        cpu_CR4_mask |= CR4_PGE;
                    else
                        cpu_CR4_mask &= ~CR4_PGE;
                    break;

                /* Feature Control Register 2 */
                case 0x1108:
                    msr.fcr2 = EAX | ((uint64_t) EDX << 32);
                    break;

                /* Feature Control Register 3 */
                case 0x1109:
                    msr.fcr3 = EAX | ((uint64_t) EDX << 32);
                    break;

                /* ECX & 0: MTRRphysBase0 ... MTRRphysBase7
                   ECX & 1: MTRRphysMask0 ... MTRRphysMask7 */
                case 0x200 ... 0x20f:
                    if (ECX & 1)
                        msr.mtrr_physmask[(ECX - 0x200) >> 1] = EAX | ((uint64_t) EDX << 32);
                    else
                        msr.mtrr_physbase[(ECX - 0x200) >> 1] = EAX | ((uint64_t) EDX << 32);
                    break;

                /* MTRRfix64K_00000 */
                case 0x250:
                    msr.mtrr_fix64k_8000 = EAX | ((uint64_t) EDX << 32);
                    break;

                /* MTRRfix16K_80000 */
                case 0x258:
                    msr.mtrr_fix16k_8000 = EAX | ((uint64_t) EDX << 32);
                    break;

                /* MTRRfix16K_A0000 */
                case 0x259:
                    msr.mtrr_fix16k_a000 = EAX | ((uint64_t) EDX << 32);
                    break;

                /* MTRRfix4K_C0000 ... MTRRfix4K_F8000 */
                case 0x268 ... 0x26f:
                    msr.mtrr_fix4k[ECX - 0x268] = EAX | ((uint64_t) EDX << 32);
                    break;

                /* MTRRdefType */
                case 0x2ff:
                    msr.mtrr_deftype = EAX | ((uint64_t) EDX << 32);
                    break;
            }
            break;

#ifdef USE_AMD_K5
        case CPU_K5:
        case CPU_5K86:
#endif /* USE_AMD_K5 */
        case CPU_K6:
        case CPU_K6_2:
        case CPU_K6_2C:
        case CPU_K6_3:
        case CPU_K6_2P:
        case CPU_K6_3P:
            switch (ECX) {
                /* Machine Check Address Register */
                case 0x00000000:
                    if (cpu_s->cpu_type > CPU_5K86)
                        msr.mcar = EAX | ((uint64_t) EDX << 32);
                    break;

                /* Machine Check Type Register */
                case 0x00000001:
                    if (cpu_s->cpu_type > CPU_5K86)
                        msr.mctr = EAX | ((uint64_t) EDX << 32);
                    break;

                /* Test Register 12 */
                case 0x0000000e:
                    msr.tr12 = EAX & 0x8;
                    break;

                /* Time Stamp Counter */
                case 0x00000010:
                    timer_set_new_tsc(EAX | ((uint64_t) EDX << 32));
                    break;

                /* Array Access Register (K5/5k86 only) */
                case 0x00000082:
                    if (cpu_s->cpu_type > CPU_5K86)
                        goto amd_k_invalid_wrmsr;
                    msr.amd_aar = EAX | ((uint64_t) EDX << 32);
                    break;

                /* Hardware Configuration Register */
                case 0x00000083:
                    msr.amd_hwcr = EAX | ((uint64_t) EDX << 32);
                    break;

                /* Write Allocate Top-of-Memory and Control Register */
                case 0x00000085:
                    if (cpu_s->cpu_type != CPU_5K86)
                        goto amd_k_invalid_wrmsr;
                    msr.amd_watmcr = EAX | ((uint64_t) EDX << 32);
                    break;

                /* Write Allocate Programmable Memory Range Register */
                case 0x00000086:
                    if (cpu_s->cpu_type != CPU_5K86)
                        goto amd_k_invalid_wrmsr;
                    msr.amd_wapmrr = EAX | ((uint64_t) EDX << 32);
                    break;

                /* Extended Feature Enable Register - only bit 0 (SCE) is
                   implemented; setting anything else raises #GP(0). */
                case 0xc0000080:
                    temp = EAX | ((uint64_t) EDX << 32);
                    if (temp & ~1ULL)
                        x86gpf(NULL, 0);
                    else
                        msr.amd_efer = temp;
                    break;

                /* SYSCALL Target Address Register */
                case 0xc0000081:
                    if (cpu_s->cpu_type < CPU_K6_2)
                        goto amd_k_invalid_wrmsr;
                    msr.amd_star = EAX | ((uint64_t) EDX << 32);
                    break;

                /* Write-Handling Control Register */
                case 0xc0000082:
                    msr.amd_whcr = EAX | ((uint64_t) EDX << 32);
                    break;

                /* UC/WC Cacheability Control Register */
                case 0xc0000085:
                    if (cpu_s->cpu_type < CPU_K6_2C)
                        goto amd_k_invalid_wrmsr;
                    msr.amd_uwccr = EAX | ((uint64_t) EDX << 32);
                    break;

                /* Enhanced Power Management Register */
                case 0xc0000086:
                    if (cpu_s->cpu_type < CPU_K6_2P)
                        goto amd_k_invalid_wrmsr;
                    msr.amd_epmr = EAX | ((uint64_t) EDX << 32);
                    break;

                /* Processor State Observability Register */
                case 0xc0000087:
                    if (cpu_s->cpu_type < CPU_K6_2C)
                        goto amd_k_invalid_wrmsr;
                    msr.amd_psor = EAX | ((uint64_t) EDX << 32);
                    break;

                /* Page Flush/Invalidate Register */
                case 0xc0000088:
                    if (cpu_s->cpu_type < CPU_K6_2C)
                        goto amd_k_invalid_wrmsr;
                    msr.amd_pfir = EAX | ((uint64_t) EDX << 32);
                    break;

                /* Level-2 Cache Array Access Register */
                case 0xc0000089:
                    if (cpu_s->cpu_type < CPU_K6_3)
                        goto amd_k_invalid_wrmsr;
                    msr.amd_l2aar = EAX | ((uint64_t) EDX << 32);
                    break;

                default:
amd_k_invalid_wrmsr:
                    x86gpf(NULL, 0);
                    break;
            }
            break;

        case CPU_P24T:
        case CPU_PENTIUM:
        case CPU_PENTIUMMMX:
            cpu_log("WRMSR: ECX = %08X, val = %08X%08X\n", ECX, EDX, EAX);
            /* Filter out the upper 27 bits when ECX value is over 0x80000000, as per:
               Ralf Brown, Pentium Model-Specific Registers and What They Reveal.
               path_to_url~ralf/papers/highmsr.html
               But leave the bit 31 intact to be able to handle both low and high
               MSRs in a single switch block. */
            switch (ECX & (ECX > 0x7fffffff ? 0x8000001f : 0x7fffffff)) {
                /* Machine Check Exception Address */
                case 0x00000000:
                case 0x80000000:
                /* Machine Check Exception Type */
                case 0x00000001:
                case 0x80000001:
                    break;

                /* TR1 - Parity Reversal Test Register */
                case 0x00000002:
                case 0x80000002:
                    msr.tr1 = EAX & 0x3fff;
                    break;

                /* TR2 - Instruction Cache End Bit */
                case 0x00000004:
                case 0x80000004:
                    if (cpu_s->cpu_type == CPU_PENTIUMMMX)
                        goto pentium_invalid_wrmsr;
                    msr.tr2 = EAX & 0xf;
                    break;

                /* TR3 - Cache Test Data */
                case 0x00000005:
                case 0x80000005:
                    msr.tr3 = EAX;
                    break;

                /* TR4 - Cache Test Tag */
                case 0x00000006:
                case 0x80000006:
                    msr.tr4 = EAX & ((cpu_s->cpu_type == CPU_PENTIUMMMX) ? 0xffffff1f : 0xffffff07);
                    break;

                /* TR5 - Cache Test Control */
                case 0x00000007:
                case 0x80000007:
                    msr.tr5 = EAX & ((cpu_s->cpu_type == CPU_PENTIUMMMX) ? 0x87fff : 0x7fff);
                    break;

                /* TR6 - TLB Test Command */
                case 0x00000008:
                case 0x80000008:
                    msr.tr6 = EAX & 0xffffff07;
                    break;

                /* TR7 - TLB Test Data */
                case 0x00000009:
                case 0x80000009:
                    msr.tr7 = EAX & ((cpu_s->cpu_type == CPU_PENTIUMMMX) ? 0xfffffc7f : 0xffffff9c);
                    break;

                /* TR9 - Branch Target Buffer Tag */
                case 0x0000000b:
                case 0x8000000b:
                    msr.tr9 = EAX & ((cpu_s->cpu_type == CPU_PENTIUMMMX) ? 0xffffffff : 0xffffffc3);
                    break;

                /* TR10 - Branch Target Buffer Target */
                case 0x0000000c:
                case 0x8000000c:
                    msr.tr10 = EAX;
                    break;

                /* TR11 - Branch Target Buffer Control */
                case 0x0000000d:
                case 0x8000000d:
                    msr.tr11 = EAX & ((cpu_s->cpu_type >= CPU_PENTIUMMMX) ? 0x3001fcf : 0xfcf);
                    break;

                /* TR12 - New Feature Control; valid bit set depends on the
                   exact stepping reported by CPUID. */
                case 0x0000000e:
                case 0x8000000e:
                    if (cpu_s->cpu_type == CPU_PENTIUMMMX)
                        temp = EAX & 0x38034f;
                    else if ((CPUID & 0xfff) >= 0x52b)
                        temp = EAX & 0x20435f;
                    else if ((CPUID & 0xfff) >= 0x520)
                        temp = EAX & 0x20035f;
                    else
                        temp = EAX & 0x20030f;
                    msr.tr12 = temp;
                    break;

                /* Time Stamp Counter */
                case 0x00000010:
                case 0x80000010:
                    timer_set_new_tsc(EAX | ((uint64_t) EDX << 32));
                    break;

                /* Performance Monitor - Control and Event Select */
                case 0x00000011:
                case 0x80000011:
                    msr.cesr = EAX & 0x3ff03ff;
                    break;

                /* Performance Monitor - Event Counter 0 */
                case 0x00000012:
                case 0x80000012:
                    msr.pmc[0] = EAX | ((uint64_t) EDX << 32);
                    break;

                /* Performance Monitor - Event Counter 1 */
                case 0x00000013:
                case 0x80000013:
                    msr.pmc[1] = EAX | ((uint64_t) EDX << 32);
                    break;

                /* Unknown */
                case 0x00000014:
                case 0x80000014:
                    if ((CPUID & 0xfff) <= 0x520)
                        goto pentium_invalid_wrmsr;
                    break;

                /* Unknown, possibly paging-related; initial value is 0004h,
                   becomes 0008h once paging is enabled */
                case 0x80000018:
                /* Floating point - last prefetched opcode
                   bits 10-8: low three bits of first byte of FP instruction
                   bits 7-0: second byte of floating-point instruction */
                case 0x80000019:
                /* Floating point - last executed non-control opcode */
                case 0x8000001a:
                    break;

                /* Floating point - last non-control exception opcode - part
                   of FSTENV/FSAVE'd environment */
                case 0x8000001b:
                    /* Fixed: store EAX into the MSR; the assignment was
                       reversed and clobbered EAX instead. */
                    msr.fp_last_xcpt = EAX & 0x7ff;
                    break;

                /* Unknown */
                case 0x8000001c:
                    break;

                /* Probe Mode Control */
                case 0x8000001d:
                    /* Fixed: store EAX into the MSR; the assignment was
                       reversed and clobbered EAX instead. */
                    msr.probe_ctl = EAX & 0x7;
                    break;

                /* Unknown, possibly scratchpad register */
                case 0x8000001e:
                    msr.ecx8000001e = EAX;
                    break;

                /* Unknown, possibly scratchpad register */
                case 0x8000001f:
                    msr.ecx8000001f = EAX;
                    break;

                /* Reserved/Unimplemented */
                case 0x80000003:
                case 0x8000000a:
                case 0x8000000f:
                case 0x80000015 ... 0x80000017:
                    break;

                default:
pentium_invalid_wrmsr:
                    cpu_log("WRMSR: Invalid MSR: %08X\n", ECX);
                    x86gpf(NULL, 0);
                    break;
            }
            break;

#ifdef USE_CYRIX_6X86
        case CPU_Cx6x86:
        case CPU_Cx6x86L:
        case CPU_CxGX1:
        case CPU_Cx6x86MX:
            cpu_log("WRMSR: ECX = %08X, val = %08X%08X\n", ECX, EDX, EAX);
            switch (ECX) {
                /* Test Data */
                case 0x03:
                    msr.tr3 = EAX;
                    break; /* Fixed: missing break fell through to TR4/TR5/TSC. */

                /* Test Address */
                case 0x04:
                    msr.tr4 = EAX;
                    break; /* Fixed: missing break fell through to TR5/TSC. */

                /* Test Command/Status */
                case 0x05:
                    msr.tr5 = EAX & 0x008f0f3b;
                    break; /* Fixed: missing break reset the TSC on TR5 writes. */

                /* Time Stamp Counter */
                case 0x10:
                    timer_set_new_tsc(EAX | ((uint64_t) EDX << 32));
                    break;

                /* Performance Monitor - Control and Event Select */
                case 0x11:
                    msr.cesr = EAX & 0x7ff07ff;
                    break;

                /* Performance Monitor - Event Counter 0 */
                case 0x12:
                    msr.pmc[0] = EAX | ((uint64_t) EDX << 32);
                    break;

                /* Performance Monitor - Event Counter 1 */
                case 0x13:
                    msr.pmc[1] = EAX | ((uint64_t) EDX << 32);
                    break;
            }
            break;
#endif /* USE_CYRIX_6X86 */

        case CPU_PENTIUMPRO:
        case CPU_PENTIUM2:
        case CPU_PENTIUM2D:
            /* Per RichardG's probing of a real Deschutes using my RDMSR tool,
               we have discovered that the top 18 bits are filtered out. */
            switch (ECX & 0x00003fff) {
                /* Machine Check Exception Address */
                case 0x00:
                /* Machine Check Exception Type */
                case 0x01:
                    if (EAX || EDX)
                        x86gpf(NULL, 0);
                    break;

                /* Time Stamp Counter */
                case 0x10:
                    timer_set_new_tsc(EAX | ((uint64_t) EDX << 32));
                    break;

                /* Unknown */
                case 0x18:
                    break;

                /* IA32_APIC_BASE - APIC Base Address */
                case 0x1b:
                    cpu_log("APIC_BASE write: %08X%08X\n", EDX, EAX);
#if 0
                    msr.apic_base = EAX | ((uint64_t) EDX << 32);
#endif
                    break;

                /* Unknown (undocumented?) MSR used by the Hyper-V BIOS */
                case 0x20:
                    msr.ecx20 = EAX | ((uint64_t) EDX << 32);
                    break;

                /* Unknown */
                case 0x21:
                    if (cpu_s->cpu_type == CPU_PENTIUMPRO)
                        goto i686_invalid_wrmsr;
                    break;

                /* EBL_CR_POWERON - Processor Hard Power-On Configuration */
                case 0x2a:
                    break;

                /* Unknown */
                case 0x32:
                    if (cpu_s->cpu_type == CPU_PENTIUMPRO)
                        goto i686_invalid_wrmsr;
                    break;

                /* TEST_CTL - Test Control Register */
                case 0x33:
                    msr.test_ctl = EAX;
                    break;

                /* Unknown */
                case 0x34:
                case 0x3a:
                case 0x3b:
                case 0x50 ... 0x54:
                    break;

                /* BIOS_UPDT_TRIG - BIOS Update Trigger */
                case 0x79:
                    msr.bios_updt = EAX | ((uint64_t) EDX << 32);
                    break;

                /* BBL_CR_D0 ... BBL_CR_D3 - Chunk 0..3 Data Register
                   8Bh: BIOS_SIGN - BIOS Update Signature */
                case 0x88 ... 0x8b:
                    msr.bbl_cr_dx[ECX - 0x88] = EAX | ((uint64_t) EDX << 32);
                    break;

                /* Unknown */
                case 0xae:
                    break;

                /* PERFCTR0 - Performance Counter Register 0 */
                case 0xc1:
                /* PERFCTR1 - Performance Counter Register 1 */
                case 0xc2:
                    msr.perfctr[ECX - 0xC1] = EAX | ((uint64_t) EDX << 32);
                    break;

                /* MTRRcap */
                case 0xfe:
                    msr.mtrr_cap = EAX | ((uint64_t) EDX << 32);
                    break;

                /* BBL_CR_ADDR - L2 Cache Address Register */
                case 0x116:
                    msr.bbl_cr_addr = EAX | ((uint64_t) EDX << 32);
                    break;

                /* BBL_CR_DECC - L2 Cache Date ECC Register */
                case 0x118:
                    msr.bbl_cr_decc = EAX | ((uint64_t) EDX << 32);
                    break;

                /* BBL_CR_CTL - L2 Cache Control Register */
                case 0x119:
                    msr.bbl_cr_ctl = EAX | ((uint64_t) EDX << 32);
                    break;

                /* BBL_CR_TRIG - L2 Cache Trigger Register */
                case 0x11a:
                    msr.bbl_cr_trig = EAX | ((uint64_t) EDX << 32);
                    break;

                /* BBL_CR_BUSY - L2 Cache Busy Register */
                case 0x11b:
                    msr.bbl_cr_busy = EAX | ((uint64_t) EDX << 32);
                    break;

                /* BBL_CR_CTL3 - L2 Cache Control Register 3 */
                case 0x11e:
                    msr.bbl_cr_ctl3 = EAX | ((uint64_t) EDX << 32);
                    break;

                /* Unknown */
                case 0x131:
                case 0x14e ... 0x151:
                case 0x154:
                case 0x15b:
                case 0x15f:
                    break;

                /* SYSENTER_CS - SYSENTER target CS */
                case 0x174:
                    if (cpu_s->cpu_type == CPU_PENTIUMPRO)
                        goto i686_invalid_wrmsr;
                    msr.sysenter_cs = EAX & 0xFFFF;
                    break;

                /* SYSENTER_ESP - SYSENTER target ESP */
                case 0x175:
                    if (cpu_s->cpu_type == CPU_PENTIUMPRO)
                        goto i686_invalid_wrmsr;
                    msr.sysenter_esp = EAX;
                    break;

                /* SYSENTER_EIP - SYSENTER target EIP */
                case 0x176:
                    if (cpu_s->cpu_type == CPU_PENTIUMPRO)
                        goto i686_invalid_wrmsr;
                    msr.sysenter_eip = EAX;
                    break;

                /* MCG_CAP - Machine Check Global Capability */
                case 0x179:
                    break;

                /* MCG_STATUS - Machine Check Global Status */
                case 0x17a:
                    if (EAX || EDX)
                        x86gpf(NULL, 0);
                    break;

                /* MCG_CTL - Machine Check Global Control */
                case 0x17b:
                    msr.mcg_ctl = EAX | ((uint64_t) EDX << 32);
                    break;

                /* EVNTSEL0 - Performance Counter Event Select 0 */
                case 0x186:
                /* EVNTSEL1 - Performance Counter Event Select 1 */
                case 0x187:
                    msr.evntsel[ECX - 0x186] = EAX | ((uint64_t) EDX << 32);
                    break;

                /* Unknown */
                case 0x1d3:
                    break;

                /* DEBUGCTLMSR - Debugging Control Register */
                case 0x1d9:
                    msr.debug_ctl = EAX;
                    break;

                /* ROB_CR_BKUPTMPDR6 */
                case 0x1e0:
                    msr.rob_cr_bkuptmpdr6 = EAX;
                    break;

                /* ECX & 0: MTRRphysBase0 ... MTRRphysBase7
                   ECX & 1: MTRRphysMask0 ... MTRRphysMask7 */
                case 0x200 ... 0x20f:
                    if (ECX & 1)
                        msr.mtrr_physmask[(ECX - 0x200) >> 1] = EAX | ((uint64_t) EDX << 32);
                    else
                        msr.mtrr_physbase[(ECX - 0x200) >> 1] = EAX | ((uint64_t) EDX << 32);
                    break;

                /* MTRRfix64K_00000 */
                case 0x250:
                    msr.mtrr_fix64k_8000 = EAX | ((uint64_t) EDX << 32);
                    break;

                /* MTRRfix16K_80000 */
                case 0x258:
                    msr.mtrr_fix16k_8000 = EAX | ((uint64_t) EDX << 32);
                    break;

                /* MTRRfix16K_A0000 */
                case 0x259:
                    msr.mtrr_fix16k_a000 = EAX | ((uint64_t) EDX << 32);
                    break;

                /* MTRRfix4K_C0000 ... MTRRfix4K_F8000 */
                case 0x268 ... 0x26f:
                    msr.mtrr_fix4k[ECX - 0x268] = EAX | ((uint64_t) EDX << 32);
                    break;

                /* Page Attribute Table */
                case 0x277:
                    if (cpu_s->cpu_type < CPU_PENTIUM2D)
                        goto i686_invalid_wrmsr;
                    msr.pat = EAX | ((uint64_t) EDX << 32);
                    break;

                /* Unknown */
                case 0x280:
                    if (cpu_s->cpu_type == CPU_PENTIUMPRO)
                        goto i686_invalid_wrmsr;
                    break;

                /* MTRRdefType */
                case 0x2ff:
                    msr.mtrr_deftype = EAX | ((uint64_t) EDX << 32);
                    break;

                /* MC0_CTL - Machine Check 0 Control */
                case 0x400:
                /* MC1_CTL - Machine Check 1 Control */
                case 0x404:
                /* MC2_CTL - Machine Check 2 Control */
                case 0x408:
                /* MC4_CTL - Machine Check 4 Control */
                case 0x40c:
                /* MC3_CTL - Machine Check 3 Control */
                case 0x410:
                    msr.mca_ctl[(ECX - 0x400) >> 2] = EAX | ((uint64_t) EDX << 32);
                    break;

                /* MC0_STATUS - Machine Check 0 Status */
                case 0x401:
                /* MC0_ADDR - Machine Check 0 Address */
                case 0x402:
                /* MC1_STATUS - Machine Check 1 Status */
                case 0x405:
                /* MC1_ADDR - Machine Check 1 Address */
                case 0x406:
                /* MC2_STATUS - Machine Check 2 Status */
                case 0x409:
                /* MC2_ADDR - Machine Check 2 Address */
                case 0x40a:
                /* MC4_STATUS - Machine Check 4 Status */
                case 0x40d:
                /* MC4_ADDR - Machine Check 4 Address */
                case 0x40e:
                /* MC3_STATUS - Machine Check 3 Status */
                case 0x411:
                /* MC3_ADDR - Machine Check 3 Address */
                case 0x412:
                    if (EAX || EDX)
                        x86gpf(NULL, 0);
                    break;

                /* Unknown */
                case 0x570:
                    msr.ecx570 = EAX | ((uint64_t) EDX << 32);
                    break;

                /* Unknown, possibly debug registers? */
                case 0x1000 ... 0x1007:
                /* Unknown, possibly control registers? */
                case 0x2000:
                case 0x2002 ... 0x2004:
                    break;

                default:
i686_invalid_wrmsr:
                    cpu_log("WRMSR: Invalid MSR: %08X\n", ECX);
                    x86gpf(NULL, 0);
                    break;
            }
            break;
    }
}
/* I/O write handler for the CPU configuration register ports.
   Port F0h acknowledges an FPU error; the remaining ports implement an
   index/data pair: an even address latches a register index into
   cyrix_addr, an odd address writes the register selected by that index
   (Cyrix CCR0..CCR6 and the ARR3/SMM region registers). */
static void
cpu_write(uint16_t addr, uint8_t val, UNUSED(void *priv))
{
    if (addr == 0xf0) {
        /* Writes to F0 clear FPU error and deassert the interrupt. */
        if (is286)
            picintc(1 << 13); /* FPU errors are routed through IRQ 13 on AT-class machines */
        else
            nmi = 0; /* pre-AT machines signal FPU errors via NMI */
        return;
    } else if (addr >= 0xf1)
        return; /* FPU stuff */

    if (!(addr & 1))
        cyrix_addr = val; /* even address: latch the register index */
    else
        switch (cyrix_addr) {
            case 0xc0: /* CCR0 */
                ccr0 = val;
                break;
            case 0xc1: /* CCR1 */
                /* While the SMI lock is set (and we are not in SMM), the
                   SMI-related bits retain their previous values. */
                if ((ccr3 & CCR3_SMI_LOCK) && !in_smm)
                    val = (val & ~(CCR1_USE_SMI | CCR1_SMAC | CCR1_SM3)) | (ccr1 & (CCR1_USE_SMI | CCR1_SMAC | CCR1_SM3));
                ccr1 = val;
                break;
            case 0xc2: /* CCR2 */
                ccr2 = val;
                break;
            case 0xc3: /* CCR3 */
                /* Once set, the SMI lock bit sticks and NMI_EN is frozen
                   until the next time we are in SMM. */
                if ((ccr3 & CCR3_SMI_LOCK) && !in_smm)
                    val = (val & ~(CCR3_NMI_EN)) | (ccr3 & CCR3_NMI_EN) | CCR3_SMI_LOCK;
                ccr3 = val;
                break;
            case 0xcd:
                /* ARR3 base bits 31:24 — writable only when the SMI lock is
                   clear or from inside SMM; invalidates the SMHR cache. */
                if (!(ccr3 & CCR3_SMI_LOCK) || in_smm) {
                    cyrix.arr[3].base = (cyrix.arr[3].base & ~0xff000000) | (val << 24);
                    cyrix.smhr &= ~SMHR_VALID;
                }
                break;
            case 0xce:
                /* ARR3 base bits 23:16, same locking rule as above. */
                if (!(ccr3 & CCR3_SMI_LOCK) || in_smm) {
                    cyrix.arr[3].base = (cyrix.arr[3].base & ~0x00ff0000) | (val << 16);
                    cyrix.smhr &= ~SMHR_VALID;
                }
                break;
            case 0xcf:
                /* High nibble: ARR3 base bits 15:12; low nibble: region
                   size selector (0 = disabled, Fh = 4 GB, otherwise
                   2048 << n bytes). */
                if (!(ccr3 & CCR3_SMI_LOCK) || in_smm) {
                    cyrix.arr[3].base = (cyrix.arr[3].base & ~0x0000f000) | ((val & 0xf0) << 8);
                    if ((val & 0xf) == 0xf)
                        cyrix.arr[3].size = 1ULL << 32; /* 4 GB */
                    else if (val & 0xf)
                        cyrix.arr[3].size = 2048 << (val & 0xf);
                    else
                        cyrix.arr[3].size = 0; /* Disabled */
                    cyrix.smhr &= ~SMHR_VALID;
                }
                break;
            case 0xe8: /* CCR4 */
                /* CCR4..CCR6 are only accessible when CCR3[7:4] == 1. */
                if ((ccr3 & 0xf0) == 0x10) {
                    ccr4 = val;
#ifdef USE_CYRIX_6X86
                    if (cpu_s->cpu_type >= CPU_Cx6x86) {
                        /* Bit 7 toggles CPUID visibility on 6x86-class CPUs. */
                        if (val & 0x80)
                            CPUID = cpu_s->cpuid_model;
                        else
                            CPUID = 0;
                    }
#endif /* USE_CYRIX_6X86 */
                }
                break;
            case 0xe9: /* CCR5 */
                if ((ccr3 & 0xf0) == 0x10)
                    ccr5 = val;
                break;
            case 0xea: /* CCR6 */
                if ((ccr3 & 0xf0) == 0x10)
                    ccr6 = val;
                break;
        }
}
/* I/O read handler for the CPU configuration register ports. Odd addresses
   return the register selected by the index previously latched via
   cpu_write(); everything that is not readable returns 0xFF (open bus). */
static uint8_t
cpu_read(uint16_t addr, UNUSED(void *priv))
{
    uint8_t ret = 0xff;

    /* Special-cased port that always reads back 0x7F. */
    if (addr == 0xf007)
        return 0x7f;

    /* FPU-related port range: open bus. */
    if (addr >= 0xf0)
        return ret;

    /* The even (index) port is write-only; only odd addresses read data. */
    if (addr & 1) {
        switch (cyrix_addr) {
            case 0xc0:
                ret = ccr0;
                break;
            case 0xc1:
                ret = ccr1;
                break;
            case 0xc2:
                ret = ccr2;
                break;
            case 0xc3:
                ret = ccr3;
                break;
            case 0xe8: /* CCR4 is visible only when CCR3[7:4] == 1 */
                if ((ccr3 & 0xf0) == 0x10)
                    ret = ccr4;
                break;
            case 0xe9: /* CCR5, same visibility rule */
                if ((ccr3 & 0xf0) == 0x10)
                    ret = ccr5;
                break;
            case 0xea: /* CCR6, same visibility rule */
                if ((ccr3 & 0xf0) == 0x10)
                    ret = ccr6;
                break;
            case 0xfe: /* device ID, low byte */
                ret = cpu_s->cyrix_id & 0xff;
                break;
            case 0xff: /* device ID, high byte */
                ret = cpu_s->cyrix_id >> 8;
                break;
            default:
                /* Unimplemented indices (including the rest of the C0h
                   range and index 20h on the Cx5x86) read as 0xFF. */
                break;
        }
    }

    return ret;
}
/* Install the active interpreter opcode dispatch tables (one-byte opcodes
   and 0F-prefixed two-byte opcodes). When the dynamic recompiler is
   compiled in, the corresponding dynarec tables are installed as well. */
void
#ifdef USE_DYNAREC
x86_setopcodes(const OpFn *opcodes, const OpFn *opcodes_0f,
               const OpFn *dynarec_opcodes, const OpFn *dynarec_opcodes_0f)
{
    x86_opcodes            = opcodes;
    x86_opcodes_0f         = opcodes_0f;
    x86_dynarec_opcodes    = dynarec_opcodes;
    x86_dynarec_opcodes_0f = dynarec_opcodes_0f;
}
#else
x86_setopcodes(const OpFn *opcodes, const OpFn *opcodes_0f)
{
    x86_opcodes    = opcodes;
    x86_opcodes_0f = opcodes_0f;
}
#endif /* USE_DYNAREC */
/* Install the opcode dispatch tables (one-byte and 0F-prefixed two-byte)
   used by the 2386 execution core. */
void
x86_setopcodes_2386(const OpFn *opcodes, const OpFn *opcodes_0f)
{
    x86_2386_opcodes_0f = opcodes_0f;
    x86_2386_opcodes    = opcodes;
}
/* Recompute the per-access cycle counts and prefetch parameters for the
   currently selected CPU, taking internal/external cache state, configured
   waitstates and bus width into account. */
void
cpu_update_waitstates(void)
{
    int bus_mult;

    cpu_s = (CPU *) &cpu_f->cpus[cpu_effective];

    /* Prefetch queue width: 16 bytes on 486-class CPUs, otherwise the
       native bus width (2 bytes on a 16-bit bus, 4 on a 32-bit one). */
    cpu_prefetch_width = is486 ? 16 : (cpu_16bitbus ? 2 : 4);

    /* A 32-bit access costs two bus cycles on a 16-bit bus. */
    bus_mult = cpu_16bitbus ? 2 : 1;

    if (cpu_cache_int_enabled) {
        /* Disable prefetch emulation */
        cpu_prefetch_cycles = 0;
    } else if (cpu_waitstates && (cpu_s->cpu_type >= CPU_286 && cpu_s->cpu_type <= CPU_386DX)) {
        /* Waitstates override */
        const int ws_cycles = cpu_waitstates + 1;

        cpu_prefetch_cycles = ws_cycles;
        cpu_cycles_read     = ws_cycles;
        cpu_cycles_read_l   = bus_mult * ws_cycles;
        cpu_cycles_write    = ws_cycles;
        cpu_cycles_write_l  = bus_mult * ws_cycles;
    } else if (cpu_cache_ext_enabled) {
        /* Use cache timings */
        cpu_prefetch_cycles = cpu_s->cache_read_cycles;
        cpu_cycles_read     = cpu_s->cache_read_cycles;
        cpu_cycles_read_l   = bus_mult * cpu_s->cache_read_cycles;
        cpu_cycles_write    = cpu_s->cache_write_cycles;
        cpu_cycles_write_l  = bus_mult * cpu_s->cache_write_cycles;
    } else {
        /* Use memory timings */
        cpu_prefetch_cycles = cpu_s->mem_read_cycles;
        cpu_cycles_read     = cpu_s->mem_read_cycles;
        cpu_cycles_read_l   = bus_mult * cpu_s->mem_read_cycles;
        cpu_cycles_write    = cpu_s->mem_write_cycles;
        cpu_cycles_write_l  = bus_mult * cpu_s->mem_write_cycles;
    }

    /* 486-class prefetch is cheaper: scale by 11/16. */
    if (is486)
        cpu_prefetch_cycles = (cpu_prefetch_cycles * 11) / 16;

    cpu_mem_prefetch_cycles = cpu_prefetch_cycles;
    /* Slow (<= 8 MHz) CPUs fetch from ROM at RAM speed. */
    if (cpu_s->rspeed <= 8000000)
        cpu_rom_prefetch_cycles = cpu_mem_prefetch_cycles;
}
/* ==================================================================== */
/* NOTE: a second translation unit (the CPU/FPU table definitions)      */
/* follows below; it was concatenated onto cpu.c during extraction.     */
/* ==================================================================== */
/*
* 86Box A hypervisor and IBM PC system emulator that specializes in
* running old operating systems and software designed for IBM
* PC systems and compatibles from 1981 through fairly recent
* system designs based on the PCI bus.
*
* This file is part of the 86Box distribution.
*
* Define all known processor types.
*
*
*
 * Authors: Sarah Walker, <https://pcem-emulator.co.uk/>
* leilei,
* Miran Grca, <mgrca8@gmail.com>
* Fred N. van Kempen, <decwiz@yahoo.com>
* RichardG, <richardg867@gmail.com>
* dob205,
* Jasmine Iwanek, <jriwanek@gmail.com>
*
*/
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <wchar.h>
#include <86box/86box.h>
#include "cpu.h"
#include <86box/machine.h>
/* FPU option tables: each is a NULL-terminated list of the coprocessors
   selectable for a given CPU class. Referenced by the cpu_families table
   below via the .fpus field. */

/* CPUs with no FPU option at all. */
FPU fpus_none[] = {
    { .name = "None", .internal_name = "none", .type = FPU_NONE },
    { .name = NULL, .internal_name = NULL, .type = 0 }
};

/* 8086/8088-class: optional 8087. */
FPU fpus_8088[] = {
    { .name = "None", .internal_name = "none", .type = FPU_NONE },
    { .name = "8087", .internal_name = "8087", .type = FPU_8087 },
    { .name = NULL, .internal_name = NULL, .type = 0 }
};

/* 80186-class: 8087 or 80187. */
FPU fpus_80186[] = {
    { .name = "None", .internal_name = "none", .type = FPU_NONE },
    { .name = "8087", .internal_name = "8087", .type = FPU_8087 },
    { .name = "80187", .internal_name = "80187", .type = FPU_80187 },
    { .name = NULL, .internal_name = NULL, .type = 0 }
};

/* 80286-class: 287 or 287XL. */
FPU fpus_80286[] = {
    { .name = "None", .internal_name = "none", .type = FPU_NONE },
    { .name = "287", .internal_name = "287", .type = FPU_287 },
    { .name = "287XL", .internal_name = "287xl", .type = FPU_287XL },
    { .name = NULL, .internal_name = NULL, .type = 0 }
};

/* 80386-class: optional 387. */
FPU fpus_80386[] = {
    { .name = "None", .internal_name = "none", .type = FPU_NONE },
    { .name = "387", .internal_name = "387", .type = FPU_387 },
    { .name = NULL, .internal_name = NULL, .type = 0 }
};

/* 486SX-class: optional 487SX upgrade. */
FPU fpus_486sx[] = {
    { .name = "None", .internal_name = "none", .type = FPU_NONE },
    { .name = "487SX", .internal_name = "487sx", .type = FPU_487SX },
    { .name = NULL, .internal_name = NULL, .type = 0 }
};

/* CPUs with an on-die FPU (486DX and later). */
FPU fpus_internal[] = {
    { .name = "Internal", .internal_name = "internal", .type = FPU_INTERNAL },
    { .name = NULL, .internal_name = NULL, .type = 0 }
};
const cpu_family_t cpu_families[] = {
// clang-format off
{
.package = CPU_PKG_8088,
.manufacturer = "Intel",
.name = "8088",
.internal_name = "8088",
.cpus = (const CPU[]) {
{
.name = "4.77",
.cpu_type = CPU_8088,
.fpus = fpus_8088,
.rspeed = 4772728,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 1
},
{
.name = "7.16",
.cpu_type = CPU_8088,
.fpus = fpus_8088,
.rspeed = 7159092,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 1
},
{
.name = "8",
.cpu_type = CPU_8088,
.fpus = fpus_8088,
.rspeed = 8000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 1
},
#if 0
{
.name = "9.54",
.cpu_type = CPU_8088,
.fpus = fpus_8088,
.rspeed = 9545456,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 1
},
#endif
{
.name = "10",
.cpu_type = CPU_8088,
.fpus = fpus_8088,
.rspeed = 10000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 1
},
{
.name = "12",
.cpu_type = CPU_8088,
.fpus = fpus_8088,
.rspeed = 12000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 1
},
{
.name = "16",
.cpu_type = CPU_8088,
.fpus = fpus_8088,
.rspeed = 16000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 1
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_8088_EUROPC,
.manufacturer = "Intel",
.name = "8088",
.internal_name = "8088_europc",
.cpus = (const CPU[]) {
{
.name = "4.77",
.cpu_type = CPU_8088,
.fpus = fpus_8088,
.rspeed = 4772728,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = CPU_ALTERNATE_XTAL,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 1
},
{
.name = "7.16",
.cpu_type = CPU_8088,
.fpus = fpus_8088,
.rspeed = 7159092,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = CPU_ALTERNATE_XTAL,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 1
},
{
.name = "9.54",
.cpu_type = CPU_8088,
.fpus = fpus_8088,
.rspeed = 9545456,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 1
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_8086,
.manufacturer = "Intel",
.name = "8086",
.internal_name = "8086",
.cpus = (const CPU[]) {
{
.name = "7.16",
.cpu_type = CPU_8086,
.fpus = fpus_8088,
.rspeed = 7159092,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = CPU_ALTERNATE_XTAL,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 1
},
{
.name = "8",
.cpu_type = CPU_8086,
.fpus = fpus_8088,
.rspeed = 8000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 1
},
{
.name = "9.54",
.cpu_type = CPU_8086,
.fpus = fpus_8088,
.rspeed = 9545456,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = CPU_ALTERNATE_XTAL,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 1
},
{
.name = "10",
.cpu_type = CPU_8086,
.fpus = fpus_8088,
.rspeed = 10000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 1
},
{
.name = "12",
.cpu_type = CPU_8086,
.fpus = fpus_8088,
.rspeed = 12000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 1
},
{
.name = "16",
.cpu_type = CPU_8086,
.fpus = fpus_8088,
.rspeed = 16000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 2
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_188,
.manufacturer = "Intel",
.name = "80188",
.internal_name = "80188",
.cpus = (const CPU[]) {
{
.name = "6",
.cpu_type = CPU_188,
.fpus = fpus_8088,
.rspeed = 6000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 1
},
{
.name = "7.16",
.cpu_type = CPU_188,
.fpus = fpus_8088,
.rspeed = 7159092,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = CPU_ALTERNATE_XTAL,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 1
},
{
.name = "8",
.cpu_type = CPU_188,
.fpus = fpus_8088,
.rspeed = 8000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 1
},
{
.name = "9.54",
.cpu_type = CPU_188,
.fpus = fpus_8088,
.rspeed = 9545456,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = CPU_ALTERNATE_XTAL,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 1
},
{
.name = "10",
.cpu_type = CPU_188,
.fpus = fpus_8088,
.rspeed = 10000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 1
},
{
.name = "12",
.cpu_type = CPU_188,
.fpus = fpus_8088,
.rspeed = 12000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 1
},
{
.name = "16",
.cpu_type = CPU_188,
.fpus = fpus_8088,
.rspeed = 16000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 2
},
{
.name = "20",
.cpu_type = CPU_188,
.fpus = fpus_8088,
.rspeed = 20000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 3
},
{
.name = "25",
.cpu_type = CPU_188,
.fpus = fpus_8088,
.rspeed = 25000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 3
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_8088,
.manufacturer = "NEC",
.name = "V20",
.internal_name = "necv20",
.cpus = (const CPU[]) {
{
.name = "4.77",
.cpu_type = CPU_V20,
.fpus = fpus_8088,
.rspeed = 4772728,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 1
},
{
.name = "7.16",
.cpu_type = CPU_V20,
.fpus = fpus_8088,
.rspeed = 7159092,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 1
},
{
.name = "10",
.cpu_type = CPU_V20,
.fpus = fpus_8088,
.rspeed = 10000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 1
},
{
.name = "12",
.cpu_type = CPU_V20,
.fpus = fpus_8088,
.rspeed = 12000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 1
},
{
.name = "16",
.cpu_type = CPU_V20,
.fpus = fpus_8088,
.rspeed = 16000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 2
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_186,
.manufacturer = "Intel",
.name = "80186",
.internal_name = "80186",
.cpus = (const CPU[]) {
{
.name = "6",
.cpu_type = CPU_186,
.fpus = fpus_80186,
.rspeed = 6000000,
.multi = 1,
.voltage = 0,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 1
},
{
.name = "7.16",
.cpu_type = CPU_186,
.fpus = fpus_80186,
.rspeed = 7159092,
.multi = 1,
.voltage = 0,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = CPU_ALTERNATE_XTAL,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 1
},
{
.name = "8",
.cpu_type = CPU_186,
.fpus = fpus_80186,
.rspeed = 8000000,
.multi = 1,
.voltage = 0,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 1
},
{
.name = "9.54",
.cpu_type = CPU_186,
.fpus = fpus_80186,
.rspeed = 9545456,
.multi = 1,
.voltage = 0,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = CPU_ALTERNATE_XTAL,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 1
},
{
.name = "10",
.cpu_type = CPU_186,
.fpus = fpus_80186,
.rspeed = 10000000,
.multi = 1,
.voltage = 0,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 1
},
{
.name = "12",
.cpu_type = CPU_186,
.fpus = fpus_80186,
.rspeed = 12000000,
.multi = 1,
.voltage = 0,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 1
},
{
.name = "16",
.cpu_type = CPU_186,
.fpus = fpus_80186,
.rspeed = 16000000,
.multi = 1,
.voltage = 0,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 2
},
{
.name = "20",
.cpu_type = CPU_186,
.fpus = fpus_80186,
.rspeed = 20000000,
.multi = 1,
.voltage = 0,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 3
},
{
.name = "25",
.cpu_type = CPU_186,
.fpus = fpus_80186,
.rspeed = 25000000,
.multi = 1,
.voltage = 0,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 3
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_8086,
.manufacturer = "NEC",
.name = "V30",
.internal_name = "necv30",
.cpus = (const CPU[]) {
{
.name = "5",
.cpu_type = CPU_V30,
.fpus = fpus_80186,
.rspeed = 5000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 1
},
{
.name = "8",
.cpu_type = CPU_V30,
.fpus = fpus_80186,
.rspeed = 8000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 1
},
{
.name = "10",
.cpu_type = CPU_V30,
.fpus = fpus_80186,
.rspeed = 10000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 1
},
{
.name = "12",
.cpu_type = CPU_V30,
.fpus = fpus_80186,
.rspeed = 12000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 1
},
{
.name = "16",
.cpu_type = CPU_V30,
.fpus = fpus_80186,
.rspeed = 16000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 0,
.mem_write_cycles = 0,
.cache_read_cycles = 0,
.cache_write_cycles = 0,
.atclk_div = 2
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_286,
.manufacturer = "Intel",
.name = "80286",
.internal_name = "286",
.cpus = (const CPU[]) {
{
.name = "6",
.cpu_type = CPU_286,
.fpus = fpus_80286,
.rspeed = 6000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 2,
.mem_write_cycles = 2,
.cache_read_cycles = 2,
.cache_write_cycles = 2,
.atclk_div = 1
},
{
.name = "8",
.cpu_type = CPU_286,
.fpus = fpus_80286,
.rspeed = 8000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 2,
.mem_write_cycles = 2,
.cache_read_cycles = 2,
.cache_write_cycles = 2,
.atclk_div = 1
},
{
.name = "10",
.cpu_type = CPU_286,
.fpus = fpus_80286,
.rspeed = 10000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 2,
.mem_write_cycles = 2,
.cache_read_cycles = 2,
.cache_write_cycles = 2,
.atclk_div = 1
},
{
.name = "12",
.cpu_type = CPU_286,
.fpus = fpus_80286,
.rspeed = 12500000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 3,
.mem_write_cycles = 3,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 2
},
{
.name = "16",
.cpu_type = CPU_286,
.fpus = fpus_80286,
.rspeed = 16000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 3,
.mem_write_cycles = 3,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 2
},
{
.name = "20",
.cpu_type = CPU_286,
.fpus = fpus_80286,
.rspeed = 20000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 4,
.mem_write_cycles = 4,
.cache_read_cycles = 4,
.cache_write_cycles = 4,
.atclk_div = 3
},
{
.name = "25",
.cpu_type = CPU_286,
.fpus = fpus_80286,
.rspeed = 25000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 4,
.mem_write_cycles = 4,
.cache_read_cycles = 4,
.cache_write_cycles = 4,
.atclk_div = 3
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_386SX,
.manufacturer = "Intel",
.name = "i386SX",
.internal_name = "i386sx",
.cpus = (const CPU[]) {
{
.name = "16",
.cpu_type = CPU_386SX,
.fpus = fpus_80386,
.rspeed = 16000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x2308,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 3,
.mem_write_cycles = 3,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 2
},
{
.name = "20",
.cpu_type = CPU_386SX,
.fpus = fpus_80386,
.rspeed = 20000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x2308,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 4,
.mem_write_cycles = 4,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 3
},
{
.name = "25",
.cpu_type = CPU_386SX,
.fpus = fpus_80386,
.rspeed = 25000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x2308,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 4,
.mem_write_cycles = 4,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 3
},
{
.name = "33",
.cpu_type = CPU_386SX,
.fpus = fpus_80386,
.rspeed = 33333333,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x2308,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 6,
.mem_write_cycles = 6,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 4
},
{
.name = "40",
.cpu_type = CPU_386SX,
.fpus = fpus_80386,
.rspeed = 40000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x2308,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 7,
.mem_write_cycles = 7,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 5
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_386SX,
.manufacturer = "AMD",
.name = "Am386SX",
.internal_name = "am386sx",
.cpus = (const CPU[]) {
{
.name = "16",
.cpu_type = CPU_386SX,
.fpus = fpus_80386,
.rspeed = 16000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x2308,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 3,
.mem_write_cycles = 3,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 2
},
{
.name = "20",
.cpu_type = CPU_386SX,
.fpus = fpus_80386,
.rspeed = 20000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x2308,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 4,
.mem_write_cycles = 4,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 3
},
{
.name = "25",
.cpu_type = CPU_386SX,
.fpus = fpus_80386,
.rspeed = 25000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x2308,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 4,
.mem_write_cycles = 4,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 3
},
{
.name = "33",
.cpu_type = CPU_386SX,
.fpus = fpus_80386,
.rspeed = 33333333,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x2308,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 6,
.mem_write_cycles = 6,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 4
},
{
.name = "40",
.cpu_type = CPU_386SX,
.fpus = fpus_80386,
.rspeed = 40000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x2308,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 7,
.mem_write_cycles = 7,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 5
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_386DX,
.manufacturer = "Intel",
.name = "i386DX",
.internal_name = "i386dx",
.cpus = (const CPU[]) {
{
.name = "16",
.cpu_type = CPU_386DX,
.fpus = fpus_80386,
.rspeed = 16000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x0308,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 3,
.mem_write_cycles = 3,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 2
},
{
.name = "20",
.cpu_type = CPU_386DX,
.fpus = fpus_80386,
.rspeed = 20000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x0308,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 4,
.mem_write_cycles = 4,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 3
},
{
.name = "25",
.cpu_type = CPU_386DX,
.fpus = fpus_80386,
.rspeed = 25000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x0308,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 4,
.mem_write_cycles = 4,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 3
},
{
.name = "33",
.cpu_type = CPU_386DX,
.fpus = fpus_80386,
.rspeed = 33333333,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x0308,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 6,
.mem_write_cycles = 6,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 4
},
{
.name = "40",
.cpu_type = CPU_386DX,
.fpus = fpus_80386,
.rspeed = 40000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x0308,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 7,
.mem_write_cycles = 7,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 5
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_386DX_DESKPRO386,
.manufacturer = "Intel",
.name = "i386DX",
.internal_name = "i386dx_deskpro386",
.cpus = (const CPU[]) {
{
.name = "16",
.cpu_type = CPU_386DX,
.fpus = fpus_80286,
.rspeed = 16000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x0308,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 3,
.mem_write_cycles = 3,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 2
},
{
.name = "20",
.cpu_type = CPU_386DX,
.fpus = fpus_80386,
.rspeed = 20000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x0308,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 4,
.mem_write_cycles = 4,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 3
},
{
.name = "25",
.cpu_type = CPU_386DX,
.fpus = fpus_80386,
.rspeed = 25000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x0308,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 4,
.mem_write_cycles = 4,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 3
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_386DX,
.manufacturer = "Intel",
.name = "RapidCAD",
.internal_name = "rapidcad",
.cpus = (const CPU[]) {
{
.name = "25",
.cpu_type = CPU_RAPIDCAD,
.fpus = fpus_internal,
.rspeed = 25000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x0340,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 4,
.mem_write_cycles = 4,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 3
},
{
.name = "33",
.cpu_type = CPU_RAPIDCAD,
.fpus = fpus_internal,
.rspeed = 33333333,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x0340,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 6,
.mem_write_cycles = 6,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 4
},
{
.name = "40",
.cpu_type = CPU_RAPIDCAD,
.fpus = fpus_internal,
.rspeed = 40000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x0340,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 7,
.mem_write_cycles = 7,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 5
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_386DX,
.manufacturer = "AMD",
.name = "Am386DX",
.internal_name = "am386dx",
.cpus = (const CPU[]) {
{
.name = "25",
.cpu_type = CPU_386DX,
.fpus = fpus_80386,
.rspeed = 25000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x0308,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 4,
.mem_write_cycles = 4,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 3
},
{
.name = "33",
.cpu_type = CPU_386DX,
.fpus = fpus_80386,
.rspeed = 33333333,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x0308,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 6,
.mem_write_cycles = 6,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 4
},
{
.name = "40",
.cpu_type = CPU_386DX,
.fpus = fpus_80386,
.rspeed = 40000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x0308,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 7,
.mem_write_cycles = 7,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 5
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_M6117,
.manufacturer = "ALi",
.name = "M6117",
.internal_name = "m6117",
.cpus = (const CPU[]) { /* All timings and edx_reset values assumed. */
{
.name = "33",
.cpu_type = CPU_386SX,
.fpus = fpus_none,
.rspeed = 33333333,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x2309,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 6,
.mem_write_cycles = 6,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 4
},
{
.name = "40",
.cpu_type = CPU_386SX,
.fpus = fpus_none,
.rspeed = 40000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x2309,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 7,
.mem_write_cycles = 7,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 5
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_386SLC_IBM,
.manufacturer = "IBM",
.name = "386SLC",
.internal_name = "ibm386slc",
.cpus = (const CPU[]) {
{
.name = "16",
.cpu_type = CPU_IBM386SLC,
.fpus = fpus_80386,
.rspeed = 16000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0xA301,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 3,
.mem_write_cycles = 3,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 2
},
{
.name = "20",
.cpu_type = CPU_IBM386SLC,
.fpus = fpus_80386,
.rspeed = 20000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0xA301,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 4,
.mem_write_cycles = 4,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 3
},
{
.name = "25",
.cpu_type = CPU_IBM386SLC,
.fpus = fpus_80386,
.rspeed = 25000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0xA301,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 4,
.mem_write_cycles = 4,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 3
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_386SX,
.manufacturer = "Cyrix",
.name = "Cx486SLC",
.internal_name = "cx486slc",
.cpus = (const CPU[]) {
{
.name = "20",
.cpu_type = CPU_486SLC,
.fpus = fpus_80386,
.rspeed = 20000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x400,
.cpuid_model = 0,
.cyrix_id = 0x0000,
.cpu_flags = 0,
.mem_read_cycles = 4,
.mem_write_cycles = 4,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 3
},
{
.name = "25",
.cpu_type = CPU_486SLC,
.fpus = fpus_80386,
.rspeed = 25000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x400,
.cpuid_model = 0,
.cyrix_id = 0x0000,
.cpu_flags = 0,
.mem_read_cycles = 4,
.mem_write_cycles = 4,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 3
},
{
.name = "33",
.cpu_type = CPU_486SLC,
.fpus = fpus_80386,
.rspeed = 33333333,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x400,
.cpuid_model = 0,
.cyrix_id = 0x0000,
.cpu_flags = 0,
.mem_read_cycles = 6,
.mem_write_cycles = 6,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 4
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_386SX,
.manufacturer = "Cyrix",
.name = "Cx486SRx2",
.internal_name = "cx486srx2",
.cpus = (const CPU[]) {
{
.name = "32",
.cpu_type = CPU_486SLC,
.fpus = fpus_80386,
.rspeed = 32000000,
.multi = 2,
.voltage = 5000,
.edx_reset = 0x406,
.cpuid_model = 0,
.cyrix_id = 0x0006,
.cpu_flags = 0,
.mem_read_cycles = 6,
.mem_write_cycles = 6,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 4
},
{
.name = "40",
.cpu_type = CPU_486SLC,
.fpus = fpus_80386,
.rspeed = 40000000,
.multi = 2,
.voltage = 5000,
.edx_reset = 0x406,
.cpuid_model = 0,
.cyrix_id = 0x0006,
.cpu_flags = 0,
.mem_read_cycles = 8,
.mem_write_cycles = 8,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 6
},
{
.name = "50",
.cpu_type = CPU_486SLC,
.fpus = fpus_80386,
.rspeed = 50000000,
.multi = 2,
.voltage = 5000,
.edx_reset = 0x406,
.cpuid_model = 0,
.cyrix_id = 0x0006,
.cpu_flags = 0,
.mem_read_cycles = 8,
.mem_write_cycles = 8,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 6
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_486SLC_IBM,
.manufacturer = "IBM",
.name = "486SLC",
.internal_name = "ibm486slc",
.cpus = (const CPU[]) {
{
.name = "33",
.cpu_type = CPU_IBM486SLC,
.fpus = fpus_80386,
.rspeed = 33333333,
.multi = 1,
.voltage = 5000,
.edx_reset = 0xA401,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 6,
.mem_write_cycles = 6,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 4
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_486SLC_IBM,
.manufacturer = "IBM",
.name = "486SLC2",
.internal_name = "ibm486slc2",
.cpus = (const CPU[]) {
{
.name = "40",
.cpu_type = CPU_IBM486SLC,
.fpus = fpus_80386,
.rspeed = 40000000,
.multi = 2,
.voltage = 5000,
.edx_reset = 0xA421,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 7,
.mem_write_cycles = 7,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 5
},
{
.name = "50",
.cpu_type = CPU_IBM486SLC,
.fpus = fpus_80386,
.rspeed = 50000000,
.multi = 2,
.voltage = 5000,
.edx_reset = 0xA421,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 8,
.mem_write_cycles = 8,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 6
},
{
.name = "66",
.cpu_type = CPU_IBM486SLC,
.fpus = fpus_80386,
.rspeed = 66666666,
.multi = 2,
.voltage = 5000,
.edx_reset = 0xA421,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 12,
.mem_write_cycles = 12,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 8
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_486SLC_IBM,
.manufacturer = "IBM",
.name = "486SLC3",
.internal_name = "ibm486slc3",
.cpus = (const CPU[]) {
{
.name = "60",
.cpu_type = CPU_IBM486SLC,
.fpus = fpus_80386,
.rspeed = 60000000,
.multi = 3,
.voltage = 5000,
.edx_reset = 0xA439,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 12,
.mem_write_cycles = 12,
.cache_read_cycles = 9,
.cache_write_cycles = 9,
.atclk_div = 7
},
{
.name = "75",
.cpu_type = CPU_IBM486SLC,
.fpus = fpus_80386,
.rspeed = 75000000,
.multi = 3,
.voltage = 5000,
.edx_reset = 0xA439,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 12,
.mem_write_cycles = 12,
.cache_read_cycles = 9,
.cache_write_cycles = 9,
.atclk_div = 9
},
{
.name = "100",
.cpu_type = CPU_IBM486SLC,
.fpus = fpus_80386,
.rspeed = 100000000,
.multi = 3,
.voltage = 5000,
.edx_reset = 0xA439,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 18,
.mem_write_cycles = 18,
.cache_read_cycles = 9,
.cache_write_cycles = 9,
.atclk_div = 12
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_486BL,
.manufacturer = "IBM",
.name = "486BL2",
.internal_name = "ibm486bl2",
.cpus = (const CPU[]) {
{
.name = "50",
.cpu_type = CPU_IBM486BL,
.fpus = fpus_80386,
.rspeed = 50000000,
.multi = 2,
.voltage = 5000,
.edx_reset = 0x8439,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 8,
.mem_write_cycles = 8,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 6
},
{
.name = "66",
.cpu_type = CPU_IBM486BL,
.fpus = fpus_80386,
.rspeed = 66666666,
.multi = 2,
.voltage = 5000,
.edx_reset = 0x8439,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 12,
.mem_write_cycles = 12,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 8
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_486BL,
.manufacturer = "IBM",
.name = "486BL3",
.internal_name = "ibm486bl3",
.cpus = (const CPU[]) {
{
.name = "75",
.cpu_type = CPU_IBM486BL,
.fpus = fpus_80386,
.rspeed = 75000000,
.multi = 3,
.voltage = 5000,
.edx_reset = 0x8439,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 12,
.mem_write_cycles = 12,
.cache_read_cycles = 9,
.cache_write_cycles = 9,
.atclk_div = 9
},
{
.name = "100",
.cpu_type = CPU_IBM486BL,
.fpus = fpus_80386,
.rspeed = 100000000,
.multi = 3,
.voltage = 5000,
.edx_reset = 0x8439,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = 0,
.mem_read_cycles = 18,
.mem_write_cycles = 18,
.cache_read_cycles = 9,
.cache_write_cycles = 9,
.atclk_div = 12
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_386DX,
.manufacturer = "Cyrix",
.name = "Cx486DLC",
.internal_name = "cx486dlc",
.cpus = (const CPU[]) {
{
.name = "25",
.cpu_type = CPU_486DLC,
.fpus = fpus_80386,
.rspeed = 25000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x401,
.cpuid_model = 0,
.cyrix_id = 0x0001,
.cpu_flags = 0,
.mem_read_cycles = 4,
.mem_write_cycles = 4,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 3
},
{
.name = "33",
.cpu_type = CPU_486DLC,
.fpus = fpus_80386,
.rspeed = 33333333,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x401,
.cpuid_model = 0,
.cyrix_id = 0x0001,
.cpu_flags = 0,
.mem_read_cycles = 6,
.mem_write_cycles = 6,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 4
},
{
.name = "40",
.cpu_type = CPU_486DLC,
.fpus = fpus_80386,
.rspeed = 40000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x401,
.cpuid_model = 0,
.cyrix_id = 0x0001,
.cpu_flags = 0,
.mem_read_cycles = 7,
.mem_write_cycles = 7,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 5
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_386DX,
.manufacturer = "Cyrix",
.name = "Cx486DRx2",
.internal_name = "cx486drx2",
.cpus = (const CPU[]) {
{
.name = "32",
.cpu_type = CPU_486DLC,
.fpus = fpus_80386,
.rspeed = 32000000,
.multi = 2,
.voltage = 5000,
.edx_reset = 0x407,
.cpuid_model = 0,
.cyrix_id = 0x0007,
.cpu_flags = 0,
.mem_read_cycles = 6,
.mem_write_cycles = 6,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 4
},
{
.name = "40",
.cpu_type = CPU_486DLC,
.fpus = fpus_80386,
.rspeed = 40000000,
.multi = 2,
.voltage = 5000,
.edx_reset = 0x407,
.cpuid_model = 0,
.cyrix_id = 0x0007,
.cpu_flags = 0,
.mem_read_cycles = 8,
.mem_write_cycles = 8,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 6
},
{
.name = "50",
.cpu_type = CPU_486DLC,
.fpus = fpus_80386,
.rspeed = 50000000,
.multi = 2,
.voltage = 5000,
.edx_reset = 0x407,
.cpuid_model = 0,
.cyrix_id = 0x0007,
.cpu_flags = 0,
.mem_read_cycles = 8,
.mem_write_cycles = 8,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 6
},
{
.name = "66",
.cpu_type = CPU_486DLC,
.fpus = fpus_80386,
.rspeed = 66666666,
.multi = 2,
.voltage = 5000,
.edx_reset = 0x407,
.cpuid_model = 0,
.cyrix_id = 0x0007,
.cpu_flags = 0,
.mem_read_cycles = 12,
.mem_write_cycles = 12,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 8
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_SOCKET1,
.manufacturer = "Intel",
.name = "i486SX",
.internal_name = "i486sx",
.cpus = (const CPU[]) {
{
.name = "16",
.cpu_type = CPU_i486SX,
.fpus = fpus_486sx,
.rspeed = 16000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x420,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 3,
.mem_write_cycles = 3,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 2
},
{
.name = "20",
.cpu_type = CPU_i486SX,
.fpus = fpus_486sx,
.rspeed = 20000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x420,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 4,
.mem_write_cycles = 4,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 3
},
{
.name = "25",
.cpu_type = CPU_i486SX,
.fpus = fpus_486sx,
.rspeed = 25000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x422,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 4,
.mem_write_cycles = 4,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 3
},
{
.name = "33",
.cpu_type = CPU_i486SX,
.fpus = fpus_486sx,
.rspeed = 33333333,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x422,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 6,
.mem_write_cycles = 6,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 4
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_SOCKET1,
.manufacturer = "Intel",
.name = "i486SX-S",
.internal_name = "i486sx_slenh",
.cpus = (const CPU[]) {
{
.name = "25",
.cpu_type = CPU_i486SX_SLENH,
.fpus = fpus_486sx,
.rspeed = 25000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x423,
.cpuid_model = 0x423,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 4,
.mem_write_cycles = 4,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 3
},
{
.name = "33",
.cpu_type = CPU_i486SX_SLENH,
.fpus = fpus_486sx,
.rspeed = 33333333,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x42a,
.cpuid_model = 0x42a,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 6,
.mem_write_cycles = 6,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 4
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_SOCKET1,
.manufacturer = "Intel",
.name = "i486SX2",
.internal_name = "i486sx2",
.cpus = (const CPU[]) {
{
.name = "50",
.cpu_type = CPU_i486SX_SLENH,
.fpus = fpus_486sx,
.rspeed = 50000000,
.multi = 2,
.voltage = 5000,
.edx_reset = 0x45b,
.cpuid_model = 0x45b,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 8,
.mem_write_cycles = 8,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 6
},
{
.name = "66 (Q0569)",
.cpu_type = CPU_i486SX_SLENH,
.fpus = fpus_486sx,
.rspeed = 66666666,
.multi = 2,
.voltage = 5000,
.edx_reset = 0x45b,
.cpuid_model = 0x45b,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 8,
.mem_write_cycles = 8,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 8
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_SOCKET1,
.manufacturer = "Intel",
.name = "i486DX",
.internal_name = "i486dx",
.cpus = (const CPU[]) {
{
.name = "25",
.cpu_type = CPU_i486DX,
.fpus = fpus_internal,
.rspeed = 25000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x404,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 4,
.mem_write_cycles = 4,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 3
},
{
.name = "33",
.cpu_type = CPU_i486DX,
.fpus = fpus_internal,
.rspeed = 33333333,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x404,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 6,
.mem_write_cycles = 6,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 4
},
{
.name = "50",
.cpu_type = CPU_i486DX,
.fpus = fpus_internal,
.rspeed = 50000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x411,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 8,
.mem_write_cycles = 8,
.cache_read_cycles = 4,
.cache_write_cycles = 4,
.atclk_div = 6
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_SOCKET1,
.manufacturer = "Intel",
.name = "i486DX-S",
.internal_name = "i486dx_slenh",
.cpus = (const CPU[]) {
{
.name = "33",
.cpu_type = CPU_i486DX_SLENH,
.fpus = fpus_internal,
.rspeed = 33333333,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x414,
.cpuid_model = 0x414,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 6,
.mem_write_cycles = 6,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 4
},
{
.name = "50",
.cpu_type = CPU_i486DX_SLENH,
.fpus = fpus_internal,
.rspeed = 50000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x414,
.cpuid_model = 0x414,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 8,
.mem_write_cycles = 8,
.cache_read_cycles = 4,
.cache_write_cycles = 4,
.atclk_div = 6
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_SOCKET1,
.manufacturer = "Intel",
.name = "i486DX2",
.internal_name = "i486dx2",
.cpus = (const CPU[]) {
{
.name = "40",
.cpu_type = CPU_i486DX,
.fpus = fpus_internal,
.rspeed = 40000000,
.multi = 2,
.voltage = 5000,
.edx_reset = 0x430,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 7,
.mem_write_cycles = 7,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 5
},
{
.name = "50",
.cpu_type = CPU_i486DX,
.fpus = fpus_internal,
.rspeed = 50000000,
.multi = 2,
.voltage = 5000,
.edx_reset = 0x433,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 8,
.mem_write_cycles = 8,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 6
},
{
.name = "66",
.cpu_type = CPU_i486DX,
.fpus = fpus_internal,
.rspeed = 66666666,
.multi = 2,
.voltage = 5000,
.edx_reset = 0x433,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 12,
.mem_write_cycles = 12,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 8
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_SOCKET1,
.manufacturer = "Intel",
.name = "i486DX2-S",
.internal_name = "i486dx2_slenh",
.cpus = (const CPU[]) {
{
.name = "40",
.cpu_type = CPU_i486DX_SLENH,
.fpus = fpus_internal,
.rspeed = 40000000,
.multi = 2,
.voltage = 5000,
.edx_reset = 0x435,
.cpuid_model = 0x435,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 7,
.mem_write_cycles = 7,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 5
},
{
.name = "50",
.cpu_type = CPU_i486DX_SLENH,
.fpus = fpus_internal,
.rspeed = 50000000,
.multi = 2,
.voltage = 5000,
.edx_reset = 0x435,
.cpuid_model = 0x435,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 8,
.mem_write_cycles = 8,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 6
},
{
.name = "66",
.cpu_type = CPU_i486DX_SLENH,
.fpus = fpus_internal,
.rspeed = 66666666,
.multi = 2,
.voltage = 5000,
.edx_reset = 0x435,
.cpuid_model = 0x435,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 12,
.mem_write_cycles = 12,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 8
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_SOCKET1 | CPU_PKG_SOCKET3_PC330,
.manufacturer = "Intel",
.name = "i486DX2 WB",
.internal_name = "i486dx2_pc330",
.cpus = (const CPU[]) {
{
.name = "50",
.cpu_type = CPU_i486DX_SLENH,
.fpus = fpus_internal,
.rspeed = 50000000,
.multi = 2,
.voltage = 5000,
.edx_reset = 0x436,
.cpuid_model = 0x436,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 8,
.mem_write_cycles = 8,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 6
},
{
.name = "66",
.cpu_type = CPU_i486DX_SLENH,
.fpus = fpus_internal,
.rspeed = 66666666,
.multi = 2,
.voltage = 5000,
.edx_reset = 0x436,
.cpuid_model = 0x436,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 12,
.mem_write_cycles = 12,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 8
},
{ .name = "", 0 }
}
},
{ /*OEM versions are 3.3V, Retail versions are 3.3V with a 5V regulator for installation in older boards. They are functionally identical*/
.package = CPU_PKG_SOCKET1 | CPU_PKG_SOCKET3_PC330,
.manufacturer = "Intel",
.name = "iDX4",
.internal_name = "idx4",
.cpus = (const CPU[]) {
{
.name = "75",
.cpu_type = CPU_i486DX_SLENH,
.fpus = fpus_internal,
.rspeed = 75000000,
.multi = 3.0,
.voltage = 3300,
.edx_reset = 0x480,
.cpuid_model = 0x480,
.cyrix_id = 0x0000,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 12,
.mem_write_cycles = 12,
.cache_read_cycles = 9,
.cache_write_cycles = 9,
.atclk_div = 9
},
{
.name = "100",
.cpu_type = CPU_i486DX_SLENH,
.fpus = fpus_internal,
.rspeed = 100000000,
.multi = 3.0,
.voltage = 3300,
.edx_reset = 0x483,
.cpuid_model = 0x483,
.cyrix_id = 0x0000,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 18,
.mem_write_cycles = 18,
.cache_read_cycles = 9,
.cache_write_cycles = 9,
.atclk_div = 12
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_SOCKET3 | CPU_PKG_SOCKET3_PC330,
.manufacturer = "Intel",
.name = "Pentium OverDrive",
.internal_name = "pentium_p24t",
.cpus = (const CPU[]) {
{
.name = "63",
.cpu_type = CPU_P24T,
.fpus = fpus_internal,
.rspeed = 62500000,
.multi = 2.5,
.voltage = 5000,
.edx_reset = 0x1531,
.cpuid_model = 0x1531,
.cyrix_id = 0x0000,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 10,
.mem_write_cycles = 10,
.cache_read_cycles = 7,
.cache_write_cycles = 7,
.atclk_div = 15/2
},
{
.name = "83",
.cpu_type = CPU_P24T,
.fpus = fpus_internal,
.rspeed = 83333333,
.multi = 2.5,
.voltage = 5000,
.edx_reset = 0x1532,
.cpuid_model = 0x1532,
.cyrix_id = 0x0000,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 15,
.mem_write_cycles = 15,
.cache_read_cycles = 8,
.cache_write_cycles = 8,
.atclk_div = 10
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_SOCKET1,
.manufacturer = "AMD",
.name = "Am486SX",
.internal_name = "am486sx",
.cpus = (const CPU[]) {
{
.name = "33",
.cpu_type = CPU_Am486SX,
.fpus = fpus_486sx,
.rspeed = 33333333,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x422,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 6,
.mem_write_cycles = 6,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 4
},
{
.name = "40",
.cpu_type = CPU_Am486SX,
.fpus = fpus_486sx,
.rspeed = 40000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x422,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 7,
.mem_write_cycles = 7,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 5
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_SOCKET1,
.manufacturer = "AMD",
.name = "Am486SX2",
.internal_name = "am486sx2",
.cpus = (const CPU[]) {
{
.name = "50",
.cpu_type = CPU_Am486SX,
.fpus = fpus_486sx,
.rspeed = 50000000,
.multi = 2,
.voltage = 5000,
.edx_reset = 0x45b,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 8,
.mem_write_cycles = 8,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 6
},
{
.name = "66",
.cpu_type = CPU_Am486SX,
.fpus = fpus_486sx,
.rspeed = 66666666,
.multi = 2,
.voltage = 5000,
.edx_reset = 0x45b,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 12,
.mem_write_cycles = 12,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 8
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_SOCKET1,
.manufacturer = "AMD",
.name = "Am486DX",
.internal_name = "am486dx",
.cpus = (const CPU[]) {
{
.name = "33",
.cpu_type = CPU_Am486DX,
.fpus = fpus_internal,
.rspeed = 33333333,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x412,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 6,
.mem_write_cycles = 6,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 4
},
{
.name = "40",
.cpu_type = CPU_Am486DX,
.fpus = fpus_internal,
.rspeed = 40000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x412,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 7,
.mem_write_cycles = 7,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 5
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_SOCKET1,
.manufacturer = "AMD",
.name = "Am486DX2",
.internal_name = "am486dx2",
.cpus = (const CPU[]) {
{
.name = "50",
.cpu_type = CPU_Am486DX,
.fpus = fpus_internal,
.rspeed = 50000000,
.multi = 2,
.voltage = 5000,
.edx_reset = 0x432,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 8,
.mem_write_cycles = 8,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 6
},
{
.name = "66",
.cpu_type = CPU_Am486DX,
.fpus = fpus_internal,
.rspeed = 66666666,
.multi = 2,
.voltage = 5000,
.edx_reset = 0x432,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 12,
.mem_write_cycles = 12,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 8
},
{
.name = "80",
.cpu_type = CPU_Am486DX,
.fpus = fpus_internal,
.rspeed = 80000000,
.multi = 2,
.voltage = 5000,
.edx_reset = 0x432,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 14,
.mem_write_cycles = 14,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 10
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_SOCKET1,
.manufacturer = "AMD",
.name = "Am486DXL",
.internal_name = "am486dxl",
.cpus = (const CPU[]) {
{
.name = "33",
.cpu_type = CPU_Am486DXL,
.fpus = fpus_internal,
.rspeed = 33333333,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x422,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 6,
.mem_write_cycles = 6,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 4
},
{
.name = "40",
.cpu_type = CPU_Am486DXL,
.fpus = fpus_internal,
.rspeed = 40000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x422,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 7,
.mem_write_cycles = 7,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 5
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_SOCKET1,
.manufacturer = "AMD",
.name = "Am486DXL2",
.internal_name = "am486dxl2",
.cpus = (const CPU[]) {
{
.name = "50",
.cpu_type = CPU_Am486DXL,
.fpus = fpus_internal,
.rspeed = 50000000,
.multi = 2,
.voltage = 5000,
.edx_reset = 0x432,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 8,
.mem_write_cycles = 8,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 6
},
{
.name = "66",
.cpu_type = CPU_Am486DXL,
.fpus = fpus_internal,
.rspeed = 66666666,
.multi = 2,
.voltage = 5000,
.edx_reset = 0x432,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 12,
.mem_write_cycles = 12,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 8
},
{
.name = "80",
.cpu_type = CPU_Am486DXL,
.fpus = fpus_internal,
.rspeed = 80000000,
.multi = 2,
.voltage = 5000,
.edx_reset = 0x432,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 14,
.mem_write_cycles = 14,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 10
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_SOCKET3,
.manufacturer = "AMD",
.name = "Am486DX4",
.internal_name = "am486dx4",
.cpus = (const CPU[]) {
{
.name = "75",
.cpu_type = CPU_Am486DX,
.fpus = fpus_internal,
.rspeed = 75000000,
.multi = 3.0,
.voltage = 5000,
.edx_reset = 0x432,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 12,
.mem_write_cycles = 12,
.cache_read_cycles = 9,
.cache_write_cycles = 9,
.atclk_div = 9
},
{
.name = "90",
.cpu_type = CPU_Am486DX,
.fpus = fpus_internal,
.rspeed = 90000000,
.multi = 3.0,
.voltage = 5000,
.edx_reset = 0x432,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 15,
.mem_write_cycles = 15,
.cache_read_cycles = 9,
.cache_write_cycles = 9,
.atclk_div = 12
},
{
.name = "100",
.cpu_type = CPU_Am486DX,
.fpus = fpus_internal,
.rspeed = 100000000,
.multi = 3.0,
.voltage = 5000,
.edx_reset = 0x432,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 15,
.mem_write_cycles = 15,
.cache_read_cycles = 9,
.cache_write_cycles = 9,
.atclk_div = 12
},
{
.name = "120",
.cpu_type = CPU_Am486DX,
.fpus = fpus_internal,
.rspeed = 120000000,
.multi = 3.0,
.voltage = 5000,
.edx_reset = 0x432,
.cpuid_model = 0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 21,
.mem_write_cycles = 21,
.cache_read_cycles = 9,
.cache_write_cycles = 9,
.atclk_div = 15
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_SOCKET3,
.manufacturer = "AMD",
.name = "Am486DX2 (Enhanced)",
.internal_name = "am486dx2_slenh",
.cpus = (const CPU[]) {
{
.name = "66",
.cpu_type = CPU_ENH_Am486DX,
.fpus = fpus_internal,
.rspeed = 66666666,
.multi = 2,
.voltage = 5000,
.edx_reset = 0x435,
.cpuid_model = 0x435,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 12,
.mem_write_cycles = 12,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 8
},
{
.name = "80",
.cpu_type = CPU_ENH_Am486DX,
.fpus = fpus_internal,
.rspeed = 80000000,
.multi = 2,
.voltage = 5000,
.edx_reset = 0x435,
.cpuid_model = 0x435,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 14,
.mem_write_cycles = 14,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 10
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_SOCKET3,
.manufacturer = "AMD",
.name = "Am486DX4 (Enhanced)",
.internal_name = "am486dx4_slenh",
.cpus = (const CPU[]) {
{
.name = "75",
.cpu_type = CPU_ENH_Am486DX,
.fpus = fpus_internal,
.rspeed = 75000000,
.multi = 3.0,
.voltage = 5000,
.edx_reset = 0x482,
.cpuid_model = 0x482,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 12,
.mem_write_cycles = 12,
.cache_read_cycles = 9,
.cache_write_cycles = 9,
.atclk_div = 9
},
{
.name = "100",
.cpu_type = CPU_ENH_Am486DX,
.fpus = fpus_internal,
.rspeed = 100000000,
.multi = 3.0,
.voltage = 5000,
.edx_reset = 0x482,
.cpuid_model = 0x482,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 15,
.mem_write_cycles = 15,
.cache_read_cycles = 9,
.cache_write_cycles = 9,
.atclk_div = 12
},
{
.name = "120",
.cpu_type = CPU_ENH_Am486DX,
.fpus = fpus_internal,
.rspeed = 120000000,
.multi = 3.0,
.voltage = 5000,
.edx_reset = 0x482,
.cpuid_model = 0x482,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 21,
.mem_write_cycles = 21,
.cache_read_cycles = 9,
.cache_write_cycles = 9,
.atclk_div = 15
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_SOCKET3,
.manufacturer = "AMD",
.name = "Am5x86",
.internal_name = "am5x86",
.cpus = (const CPU[]) {
{
.name = "133 (P75)",
.cpu_type = CPU_ENH_Am486DX,
.fpus = fpus_internal,
.rspeed = 133333333,
.multi = 4.0,
.voltage = 3450,
.edx_reset = 0x4e0,
.cpuid_model = 0x4e0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 24,
.mem_write_cycles = 24,
.cache_read_cycles = 12,
.cache_write_cycles = 12,
.atclk_div = 16
},
{ /*The rare P75+ was indeed a triple-clocked 150 MHz according to research*/
.name = "150 (P75+)",
.cpu_type = CPU_ENH_Am486DX,
.fpus = fpus_internal,
.rspeed = 150000000,
.multi = 3.0,
.voltage = 3450,
.edx_reset = 0x482,
.cpuid_model = 0x482,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 28,
.mem_write_cycles = 28,
.cache_read_cycles = 12,
.cache_write_cycles = 12,
.atclk_div = 20
},
{ /*160 MHz on a 40 MHz bus was a common overclock and "5x86/P90" was used by a number of BIOSes to refer to that configuration*/
.name = "160 (P90)",
.cpu_type = CPU_ENH_Am486DX,
.fpus = fpus_internal,
.rspeed = 160000000,
.multi = 4.0,
.voltage = 3450,
.edx_reset = 0x4e0,
.cpuid_model = 0x4e0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 28,
.mem_write_cycles = 28,
.cache_read_cycles = 12,
.cache_write_cycles = 12,
.atclk_div = 20
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_SOCKET1,
.manufacturer = "Cyrix",
.name = "Cx486S",
.internal_name = "cx486s",
.cpus = (const CPU[]) {
{
.name = "25",
.cpu_type = CPU_Cx486S,
.fpus = fpus_486sx,
.rspeed = 25000000,
.multi = 1.0,
.voltage = 5000,
.edx_reset = 0x420,
.cpuid_model = 0,
.cyrix_id = 0x0010,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 4,
.mem_write_cycles = 4,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 3
},
{
.name = "33",
.cpu_type = CPU_Cx486S,
.fpus = fpus_486sx,
.rspeed = 33333333,
.multi = 1.0,
.voltage = 5000,
.edx_reset = 0x420,
.cpuid_model = 0,
.cyrix_id = 0x0010,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 6,
.mem_write_cycles = 6,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 4
},
{
.name = "40",
.cpu_type = CPU_Cx486S,
.fpus = fpus_486sx,
.rspeed = 40000000,
.multi = 1.0,
.voltage = 5000,
.edx_reset = 0x420,
.cpuid_model = 0,
.cyrix_id = 0x0010,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 7,
.mem_write_cycles = 7,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 5
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_SOCKET1,
.manufacturer = "Cyrix",
.name = "Cx486DX",
.internal_name = "cx486dx",
.cpus = (const CPU[]) {
{
.name = "33",
.cpu_type = CPU_Cx486DX,
.fpus = fpus_internal,
.rspeed = 33333333,
.multi = 1.0,
.voltage = 5000,
.edx_reset = 0x430,
.cpuid_model = 0,
.cyrix_id = 0x051a,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 6,
.mem_write_cycles = 6,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 4
},
{
.name = "40",
.cpu_type = CPU_Cx486DX,
.fpus = fpus_internal,
.rspeed = 40000000,
.multi = 1.0,
.voltage = 5000,
.edx_reset = 0x430,
.cpuid_model = 0,
.cyrix_id = 0x051a,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 7,
.mem_write_cycles = 7,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 5
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_SOCKET1,
.manufacturer = "Cyrix",
.name = "Cx486DX2",
.internal_name = "cx486dx2",
.cpus = (const CPU[]) {
{
.name = "50",
.cpu_type = CPU_Cx486DX,
.fpus = fpus_internal,
.rspeed = 50000000,
.multi = 2.0,
.voltage = 5000,
.edx_reset = 0x430,
.cpuid_model = 0,
.cyrix_id = 0x081b,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 8,
.mem_write_cycles = 8,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 6
},
{
.name = "66",
.cpu_type = CPU_Cx486DX,
.fpus = fpus_internal,
.rspeed = 66666666,
.multi = 2.0,
.voltage = 5000,
.edx_reset = 0x430,
.cpuid_model = 0,
.cyrix_id = 0x0b1b,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 12,
.mem_write_cycles = 12,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 8
},
{
.name = "80",
.cpu_type = CPU_Cx486DX,
.fpus = fpus_internal,
.rspeed = 80000000,
.multi = 2.0,
.voltage = 5000,
.edx_reset = 0x430,
.cpuid_model = 0,
.cyrix_id = 0x311b,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 14,
.mem_write_cycles = 14,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 10
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_SOCKET3,
.manufacturer = "Cyrix",
.name = "Cx486DX4",
.internal_name = "cx486dx4",
.cpus = (const CPU[]) {
{
.name = "75",
.cpu_type = CPU_Cx486DX,
.fpus = fpus_internal,
.rspeed = 75000000,
.multi = 3.0,
.voltage = 5000,
.edx_reset = 0x480,
.cpuid_model = 0,
.cyrix_id = 0x361f,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 12,
.mem_write_cycles = 12,
.cache_read_cycles = 9,
.cache_write_cycles = 9,
.atclk_div = 9
},
{
.name = "100",
.cpu_type = CPU_Cx486DX,
.fpus = fpus_internal,
.rspeed = 100000000,
.multi = 3.0,
.voltage = 5000,
.edx_reset = 0x480,
.cpuid_model = 0,
.cyrix_id = 0x361f,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 15,
.mem_write_cycles = 15,
.cache_read_cycles = 9,
.cache_write_cycles = 9,
.atclk_div = 12
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_SOCKET3,
.manufacturer = "Cyrix",
.name = "Cx5x86",
.internal_name = "cx5x86",
.cpus = (const CPU[]) {
{ /*If we're including the Pentium 50, might as well include this*/
.name = "80",
.cpu_type = CPU_Cx5x86,
.fpus = fpus_internal,
.rspeed = 80000000,
.multi = 2.0,
.voltage = 3450,
.edx_reset = 0x480,
.cpuid_model = 0,
.cyrix_id = 0x002f,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 14,
.mem_write_cycles = 14,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 10
},
{
.name = "100",
.cpu_type = CPU_Cx5x86,
.fpus = fpus_internal,
.rspeed = 100000000,
.multi = 3.0,
.voltage = 3450,
.edx_reset = 0x480,
.cpuid_model = 0,
.cyrix_id = 0x002f,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 15,
.mem_write_cycles = 15,
.cache_read_cycles = 9,
.cache_write_cycles = 9,
.atclk_div = 12
},
{
.name = "120",
.cpu_type = CPU_Cx5x86,
.fpus = fpus_internal,
.rspeed = 120000000,
.multi = 3.0,
.voltage = 3450,
.edx_reset = 0x480,
.cpuid_model = 0,
.cyrix_id = 0x002f,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 21,
.mem_write_cycles = 21,
.cache_read_cycles = 9,
.cache_write_cycles = 9,
.atclk_div = 15
},
{
.name = "133",
.cpu_type = CPU_Cx5x86,
.fpus = fpus_internal,
.rspeed = 133333333,
.multi = 4.0,
.voltage = 3450,
.edx_reset = 0x480,
.cpuid_model = 0,
.cyrix_id = 0x002f,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 24,
.mem_write_cycles = 24,
.cache_read_cycles = 12,
.cache_write_cycles = 12,
.atclk_div = 16
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_STPC,
.manufacturer = "ST",
.name = "STPC-DX",
.internal_name = "stpc_dx",
.cpus = (const CPU[]) {
{
.name = "66",
.cpu_type = CPU_STPC,
.fpus = fpus_internal,
.rspeed = 66666666,
.multi = 1.0,
.voltage = 3300,
.edx_reset = 0x430,
.cpuid_model = 0,
.cyrix_id = 0x051a,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 7,
.mem_write_cycles = 7,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 5
},
{
.name = "75",
.cpu_type = CPU_STPC,
.fpus = fpus_internal,
.rspeed = 75000000,
.multi = 1.0,
.voltage = 3300,
.edx_reset = 0x430,
.cpuid_model = 0,
.cyrix_id = 0x051a,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 7,
.mem_write_cycles = 7,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 5
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_STPC,
.manufacturer = "ST",
.name = "STPC-DX2",
.internal_name = "stpc_dx2",
.cpus = (const CPU[]) {
{
.name = "133",
.cpu_type = CPU_STPC,
.fpus = fpus_internal,
.rspeed = 133333333,
.multi = 2.0,
.voltage = 3300,
.edx_reset = 0x430,
.cpuid_model = 0,
.cyrix_id = 0x0b1b,
.cpu_flags = CPU_SUPPORTS_DYNAREC,
.mem_read_cycles = 14,
.mem_write_cycles = 14,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 10
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_SOCKET4,
.manufacturer = "Intel",
.name = "Pentium",
.internal_name = "pentium_p5",
.cpus = (const CPU[]) {
{
.name = "50 (Q0399)",
.cpu_type = CPU_PENTIUM,
.fpus = fpus_internal,
.rspeed = 50000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x513,
.cpuid_model = 0x513,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 4,
.mem_write_cycles = 4,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 6
},
{
.name = "60",
.cpu_type = CPU_PENTIUM,
.fpus = fpus_internal,
.rspeed = 60000000,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x517,
.cpuid_model = 0x517,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 6,
.mem_write_cycles = 6,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 7
},
{
.name = "66",
.cpu_type = CPU_PENTIUM,
.fpus = fpus_internal,
.rspeed = 66666666,
.multi = 1,
.voltage = 5000,
.edx_reset = 0x517,
.cpuid_model = 0x517,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 6,
.mem_write_cycles = 6,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 8
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_SOCKET4,
.manufacturer = "Intel",
.name = "Pentium OverDrive",
.internal_name = "pentium_p54c_od5v",
.cpus = (const CPU[]) {
{
.name = "100",
.cpu_type = CPU_PENTIUM,
.fpus = fpus_internal,
.rspeed = 100000000,
.multi = 2,
.voltage = 5000,
.edx_reset = 0x51A,
.cpuid_model = 0x51A,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 8,
.mem_write_cycles = 8,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 12
},
{
.name = "120",
.cpu_type = CPU_PENTIUM,
.fpus = fpus_internal,
.rspeed = 120000000,
.multi = 2,
.voltage = 5000,
.edx_reset = 0x51A,
.cpuid_model = 0x51A,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 12,
.mem_write_cycles = 12,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 14
},
{
.name = "133",
.cpu_type = CPU_PENTIUM,
.fpus = fpus_internal,
.rspeed = 133333333,
.multi = 2,
.voltage = 5000,
.edx_reset = 0x51A,
.cpuid_model = 0x51A,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 12,
.mem_write_cycles = 12,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 16
},
{ .name = "", 0 }
}
},
    { /* Intel Pentium (P54C), Socket 5/7: 75-200 MHz, 3.52 V parts; edx_reset/cpuid 0x52x. */
        .package = CPU_PKG_SOCKET5_7,
        .manufacturer = "Intel",
        .name = "Pentium",
        .internal_name = "pentium_p54c",
        .cpus = (const CPU[]) {
            {
                .name = "75",
                .cpu_type = CPU_PENTIUM,
                .fpus = fpus_internal,
                .rspeed = 75000000,
                .multi = 1.5,
                .voltage = 3520,
                .edx_reset = 0x522,
                .cpuid_model = 0x522,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 7,
                .mem_write_cycles = 7,
                .cache_read_cycles = 4,
                .cache_write_cycles = 4,
                .atclk_div = 9
            },
            {
                .name = "90",
                .cpu_type = CPU_PENTIUM,
                .fpus = fpus_internal,
                .rspeed = 90000000,
                .multi = 1.5,
                .voltage = 3520,
                .edx_reset = 0x524,
                .cpuid_model = 0x524,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 9,
                .mem_write_cycles = 9,
                .cache_read_cycles = 4,
                .cache_write_cycles = 4,
                .atclk_div = 21/2 /* non-integer divider expressed as a fraction */
            },
            { /* 100 MHz on a 50 MHz bus (2.0x) */
                .name = "100/50",
                .cpu_type = CPU_PENTIUM,
                .fpus = fpus_internal,
                .rspeed = 100000000,
                .multi = 2.0,
                .voltage = 3520,
                .edx_reset = 0x524,
                .cpuid_model = 0x524,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 10,
                .mem_write_cycles = 10,
                .cache_read_cycles = 6,
                .cache_write_cycles = 6,
                .atclk_div = 12
            },
            { /* 100 MHz on a 66 MHz bus (1.5x) */
                .name = "100/66",
                .cpu_type = CPU_PENTIUM,
                .fpus = fpus_internal,
                .rspeed = 100000000,
                .multi = 1.5,
                .voltage = 3520,
                .edx_reset = 0x526,
                .cpuid_model = 0x526,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 9,
                .mem_write_cycles = 9,
                .cache_read_cycles = 4,
                .cache_write_cycles = 4,
                .atclk_div = 12
            },
            {
                .name = "120",
                .cpu_type = CPU_PENTIUM,
                .fpus = fpus_internal,
                .rspeed = 120000000,
                .multi = 2.0,
                .voltage = 3520,
                .edx_reset = 0x526,
                .cpuid_model = 0x526,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 12,
                .mem_write_cycles = 12,
                .cache_read_cycles = 6,
                .cache_write_cycles = 6,
                .atclk_div = 14
            },
            {
                .name = "133",
                .cpu_type = CPU_PENTIUM,
                .fpus = fpus_internal,
                .rspeed = 133333333,
                .multi = 2.0,
                .voltage = 3520,
                .edx_reset = 0x52c,
                .cpuid_model = 0x52c,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 12,
                .mem_write_cycles = 12,
                .cache_read_cycles = 6,
                .cache_write_cycles = 6,
                .atclk_div = 16
            },
            {
                .name = "150",
                .cpu_type = CPU_PENTIUM,
                .fpus = fpus_internal,
                .rspeed = 150000000,
                .multi = 2.5,
                .voltage = 3520,
                .edx_reset = 0x52c,
                .cpuid_model = 0x52c,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 15,
                .mem_write_cycles = 15,
                .cache_read_cycles = 7,
                .cache_write_cycles = 7,
                .atclk_div = 35/2
            },
            {
                .name = "166",
                .cpu_type = CPU_PENTIUM,
                .fpus = fpus_internal,
                .rspeed = 166666666,
                .multi = 2.5,
                .voltage = 3520,
                .edx_reset = 0x52c,
                .cpuid_model = 0x52c,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 15,
                .mem_write_cycles = 15,
                .cache_read_cycles = 7,
                .cache_write_cycles = 7,
                .atclk_div = 20
            },
            {
                .name = "200",
                .cpu_type = CPU_PENTIUM,
                .fpus = fpus_internal,
                .rspeed = 200000000,
                .multi = 3.0,
                .voltage = 3520,
                .edx_reset = 0x52c,
                .cpuid_model = 0x52c,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 18,
                .mem_write_cycles = 18,
                .cache_read_cycles = 9,
                .cache_write_cycles = 9,
                .atclk_div = 24
            },
            { .name = "", 0 } /* list terminator */
        }
    },
    { /* Intel Pentium MMX (P55C), Socket 5/7: 166-233 MHz at 2.8 V, edx_reset/cpuid 0x543. */
        .package = CPU_PKG_SOCKET5_7,
        .manufacturer = "Intel",
        .name = "Pentium MMX",
        .internal_name = "pentium_p55c",
        .cpus = (const CPU[]) {
            {
                .name = "166",
                .cpu_type = CPU_PENTIUMMMX,
                .fpus = fpus_internal,
                .rspeed = 166666666,
                .multi = 2.5,
                .voltage = 2800,
                .edx_reset = 0x543,
                .cpuid_model = 0x543,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 15,
                .mem_write_cycles = 15,
                .cache_read_cycles = 7,
                .cache_write_cycles = 7,
                .atclk_div = 20
            },
            {
                .name = "200",
                .cpu_type = CPU_PENTIUMMMX,
                .fpus = fpus_internal,
                .rspeed = 200000000,
                .multi = 3.0,
                .voltage = 2800,
                .edx_reset = 0x543,
                .cpuid_model = 0x543,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 18,
                .mem_write_cycles = 18,
                .cache_read_cycles = 9,
                .cache_write_cycles = 9,
                .atclk_div = 24
            },
            {
                .name = "233",
                .cpu_type = CPU_PENTIUMMMX,
                .fpus = fpus_internal,
                .rspeed = 233333333,
                .multi = 3.5,
                .voltage = 2800,
                .edx_reset = 0x543,
                .cpuid_model = 0x543,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 21,
                .mem_write_cycles = 21,
                .cache_read_cycles = 10,
                .cache_write_cycles = 10,
                .atclk_div = 28
            },
            { .name = "", 0 } /* list terminator */
        }
    },
    { /* Intel Mobile Pentium MMX ("Tillamook"), Socket 5/7: 120-300 MHz at 2.8 V.
         Note the edx_reset/cpuid stepping rises through the list (0x543/0x544/0x581/0x582). */
        .package = CPU_PKG_SOCKET5_7,
        .manufacturer = "Intel",
        .name = "Mobile Pentium MMX",
        .internal_name = "pentium_tillamook",
        .cpus = (const CPU[]) {
            {
                .name = "120",
                .cpu_type = CPU_PENTIUMMMX,
                .fpus = fpus_internal,
                .rspeed = 120000000,
                .multi = 2.0,
                .voltage = 2800,
                .edx_reset = 0x543,
                .cpuid_model = 0x543,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 12,
                .mem_write_cycles = 12,
                .cache_read_cycles = 6,
                .cache_write_cycles = 6,
                .atclk_div = 14
            },
            {
                .name = "133",
                .cpu_type = CPU_PENTIUMMMX,
                .fpus = fpus_internal,
                .rspeed = 133333333,
                .multi = 2.0,
                .voltage = 2800,
                .edx_reset = 0x543,
                .cpuid_model = 0x543,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 12,
                .mem_write_cycles = 12,
                .cache_read_cycles = 6,
                .cache_write_cycles = 6,
                .atclk_div = 16
            },
            {
                .name = "150",
                .cpu_type = CPU_PENTIUMMMX,
                .fpus = fpus_internal,
                .rspeed = 150000000,
                .multi = 2.5,
                .voltage = 2800,
                .edx_reset = 0x544,
                .cpuid_model = 0x544,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 15,
                .mem_write_cycles = 15,
                .cache_read_cycles = 7,
                .cache_write_cycles = 7,
                .atclk_div = 35/2
            },
            {
                .name = "166",
                .cpu_type = CPU_PENTIUMMMX,
                .fpus = fpus_internal,
                .rspeed = 166666666,
                .multi = 2.5,
                .voltage = 2800,
                .edx_reset = 0x544,
                .cpuid_model = 0x544,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 15,
                .mem_write_cycles = 15,
                .cache_read_cycles = 7,
                .cache_write_cycles = 7,
                .atclk_div = 20
            },
            {
                .name = "200",
                .cpu_type = CPU_PENTIUMMMX,
                .fpus = fpus_internal,
                .rspeed = 200000000,
                .multi = 3.0,
                .voltage = 2800,
                .edx_reset = 0x581,
                .cpuid_model = 0x581,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 18,
                .mem_write_cycles = 18,
                .cache_read_cycles = 9,
                .cache_write_cycles = 9,
                .atclk_div = 24
            },
            {
                .name = "233",
                .cpu_type = CPU_PENTIUMMMX,
                .fpus = fpus_internal,
                .rspeed = 233333333,
                .multi = 3.5,
                .voltage = 2800,
                .edx_reset = 0x581,
                .cpuid_model = 0x581,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 21,
                .mem_write_cycles = 21,
                .cache_read_cycles = 10,
                .cache_write_cycles = 10,
                .atclk_div = 28
            },
            {
                .name = "266",
                .cpu_type = CPU_PENTIUMMMX,
                .fpus = fpus_internal,
                .rspeed = 266666666,
                .multi = 4.0,
                .voltage = 2800,
                .edx_reset = 0x582,
                .cpuid_model = 0x582,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 24,
                .mem_write_cycles = 24,
                .cache_read_cycles = 12,
                .cache_write_cycles = 12,
                .atclk_div = 32
            },
            {
                .name = "300",
                .cpu_type = CPU_PENTIUMMMX,
                .fpus = fpus_internal,
                .rspeed = 300000000,
                .multi = 4.5,
                .voltage = 2800,
                .edx_reset = 0x582,
                .cpuid_model = 0x582,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 27,
                .mem_write_cycles = 27,
                .cache_read_cycles = 13,
                .cache_write_cycles = 13,
                .atclk_div = 36
            },
            { .name = "", 0 } /* list terminator */
        }
    },
    { /* Intel Pentium OverDrive (P54C core, 3.3 V class), Socket 5/7.
         Upgrade parts: multiplier is locked (CPU_FIXED_MULTIPLIER). */
        .package = CPU_PKG_SOCKET5_7,
        .manufacturer = "Intel",
        .name = "Pentium OverDrive",
        .internal_name = "pentium_p54c_od3v",
        .cpus = (const CPU[]) {
            {
                .name = "125",
                .cpu_type = CPU_PENTIUM,
                .fpus = fpus_internal,
                .rspeed = 125000000,
                .multi = 3.0,
                .voltage = 3520,
                .edx_reset = 0x52c,
                .cpuid_model = 0x52c,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC | CPU_FIXED_MULTIPLIER,
                .mem_read_cycles = 12,
                .mem_write_cycles = 12,
                .cache_read_cycles = 7,
                .cache_write_cycles = 7,
                .atclk_div = 15
            },
            {
                .name = "150",
                .cpu_type = CPU_PENTIUM,
                .fpus = fpus_internal,
                .rspeed = 150000000,
                .multi = 2.5,
                .voltage = 3520,
                .edx_reset = 0x52c,
                .cpuid_model = 0x52c,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC | CPU_FIXED_MULTIPLIER,
                .mem_read_cycles = 15,
                .mem_write_cycles = 15,
                .cache_read_cycles = 7,
                .cache_write_cycles = 7,
                .atclk_div = 35/2
            },
            {
                .name = "166",
                .cpu_type = CPU_PENTIUM,
                .fpus = fpus_internal,
                .rspeed = 166666666,
                .multi = 2.5,
                .voltage = 3520,
                .edx_reset = 0x52c,
                .cpuid_model = 0x52c,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC | CPU_FIXED_MULTIPLIER,
                .mem_read_cycles = 15,
                .mem_write_cycles = 15,
                .cache_read_cycles = 7,
                .cache_write_cycles = 7,
                .atclk_div = 20
            },
            { .name = "", 0 } /* list terminator */
        }
    },
    { /* Intel Pentium OverDrive MMX (P55C core), Socket 5/7 upgrade parts.
         Locked multiplier; distinctive edx_reset/cpuid 0x1542 (OverDrive type bit set). */
        .package = CPU_PKG_SOCKET5_7,
        .manufacturer = "Intel",
        .name = "Pentium OverDrive MMX",
        .internal_name = "pentium_p55c_od",
        .cpus = (const CPU[]) {
            {
                .name = "75",
                .cpu_type = CPU_PENTIUMMMX,
                .fpus = fpus_internal,
                .rspeed = 75000000,
                .multi = 1.5,
                .voltage = 3520,
                .edx_reset = 0x1542,
                .cpuid_model = 0x1542,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC | CPU_FIXED_MULTIPLIER,
                .mem_read_cycles = 7,
                .mem_write_cycles = 7,
                .cache_read_cycles = 4,
                .cache_write_cycles = 4,
                .atclk_div = 9
            },
            {
                .name = "125",
                .cpu_type = CPU_PENTIUMMMX,
                .fpus = fpus_internal,
                .rspeed = 125000000,
                .multi = 2.5,
                .voltage = 3520,
                .edx_reset = 0x1542,
                .cpuid_model = 0x1542,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC | CPU_FIXED_MULTIPLIER,
                .mem_read_cycles = 12,
                .mem_write_cycles = 12,
                .cache_read_cycles = 7,
                .cache_write_cycles = 7,
                .atclk_div = 15
            },
            { /* 150 MHz on a 60 MHz bus */
                .name = "150/60",
                .cpu_type = CPU_PENTIUMMMX,
                .fpus = fpus_internal,
                .rspeed = 150000000,
                .multi = 2.5,
                .voltage = 3520,
                .edx_reset = 0x1542,
                .cpuid_model = 0x1542,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC | CPU_FIXED_MULTIPLIER,
                .mem_read_cycles = 15,
                .mem_write_cycles = 15,
                .cache_read_cycles = 7,
                .cache_write_cycles = 7,
                .atclk_div = 35/2
            },
            {
                .name = "166",
                .cpu_type = CPU_PENTIUMMMX,
                .fpus = fpus_internal,
                .rspeed = 166000000, /* NOTE(review): other "166" entries in this table use 166666666 — confirm this is intentional */
                .multi = 2.5,
                .voltage = 3520,
                .edx_reset = 0x1542,
                .cpuid_model = 0x1542,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC | CPU_FIXED_MULTIPLIER,
                .mem_read_cycles = 15,
                .mem_write_cycles = 15,
                .cache_read_cycles = 7,
                .cache_write_cycles = 7,
                .atclk_div = 20
            },
            {
                .name = "180",
                .cpu_type = CPU_PENTIUMMMX,
                .fpus = fpus_internal,
                .rspeed = 180000000,
                .multi = 3.0,
                .voltage = 3520,
                .edx_reset = 0x1542,
                .cpuid_model = 0x1542,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC | CPU_FIXED_MULTIPLIER,
                .mem_read_cycles = 18,
                .mem_write_cycles = 18,
                .cache_read_cycles = 9,
                .cache_write_cycles = 9,
                .atclk_div = 21
            },
            {
                .name = "200",
                .cpu_type = CPU_PENTIUMMMX,
                .fpus = fpus_internal,
                .rspeed = 200000000,
                .multi = 3.0,
                .voltage = 3520,
                .edx_reset = 0x1542,
                .cpuid_model = 0x1542,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC | CPU_FIXED_MULTIPLIER,
                .mem_read_cycles = 18,
                .mem_write_cycles = 18,
                .cache_read_cycles = 9,
                .cache_write_cycles = 9,
                .atclk_div = 24
            },
            { .name = "", 0 } /* list terminator */
        }
    },
    { /* IDT WinChip (C6), Socket 5/7: 75-240 MHz.
         Only CPU_SUPPORTS_DYNAREC (no CPU_REQUIRES_DYNAREC), unlike the Intel entries above. */
        .package = CPU_PKG_SOCKET5_7,
        .manufacturer = "IDT",
        .name = "WinChip",
        .internal_name = "winchip",
        .cpus = (const CPU[]) {
            {
                .name = "75",
                .cpu_type = CPU_WINCHIP,
                .fpus = fpus_internal,
                .rspeed = 75000000,
                .multi = 1.5,
                .voltage = 3520,
                .edx_reset = 0x540,
                .cpuid_model = 0x540,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC,
                .mem_read_cycles = 8,
                .mem_write_cycles = 8,
                .cache_read_cycles = 4,
                .cache_write_cycles = 4,
                .atclk_div = 9
            },
            {
                .name = "90",
                .cpu_type = CPU_WINCHIP,
                .fpus = fpus_internal,
                .rspeed = 90000000,
                .multi = 1.5,
                .voltage = 3520,
                .edx_reset = 0x540,
                .cpuid_model = 0x540,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC,
                .mem_read_cycles = 9,
                .mem_write_cycles = 9,
                .cache_read_cycles = 4,
                .cache_write_cycles = 4,
                .atclk_div = 21/2
            },
            {
                .name = "100",
                .cpu_type = CPU_WINCHIP,
                .fpus = fpus_internal,
                .rspeed = 100000000,
                .multi = 1.5,
                .voltage = 3520,
                .edx_reset = 0x540,
                .cpuid_model = 0x540,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC,
                .mem_read_cycles = 9,
                .mem_write_cycles = 9,
                .cache_read_cycles = 4,
                .cache_write_cycles = 4,
                .atclk_div = 12
            },
            {
                .name = "120",
                .cpu_type = CPU_WINCHIP,
                .fpus = fpus_internal,
                .rspeed = 120000000,
                .multi = 2.0,
                .voltage = 3520,
                .edx_reset = 0x540,
                .cpuid_model = 0x540,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC,
                .mem_read_cycles = 12,
                .mem_write_cycles = 12,
                .cache_read_cycles = 6,
                .cache_write_cycles = 6,
                .atclk_div = 14
            },
            {
                .name = "133",
                .cpu_type = CPU_WINCHIP,
                .fpus = fpus_internal,
                .rspeed = 133333333,
                .multi = 2.0,
                .voltage = 3520,
                .edx_reset = 0x540,
                .cpuid_model = 0x540,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC,
                .mem_read_cycles = 12,
                .mem_write_cycles = 12,
                .cache_read_cycles = 6,
                .cache_write_cycles = 6,
                .atclk_div = 16
            },
            {
                .name = "150",
                .cpu_type = CPU_WINCHIP,
                .fpus = fpus_internal,
                .rspeed = 150000000,
                .multi = 2.5,
                .voltage = 3520,
                .edx_reset = 0x540,
                .cpuid_model = 0x540,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC,
                .mem_read_cycles = 15,
                .mem_write_cycles = 15,
                .cache_read_cycles = 7,
                .cache_write_cycles = 7,
                .atclk_div = 35/2
            },
            {
                .name = "166",
                .cpu_type = CPU_WINCHIP,
                .fpus = fpus_internal,
                .rspeed = 166666666,
                .multi = 2.5,
                .voltage = 3520,
                .edx_reset = 0x540,
                .cpuid_model = 0x540,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC,
                .mem_read_cycles = 15,
                .mem_write_cycles = 15,
                .cache_read_cycles = 7,
                .cache_write_cycles = 7,
                .atclk_div = 40 /* NOTE(review): sibling entries follow an ~rspeed/8.33MHz pattern (166 -> 20); confirm 40 is intentional */
            },
            {
                .name = "180",
                .cpu_type = CPU_WINCHIP,
                .fpus = fpus_internal,
                .rspeed = 180000000,
                .multi = 3.0,
                .voltage = 3520,
                .edx_reset = 0x540,
                .cpuid_model = 0x540,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC,
                .mem_read_cycles = 18,
                .mem_write_cycles = 18,
                .cache_read_cycles = 9,
                .cache_write_cycles = 9,
                .atclk_div = 21
            },
            {
                .name = "200",
                .cpu_type = CPU_WINCHIP,
                .fpus = fpus_internal,
                .rspeed = 200000000,
                .multi = 3.0,
                .voltage = 3520,
                .edx_reset = 0x540,
                .cpuid_model = 0x540,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC,
                .mem_read_cycles = 18,
                .mem_write_cycles = 18,
                .cache_read_cycles = 9,
                .cache_write_cycles = 9,
                .atclk_div = 24
            },
            {
                .name = "225",
                .cpu_type = CPU_WINCHIP,
                .fpus = fpus_internal,
                .rspeed = 225000000,
                .multi = 3.0,
                .voltage = 3520,
                .edx_reset = 0x540,
                .cpuid_model = 0x540,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC,
                .mem_read_cycles = 18,
                .mem_write_cycles = 18,
                .cache_read_cycles = 9,
                .cache_write_cycles = 9,
                .atclk_div = 27
            },
            {
                .name = "240",
                .cpu_type = CPU_WINCHIP,
                .fpus = fpus_internal,
                .rspeed = 240000000,
                .multi = 4.0,
                .voltage = 3520,
                .edx_reset = 0x540,
                .cpuid_model = 0x540,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC,
                .mem_read_cycles = 24,
                .mem_write_cycles = 24,
                .cache_read_cycles = 12,
                .cache_write_cycles = 12,
                .atclk_div = 28
            },
            { .name = "", 0 } /* list terminator */
        }
    },
    { /* IDT WinChip 2, Socket 5/7: 200-250 MHz, edx_reset/cpuid 0x580. */
        .package = CPU_PKG_SOCKET5_7,
        .manufacturer = "IDT",
        .name = "WinChip 2",
        .internal_name = "winchip2",
        .cpus = (const CPU[]) {
            {
                .name = "200",
                .cpu_type = CPU_WINCHIP2,
                .fpus = fpus_internal,
                .rspeed = 200000000,
                .multi = 3.0,
                .voltage = 3520,
                .edx_reset = 0x580,
                .cpuid_model = 0x580,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC,
                .mem_read_cycles = 18,
                .mem_write_cycles = 18,
                .cache_read_cycles = 9,
                .cache_write_cycles = 9,
                .atclk_div = 3*8 /* written as a product; value 24 */
            },
            {
                .name = "225",
                .cpu_type = CPU_WINCHIP2,
                .fpus = fpus_internal,
                .rspeed = 225000000,
                .multi = 3.0,
                .voltage = 3520,
                .edx_reset = 0x580,
                .cpuid_model = 0x580,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC,
                .mem_read_cycles = 18,
                .mem_write_cycles = 18,
                .cache_read_cycles = 9,
                .cache_write_cycles = 9,
                .atclk_div = 3*9 /* value 27 */
            },
            {
                .name = "240",
                .cpu_type = CPU_WINCHIP2,
                .fpus = fpus_internal,
                .rspeed = 240000000,
                .multi = 4.0,
                .voltage = 3520,
                .edx_reset = 0x580,
                .cpuid_model = 0x580,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC,
                .mem_read_cycles = 24,
                .mem_write_cycles = 24,
                .cache_read_cycles = 12,
                .cache_write_cycles = 12,
                .atclk_div = 30
            },
            {
                .name = "250",
                .cpu_type = CPU_WINCHIP2,
                .fpus = fpus_internal,
                .rspeed = 250000000,
                .multi = 3.0,
                .voltage = 3520,
                .edx_reset = 0x580,
                .cpuid_model = 0x580,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC,
                .mem_read_cycles = 24,
                .mem_write_cycles = 24,
                .cache_read_cycles = 12,
                .cache_write_cycles = 12,
                .atclk_div = 30
            },
            { .name = "", 0 } /* list terminator */
        }
    },
    { /* IDT WinChip 2A, Socket 5/7, edx_reset/cpuid 0x587.
         The "266" and "300" names exceed their rspeed values — presumably
         performance-rated model names rather than actual clocks; verify against hardware docs. */
        .package = CPU_PKG_SOCKET5_7,
        .manufacturer = "IDT",
        .name = "WinChip 2A",
        .internal_name = "winchip2a",
        .cpus = (const CPU[]) {
            {
                .name = "200",
                .cpu_type = CPU_WINCHIP2,
                .fpus = fpus_internal,
                .rspeed = 200000000,
                .multi = 3.0,
                .voltage = 3520,
                .edx_reset = 0x587,
                .cpuid_model = 0x587,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC,
                .mem_read_cycles = 18,
                .mem_write_cycles = 18,
                .cache_read_cycles = 9,
                .cache_write_cycles = 9,
                .atclk_div = 3*8
            },
            {
                .name = "233",
                .cpu_type = CPU_WINCHIP2,
                .fpus = fpus_internal,
                .rspeed = 233333333,
                .multi = 3.5,
                .voltage = 3520,
                .edx_reset = 0x587,
                .cpuid_model = 0x587,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC,
                .mem_read_cycles = 21,
                .mem_write_cycles = 21,
                .cache_read_cycles = 9,
                .cache_write_cycles = 9,
                .atclk_div = (7*8)/2 /* value 28 */
            },
            { /* runs at 233 MHz (7/3 multiplier on a 100 MHz bus) despite the "266" name */
                .name = "266",
                .cpu_type = CPU_WINCHIP2,
                .fpus = fpus_internal,
                .rspeed = 233333333,
                .multi = 7.0/3.0,
                .voltage = 3520,
                .edx_reset = 0x587,
                .cpuid_model = 0x587,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC,
                .mem_read_cycles = 21,
                .mem_write_cycles = 21,
                .cache_read_cycles = 7,
                .cache_write_cycles = 7,
                .atclk_div = 28
            },
            { /* runs at 250 MHz (2.5 x 100 MHz) despite the "300" name */
                .name = "300",
                .cpu_type = CPU_WINCHIP2,
                .fpus = fpus_internal,
                .rspeed = 250000000,
                .multi = 2.5,
                .voltage = 3520,
                .edx_reset = 0x587,
                .cpuid_model = 0x587,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC,
                .mem_read_cycles = 24,
                .mem_write_cycles = 24,
                .cache_read_cycles = 8,
                .cache_write_cycles = 8,
                .atclk_div = 30
            },
            { .name = "", 0 } /* list terminator */
        }
    },
#ifdef USE_AMD_K5
    { /* AMD K5 Model 0 (SSA/5), Socket 5/7. Names carry both clock and PR rating.
         Only built when USE_AMD_K5 is defined (see surrounding #ifdef). */
        .package = CPU_PKG_SOCKET5_7,
        .manufacturer = "AMD",
        .name = "K5 (Model 0)",
        .internal_name = "k5_ssa5",
        .cpus = (const CPU[]) {
            {
                .name = "75 (PR75)",
                .cpu_type = CPU_K5,
                .fpus = fpus_internal,
                .rspeed = 75000000,
                .multi = 1.5,
                .voltage = 3520,
                .edx_reset = 0x501,
                .cpuid_model = 0x501,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 7,
                .mem_write_cycles = 7,
                .cache_read_cycles = 4,
                .cache_write_cycles = 4,
                .atclk_div = 9
            },
            {
                .name = "90 (PR90)",
                .cpu_type = CPU_K5,
                .fpus = fpus_internal,
                .rspeed = 90000000,
                .multi = 1.5,
                .voltage = 3520,
                .edx_reset = 0x501,
                .cpuid_model = 0x501,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 9,
                .mem_write_cycles = 9,
                .cache_read_cycles = 4,
                .cache_write_cycles = 4,
                .atclk_div = 21/2
            },
            {
                .name = "100 (PR100)",
                .cpu_type = CPU_K5,
                .fpus = fpus_internal,
                .rspeed = 100000000,
                .multi = 1.5,
                .voltage = 3520,
                .edx_reset = 0x501,
                .cpuid_model = 0x501,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 9,
                .mem_write_cycles = 9,
                .cache_read_cycles = 4,
                .cache_write_cycles = 4,
                .atclk_div = 12
            },
            { .name = "", 0 } /* list terminator */
        }
    },
    { /* AMD K5 Model 1/2/3 (5k86), Socket 5/7. Names list actual clock and PR rating;
         rspeed corresponds to the PR number, not the leading clock figure.
         Only built when USE_AMD_K5 is defined. */
        .package = CPU_PKG_SOCKET5_7,
        .manufacturer = "AMD",
        .name = "K5 (Model 1/2/3)",
        .internal_name = "k5_5k86",
        .cpus = (const CPU[]) {
            {
                .name = "90 (PR120)",
                .cpu_type = CPU_5K86,
                .fpus = fpus_internal,
                .rspeed = 120000000,
                .multi = 2.0,
                .voltage = 3520,
                .edx_reset = 0x511,
                .cpuid_model = 0x511,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 12,
                .mem_write_cycles = 12,
                .cache_read_cycles = 6,
                .cache_write_cycles = 6,
                .atclk_div = 14
            },
            {
                .name = "100 (PR133)",
                .cpu_type = CPU_5K86,
                .fpus = fpus_internal,
                .rspeed = 133333333,
                .multi = 2.0,
                .voltage = 3520,
                .edx_reset = 0x514,
                .cpuid_model = 0x514,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 12,
                .mem_write_cycles = 12,
                .cache_read_cycles = 6,
                .cache_write_cycles = 6,
                .atclk_div = 16
            },
            {
                .name = "105 (PR150)",
                .cpu_type = CPU_5K86,
                .fpus = fpus_internal,
                .rspeed = 150000000,
                .multi = 2.5,
                .voltage = 3520,
                .edx_reset = 0x524,
                .cpuid_model = 0x524,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 15,
                .mem_write_cycles = 15,
                .cache_read_cycles = 7,
                .cache_write_cycles = 7,
                .atclk_div = 35/2
            },
            {
                .name = "116.7 (PR166)",
                .cpu_type = CPU_5K86,
                .fpus = fpus_internal,
                .rspeed = 166666666,
                .multi = 2.5,
                .voltage = 3520,
                .edx_reset = 0x524,
                .cpuid_model = 0x524,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 15,
                .mem_write_cycles = 15,
                .cache_read_cycles = 7,
                .cache_write_cycles = 7,
                .atclk_div = 20
            },
            {
                .name = "133 (PR200)",
                .cpu_type = CPU_5K86,
                .fpus = fpus_internal,
                .rspeed = 200000000,
                .multi = 3.0,
                .voltage = 3520,
                .edx_reset = 0x534,
                .cpuid_model = 0x534,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 18,
                .mem_write_cycles = 18,
                .cache_read_cycles = 9,
                .cache_write_cycles = 9,
                .atclk_div = 24
            },
            { .name = "", 0 } /* list terminator */
        }
    },
#endif /* USE_AMD_K5 */
    { /* AMD K6 Model 6, Socket 5/7: 166-233 MHz official (plus out-of-spec underclocks),
         edx_reset/cpuid 0x561. Note the 233 entry raises voltage to 3.2 V. */
        .package = CPU_PKG_SOCKET5_7,
        .manufacturer = "AMD",
        .name = "K6 (Model 6)",
        .internal_name = "k6_m6",
        .cpus = (const CPU[]) {
            { /* out of spec */
                .name = "66",
                .cpu_type = CPU_K6,
                .fpus = fpus_internal,
                .rspeed = 66666666,
                .multi = 1.0,
                .voltage = 2900,
                .edx_reset = 0x561,
                .cpuid_model = 0x561,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 6,
                .mem_write_cycles = 6,
                .cache_read_cycles = 3,
                .cache_write_cycles = 3,
                .atclk_div = 8
            },
            { /* out of spec */
                .name = "100",
                .cpu_type = CPU_K6,
                .fpus = fpus_internal,
                .rspeed = 100000000,
                .multi = 1.5,
                .voltage = 2900,
                .edx_reset = 0x561,
                .cpuid_model = 0x561,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 9,
                .mem_write_cycles = 9,
                .cache_read_cycles = 4,
                .cache_write_cycles = 4,
                .atclk_div = 12
            },
            { /* out of spec */
                .name = "133",
                .cpu_type = CPU_K6,
                .fpus = fpus_internal,
                .rspeed = 133333333,
                .multi = 2.0,
                .voltage = 2900,
                .edx_reset = 0x561,
                .cpuid_model = 0x561,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 12,
                .mem_write_cycles = 12,
                .cache_read_cycles = 6,
                .cache_write_cycles = 6,
                .atclk_div = 16
            },
            {
                .name = "166",
                .cpu_type = CPU_K6,
                .fpus = fpus_internal,
                .rspeed = 166666666,
                .multi = 2.5,
                .voltage = 2900,
                .edx_reset = 0x561,
                .cpuid_model = 0x561,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 15,
                .mem_write_cycles = 15,
                .cache_read_cycles = 7,
                .cache_write_cycles = 7,
                .atclk_div = 20
            },
            {
                .name = "200",
                .cpu_type = CPU_K6,
                .fpus = fpus_internal,
                .rspeed = 200000000,
                .multi = 3.0,
                .voltage = 2900,
                .edx_reset = 0x561,
                .cpuid_model = 0x561,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 18,
                .mem_write_cycles = 18,
                .cache_read_cycles = 9,
                .cache_write_cycles = 9,
                .atclk_div = 24
            },
            {
                .name = "233",
                .cpu_type = CPU_K6,
                .fpus = fpus_internal,
                .rspeed = 233333333,
                .multi = 3.5,
                .voltage = 3200,
                .edx_reset = 0x561,
                .cpuid_model = 0x561,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 21,
                .mem_write_cycles = 21,
                .cache_read_cycles = 10,
                .cache_write_cycles = 10,
                .atclk_div = 28
            },
            { .name = "", 0 } /* list terminator */
        }
}, {
        /* AMD K6 Model 7, Socket 5/7: 200-300 MHz official at 2.2 V, edx_reset/cpuid 0x570.
           Some entries pack several designators per line; kept as written. */
        .package = CPU_PKG_SOCKET5_7,
        .manufacturer = "AMD",
        .name = "K6 (Model 7)",
        .internal_name = "k6_m7",
        .cpus = (const CPU[]) {
            { /* out of spec */
                .name = "100",
                .cpu_type = CPU_K6,
                .fpus = fpus_internal,
                .rspeed = 100000000,
                .multi = 1.5,
                .voltage = 2200,
                .edx_reset = 0x570,
                .cpuid_model = 0x570,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 9,
                .mem_write_cycles = 9,
                .cache_read_cycles = 4,
                .cache_write_cycles = 4,
                .atclk_div = 12
            },
            { /* out of spec */
                .name = "133",
                .cpu_type = CPU_K6,
                .fpus = fpus_internal,
                .rspeed = 133333333,
                .multi = 2.0,
                .voltage = 2200,
                .edx_reset = 0x570,
                .cpuid_model = 0x570,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 12,
                .mem_write_cycles = 12,
                .cache_read_cycles = 6,
                .cache_write_cycles = 6,
                .atclk_div = 16
            },
            { /* out of spec */
                .name = "166",
                .cpu_type = CPU_K6,
                .fpus = fpus_internal,
                .rspeed = 166666666, .multi = 2.5,
                .voltage = 2200, .edx_reset = 0x570,
                .cpuid_model = 0x570, .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 15,
                .mem_write_cycles = 15,
                .cache_read_cycles = 7,
                .cache_write_cycles = 7,
                .atclk_div = 20
            },
            {
                .name = "200",
                .cpu_type = CPU_K6,
                .fpus = fpus_internal,
                .rspeed = 200000000, .multi = 3.0,
                .voltage = 2200, .edx_reset = 0x570,
                .cpuid_model = 0x570, .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 18,
                .mem_write_cycles = 18,
                .cache_read_cycles = 9,
                .cache_write_cycles = 9,
                .atclk_div = 24
            },
            {
                .name = "233",
                .cpu_type = CPU_K6,
                .fpus = fpus_internal,
                .rspeed = 233333333, .multi = 3.5,
                .voltage = 2200, .edx_reset = 0x570,
                .cpuid_model = 0x570,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 21,
                .mem_write_cycles = 21,
                .cache_read_cycles = 10,
                .cache_write_cycles = 10,
                .atclk_div = 28
            },
            {
                .name = "266",
                .cpu_type = CPU_K6,
                .fpus = fpus_internal,
                .rspeed = 266666666,
                .multi = 4.0,
                .voltage = 2200,
                .edx_reset = 0x570,
                .cpuid_model = 0x570,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 24,
                .mem_write_cycles = 24,
                .cache_read_cycles = 12,
                .cache_write_cycles = 12,
                .atclk_div = 32
            },
            {
                .name = "300",
                .cpu_type = CPU_K6,
                .fpus = fpus_internal,
                .rspeed = 300000000,
                .multi = 4.5,
                .voltage = 2200,
                .edx_reset = 0x570,
                .cpuid_model = 0x570,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 27,
                .mem_write_cycles = 27,
                .cache_read_cycles = 13,
                .cache_write_cycles = 13,
                .atclk_div = 36
            },
            { .name = "", 0 } /* list terminator */
        }
    },
    { /* AMD K6-2, Socket 5/7: out-of-spec underclocks plus 233-550 MHz parts.
         350 MHz and up switch to CPU_K6_2C with edx_reset/cpuid 0x58c (later stepping). */
        .package = CPU_PKG_SOCKET5_7,
        .manufacturer = "AMD",
        .name = "K6-2",
        .internal_name = "k6_2",
        .cpus = (const CPU[]) {
            { /* out of spec */
                .name = "100",
                .cpu_type = CPU_K6_2,
                .fpus = fpus_internal,
                .rspeed = 100000000,
                .multi = 1.5,
                .voltage = 2200,
                .edx_reset = 0x580,
                .cpuid_model = 0x580,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 9,
                .mem_write_cycles = 9,
                .cache_read_cycles = 4,
                .cache_write_cycles = 4,
                .atclk_div = 12
            },
            { /* out of spec */
                .name = "133",
                .cpu_type = CPU_K6_2,
                .fpus = fpus_internal,
                .rspeed = 133333333,
                .multi = 2.0,
                .voltage = 2200,
                .edx_reset = 0x580,
                .cpuid_model = 0x580,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 12,
                .mem_write_cycles = 12,
                .cache_read_cycles = 6,
                .cache_write_cycles = 6,
                .atclk_div = 16
            },
            { /* out of spec */
                .name = "166",
                .cpu_type = CPU_K6_2,
                .fpus = fpus_internal,
                .rspeed = 166666666,
                .multi = 2.5,
                .voltage = 2200,
                .edx_reset = 0x580,
                .cpuid_model = 0x580,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 15,
                .mem_write_cycles = 15,
                .cache_read_cycles = 7,
                .cache_write_cycles = 7,
                .atclk_div = 20
            },
            { /* out of spec */
                .name = "200",
                .cpu_type = CPU_K6_2,
                .fpus = fpus_internal,
                .rspeed = 200000000,
                .multi = 3.0,
                .voltage = 2200,
                .edx_reset = 0x580,
                .cpuid_model = 0x580,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 18,
                .mem_write_cycles = 18,
                .cache_read_cycles = 9,
                .cache_write_cycles = 9,
                .atclk_div = 24
            },
            {
                .name = "233",
                .cpu_type = CPU_K6_2,
                .fpus = fpus_internal,
                .rspeed = 233333333,
                .multi = 3.5,
                .voltage = 2200,
                .edx_reset = 0x580,
                .cpuid_model = 0x580,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 21,
                .mem_write_cycles = 21,
                .cache_read_cycles = 10,
                .cache_write_cycles = 10,
                .atclk_div = 28
            },
            {
                .name = "266",
                .cpu_type = CPU_K6_2,
                .fpus = fpus_internal,
                .rspeed = 266666666,
                .multi = 4.0,
                .voltage = 2200,
                .edx_reset = 0x580,
                .cpuid_model = 0x580,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 24,
                .mem_write_cycles = 24,
                .cache_read_cycles = 12,
                .cache_write_cycles = 12,
                .atclk_div = 32
            },
            {
                .name = "300",
                .cpu_type = CPU_K6_2,
                .fpus = fpus_internal,
                .rspeed = 300000000,
                .multi = 3.0,
                .voltage = 2200,
                .edx_reset = 0x580,
                .cpuid_model = 0x580,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 27,
                .mem_write_cycles = 27,
                .cache_read_cycles = 9,
                .cache_write_cycles = 9,
                .atclk_div = 36
            },
            { /* 332.5 MHz = 3.5 x 95 MHz bus; presumably why "333" is not 333333333 — confirm */
                .name = "333",
                .cpu_type = CPU_K6_2,
                .fpus = fpus_internal,
                .rspeed = 332500000,
                .multi = 3.5,
                .voltage = 2200,
                .edx_reset = 0x580,
                .cpuid_model = 0x580,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 30,
                .mem_write_cycles = 30,
                .cache_read_cycles = 11,
                .cache_write_cycles = 11,
                .atclk_div = 40
            },
            {
                .name = "350",
                .cpu_type = CPU_K6_2C,
                .fpus = fpus_internal,
                .rspeed = 350000000,
                .multi = 3.5,
                .voltage = 2200,
                .edx_reset = 0x58c,
                .cpuid_model = 0x58c,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 32,
                .mem_write_cycles = 32,
                .cache_read_cycles = 11,
                .cache_write_cycles = 11,
                .atclk_div = 42
            },
            {
                .name = "366",
                .cpu_type = CPU_K6_2C,
                .fpus = fpus_internal,
                .rspeed = 366666666,
                .multi = 5.5,
                .voltage = 2200,
                .edx_reset = 0x58c,
                .cpuid_model = 0x58c,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 33,
                .mem_write_cycles = 33,
                .cache_read_cycles = 17,
                .cache_write_cycles = 17,
                .atclk_div = 44
            },
            {
                .name = "380",
                .cpu_type = CPU_K6_2C,
                .fpus = fpus_internal,
                .rspeed = 380000000,
                .multi = 4.0,
                .voltage = 2200,
                .edx_reset = 0x58c,
                .cpuid_model = 0x58c,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 34,
                .mem_write_cycles = 34,
                .cache_read_cycles = 12,
                .cache_write_cycles = 12,
                .atclk_div = 46
            },
            { /* 400 MHz on a 66 MHz bus (6.0x) */
                .name = "400/66",
                .cpu_type = CPU_K6_2C,
                .fpus = fpus_internal,
                .rspeed = 400000000,
                .multi = 6.0,
                .voltage = 2200,
                .edx_reset = 0x58c,
                .cpuid_model = 0x58c,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 36,
                .mem_write_cycles = 36,
                .cache_read_cycles = 12,
                .cache_write_cycles = 12,
                .atclk_div = 48
            },
            { /* 400 MHz on a 100 MHz bus (4.0x) */
                .name = "400/100",
                .cpu_type = CPU_K6_2C,
                .fpus = fpus_internal,
                .rspeed = 400000000,
                .multi = 4.0,
                .voltage = 2200,
                .edx_reset = 0x58c,
                .cpuid_model = 0x58c,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 36,
                .mem_write_cycles = 36,
                .cache_read_cycles = 12,
                .cache_write_cycles = 12,
                .atclk_div = 48
            },
            {
                .name = "450",
                .cpu_type = CPU_K6_2C,
                .fpus = fpus_internal,
                .rspeed = 450000000,
                .multi = 4.5,
                .voltage = 2200,
                .edx_reset = 0x58c,
                .cpuid_model = 0x58c,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 41,
                .mem_write_cycles = 41,
                .cache_read_cycles = 14,
                .cache_write_cycles = 14,
                .atclk_div = 54
            },
            {
                .name = "475",
                .cpu_type = CPU_K6_2C,
                .fpus = fpus_internal,
                .rspeed = 475000000,
                .multi = 5.0,
                .voltage = 2400,
                .edx_reset = 0x58c,
                .cpuid_model = 0x58c,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 43,
                .mem_write_cycles = 43,
                .cache_read_cycles = 15,
                .cache_write_cycles = 15,
                .atclk_div = 57
            },
            {
                .name = "500",
                .cpu_type = CPU_K6_2C,
                .fpus = fpus_internal,
                .rspeed = 500000000,
                .multi = 5.0,
                .voltage = 2400,
                .edx_reset = 0x58c,
                .cpuid_model = 0x58c,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 45,
                .mem_write_cycles = 45,
                .cache_read_cycles = 15,
                .cache_write_cycles = 15,
                .atclk_div = 60
            },
            {
                .name = "533",
                .cpu_type = CPU_K6_2C,
                .fpus = fpus_internal,
                .rspeed = 533333333,
                .multi = 5.5,
                .voltage = 2200,
                .edx_reset = 0x58c,
                .cpuid_model = 0x58c,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 48,
                .mem_write_cycles = 48,
                .cache_read_cycles = 17,
                .cache_write_cycles = 17,
                .atclk_div = 64
            },
            {
                .name = "550",
                .cpu_type = CPU_K6_2C,
                .fpus = fpus_internal,
                .rspeed = 550000000,
                .multi = 5.5,
                .voltage = 2300,
                .edx_reset = 0x58c,
                .cpuid_model = 0x58c,
                .cyrix_id = 0,
                .cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
                .mem_read_cycles = 50,
                .mem_write_cycles = 50,
                .cache_read_cycles = 17,
                .cache_write_cycles = 17,
                .atclk_div = 66
            },
            { .name = "", 0 } /* list terminator */
        }
}, {
.package = CPU_PKG_SOCKET5_7,
.manufacturer = "AMD",
.name = "K6-2+",
.internal_name = "k6_2p",
.cpus = (const CPU[]) {
{ /* out of spec */
.name = "100",
.cpu_type = CPU_K6_2P,
.fpus = fpus_internal,
.rspeed = 100000000,
.multi = 1.5,
.voltage = 2000,
.edx_reset = 0x5d4,
.cpuid_model = 0x5d4,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 9,
.mem_write_cycles = 9,
.cache_read_cycles = 4,
.cache_write_cycles = 4,
.atclk_div = 12
},
{ /* out of spec */
.name = "133",
.cpu_type = CPU_K6_2P,
.fpus = fpus_internal,
.rspeed = 133333333,
.multi = 2.0,
.voltage = 2000,
.edx_reset = 0x5d4,
.cpuid_model = 0x5d4,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 12,
.mem_write_cycles = 12,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 16
},
{ /* out of spec */
.name = "166",
.cpu_type = CPU_K6_2P,
.fpus = fpus_internal,
.rspeed = 166666666,
.multi = 2.5,
.voltage = 2000,
.edx_reset = 0x5d4,
.cpuid_model = 0x5d4,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 15,
.mem_write_cycles = 15,
.cache_read_cycles = 7,
.cache_write_cycles = 7,
.atclk_div = 20
},
{ /* out of spec */
.name = "200",
.cpu_type = CPU_K6_2P,
.fpus = fpus_internal,
.rspeed = 200000000,
.multi = 3.0,
.voltage = 2000,
.edx_reset = 0x5d4,
.cpuid_model = 0x5d4,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 18,
.mem_write_cycles = 18,
.cache_read_cycles = 9,
.cache_write_cycles = 9,
.atclk_div = 24
},
{ /* out of spec */
.name = "233",
.cpu_type = CPU_K6_2P,
.fpus = fpus_internal,
.rspeed = 233333333,
.multi = 3.5,
.voltage = 2000,
.edx_reset = 0x5d4,
.cpuid_model = 0x5d4,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 21,
.mem_write_cycles = 21,
.cache_read_cycles = 10,
.cache_write_cycles = 10,
.atclk_div = 28
},
{ /* out of spec */
.name = "266",
.cpu_type = CPU_K6_2P,
.fpus = fpus_internal,
.rspeed = 266666666,
.multi = 4.0,
.voltage = 2000,
.edx_reset = 0x5d4,
.cpuid_model = 0x5d4,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 24,
.mem_write_cycles = 24,
.cache_read_cycles = 12,
.cache_write_cycles = 12,
.atclk_div = 32
},
{ /* out of spec */
.name = "300",
.cpu_type = CPU_K6_2P,
.fpus = fpus_internal,
.rspeed = 300000000,
.multi = 3.0,
.voltage = 2000,
.edx_reset = 0x5d4,
.cpuid_model = 0x5d4,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 27,
.mem_write_cycles = 27,
.cache_read_cycles = 9,
.cache_write_cycles = 9,
.atclk_div = 36
},
{ /* out of spec */
.name = "333",
.cpu_type = CPU_K6_2P,
.fpus = fpus_internal,
.rspeed = 332500000,
.multi = 3.5,
.voltage = 2000,
.edx_reset = 0x5d4,
.cpuid_model = 0x5d4,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 30,
.mem_write_cycles = 30,
.cache_read_cycles = 11,
.cache_write_cycles = 11,
.atclk_div = 40
},
{ /* out of spec */
.name = "350",
.cpu_type = CPU_K6_2P,
.fpus = fpus_internal,
.rspeed = 350000000,
.multi = 3.5,
.voltage = 2000,
.edx_reset = 0x5d4,
.cpuid_model = 0x5d4,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 32,
.mem_write_cycles = 32,
.cache_read_cycles = 11,
.cache_write_cycles = 11,
.atclk_div = 42
},
{ /* out of spec */
.name = "366",
.cpu_type = CPU_K6_2P,
.fpus = fpus_internal,
.rspeed = 366666666,
.multi = 5.5,
.voltage = 2000,
.edx_reset = 0x5d4,
.cpuid_model = 0x5d4,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 33,
.mem_write_cycles = 33,
.cache_read_cycles = 17,
.cache_write_cycles = 17,
.atclk_div = 44
},
{ /* out of spec */
.name = "380",
.cpu_type = CPU_K6_2P,
.fpus = fpus_internal,
.rspeed = 380000000,
.multi = 4.0,
.voltage = 2000,
.edx_reset = 0x5d4,
.cpuid_model = 0x5d4,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 34,
.mem_write_cycles = 34,
.cache_read_cycles = 12,
.cache_write_cycles = 12,
.atclk_div = 46
},
{ /* out of spec */
.name = "400/66",
.cpu_type = CPU_K6_2P,
.fpus = fpus_internal,
.rspeed = 400000000,
.multi = 6.0,
.voltage = 2000,
.edx_reset = 0x5d4,
.cpuid_model = 0x5d4,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 36,
.mem_write_cycles = 36,
.cache_read_cycles = 12,
.cache_write_cycles = 12,
.atclk_div = 48
},
{ /* out of spec */
.name = "400/100",
.cpu_type = CPU_K6_2P,
.fpus = fpus_internal,
.rspeed = 400000000,
.multi = 4.0,
.voltage = 2000,
.edx_reset = 0x5d4,
.cpuid_model = 0x5d4,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 36,
.mem_write_cycles = 36,
.cache_read_cycles = 12,
.cache_write_cycles = 12,
.atclk_div = 48
},
{
.name = "450",
.cpu_type = CPU_K6_2P,
.fpus = fpus_internal,
.rspeed = 450000000,
.multi = 4.5,
.voltage = 2000,
.edx_reset = 0x5d4,
.cpuid_model = 0x5d4,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 41,
.mem_write_cycles = 41,
.cache_read_cycles = 14,
.cache_write_cycles = 14,
.atclk_div = 54
},
{
.name = "475",
.cpu_type = CPU_K6_2P,
.fpus = fpus_internal,
.rspeed = 475000000,
.multi = 5.0,
.voltage = 2000,
.edx_reset = 0x5d4,
.cpuid_model = 0x5d4,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 43,
.mem_write_cycles = 43,
.cache_read_cycles = 15,
.cache_write_cycles = 15,
.atclk_div = 57
},
{
.name = "500",
.cpu_type = CPU_K6_2P,
.fpus = fpus_internal,
.rspeed = 500000000,
.multi = 5.0,
.voltage = 2000,
.edx_reset = 0x5d4,
.cpuid_model = 0x5d4,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 45,
.mem_write_cycles = 45,
.cache_read_cycles = 15,
.cache_write_cycles = 15,
.atclk_div = 60
},
{
.name = "533",
.cpu_type = CPU_K6_2P,
.fpus = fpus_internal,
.rspeed = 533333333,
.multi = 5.5,
.voltage = 2000,
.edx_reset = 0x5d4,
.cpuid_model = 0x5d4,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 48,
.mem_write_cycles = 48,
.cache_read_cycles = 17,
.cache_write_cycles = 17,
.atclk_div = 64
},
{
.name = "550",
.cpu_type = CPU_K6_2P,
.fpus = fpus_internal,
.rspeed = 550000000,
.multi = 5.5,
.voltage = 2000,
.edx_reset = 0x5d4,
.cpuid_model = 0x5d4,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 50,
.mem_write_cycles = 50,
.cache_read_cycles = 17,
.cache_write_cycles = 17,
.atclk_div = 66
},
{ .name = "", 0 }
}
}, {
.package = CPU_PKG_SOCKET5_7,
.manufacturer = "AMD",
.name = "K6-III",
.internal_name = "k6_3",
.cpus = (const CPU[]) {
{ /* out of spec */
.name = "100",
.cpu_type = CPU_K6_3,
.fpus = fpus_internal,
.rspeed = 100000000,
.multi = 1.5,
.voltage = 2200,
.edx_reset = 0x591,
.cpuid_model = 0x591,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 9,
.mem_write_cycles = 9,
.cache_read_cycles = 4,
.cache_write_cycles = 4,
.atclk_div = 12
},
{ /* out of spec */
.name = "133",
.cpu_type = CPU_K6_3,
.fpus = fpus_internal,
.rspeed = 133333333,
.multi = 2.0,
.voltage = 2200,
.edx_reset = 0x591,
.cpuid_model = 0x591,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 12,
.mem_write_cycles = 12,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 16
},
{ /* out of spec */
.name = "166",
.cpu_type = CPU_K6_3,
.fpus = fpus_internal,
.rspeed = 166666666,
.multi = 2.5,
.voltage = 2200,
.edx_reset = 0x591,
.cpuid_model = 0x591,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 15,
.mem_write_cycles = 15,
.cache_read_cycles = 7,
.cache_write_cycles = 7,
.atclk_div = 20
},
{ /* out of spec */
.name = "200",
.cpu_type = CPU_K6_3,
.fpus = fpus_internal,
.rspeed = 200000000,
.multi = 3.0,
.voltage = 2200,
.edx_reset = 0x591,
.cpuid_model = 0x591,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 18,
.mem_write_cycles = 18,
.cache_read_cycles = 9,
.cache_write_cycles = 9,
.atclk_div = 24
},
{ /* out of spec */
.name = "233",
.cpu_type = CPU_K6_3,
.fpus = fpus_internal,
.rspeed = 233333333,
.multi = 3.5,
.voltage = 2200,
.edx_reset = 0x591,
.cpuid_model = 0x591,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 21,
.mem_write_cycles = 21,
.cache_read_cycles = 10,
.cache_write_cycles = 10,
.atclk_div = 28
},
{ /* out of spec */
.name = "266",
.cpu_type = CPU_K6_3,
.fpus = fpus_internal,
.rspeed = 266666666,
.multi = 4.0,
.voltage = 2200,
.edx_reset = 0x591,
.cpuid_model = 0x591,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 24,
.mem_write_cycles = 24,
.cache_read_cycles = 12,
.cache_write_cycles = 12,
.atclk_div = 32
},
{ /* out of spec */
.name = "300",
.cpu_type = CPU_K6_3,
.fpus = fpus_internal,
.rspeed = 300000000,
.multi = 3.0,
.voltage = 2200,
.edx_reset = 0x591,
.cpuid_model = 0x591,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 27,
.mem_write_cycles = 27,
.cache_read_cycles = 9,
.cache_write_cycles = 9,
.atclk_div = 36
},
{ /* out of spec */
.name = "333",
.cpu_type = CPU_K6_3,
.fpus = fpus_internal,
.rspeed = 332500000,
.multi = 3.5,
.voltage = 2200,
.edx_reset = 0x591,
.cpuid_model = 0x591,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 30,
.mem_write_cycles = 30,
.cache_read_cycles = 11,
.cache_write_cycles = 11,
.atclk_div = 40
},
{ /* out of spec */
.name = "350",
.cpu_type = CPU_K6_3,
.fpus = fpus_internal,
.rspeed = 350000000,
.multi = 3.5,
.voltage = 2200,
.edx_reset = 0x591,
.cpuid_model = 0x591,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 32,
.mem_write_cycles = 32,
.cache_read_cycles = 11,
.cache_write_cycles = 11,
.atclk_div = 42
},
{ /* out of spec */
.name = "366",
.cpu_type = CPU_K6_3,
.fpus = fpus_internal,
.rspeed = 366666666,
.multi = 5.5,
.voltage = 2200,
.edx_reset = 0x591,
.cpuid_model = 0x591,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 33,
.mem_write_cycles = 33,
.cache_read_cycles = 17,
.cache_write_cycles = 17,
.atclk_div = 44
},
{ /* out of spec */
.name = "380",
.cpu_type = CPU_K6_3,
.fpus = fpus_internal,
.rspeed = 380000000,
.multi = 4.0,
.voltage = 2200,
.edx_reset = 0x591,
.cpuid_model = 0x591,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 34,
.mem_write_cycles = 34,
.cache_read_cycles = 12,
.cache_write_cycles = 12,
.atclk_div = 46
},
{
.name = "400",
.cpu_type = CPU_K6_3,
.fpus = fpus_internal,
.rspeed = 400000000,
.multi = 4.0,
.voltage = 2200,
.edx_reset = 0x591,
.cpuid_model = 0x591,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 36,
.mem_write_cycles = 36,
.cache_read_cycles = 12,
.cache_write_cycles = 12,
.atclk_div = 48
},
{
.name = "450",
.cpu_type = CPU_K6_3,
.fpus = fpus_internal,
.rspeed = 450000000,
.multi = 4.5,
.voltage = 2200,
.edx_reset = 0x591,
.cpuid_model = 0x591,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 41,
.mem_write_cycles = 41,
.cache_read_cycles = 14,
.cache_write_cycles = 14,
.atclk_div = 54
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_SOCKET5_7,
.manufacturer = "AMD",
.name = "K6-III+",
.internal_name = "k6_3p",
.cpus = (const CPU[]) {
{ /* out of spec */
.name = "100",
.cpu_type = CPU_K6_3P,
.fpus = fpus_internal,
.rspeed = 100000000,
.multi = 1.5,
.voltage = 2000,
.edx_reset = 0x5d0,
.cpuid_model = 0x5d0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 7,
.mem_write_cycles = 7,
.cache_read_cycles = 4,
.cache_write_cycles = 4,
.atclk_div = 9
},
{ /* out of spec */
.name = "133",
.cpu_type = CPU_K6_3P,
.fpus = fpus_internal,
.rspeed = 133333333,
.multi = 2.0,
.voltage = 2000,
.edx_reset = 0x5d0,
.cpuid_model = 0x5d0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 12,
.mem_write_cycles = 12,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 16
},
{ /* out of spec */
.name = "166",
.cpu_type = CPU_K6_3P,
.fpus = fpus_internal,
.rspeed = 166666666,
.multi = 2.5,
.voltage = 2000,
.edx_reset = 0x5d0,
.cpuid_model = 0x5d0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 15,
.mem_write_cycles = 15,
.cache_read_cycles = 7,
.cache_write_cycles = 7,
.atclk_div = 20
},
{ /* out of spec */
.name = "200",
.cpu_type = CPU_K6_3P,
.fpus = fpus_internal,
.rspeed = 200000000,
.multi = 3.0,
.voltage = 2000,
.edx_reset = 0x5d0,
.cpuid_model = 0x5d0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 18,
.mem_write_cycles = 18,
.cache_read_cycles = 9,
.cache_write_cycles = 9,
.atclk_div = 24
},
{ /* out of spec */
.name = "233",
.cpu_type = CPU_K6_3P,
.fpus = fpus_internal,
.rspeed = 233333333,
.multi = 3.5,
.voltage = 2000,
.edx_reset = 0x5d0,
.cpuid_model = 0x5d0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 21,
.mem_write_cycles = 21,
.cache_read_cycles = 10,
.cache_write_cycles = 10,
.atclk_div = 28
},
{ /* out of spec */
.name = "266",
.cpu_type = CPU_K6_3P,
.fpus = fpus_internal,
.rspeed = 266666666,
.multi = 4.0,
.voltage = 2000,
.edx_reset = 0x5d0,
.cpuid_model = 0x5d0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 24,
.mem_write_cycles = 24,
.cache_read_cycles = 12,
.cache_write_cycles = 12,
.atclk_div = 32
},
{ /* out of spec */
.name = "300",
.cpu_type = CPU_K6_3P,
.fpus = fpus_internal,
.rspeed = 300000000,
.multi = 3.0,
.voltage = 2000,
.edx_reset = 0x5d0,
.cpuid_model = 0x5d0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 27,
.mem_write_cycles = 27,
.cache_read_cycles = 9,
.cache_write_cycles = 9,
.atclk_div = 36
},
{ /* out of spec */
.name = "333",
.cpu_type = CPU_K6_3P,
.fpus = fpus_internal,
.rspeed = 332500000,
.multi = 3.5,
.voltage = 2000,
.edx_reset = 0x5d0,
.cpuid_model = 0x5d0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 30,
.mem_write_cycles = 30,
.cache_read_cycles = 11,
.cache_write_cycles = 11,
.atclk_div = 40
},
{ /* out of spec */
.name = "350",
.cpu_type = CPU_K6_3P,
.fpus = fpus_internal,
.rspeed = 350000000,
.multi = 3.5,
.voltage = 2000,
.edx_reset = 0x5d0,
.cpuid_model = 0x5d0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 32,
.mem_write_cycles = 32,
.cache_read_cycles = 11,
.cache_write_cycles = 11,
.atclk_div = 42
},
{ /* out of spec */
.name = "366",
.cpu_type = CPU_K6_3P,
.fpus = fpus_internal,
.rspeed = 366666666,
.multi = 5.5,
.voltage = 2000,
.edx_reset = 0x5d0,
.cpuid_model = 0x5d0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 33,
.mem_write_cycles = 33,
.cache_read_cycles = 17,
.cache_write_cycles = 17,
.atclk_div = 44
},
{ /* out of spec */
.name = "380",
.cpu_type = CPU_K6_3P,
.fpus = fpus_internal,
.rspeed = 380000000,
.multi = 4.0,
.voltage = 2000,
.edx_reset = 0x5d0,
.cpuid_model = 0x5d0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 34,
.mem_write_cycles = 34,
.cache_read_cycles = 12,
.cache_write_cycles = 12,
.atclk_div = 46
},
{
.name = "400",
.cpu_type = CPU_K6_3P,
.fpus = fpus_internal,
.rspeed = 400000000,
.multi = 4.0,
.voltage = 2000,
.edx_reset = 0x5d0,
.cpuid_model = 0x5d0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 36,
.mem_write_cycles = 36,
.cache_read_cycles = 12,
.cache_write_cycles = 12,
.atclk_div = 48
},
{
.name = "450",
.cpu_type = CPU_K6_3P,
.fpus = fpus_internal,
.rspeed = 450000000,
.multi = 4.5,
.voltage = 2000,
.edx_reset = 0x5d0,
.cpuid_model = 0x5d0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 41,
.mem_write_cycles = 41,
.cache_read_cycles = 14,
.cache_write_cycles = 14,
.atclk_div = 54
},
{
.name = "475",
.cpu_type = CPU_K6_3P,
.fpus = fpus_internal,
.rspeed = 475000000,
.multi = 5.0,
.voltage = 2000,
.edx_reset = 0x5d0,
.cpuid_model = 0x5d0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 43,
.mem_write_cycles = 43,
.cache_read_cycles = 15,
.cache_write_cycles = 15,
.atclk_div = 57
},
{
.name = "500",
.cpu_type = CPU_K6_3P,
.fpus = fpus_internal,
.rspeed = 500000000,
.multi = 5.0,
.voltage = 2000,
.edx_reset = 0x5d0,
.cpuid_model = 0x5d0,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 45,
.mem_write_cycles = 45,
.cache_read_cycles = 15,
.cache_write_cycles = 15,
.atclk_div = 60
},
{ .name = "", 0 }
}
},
#ifdef USE_CYRIX_6X86
{
.package = CPU_PKG_SOCKET5_7,
.manufacturer = "Cyrix",
.name = "Cx6x86",
.internal_name = "cx6x86",
.cpus = (const CPU[]) {
{
.name = "80 (PR90+)",
.cpu_type = CPU_Cx6x86,
.fpus = fpus_internal,
.rspeed = 80000000,
.multi = 2.0,
.voltage = 3520,
.edx_reset = 0x520,
.cpuid_model = 0x520,
.cyrix_id = 0x1731,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 8,
.mem_write_cycles = 8,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 10
},
{
.name = "100 (PR120+)",
.cpu_type = CPU_Cx6x86,
.fpus = fpus_internal,
.rspeed = 100000000,
.multi = 2.0,
.voltage = 3520,
.edx_reset = 0x520,
.cpuid_model = 0x520,
.cyrix_id = 0x1731,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 10,
.mem_write_cycles = 10,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 12
},
{
.name = "110 (PR133+)",
.cpu_type = CPU_Cx6x86,
.fpus = fpus_internal,
.rspeed = 110000000,
.multi = 2.0,
.voltage = 3520,
.edx_reset = 0x520,
.cpuid_model = 0x520,
.cyrix_id = 0x1731,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 10,
.mem_write_cycles = 10,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 14
},
{
.name = "120 (PR150+)",
.cpu_type = CPU_Cx6x86,
.fpus = fpus_internal,
.rspeed = 120000000,
.multi = 2.0,
.voltage = 3520,
.edx_reset = 0x520,
.cpuid_model = 0x520,
.cyrix_id = 0x1731,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 12,
.mem_write_cycles = 12,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 14
},
{
.name = "133 (PR166+)",
.cpu_type = CPU_Cx6x86,
.fpus = fpus_internal,
.rspeed = 133333333,
.multi = 2.0,
.voltage = 3520,
.edx_reset = 0x520,
.cpuid_model = 0x520,
.cyrix_id = 0x1731,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 12,
.mem_write_cycles = 12,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 16
},
{
.name = "150 (PR200+)",
.cpu_type = CPU_Cx6x86,
.fpus = fpus_internal,
.rspeed = 150000000,
.multi = 2.0,
.voltage = 3520,
.edx_reset = 0x520,
.cpuid_model = 0x520,
.cyrix_id = 0x1731,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 12,
.mem_write_cycles = 12,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 18
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_SOCKET5_7,
.manufacturer = "Cyrix",
.name = "Cx6x86L",
.internal_name = "cx6x86l",
.cpus = (const CPU[]) {
{
.name = "110 (PR133+)",
.cpu_type = CPU_Cx6x86L,
.fpus = fpus_internal,
.rspeed = 110000000,
.multi = 2.0,
.voltage = 2800,
.edx_reset = 0x540,
.cpuid_model = 0x540,
.cyrix_id = 0x2231,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 10,
.mem_write_cycles = 10,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 14
},
{
.name = "120 (PR150+)",
.cpu_type = CPU_Cx6x86L,
.fpus = fpus_internal,
.rspeed = 120000000,
.multi = 2.0,
.voltage = 2800,
.edx_reset = 0x540,
.cpuid_model = 0x540,
.cyrix_id = 0x2231,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 12,
.mem_write_cycles = 12,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 14
},
{
.name = "133 (PR166+)",
.cpu_type = CPU_Cx6x86L,
.fpus = fpus_internal,
.rspeed = 133333333,
.multi = 2.0,
.voltage = 2800,
.edx_reset = 0x540,
.cpuid_model = 0x540,
.cyrix_id = 0x2231,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 12,
.mem_write_cycles = 12,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 16
},
{
.name = "150 (PR200+)",
.cpu_type = CPU_Cx6x86L,
.fpus = fpus_internal,
.rspeed = 150000000,
.multi = 2.0,
.voltage = 2800,
.edx_reset = 0x540,
.cpuid_model = 0x540,
.cyrix_id = 0x2231,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 12,
.mem_write_cycles = 12,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 18
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_SOCKET5_7,
.manufacturer = "Cyrix",
.name = "Cx6x86MX",
.internal_name = "cx6x86mx",
.cpus = (const CPU[]) {
{
.name = "133 (PR166)",
.cpu_type = CPU_Cx6x86MX,
.fpus = fpus_internal,
.rspeed = 133333333,
.multi = 2.0,
.voltage = 2900,
.edx_reset = 0x600,
.cpuid_model = 0x600,
.cyrix_id = 0x0451,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 12,
.mem_write_cycles = 12,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 16
},
{
.name = "166 (PR200)",
.cpu_type = CPU_Cx6x86MX,
.fpus = fpus_internal,
.rspeed = 166666666,
.multi = 2.5,
.voltage = 2900,
.edx_reset = 0x600,
.cpuid_model = 0x600,
.cyrix_id = 0x0452,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 15,
.mem_write_cycles = 15,
.cache_read_cycles = 7,
.cache_write_cycles = 7,
.atclk_div = 20
},
{
.name = "187.5 (PR233)",
.cpu_type = CPU_Cx6x86MX,
.fpus = fpus_internal,
.rspeed = 187500000,
.multi = 2.5,
.voltage = 2900,
.edx_reset = 0x600,
.cpuid_model = 0x600,
.cyrix_id = 0x0452,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 15,
.mem_write_cycles = 15,
.cache_read_cycles = 7,
.cache_write_cycles = 7,
.atclk_div = 45/2
},
{
.name = "208.3 (PR266)",
.cpu_type = CPU_Cx6x86MX,
.fpus = fpus_internal,
.rspeed = 208333333,
.multi = 2.5,
.voltage = 2700,
.edx_reset = 0x600,
.cpuid_model = 0x600,
.cyrix_id = 0x0452,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 17,
.mem_write_cycles = 17,
.cache_read_cycles = 7,
.cache_write_cycles = 7,
.atclk_div = 25
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_SOCKET5_7,
.manufacturer = "Cyrix",
.name = "MII",
.internal_name = "mii",
.cpus = (const CPU[]) {
{
.name = "233 (PR300)",
.cpu_type = CPU_Cx6x86MX,
.fpus = fpus_internal,
.rspeed = 233333333,
.multi = 3.5,
.voltage = 2900,
.edx_reset = 0x601,
.cpuid_model = 0x601,
.cyrix_id = 0x0852,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 21,
.mem_write_cycles = 21,
.cache_read_cycles = 11,
.cache_write_cycles = 11,
.atclk_div = 28
},
{
.name = "250/83 (PR333)",
.cpu_type = CPU_Cx6x86MX,
.fpus = fpus_internal,
.rspeed = 250000000,
.multi = 3.0,
.voltage = 2900,
.edx_reset = 0x601,
.cpuid_model = 0x601,
.cyrix_id = 0x0853,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 23,
.mem_write_cycles = 23,
.cache_read_cycles = 9,
.cache_write_cycles = 9,
.atclk_div = 30
},
{
.name = "250/100 (PR366)",
.cpu_type = CPU_Cx6x86MX,
.fpus = fpus_internal,
.rspeed = 250000000,
.multi = 2.5,
.voltage = 2900,
.edx_reset = 0x601,
.cpuid_model = 0x601,
.cyrix_id = 0x0853,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 23,
.mem_write_cycles = 23,
.cache_read_cycles = 7,
.cache_write_cycles = 7,
.atclk_div = 30
},
{
.name = "285 (PR400)",
.cpu_type = CPU_Cx6x86MX,
.fpus = fpus_internal,
.rspeed = 285000000,
.multi = 3.0,
.voltage = 2900,
.edx_reset = 0x601,
.cpuid_model = 0x601,
.cyrix_id = 0x0853,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 27,
.mem_write_cycles = 27,
.cache_read_cycles = 9,
.cache_write_cycles = 9,
.atclk_div = 34
},
{
.name = "300 (PR433)",
.cpu_type = CPU_Cx6x86MX,
.fpus = fpus_internal,
.rspeed = 300000000,
.multi = 3.0,
.voltage = 2900,
.edx_reset = 0x601,
.cpuid_model = 0x601,
.cyrix_id = 0x0853,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 27,
.mem_write_cycles = 27,
.cache_read_cycles = 9,
.cache_write_cycles = 9,
.atclk_div = 36
},
{ .name = "", 0 }
}
},
#endif /* USE_CYRIX_6X86 */
{
.package = CPU_PKG_SOCKET8,
.manufacturer = "Intel",
.name = "Pentium Pro",
.internal_name = "pentiumpro",
.cpus = (const CPU[]) {
{ /* out of spec */
.name = "60",
.cpu_type = CPU_PENTIUMPRO,
.fpus = fpus_internal,
.rspeed = 60000000,
.multi = 1.0,
.voltage = 3100,
.edx_reset = 0x612,
.cpuid_model = 0x612,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 6,
.mem_write_cycles = 6,
.cache_read_cycles = 1,
.cache_write_cycles = 1,
.atclk_div = 7
},
{ /* out of spec */
.name = "66",
.cpu_type = CPU_PENTIUMPRO,
.fpus = fpus_internal,
.rspeed = 66666666,
.multi = 1.0,
.voltage = 3300,
.edx_reset = 0x617,
.cpuid_model = 0x617,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 6,
.mem_write_cycles = 6,
.cache_read_cycles = 1,
.cache_write_cycles = 1,
.atclk_div = 8
},
{ /* out of spec */
.name = "90",
.cpu_type = CPU_PENTIUMPRO,
.fpus = fpus_internal,
.rspeed = 90000000,
.multi = 1.5,
.voltage = 3100,
.edx_reset = 0x612,
.cpuid_model = 0x612,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 9,
.mem_write_cycles = 9,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 11
},
{ /* out of spec */
.name = "100",
.cpu_type = CPU_PENTIUMPRO,
.fpus = fpus_internal,
.rspeed = 100000000,
.multi = 1.5,
.voltage = 3300,
.edx_reset = 0x617,
.cpuid_model = 0x617,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 9,
.mem_write_cycles = 9,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 12
},
{ /* out of spec */
.name = "120",
.cpu_type = CPU_PENTIUMPRO,
.fpus = fpus_internal,
.rspeed = 120000000,
.multi = 2.0,
.voltage = 3100,
.edx_reset = 0x612,
.cpuid_model = 0x612,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 12,
.mem_write_cycles = 12,
.cache_read_cycles = 5,
.cache_write_cycles = 5,
.atclk_div = 14
},
{ /* out of spec */
.name = "133",
.cpu_type = CPU_PENTIUMPRO,
.fpus = fpus_internal,
.rspeed = 133333333,
.multi = 2.0,
.voltage = 3300,
.edx_reset = 0x617,
.cpuid_model = 0x617,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 12,
.mem_write_cycles = 12,
.cache_read_cycles = 5,
.cache_write_cycles = 5,
.atclk_div = 16
            },
{
.name = "150",
.cpu_type = CPU_PENTIUMPRO,
.fpus = fpus_internal,
.rspeed = 150000000,
.multi = 2.5,
.voltage = 3100,
.edx_reset = 0x612,
.cpuid_model = 0x612,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 15,
.mem_write_cycles = 15,
.cache_read_cycles = 7,
.cache_write_cycles = 7,
.atclk_div = 35/2
},
{
.name = "166",
.cpu_type = CPU_PENTIUMPRO,
.fpus = fpus_internal,
.rspeed = 166666666,
.multi = 2.5,
.voltage = 3300,
.edx_reset = 0x617,
.cpuid_model = 0x617,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 15,
.mem_write_cycles = 15,
.cache_read_cycles = 7,
.cache_write_cycles = 7,
.atclk_div = 20
},
{
.name = "180",
.cpu_type = CPU_PENTIUMPRO,
.fpus = fpus_internal,
.rspeed = 180000000,
.multi = 3.0,
.voltage = 3300,
.edx_reset = 0x617,
.cpuid_model = 0x617,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 18,
.mem_write_cycles = 18,
.cache_read_cycles = 9,
.cache_write_cycles = 9,
.atclk_div = 21
},
{
.name = "200",
.cpu_type = CPU_PENTIUMPRO,
.fpus = fpus_internal,
.rspeed = 200000000,
.multi = 3.0,
.voltage = 3300,
.edx_reset = 0x617,
.cpuid_model = 0x617,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 18,
.mem_write_cycles = 18,
.cache_read_cycles = 9,
.cache_write_cycles = 9,
.atclk_div = 24
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_SOCKET8,
.manufacturer = "Intel",
.name = "Pentium II OverDrive",
.internal_name = "pentium2_od",
.cpus = (const CPU[]) {
{ /* out of spec */
.name = "66",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 66666666,
.multi = 1.0,
.voltage = 3300,
.edx_reset = 0x1632,
.cpuid_model = 0x1632,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 6,
.mem_write_cycles = 6,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 8
},
{ /* out of spec */
.name = "100",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 100000000,
.multi = 1.5,
.voltage = 3300,
.edx_reset = 0x1632,
.cpuid_model = 0x1632,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 9,
.mem_write_cycles = 9,
.cache_read_cycles = 4,
.cache_write_cycles = 4,
.atclk_div = 12
},
{ /* out of spec */
.name = "133",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 133333333,
.multi = 2.0,
.voltage = 3300,
.edx_reset = 0x1632,
.cpuid_model = 0x1632,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 12,
.mem_write_cycles = 12,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 16
},
{ /* out of spec */
.name = "166",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 166666666,
.multi = 2.5,
.voltage = 3300,
.edx_reset = 0x1632,
.cpuid_model = 0x1632,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 15,
.mem_write_cycles = 15,
.cache_read_cycles = 7,
.cache_write_cycles = 7,
.atclk_div = 20
},
{ /* out of spec */
.name = "200",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 200000000,
.multi = 3.0,
.voltage = 3300,
.edx_reset = 0x1632,
.cpuid_model = 0x1632,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 18,
.mem_write_cycles = 18,
.cache_read_cycles = 9,
.cache_write_cycles = 9,
.atclk_div = 24
},
{ /* out of spec */
.name = "233",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 233333333,
.multi = 3.5,
.voltage = 3300,
.edx_reset = 0x1632,
.cpuid_model = 0x1632,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 21,
.mem_write_cycles = 21,
.cache_read_cycles = 10,
.cache_write_cycles = 10,
.atclk_div = 28
},
{ /* out of spec */
.name = "266",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 266666666,
.multi = 4.0,
.voltage = 3300,
.edx_reset = 0x1632,
.cpuid_model = 0x1632,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 24,
.mem_write_cycles = 24,
.cache_read_cycles = 12,
.cache_write_cycles = 12,
.atclk_div = 32
},
{
.name = "300",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 300000000,
.multi = 5.0,
.voltage = 3300,
.edx_reset = 0x1632,
.cpuid_model = 0x1632,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 27,
.mem_write_cycles = 27,
.cache_read_cycles = 13,
.cache_write_cycles = 13,
.atclk_div = 36
},
{
.name = "333",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 333333333,
.multi = 5.0,
.voltage = 3300,
.edx_reset = 0x1632,
.cpuid_model = 0x1632,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 27,
.mem_write_cycles = 27,
.cache_read_cycles = 13,
.cache_write_cycles = 13,
.atclk_div = 40
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_SLOT1,
.manufacturer = "Intel",
.name = "Pentium II (Klamath)",
.internal_name = "pentium2_klamath",
.cpus = (const CPU[]) {
{ /* out of spec */
.name = "66",
.cpu_type = CPU_PENTIUM2,
.fpus = fpus_internal,
.rspeed = 66666666,
.multi = 1.0,
.voltage = 2800,
.edx_reset = 0x634,
.cpuid_model = 0x634,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 6,
.mem_write_cycles = 6,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 8
},
{ /* out of spec */
.name = "100",
.cpu_type = CPU_PENTIUM2,
.fpus = fpus_internal,
.rspeed = 100000000,
.multi = 1.5,
.voltage = 2800,
.edx_reset = 0x634,
.cpuid_model = 0x634,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 9,
.mem_write_cycles = 9,
.cache_read_cycles = 4,
.cache_write_cycles = 4,
.atclk_div = 12
},
{ /* out of spec */
.name = "133",
.cpu_type = CPU_PENTIUM2,
.fpus = fpus_internal,
.rspeed = 133333333,
.multi = 2.0,
.voltage = 2800,
.edx_reset = 0x634,
.cpuid_model = 0x634,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 12,
.mem_write_cycles = 12,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 16
},
{ /* out of spec */
.name = "166",
.cpu_type = CPU_PENTIUM2,
.fpus = fpus_internal,
.rspeed = 166666666,
.multi = 2.5,
.voltage = 2800,
.edx_reset = 0x634,
.cpuid_model = 0x634,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 15,
.mem_write_cycles = 15,
.cache_read_cycles = 7,
.cache_write_cycles = 7,
.atclk_div = 20
},
{ /* out of spec */
.name = "200",
.cpu_type = CPU_PENTIUM2,
.fpus = fpus_internal,
.rspeed = 200000000,
.multi = 3.0,
.voltage = 2800,
.edx_reset = 0x634,
.cpuid_model = 0x634,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 18,
.mem_write_cycles = 18,
.cache_read_cycles = 9,
.cache_write_cycles = 9,
.atclk_div = 24
},
{
.name = "233",
.cpu_type = CPU_PENTIUM2,
.fpus = fpus_internal,
.rspeed = 233333333,
.multi = 3.5,
.voltage = 2800,
.edx_reset = 0x634,
.cpuid_model = 0x634,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 21,
.mem_write_cycles = 21,
.cache_read_cycles = 10,
.cache_write_cycles = 10,
.atclk_div = 28
},
{
.name = "266",
.cpu_type = CPU_PENTIUM2,
.fpus = fpus_internal,
.rspeed = 266666666,
.multi = 4.0,
.voltage = 2800,
.edx_reset = 0x634,
.cpuid_model = 0x634,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 24,
.mem_write_cycles = 24,
.cache_read_cycles = 12,
.cache_write_cycles = 12,
.atclk_div = 32
},
{
.name = "300",
.cpu_type = CPU_PENTIUM2,
.fpus = fpus_internal,
.rspeed = 300000000,
.multi = 4.5,
.voltage = 2800,
.edx_reset = 0x634,
.cpuid_model = 0x634,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 25,
.mem_write_cycles = 25,
.cache_read_cycles = 12,
.cache_write_cycles = 12,
.atclk_div = 36
},
{ .name = "", 0 }
}
}, {
.package = CPU_PKG_SLOT1,
.manufacturer = "Intel",
.name = "Pentium II (Deschutes)",
.internal_name = "pentium2_deschutes",
.cpus = (const CPU[]) {
{ /* out of spec */
.name = "66",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 66666666,
.multi = 1.0,
.voltage = 2050,
.edx_reset = 0x652,
.cpuid_model = 0x652,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 6,
.mem_write_cycles = 6,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 8
},
{ /* out of spec */
.name = "100",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 100000000,
.multi = 1.5,
.voltage = 2050,
.edx_reset = 0x652,
.cpuid_model = 0x652,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 9,
.mem_write_cycles = 9,
.cache_read_cycles = 5,
.cache_write_cycles = 5,
.atclk_div = 12
},
{ /* out of spec */
.name = "133",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 133333333,
.multi = 2.0,
.voltage = 2050,
.edx_reset = 0x652,
.cpuid_model = 0x652,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 12,
.mem_write_cycles = 12,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 16
},
{ /* out of spec */
.name = "166",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 166666666,
.multi = 2.5,
.voltage = 2050,
.edx_reset = 0x652,
.cpuid_model = 0x652,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 15,
.mem_write_cycles = 15,
.cache_read_cycles = 7,
.cache_write_cycles = 7,
.atclk_div = 20
},
{ /* out of spec */
.name = "200",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 200000000,
.multi = 3.0,
.voltage = 2050,
.edx_reset = 0x652,
.cpuid_model = 0x652,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 18,
.mem_write_cycles = 18,
.cache_read_cycles = 9,
.cache_write_cycles = 9,
.atclk_div = 24
},
{ /* out of spec */
.name = "233",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 233333333,
.multi = 3.5,
.voltage = 2050,
.edx_reset = 0x652,
.cpuid_model = 0x652,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 21,
.mem_write_cycles = 21,
.cache_read_cycles = 11,
.cache_write_cycles = 11,
.atclk_div = 28
},
{
.name = "266",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 266666666,
.multi = 4.0,
.voltage = 2050,
.edx_reset = 0x652,
.cpuid_model = 0x652,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 24,
.mem_write_cycles = 24,
.cache_read_cycles = 12,
.cache_write_cycles = 12,
.atclk_div = 32
},
{
.name = "300",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 300000000,
.multi = 4.5,
.voltage = 2050,
.edx_reset = 0x651,
.cpuid_model = 0x651,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 25,
.mem_write_cycles = 25,
.cache_read_cycles = 12,
.cache_write_cycles = 12,
.atclk_div = 36
},
{
.name = "333",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 333333333,
.multi = 5.0,
.voltage = 2050,
.edx_reset = 0x651,
.cpuid_model = 0x651,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 27,
.mem_write_cycles = 27,
.cache_read_cycles = 13,
.cache_write_cycles = 13,
.atclk_div = 40
},
{
.name = "350",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 350000000,
.multi = 3.5,
.voltage = 2050,
.edx_reset = 0x651,
.cpuid_model = 0x651,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 32,
.mem_write_cycles = 32,
.cache_read_cycles = 11,
.cache_write_cycles = 11,
.atclk_div = 42
},
{
.name = "400",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 400000000,
.multi = 4.0,
.voltage = 2050,
.edx_reset = 0x652,
.cpuid_model = 0x652,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 36,
.mem_write_cycles = 36,
.cache_read_cycles = 12,
.cache_write_cycles = 12,
.atclk_div = 48
},
{
.name = "450",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 450000000,
.multi = 4.5,
.voltage = 2050,
.edx_reset = 0x652,
.cpuid_model = 0x652,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 41,
.mem_write_cycles = 41,
.cache_read_cycles = 14,
.cache_write_cycles = 14,
.atclk_div = 54
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_SLOT1,
.manufacturer = "Intel",
.name = "Celeron (Covington)",
.internal_name = "celeron_covington",
.cpus = (const CPU[]) {
{ /* out of spec */
.name = "66",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 66666666,
.multi = 1.0,
.voltage = 2050,
.edx_reset = 0x652,
.cpuid_model = 0x652,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 6,
.mem_write_cycles = 6,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 8
},
{ /* out of spec */
.name = "100",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 100000000,
.multi = 1.5,
.voltage = 2050,
.edx_reset = 0x652,
.cpuid_model = 0x652,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 9,
.mem_write_cycles = 9,
.cache_read_cycles = 9,
.cache_write_cycles = 9,
.atclk_div = 12
},
{ /* out of spec */
.name = "133",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 133333333,
.multi = 2.0,
.voltage = 2050,
.edx_reset = 0x652,
.cpuid_model = 0x652,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 12,
.mem_write_cycles = 12,
.cache_read_cycles = 12,
.cache_write_cycles = 12,
.atclk_div = 16
},
{ /* out of spec */
.name = "166",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 166666666,
.multi = 2.5,
.voltage = 2050,
.edx_reset = 0x652,
.cpuid_model = 0x652,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 15,
.mem_write_cycles = 15,
.cache_read_cycles = 15,
.cache_write_cycles = 15,
.atclk_div = 20
},
{ /* out of spec */
.name = "200",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 200000000,
.multi = 3.0,
.voltage = 2050,
.edx_reset = 0x652,
.cpuid_model = 0x652,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 18,
.mem_write_cycles = 18,
.cache_read_cycles = 18,
.cache_write_cycles = 18,
.atclk_div = 24
},
{ /* out of spec */
.name = "233",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 233333333,
.multi = 3.5,
.voltage = 2050,
.edx_reset = 0x652,
.cpuid_model = 0x652,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 21,
.mem_write_cycles = 21,
.cache_read_cycles = 21,
.cache_write_cycles = 21,
.atclk_div = 28
},
{
.name = "266",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 266666666,
.multi = 4.0,
.voltage = 2050,
.edx_reset = 0x650,
.cpuid_model = 0x650,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 24,
.mem_write_cycles = 24,
.cache_read_cycles = 24,
.cache_write_cycles = 24,
.atclk_div = 32
},
{
.name = "300",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 300000000,
.multi = 4.5,
.voltage = 2050,
.edx_reset = 0x651,
.cpuid_model = 0x651,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 25,
.mem_write_cycles = 25,
.cache_read_cycles = 25,
.cache_write_cycles = 25,
.atclk_div = 36
},
{ .name = "", 0 }
}
}, {
.package = CPU_PKG_SLOT2,
.manufacturer = "Intel",
.name = "Pentium II Xeon",
.internal_name = "pentium2_xeon",
.cpus = (const CPU[]) {
{ /* out of spec */
.name = "100",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 100000000,
.multi = 1.0,
.voltage = 2050,
.edx_reset = 0x652,
.cpuid_model = 0x652,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 9,
.mem_write_cycles = 9,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 12
},
{ /* out of spec */
.name = "150",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 150000000,
.multi = 1.5,
.voltage = 2050,
.edx_reset = 0x652,
.cpuid_model = 0x652,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 14,
.mem_write_cycles = 14,
.cache_read_cycles = 4,
.cache_write_cycles = 4,
.atclk_div = 18
},
{ /* out of spec */
.name = "200",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 200000000,
.multi = 2.0,
.voltage = 2050,
.edx_reset = 0x652,
.cpuid_model = 0x652,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 18,
.mem_write_cycles = 18,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 24
},
{ /* out of spec */
.name = "250",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 250000000,
.multi = 2.5,
.voltage = 2050,
.edx_reset = 0x652,
.cpuid_model = 0x652,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 22,
.mem_write_cycles = 22,
.cache_read_cycles = 7,
.cache_write_cycles = 7,
.atclk_div = 30
},
{ /* out of spec */
.name = "300",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 300000000,
.multi = 3.0,
.voltage = 2050,
.edx_reset = 0x652,
.cpuid_model = 0x652,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 27,
.mem_write_cycles = 27,
.cache_read_cycles = 9,
.cache_write_cycles = 9,
.atclk_div = 36
},
{ /* out of spec */
.name = "350",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 350000000,
.multi = 3.5,
.voltage = 2050,
.edx_reset = 0x652,
.cpuid_model = 0x652,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 32,
.mem_write_cycles = 32,
.cache_read_cycles = 10,
.cache_write_cycles = 10,
.atclk_div = 42
},
{
.name = "400",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 400000000,
.multi = 4.0,
.voltage = 2050,
.edx_reset = 0x652,
.cpuid_model = 0x652,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 36,
.mem_write_cycles = 36,
.cache_read_cycles = 12,
.cache_write_cycles = 12,
.atclk_div = 48
},
{
.name = "450",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 450000000,
.multi = 4.5,
.voltage = 2050,
.edx_reset = 0x652,
.cpuid_model = 0x652,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC,
.mem_read_cycles = 41,
.mem_write_cycles = 41,
.cache_read_cycles = 14,
.cache_write_cycles = 14,
.atclk_div = 54
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_SOCKET370,
.manufacturer = "Intel",
.name = "Celeron (Mendocino)",
.internal_name = "celeron_mendocino",
.cpus = (const CPU[]) {
{ /* out of spec */
.name = "66",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 66666666,
.multi = 1.0,
.voltage = 2050,
.edx_reset = 0x665,
.cpuid_model = 0x665,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 6,
.mem_write_cycles = 6,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 8
},
{ /* out of spec */
.name = "100",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 100000000,
.multi = 1.5,
.voltage = 2050,
.edx_reset = 0x665,
.cpuid_model = 0x665,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 8,
.mem_write_cycles = 8,
.cache_read_cycles = 4,
.cache_write_cycles = 4,
.atclk_div = 12
},
{ /* out of spec */
.name = "133",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 133333333,
.multi = 2.0,
.voltage = 2050,
.edx_reset = 0x665,
.cpuid_model = 0x665,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 11,
.mem_write_cycles = 11,
.cache_read_cycles = 5,
.cache_write_cycles = 5,
.atclk_div = 16
},
{ /* out of spec */
.name = "166",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 166666666,
.multi = 2.5,
.voltage = 2050,
.edx_reset = 0x665,
.cpuid_model = 0x665,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 14,
.mem_write_cycles = 14,
.cache_read_cycles = 7,
.cache_write_cycles = 7,
.atclk_div = 20
},
{ /* out of spec */
.name = "200",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 200000000,
.multi = 3.0,
.voltage = 2050,
.edx_reset = 0x665,
.cpuid_model = 0x665,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 17,
.mem_write_cycles = 17,
.cache_read_cycles = 8,
.cache_write_cycles = 8,
.atclk_div = 24
},
{ /* out of spec */
.name = "233",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 233333333,
.multi = 3.5,
.voltage = 2050,
.edx_reset = 0x665,
.cpuid_model = 0x665,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 19,
.mem_write_cycles = 19,
.cache_read_cycles = 9,
.cache_write_cycles = 9,
.atclk_div = 28
},
{ /* out of spec */
.name = "266",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 266666666,
.multi = 4.0,
.voltage = 2050,
.edx_reset = 0x665,
.cpuid_model = 0x665,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 22,
.mem_write_cycles = 22,
.cache_read_cycles = 11,
.cache_write_cycles = 11,
.atclk_div = 32
},
{
.name = "300A",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 300000000,
.multi = 4.5,
.voltage = 2050,
.edx_reset = 0x665,
.cpuid_model = 0x665,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 25,
.mem_write_cycles = 25,
.cache_read_cycles = 12,
.cache_write_cycles = 12,
.atclk_div = 36
},
{
.name = "333",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 333333333,
.multi = 5.0,
.voltage = 2050,
.edx_reset = 0x665,
.cpuid_model = 0x665,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 27,
.mem_write_cycles = 27,
.cache_read_cycles = 13,
.cache_write_cycles = 13,
.atclk_div = 40
},
{
.name = "366",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 366666666,
.multi = 5.5,
.voltage = 2050,
.edx_reset = 0x665,
.cpuid_model = 0x665,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 33,
.mem_write_cycles = 33,
.cache_read_cycles = 17,
.cache_write_cycles = 17,
.atclk_div = 44
},
{
.name = "400",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 400000000,
.multi = 6.0,
.voltage = 2050,
.edx_reset = 0x665,
.cpuid_model = 0x665,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 36,
.mem_write_cycles = 36,
.cache_read_cycles = 12,
.cache_write_cycles = 12,
.atclk_div = 48
},
{
.name = "433",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 433333333,
.multi = 6.5,
.voltage = 2050,
.edx_reset = 0x665,
.cpuid_model = 0x665,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 39,
.mem_write_cycles = 39,
.cache_read_cycles = 13,
.cache_write_cycles = 13,
.atclk_div = 51
},
{
.name = "466",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 466666666,
.multi = 7.0,
.voltage = 2050,
.edx_reset = 0x665,
.cpuid_model = 0x665,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 42,
.mem_write_cycles = 42,
.cache_read_cycles = 14,
.cache_write_cycles = 14,
.atclk_div = 56
},
{
.name = "500",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 500000000,
.multi = 7.5,
.voltage = 2050,
.edx_reset = 0x665,
.cpuid_model = 0x665,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 45,
.mem_write_cycles = 45,
.cache_read_cycles = 15,
.cache_write_cycles = 15,
.atclk_div = 60
},
{
.name = "533",
.cpu_type = CPU_PENTIUM2D,
.fpus = fpus_internal,
.rspeed = 533333333,
.multi = 8.0,
.voltage = 2050,
.edx_reset = 0x665,
.cpuid_model = 0x665,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_REQUIRES_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 48,
.mem_write_cycles = 48,
.cache_read_cycles = 17,
.cache_write_cycles = 17,
.atclk_div = 64
},
{ .name = "", 0 }
}
},
{
.package = CPU_PKG_SOCKET370,
.manufacturer = "VIA",
.name = "Cyrix III",
.internal_name = "c3_samuel",
.cpus = (const CPU[]) {
{ /* out of multiplier range */
.name = "66",
.cpu_type = CPU_CYRIX3S,
.fpus = fpus_internal,
.rspeed = 66666666,
.multi = 1.0,
.voltage = 2050,
.edx_reset = 0x660,
.cpuid_model = 0x660,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 6,
.mem_write_cycles = 6,
.cache_read_cycles = 3,
.cache_write_cycles = 3,
.atclk_div = 8
},
{ /* out of multiplier range */
.name = "100",
.cpu_type = CPU_CYRIX3S,
.fpus = fpus_internal,
.rspeed = 100000000,
.multi = 1.5,
.voltage = 2050,
.edx_reset = 0x660,
.cpuid_model = 0x660,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 9,
.mem_write_cycles = 9,
.cache_read_cycles = 4,
.cache_write_cycles = 4,
.atclk_div = 12
},
{ /* out of multiplier range */
.name = "133",
.cpu_type = CPU_CYRIX3S,
.fpus = fpus_internal,
.rspeed = 133333333,
.multi = 2.0,
.voltage = 2050,
.edx_reset = 0x660,
.cpuid_model = 0x660,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 12,
.mem_write_cycles = 12,
.cache_read_cycles = 6,
.cache_write_cycles = 6,
.atclk_div = 16
},
{ /* out of multiplier range */
.name = "166",
.cpu_type = CPU_CYRIX3S,
.fpus = fpus_internal,
.rspeed = 166666666,
.multi = 2.5,
.voltage = 2050,
.edx_reset = 0x660,
.cpuid_model = 0x660,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 15,
.mem_write_cycles = 15,
.cache_read_cycles = 7,
.cache_write_cycles = 7,
.atclk_div = 20
},
{ /* out of multiplier range */
.name = "200",
.cpu_type = CPU_CYRIX3S,
.fpus = fpus_internal,
.rspeed = 200000000,
.multi = 3.0,
.voltage = 2050,
.edx_reset = 0x660,
.cpuid_model = 0x660,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 18,
.mem_write_cycles = 18,
.cache_read_cycles = 8,
.cache_write_cycles = 8,
.atclk_div = 24
},
{ /* out of multiplier range */
.name = "233",
.cpu_type = CPU_CYRIX3S,
.fpus = fpus_internal,
.rspeed = 233333333,
.multi = 3.5,
.voltage = 2050,
.edx_reset = 0x660,
.cpuid_model = 0x660,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 21,
.mem_write_cycles = 21,
.cache_read_cycles = 9,
.cache_write_cycles = 9,
.atclk_div = 28
},
{ /* out of multiplier range */
.name = "266",
.cpu_type = CPU_CYRIX3S,
.fpus = fpus_internal,
.rspeed = 266666666,
.multi = 4.0,
.voltage = 2050,
.edx_reset = 0x660,
.cpuid_model = 0x660,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 24,
.mem_write_cycles = 24,
.cache_read_cycles = 12,
.cache_write_cycles = 12,
.atclk_div = 32
},
{ /* out of spec */
.name = "300",
.cpu_type = CPU_CYRIX3S,
.fpus = fpus_internal,
.rspeed = 300000000,
.multi = 4.5,
.voltage = 2050,
.edx_reset = 0x660,
.cpuid_model = 0x660,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 27,
.mem_write_cycles = 27,
.cache_read_cycles = 13,
.cache_write_cycles = 13,
.atclk_div = 36
},
{ /* out of spec */
.name = "333",
.cpu_type = CPU_CYRIX3S,
.fpus = fpus_internal,
.rspeed = 333333333,
.multi = 5.0,
.voltage = 2050,
.edx_reset = 0x662,
.cpuid_model = 0x662,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 30,
.mem_write_cycles = 30,
.cache_read_cycles = 15,
.cache_write_cycles = 15,
.atclk_div = 40
},
{ /* out of spec */
.name = "366",
.cpu_type = CPU_CYRIX3S,
.fpus = fpus_internal,
.rspeed = 366666666,
.multi = 5.5,
.voltage = 2050,
.edx_reset = 0x662,
.cpuid_model = 0x662,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 33,
.mem_write_cycles = 33,
.cache_read_cycles = 16,
.cache_write_cycles = 16,
.atclk_div = 44
},
{
.name = "400",
.cpu_type = CPU_CYRIX3S,
.fpus = fpus_internal,
.rspeed = 400000000,
.multi = 6.0,
.voltage = 2050,
.edx_reset = 0x660,
.cpuid_model = 0x660,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 36,
.mem_write_cycles = 36,
.cache_read_cycles = 17,
.cache_write_cycles = 17,
.atclk_div = 48
},
{ /* out of spec */
.name = "433",
.cpu_type = CPU_CYRIX3S,
.fpus = fpus_internal,
.rspeed = 433333333,
.multi = 6.5,
.voltage = 2050,
.edx_reset = 0x660,
.cpuid_model = 0x660,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 39,
.mem_write_cycles = 39,
.cache_read_cycles = 18,
.cache_write_cycles = 18,
.atclk_div = 52
},
{
.name = "450",
.cpu_type = CPU_CYRIX3S,
.fpus = fpus_internal,
.rspeed = 450000000,
.multi = 4.5,
.voltage = 2050,
.edx_reset = 0x660,
.cpuid_model = 0x660,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 41,
.mem_write_cycles = 41,
.cache_read_cycles = 14,
.cache_write_cycles = 14,
.atclk_div = 54
},
{ /* out of spec */
.name = "466",
.cpu_type = CPU_CYRIX3S,
.fpus = fpus_internal,
.rspeed = 466666666,
.multi = 6.5,
.voltage = 2050,
.edx_reset = 0x660,
.cpuid_model = 0x660,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 42,
.mem_write_cycles = 42,
.cache_read_cycles = 14,
.cache_write_cycles = 14,
.atclk_div = 56
},
{
.name = "500",
.cpu_type = CPU_CYRIX3S,
.fpus = fpus_internal,
.rspeed = 500000000,
.multi = 5.0,
.voltage = 2050,
.edx_reset = 0x662,
.cpuid_model = 0x662,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 45,
.mem_write_cycles = 45,
.cache_read_cycles = 15,
.cache_write_cycles = 15,
.atclk_div = 60
},
{ /* out of spec */
.name = "533",
.cpu_type = CPU_CYRIX3S,
.fpus = fpus_internal,
.rspeed = 533333333,
.multi = 8.0,
.voltage = 2050,
.edx_reset = 0x660,
.cpuid_model = 0x660,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 48,
.mem_write_cycles = 48,
.cache_read_cycles = 15,
.cache_write_cycles = 15,
.atclk_div = 64
},
{
.name = "550",
.cpu_type = CPU_CYRIX3S,
.fpus = fpus_internal,
.rspeed = 550000000,
.multi = 5.5,
.voltage = 2050,
.edx_reset = 0x662,
.cpuid_model = 0x662,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 50,
.mem_write_cycles = 50,
.cache_read_cycles = 17,
.cache_write_cycles = 17,
.atclk_div = 66
},
{
.name = "600/100",
.cpu_type = CPU_CYRIX3S,
.fpus = fpus_internal,
.rspeed = 600000000,
.multi = 6.0,
.voltage = 2050,
.edx_reset = 0x662,
.cpuid_model = 0x662,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 54,
.mem_write_cycles = 54,
.cache_read_cycles = 18,
.cache_write_cycles = 18,
.atclk_div = 72
},
{
.name = "600/133",
.cpu_type = CPU_CYRIX3S,
.fpus = fpus_internal,
.rspeed = 600000000,
.multi = 4.5,
.voltage = 2050,
.edx_reset = 0x663,
.cpuid_model = 0x663,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 54,
.mem_write_cycles = 54,
.cache_read_cycles = 13,
.cache_write_cycles = 13,
.atclk_div = 72
},
{
.name = "650",
.cpu_type = CPU_CYRIX3S,
.fpus = fpus_internal,
.rspeed = 650000000,
.multi = 6.5,
.voltage = 2050,
.edx_reset = 0x663,
.cpuid_model = 0x663,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 58,
.mem_write_cycles = 58,
.cache_read_cycles = 20,
.cache_write_cycles = 20,
.atclk_div = 78
},
{
.name = "667",
.cpu_type = CPU_CYRIX3S,
.fpus = fpus_internal,
.rspeed = 666666667,
.multi = 5.0,
.voltage = 2050,
.edx_reset = 0x663,
.cpuid_model = 0x663,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 60,
.mem_write_cycles = 60,
.cache_read_cycles = 16,
.cache_write_cycles = 16,
.atclk_div = 80
},
{
.name = "700",
.cpu_type = CPU_CYRIX3S,
.fpus = fpus_internal,
.rspeed = 700000000,
.multi = 7.0,
.voltage = 2050,
.edx_reset = 0x663,
.cpuid_model = 0x663,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 63,
.mem_write_cycles = 63,
.cache_read_cycles = 21,
.cache_write_cycles = 21,
.atclk_div = 84
},
{
.name = "733",
.cpu_type = CPU_CYRIX3S,
.fpus = fpus_internal,
.rspeed = 733333333,
.multi = 5.5,
.voltage = 2050,
.edx_reset = 0x663,
.cpuid_model = 0x663,
.cyrix_id = 0,
.cpu_flags = CPU_SUPPORTS_DYNAREC | CPU_FIXED_MULTIPLIER,
.mem_read_cycles = 66,
.mem_write_cycles = 66,
.cache_read_cycles = 18,
.cache_write_cycles = 18,
.atclk_div = 88
},
{ .name = "", 0 }
}
},
{ .package = 0, 0 }
// clang-format on
};
``` | /content/code_sandbox/src/cpu/cpu_table.c | c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 76,922 |
```objective-c
/* PAND mm, mm/m64 — 16-bit addressing form.
   Bitwise-ANDs the 64-bit source (MMX register or memory) into the
   destination MMX register selected by the mod/rm reg field. */
static int
opPAND_a16(uint32_t fetchdat)
{
    MMX_REG src;    /* filled in by MMX_GETSRC() below */
    MMX_REG *dst;
    MMX_ENTER();              /* enter MMX mode; presumably faults if FPU unavailable — macro defined elsewhere */
    fetch_ea_16(fetchdat);    /* decode 16-bit effective address from mod/rm */
    dst = MMX_GETREGP(cpu_reg);
    MMX_GETSRC();             /* loads `src` from reg or memory; assumed to return early on fault — TODO confirm macro */
    dst->q &= src.q;          /* 64-bit bitwise AND */
    MMX_SETEXP(cpu_reg);      /* commit/refresh destination register state — macro defined elsewhere */
    return 0;
}
/* PAND mm, mm/m64 — 32-bit addressing form.
   Identical to opPAND_a16 except the effective address is decoded with
   32-bit addressing rules. */
static int
opPAND_a32(uint32_t fetchdat)
{
    MMX_REG src;    /* filled in by MMX_GETSRC() below */
    MMX_REG *dst;
    MMX_ENTER();              /* enter MMX mode; presumably faults if FPU unavailable — macro defined elsewhere */
    fetch_ea_32(fetchdat);    /* decode 32-bit effective address from mod/rm */
    dst = MMX_GETREGP(cpu_reg);
    MMX_GETSRC();             /* loads `src` from reg or memory; assumed to return early on fault — TODO confirm macro */
    dst->q &= src.q;          /* 64-bit bitwise AND */
    MMX_SETEXP(cpu_reg);      /* commit/refresh destination register state — macro defined elsewhere */
    return 0;
}
/* PANDN mm, mm/m64 — 16-bit addressing form.
   dst = (~dst) AND src, matching x86 PANDN semantics (the destination,
   not the source, is complemented). */
static int
opPANDN_a16(uint32_t fetchdat)
{
    MMX_REG src;    /* filled in by MMX_GETSRC() below */
    MMX_REG *dst;
    MMX_ENTER();              /* enter MMX mode; presumably faults if FPU unavailable — macro defined elsewhere */
    fetch_ea_16(fetchdat);    /* decode 16-bit effective address from mod/rm */
    dst = MMX_GETREGP(cpu_reg);
    MMX_GETSRC();             /* loads `src` from reg or memory; assumed to return early on fault — TODO confirm macro */
    dst->q = ~dst->q & src.q; /* AND-NOT: complement destination, then AND with source */
    MMX_SETEXP(cpu_reg);      /* commit/refresh destination register state — macro defined elsewhere */
    return 0;
}
/* PANDN mm, mm/m64 — 32-bit addressing form.
   Identical to opPANDN_a16 except the effective address is decoded with
   32-bit addressing rules. */
static int
opPANDN_a32(uint32_t fetchdat)
{
    MMX_REG src;    /* filled in by MMX_GETSRC() below */
    MMX_REG *dst;
    MMX_ENTER();              /* enter MMX mode; presumably faults if FPU unavailable — macro defined elsewhere */
    fetch_ea_32(fetchdat);    /* decode 32-bit effective address from mod/rm */
    dst = MMX_GETREGP(cpu_reg);
    MMX_GETSRC();             /* loads `src` from reg or memory; assumed to return early on fault — TODO confirm macro */
    dst->q = ~dst->q & src.q; /* AND-NOT: complement destination, then AND with source */
    MMX_SETEXP(cpu_reg);      /* commit/refresh destination register state — macro defined elsewhere */
    return 0;
}
/* POR mm, mm/m64 — 16-bit addressing form.
   Bitwise-ORs the 64-bit source (MMX register or memory) into the
   destination MMX register selected by the mod/rm reg field. */
static int
opPOR_a16(uint32_t fetchdat)
{
    MMX_REG src;    /* filled in by MMX_GETSRC() below */
    MMX_REG *dst;
    MMX_ENTER();              /* enter MMX mode; presumably faults if FPU unavailable — macro defined elsewhere */
    fetch_ea_16(fetchdat);    /* decode 16-bit effective address from mod/rm */
    dst = MMX_GETREGP(cpu_reg);
    MMX_GETSRC();             /* loads `src` from reg or memory; assumed to return early on fault — TODO confirm macro */
    dst->q |= src.q;          /* 64-bit bitwise OR */
    MMX_SETEXP(cpu_reg);      /* commit/refresh destination register state — macro defined elsewhere */
    return 0;
}
/* POR mm, mm/m64 — 32-bit addressing form.
   Identical to opPOR_a16 except the effective address is decoded with
   32-bit addressing rules. */
static int
opPOR_a32(uint32_t fetchdat)
{
    MMX_REG src;    /* filled in by MMX_GETSRC() below */
    MMX_REG *dst;
    MMX_ENTER();              /* enter MMX mode; presumably faults if FPU unavailable — macro defined elsewhere */
    fetch_ea_32(fetchdat);    /* decode 32-bit effective address from mod/rm */
    dst = MMX_GETREGP(cpu_reg);
    MMX_GETSRC();             /* loads `src` from reg or memory; assumed to return early on fault — TODO confirm macro */
    dst->q |= src.q;          /* 64-bit bitwise OR */
    MMX_SETEXP(cpu_reg);      /* commit/refresh destination register state — macro defined elsewhere */
    return 0;
}
/* PXOR mm, mm/m64 — 16-bit addressing form.
   Bitwise-XORs the 64-bit source (MMX register or memory) into the
   destination MMX register selected by the mod/rm reg field. */
static int
opPXOR_a16(uint32_t fetchdat)
{
    MMX_REG src;    /* filled in by MMX_GETSRC() below */
    MMX_REG *dst;
    MMX_ENTER();              /* enter MMX mode; presumably faults if FPU unavailable — macro defined elsewhere */
    fetch_ea_16(fetchdat);    /* decode 16-bit effective address from mod/rm */
    dst = MMX_GETREGP(cpu_reg);
    MMX_GETSRC();             /* loads `src` from reg or memory; assumed to return early on fault — TODO confirm macro */
    dst->q ^= src.q;          /* 64-bit bitwise XOR */
    MMX_SETEXP(cpu_reg);      /* commit/refresh destination register state — macro defined elsewhere */
    return 0;
}
/* PXOR mm, mm/m64 — 32-bit addressing form.
   Identical to opPXOR_a16 except the effective address is decoded with
   32-bit addressing rules. */
static int
opPXOR_a32(uint32_t fetchdat)
{
    MMX_REG src;    /* filled in by MMX_GETSRC() below */
    MMX_REG *dst;
    MMX_ENTER();              /* enter MMX mode; presumably faults if FPU unavailable — macro defined elsewhere */
    fetch_ea_32(fetchdat);    /* decode 32-bit effective address from mod/rm */
    dst = MMX_GETREGP(cpu_reg);
    MMX_GETSRC();             /* loads `src` from reg or memory; assumed to return early on fault — TODO confirm macro */
    dst->q ^= src.q;          /* 64-bit bitwise XOR */
    MMX_SETEXP(cpu_reg);      /* commit/refresh destination register state — macro defined elsewhere */
    return 0;
}
``` | /content/code_sandbox/src/cpu/x86_ops_mmx_logic.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 675 |
```objective-c
#define REP_OPS(size, CNT_REG, SRC_REG, DEST_REG) \
static int opREP_INSB_##size(uint32_t fetchdat) \
{ \
int reads = 0, writes = 0, total_cycles = 0; \
\
addr64 = 0x00000000; \
\
if (CNT_REG > 0) { \
uint8_t temp; \
\
SEG_CHECK_WRITE(&cpu_state.seg_es); \
check_io_perm(DX, 1); \
CHECK_WRITE(&cpu_state.seg_es, DEST_REG, DEST_REG); \
high_page = 0; \
do_mmut_wb(es, DEST_REG, &addr64); \
if (cpu_state.abrt) \
return 1; \
temp = inb(DX); \
writememb_n(es, DEST_REG, addr64, temp); \
if (cpu_state.abrt) \
return 1; \
\
if (cpu_state.flags & D_FLAG) \
DEST_REG--; \
else \
DEST_REG++; \
CNT_REG--; \
cycles -= 15; \
reads++; \
writes++; \
total_cycles += 15; \
} \
PREFETCH_RUN(total_cycles, 1, -1, reads, 0, writes, 0, 0); \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
static int opREP_INSW_##size(uint32_t fetchdat) \
{ \
int reads = 0, writes = 0, total_cycles = 0; \
\
addr64a[0] = addr64a[1] = 0x00000000; \
\
if (CNT_REG > 0) { \
uint16_t temp; \
\
SEG_CHECK_WRITE(&cpu_state.seg_es); \
check_io_perm(DX, 2); \
CHECK_WRITE(&cpu_state.seg_es, DEST_REG, DEST_REG + 1UL); \
high_page = 0; \
do_mmut_ww(es, DEST_REG, addr64a); \
if (cpu_state.abrt) \
return 1; \
temp = inw(DX); \
writememw_n(es, DEST_REG, addr64a, temp); \
if (cpu_state.abrt) \
return 1; \
\
if (cpu_state.flags & D_FLAG) \
DEST_REG -= 2; \
else \
DEST_REG += 2; \
CNT_REG--; \
cycles -= 15; \
reads++; \
writes++; \
total_cycles += 15; \
} \
PREFETCH_RUN(total_cycles, 1, -1, reads, 0, writes, 0, 0); \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
static int opREP_INSL_##size(uint32_t fetchdat) \
{ \
int reads = 0, writes = 0, total_cycles = 0; \
\
addr64a[0] = addr64a[1] = addr64a[2] = addr64a[3] = 0x00000000; \
\
if (CNT_REG > 0) { \
uint32_t temp; \
\
SEG_CHECK_WRITE(&cpu_state.seg_es); \
check_io_perm(DX, 4); \
CHECK_WRITE(&cpu_state.seg_es, DEST_REG, DEST_REG + 3UL); \
high_page = 0; \
do_mmut_wl(es, DEST_REG, addr64a); \
if (cpu_state.abrt) \
return 1; \
temp = inl(DX); \
writememl_n(es, DEST_REG, addr64a, temp); \
if (cpu_state.abrt) \
return 1; \
\
if (cpu_state.flags & D_FLAG) \
DEST_REG -= 4; \
else \
DEST_REG += 4; \
CNT_REG--; \
cycles -= 15; \
reads++; \
writes++; \
total_cycles += 15; \
} \
PREFETCH_RUN(total_cycles, 1, -1, 0, reads, 0, writes, 0); \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
\
static int opREP_OUTSB_##size(uint32_t fetchdat) \
{ \
int reads = 0, writes = 0, total_cycles = 0; \
\
if (CNT_REG > 0) { \
uint8_t temp; \
SEG_CHECK_READ(cpu_state.ea_seg); \
CHECK_READ(cpu_state.ea_seg, SRC_REG, SRC_REG); \
temp = readmemb(cpu_state.ea_seg->base, SRC_REG); \
if (cpu_state.abrt) \
return 1; \
check_io_perm(DX, 1); \
outb(DX, temp); \
if (cpu_state.flags & D_FLAG) \
SRC_REG--; \
else \
SRC_REG++; \
CNT_REG--; \
cycles -= 14; \
reads++; \
writes++; \
total_cycles += 14; \
} \
PREFETCH_RUN(total_cycles, 1, -1, reads, 0, writes, 0, 0); \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
static int opREP_OUTSW_##size(uint32_t fetchdat) \
{ \
int reads = 0, writes = 0, total_cycles = 0; \
\
if (CNT_REG > 0) { \
uint16_t temp; \
SEG_CHECK_READ(cpu_state.ea_seg); \
CHECK_READ(cpu_state.ea_seg, SRC_REG, SRC_REG + 1UL); \
temp = readmemw(cpu_state.ea_seg->base, SRC_REG); \
if (cpu_state.abrt) \
return 1; \
check_io_perm(DX, 2); \
outw(DX, temp); \
if (cpu_state.flags & D_FLAG) \
SRC_REG -= 2; \
else \
SRC_REG += 2; \
CNT_REG--; \
cycles -= 14; \
reads++; \
writes++; \
total_cycles += 14; \
} \
PREFETCH_RUN(total_cycles, 1, -1, reads, 0, writes, 0, 0); \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
static int opREP_OUTSL_##size(uint32_t fetchdat) \
{ \
int reads = 0, writes = 0, total_cycles = 0; \
\
if (CNT_REG > 0) { \
uint32_t temp; \
SEG_CHECK_READ(cpu_state.ea_seg); \
CHECK_READ(cpu_state.ea_seg, SRC_REG, SRC_REG + 3UL); \
temp = readmeml(cpu_state.ea_seg->base, SRC_REG); \
if (cpu_state.abrt) \
return 1; \
check_io_perm(DX, 4); \
outl(DX, temp); \
if (cpu_state.flags & D_FLAG) \
SRC_REG -= 4; \
else \
SRC_REG += 4; \
CNT_REG--; \
cycles -= 14; \
reads++; \
writes++; \
total_cycles += 14; \
} \
PREFETCH_RUN(total_cycles, 1, -1, 0, reads, 0, writes, 0); \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
\
static int opREP_MOVSB_##size(uint32_t fetchdat) \
{ \
int reads = 0, writes = 0, total_cycles = 0; \
int cycles_end = cycles - ((is386 && cpu_use_dynarec) ? 1000 : 100); \
addr64 = addr64_2 = 0x00000000; \
if (trap) \
cycles_end = cycles + 1; /*Force the instruction to end after only one iteration when trap flag set*/ \
if (CNT_REG > 0) { \
SEG_CHECK_READ(cpu_state.ea_seg); \
SEG_CHECK_WRITE(&cpu_state.seg_es); \
} \
while (CNT_REG > 0) { \
uint8_t temp; \
\
CHECK_READ_REP(cpu_state.ea_seg, SRC_REG, SRC_REG); \
CHECK_WRITE_REP(&cpu_state.seg_es, DEST_REG, DEST_REG); \
high_page = 0; \
do_mmut_rb(cpu_state.ea_seg->base, SRC_REG, &addr64); \
if (cpu_state.abrt) \
break; \
do_mmut_wb(es, DEST_REG, &addr64_2); \
if (cpu_state.abrt) \
break; \
temp = readmemb_n(cpu_state.ea_seg->base, SRC_REG, addr64); \
if (cpu_state.abrt) \
return 1; \
writememb_n(es, DEST_REG, addr64_2, temp); \
if (cpu_state.abrt) \
return 1; \
\
if (cpu_state.flags & D_FLAG) { \
DEST_REG--; \
SRC_REG--; \
} else { \
DEST_REG++; \
SRC_REG++; \
} \
CNT_REG--; \
cycles -= is486 ? 3 : 4; \
reads++; \
writes++; \
total_cycles += is486 ? 3 : 4; \
if (cycles < cycles_end) \
break; \
} \
PREFETCH_RUN(total_cycles, 1, -1, reads, 0, writes, 0, 0); \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
static int opREP_MOVSW_##size(uint32_t fetchdat) \
{ \
int reads = 0, writes = 0, total_cycles = 0; \
int cycles_end = cycles - ((is386 && cpu_use_dynarec) ? 1000 : 100); \
addr64a[0] = addr64a[1] = 0x00000000; \
addr64a_2[0] = addr64a_2[1] = 0x00000000; \
if (trap) \
cycles_end = cycles + 1; /*Force the instruction to end after only one iteration when trap flag set*/ \
if (CNT_REG > 0) { \
SEG_CHECK_READ(cpu_state.ea_seg); \
SEG_CHECK_WRITE(&cpu_state.seg_es); \
} \
while (CNT_REG > 0) { \
uint16_t temp; \
\
CHECK_READ_REP(cpu_state.ea_seg, SRC_REG, SRC_REG + 1UL); \
CHECK_WRITE_REP(&cpu_state.seg_es, DEST_REG, DEST_REG + 1UL); \
high_page = 0; \
do_mmut_rw(cpu_state.ea_seg->base, SRC_REG, addr64a); \
if (cpu_state.abrt) \
break; \
do_mmut_ww(es, DEST_REG, addr64a_2); \
if (cpu_state.abrt) \
break; \
temp = readmemw_n(cpu_state.ea_seg->base, SRC_REG, addr64a); \
if (cpu_state.abrt) \
return 1; \
writememw_n(es, DEST_REG, addr64a_2, temp); \
if (cpu_state.abrt) \
return 1; \
\
if (cpu_state.flags & D_FLAG) { \
DEST_REG -= 2; \
SRC_REG -= 2; \
} else { \
DEST_REG += 2; \
SRC_REG += 2; \
} \
CNT_REG--; \
cycles -= is486 ? 3 : 4; \
reads++; \
writes++; \
total_cycles += is486 ? 3 : 4; \
if (cycles < cycles_end) \
break; \
} \
PREFETCH_RUN(total_cycles, 1, -1, reads, 0, writes, 0, 0); \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
static int opREP_MOVSL_##size(uint32_t fetchdat) \
{ \
int reads = 0, writes = 0, total_cycles = 0; \
int cycles_end = cycles - ((is386 && cpu_use_dynarec) ? 1000 : 100); \
addr64a[0] = addr64a[1] = addr64a[2] = addr64a[3] = 0x00000000; \
addr64a_2[0] = addr64a_2[1] = addr64a_2[2] = addr64a_2[3] = 0x00000000; \
if (trap) \
cycles_end = cycles + 1; /*Force the instruction to end after only one iteration when trap flag set*/ \
if (CNT_REG > 0) { \
SEG_CHECK_READ(cpu_state.ea_seg); \
SEG_CHECK_WRITE(&cpu_state.seg_es); \
} \
while (CNT_REG > 0) { \
uint32_t temp; \
\
CHECK_READ_REP(cpu_state.ea_seg, SRC_REG, SRC_REG + 3UL); \
CHECK_WRITE_REP(&cpu_state.seg_es, DEST_REG, DEST_REG + 3UL); \
high_page = 0; \
do_mmut_rl(cpu_state.ea_seg->base, SRC_REG, addr64a); \
if (cpu_state.abrt) \
break; \
do_mmut_wl(es, DEST_REG, addr64a_2); \
if (cpu_state.abrt) \
break; \
temp = readmeml_n(cpu_state.ea_seg->base, SRC_REG, addr64a); \
if (cpu_state.abrt) \
return 1; \
writememl_n(es, DEST_REG, addr64a_2, temp); \
if (cpu_state.abrt) \
return 1; \
\
if (cpu_state.flags & D_FLAG) { \
DEST_REG -= 4; \
SRC_REG -= 4; \
} else { \
DEST_REG += 4; \
SRC_REG += 4; \
} \
CNT_REG--; \
cycles -= is486 ? 3 : 4; \
reads++; \
writes++; \
total_cycles += is486 ? 3 : 4; \
if (cycles < cycles_end) \
break; \
} \
PREFETCH_RUN(total_cycles, 1, -1, reads, 0, writes, 0, 0); \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
\
static int opREP_STOSB_##size(uint32_t fetchdat) \
{ \
int writes = 0, total_cycles = 0; \
int cycles_end = cycles - ((is386 && cpu_use_dynarec) ? 1000 : 100); \
if (trap) \
cycles_end = cycles + 1; /*Force the instruction to end after only one iteration when trap flag set*/ \
if (CNT_REG > 0) \
SEG_CHECK_WRITE(&cpu_state.seg_es); \
while (CNT_REG > 0) { \
CHECK_WRITE_REP(&cpu_state.seg_es, DEST_REG, DEST_REG); \
writememb(es, DEST_REG, AL); \
if (cpu_state.abrt) \
return 1; \
if (cpu_state.flags & D_FLAG) \
DEST_REG--; \
else \
DEST_REG++; \
CNT_REG--; \
cycles -= is486 ? 4 : 5; \
writes++; \
total_cycles += is486 ? 4 : 5; \
if (cycles < cycles_end) \
break; \
} \
PREFETCH_RUN(total_cycles, 1, -1, 0, 0, writes, 0, 0); \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
static int opREP_STOSW_##size(uint32_t fetchdat) \
{ \
int writes = 0, total_cycles = 0; \
int cycles_end = cycles - ((is386 && cpu_use_dynarec) ? 1000 : 100); \
if (trap) \
cycles_end = cycles + 1; /*Force the instruction to end after only one iteration when trap flag set*/ \
if (CNT_REG > 0) \
SEG_CHECK_WRITE(&cpu_state.seg_es); \
while (CNT_REG > 0) { \
CHECK_WRITE_REP(&cpu_state.seg_es, DEST_REG, DEST_REG + 1UL); \
writememw(es, DEST_REG, AX); \
if (cpu_state.abrt) \
return 1; \
if (cpu_state.flags & D_FLAG) \
DEST_REG -= 2; \
else \
DEST_REG += 2; \
CNT_REG--; \
cycles -= is486 ? 4 : 5; \
writes++; \
total_cycles += is486 ? 4 : 5; \
if (cycles < cycles_end) \
break; \
} \
PREFETCH_RUN(total_cycles, 1, -1, 0, 0, writes, 0, 0); \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
static int opREP_STOSL_##size(uint32_t fetchdat) \
{ \
int writes = 0, total_cycles = 0; \
int cycles_end = cycles - ((is386 && cpu_use_dynarec) ? 1000 : 100); \
if (trap) \
cycles_end = cycles + 1; /*Force the instruction to end after only one iteration when trap flag set*/ \
if (CNT_REG > 0) \
SEG_CHECK_WRITE(&cpu_state.seg_es); \
while (CNT_REG > 0) { \
CHECK_WRITE_REP(&cpu_state.seg_es, DEST_REG, DEST_REG + 3UL); \
writememl(es, DEST_REG, EAX); \
if (cpu_state.abrt) \
return 1; \
if (cpu_state.flags & D_FLAG) \
DEST_REG -= 4; \
else \
DEST_REG += 4; \
CNT_REG--; \
cycles -= is486 ? 4 : 5; \
writes++; \
total_cycles += is486 ? 4 : 5; \
if (cycles < cycles_end) \
break; \
} \
PREFETCH_RUN(total_cycles, 1, -1, 0, 0, 0, writes, 0); \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
\
static int opREP_LODSB_##size(uint32_t fetchdat) \
{ \
int reads = 0, total_cycles = 0; \
int cycles_end = cycles - ((is386 && cpu_use_dynarec) ? 1000 : 100); \
if (trap) \
cycles_end = cycles + 1; /*Force the instruction to end after only one iteration when trap flag set*/ \
if (CNT_REG > 0) \
SEG_CHECK_READ(cpu_state.ea_seg); \
while (CNT_REG > 0) { \
CHECK_READ_REP(cpu_state.ea_seg, SRC_REG, SRC_REG); \
AL = readmemb(cpu_state.ea_seg->base, SRC_REG); \
if (cpu_state.abrt) \
return 1; \
if (cpu_state.flags & D_FLAG) \
SRC_REG--; \
else \
SRC_REG++; \
CNT_REG--; \
cycles -= is486 ? 4 : 5; \
reads++; \
total_cycles += is486 ? 4 : 5; \
if (cycles < cycles_end) \
break; \
} \
PREFETCH_RUN(total_cycles, 1, -1, reads, 0, 0, 0, 0); \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
static int opREP_LODSW_##size(uint32_t fetchdat) \
{ \
int reads = 0, total_cycles = 0; \
int cycles_end = cycles - ((is386 && cpu_use_dynarec) ? 1000 : 100); \
if (trap) \
cycles_end = cycles + 1; /*Force the instruction to end after only one iteration when trap flag set*/ \
if (CNT_REG > 0) \
SEG_CHECK_READ(cpu_state.ea_seg); \
while (CNT_REG > 0) { \
CHECK_READ_REP(cpu_state.ea_seg, SRC_REG, SRC_REG + 1UL); \
AX = readmemw(cpu_state.ea_seg->base, SRC_REG); \
if (cpu_state.abrt) \
return 1; \
if (cpu_state.flags & D_FLAG) \
SRC_REG -= 2; \
else \
SRC_REG += 2; \
CNT_REG--; \
cycles -= is486 ? 4 : 5; \
reads++; \
total_cycles += is486 ? 4 : 5; \
if (cycles < cycles_end) \
break; \
} \
PREFETCH_RUN(total_cycles, 1, -1, reads, 0, 0, 0, 0); \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
static int opREP_LODSL_##size(uint32_t fetchdat) \
{ \
int reads = 0, total_cycles = 0; \
int cycles_end = cycles - ((is386 && cpu_use_dynarec) ? 1000 : 100); \
if (trap) \
cycles_end = cycles + 1; /*Force the instruction to end after only one iteration when trap flag set*/ \
if (CNT_REG > 0) \
SEG_CHECK_READ(cpu_state.ea_seg); \
while (CNT_REG > 0) { \
CHECK_READ_REP(cpu_state.ea_seg, SRC_REG, SRC_REG + 3UL); \
EAX = readmeml(cpu_state.ea_seg->base, SRC_REG); \
if (cpu_state.abrt) \
return 1; \
if (cpu_state.flags & D_FLAG) \
SRC_REG -= 4; \
else \
SRC_REG += 4; \
CNT_REG--; \
cycles -= is486 ? 4 : 5; \
reads++; \
total_cycles += is486 ? 4 : 5; \
if (cycles < cycles_end) \
break; \
} \
PREFETCH_RUN(total_cycles, 1, -1, 0, reads, 0, 0, 0); \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
}
/* Empty stub macro — expands to nothing, so CHEK_READ() sites perform no
   access check.  NOTE(review): the name looks like a typo of CHECK_READ;
   confirm whether any call sites rely on this deliberately being a no-op. */
#define CHEK_READ(a, b, c)
/*
 * REP_OPS_CMPS_SCAS(size, CNT_REG, SRC_REG, DEST_REG, FV) expands into the
 * REP-prefixed CMPS and SCAS handlers (byte/word/dword) for one address
 * size.  FV selects the termination sense: 1 for REPE (continue while ZF
 * set), 0 for REPNE (continue while ZF clear).  Each handler performs at
 * most one iteration per call (SCAS additionally loops until a cycle
 * budget is exhausted); while CNT_REG is non-zero and ZF still matches FV
 * the handler rewinds cpu_state.pc to cpu_state.oldpc and ends the block,
 * so the instruction is re-entered on the next execution step.
 */
#define REP_OPS_CMPS_SCAS(size, CNT_REG, SRC_REG, DEST_REG, FV)                                    \
    static int opREP_CMPSB_##size(uint32_t fetchdat)                                               \
    {                                                                                              \
        int reads = 0, total_cycles = 0, tempz;                                                    \
                                                                                                   \
        addr64 = addr64_2 = 0x00000000;                                                            \
                                                                                                   \
        tempz = FV;                                                                                \
        if ((CNT_REG > 0) && (FV == tempz)) {                                                      \
            uint8_t temp, temp2;                                                                   \
            SEG_CHECK_READ(cpu_state.ea_seg);                                                      \
            SEG_CHECK_READ(&cpu_state.seg_es);                                                     \
            CHECK_READ(cpu_state.ea_seg, SRC_REG, SRC_REG);                                        \
            CHECK_READ(&cpu_state.seg_es, DEST_REG, DEST_REG);                                     \
            high_page = uncached = 0;                                                              \
            do_mmut_rb(cpu_state.ea_seg->base, SRC_REG, &addr64);                                  \
            if (cpu_state.abrt)                                                                    \
                return 1;                                                                          \
            do_mmut_rb2(es, DEST_REG, &addr64_2);                                                  \
            if (cpu_state.abrt)                                                                    \
                return 1;                                                                          \
            temp = readmemb_n(cpu_state.ea_seg->base, SRC_REG, addr64);                            \
            if (cpu_state.abrt)                                                                    \
                return 1;                                                                          \
            if (uncached)                                                                          \
                readlookup2[(uint32_t) (es + DEST_REG) >> 12] = old_rl2;                           \
            temp2 = readmemb_n(es, DEST_REG, addr64_2);                                            \
            if (cpu_state.abrt)                                                                    \
                return 1;                                                                          \
            if (uncached)                                                                          \
                readlookup2[(uint32_t) (es + DEST_REG) >> 12] = (uintptr_t) LOOKUP_INV;            \
                                                                                                   \
            if (cpu_state.flags & D_FLAG) {                                                        \
                DEST_REG--;                                                                        \
                SRC_REG--;                                                                         \
            } else {                                                                               \
                DEST_REG++;                                                                        \
                SRC_REG++;                                                                         \
            }                                                                                      \
            CNT_REG--;                                                                             \
            cycles -= is486 ? 7 : 9;                                                               \
            reads += 2;                                                                            \
            total_cycles += is486 ? 7 : 9;                                                         \
            setsub8(temp, temp2);                                                                  \
            tempz = (ZF_SET()) ? 1 : 0;                                                            \
        }                                                                                          \
        PREFETCH_RUN(total_cycles, 1, -1, reads, 0, 0, 0, 0);                                      \
        if ((CNT_REG > 0) && (FV == tempz)) {                                                      \
            CPU_BLOCK_END();                                                                       \
            cpu_state.pc = cpu_state.oldpc;                                                        \
            return 1;                                                                              \
        }                                                                                          \
        return cpu_state.abrt;                                                                     \
    }                                                                                              \
    static int opREP_CMPSW_##size(uint32_t fetchdat)                                               \
    {                                                                                              \
        int reads = 0, total_cycles = 0, tempz;                                                    \
                                                                                                   \
        addr64a[0] = addr64a[1] = 0x00000000;                                                      \
        addr64a_2[0] = addr64a_2[1] = 0x00000000;                                                  \
                                                                                                   \
        tempz = FV;                                                                                \
        if ((CNT_REG > 0) && (FV == tempz)) {                                                      \
            uint16_t temp, temp2;                                                                  \
            SEG_CHECK_READ(cpu_state.ea_seg);                                                      \
            SEG_CHECK_READ(&cpu_state.seg_es);                                                     \
            CHECK_READ(cpu_state.ea_seg, SRC_REG, SRC_REG + 1UL);                                  \
            CHECK_READ(&cpu_state.seg_es, DEST_REG, DEST_REG + 1UL);                               \
            high_page = uncached = 0;                                                              \
            do_mmut_rw(cpu_state.ea_seg->base, SRC_REG, addr64a);                                  \
            if (cpu_state.abrt)                                                                    \
                return 1;                                                                          \
            do_mmut_rw2(es, DEST_REG, addr64a_2);                                                  \
            if (cpu_state.abrt)                                                                    \
                return 1;                                                                          \
            temp = readmemw_n(cpu_state.ea_seg->base, SRC_REG, addr64a);                           \
            if (cpu_state.abrt)                                                                    \
                return 1;                                                                          \
            if (uncached)                                                                          \
                readlookup2[(uint32_t) (es + DEST_REG) >> 12] = old_rl2;                           \
            temp2 = readmemw_n(es, DEST_REG, addr64a_2);                                           \
            if (cpu_state.abrt)                                                                    \
                return 1;                                                                          \
            if (uncached)                                                                          \
                readlookup2[(uint32_t) (es + DEST_REG) >> 12] = (uintptr_t) LOOKUP_INV;            \
                                                                                                   \
            if (cpu_state.flags & D_FLAG) {                                                        \
                DEST_REG -= 2;                                                                     \
                SRC_REG -= 2;                                                                      \
            } else {                                                                               \
                DEST_REG += 2;                                                                     \
                SRC_REG += 2;                                                                      \
            }                                                                                      \
            CNT_REG--;                                                                             \
            cycles -= is486 ? 7 : 9;                                                               \
            reads += 2;                                                                            \
            total_cycles += is486 ? 7 : 9;                                                         \
            setsub16(temp, temp2);                                                                 \
            tempz = (ZF_SET()) ? 1 : 0;                                                            \
        }                                                                                          \
        PREFETCH_RUN(total_cycles, 1, -1, reads, 0, 0, 0, 0);                                      \
        if ((CNT_REG > 0) && (FV == tempz)) {                                                      \
            CPU_BLOCK_END();                                                                       \
            cpu_state.pc = cpu_state.oldpc;                                                        \
            return 1;                                                                              \
        }                                                                                          \
        return cpu_state.abrt;                                                                     \
    }                                                                                              \
    static int opREP_CMPSL_##size(uint32_t fetchdat)                                               \
    {                                                                                              \
        int reads = 0, total_cycles = 0, tempz;                                                    \
                                                                                                   \
        addr64a[0] = addr64a[1] = addr64a[2] = addr64a[3] = 0x00000000;                            \
        addr64a_2[0] = addr64a_2[1] = addr64a_2[2] = addr64a_2[3] = 0x00000000;                    \
                                                                                                   \
        tempz = FV;                                                                                \
        if ((CNT_REG > 0) && (FV == tempz)) {                                                      \
            uint32_t temp, temp2;                                                                  \
            SEG_CHECK_READ(cpu_state.ea_seg);                                                      \
            SEG_CHECK_READ(&cpu_state.seg_es);                                                     \
            CHECK_READ(cpu_state.ea_seg, SRC_REG, SRC_REG + 3UL);                                  \
            CHECK_READ(&cpu_state.seg_es, DEST_REG, DEST_REG + 3UL);                               \
            high_page = uncached = 0;                                                              \
            do_mmut_rl(cpu_state.ea_seg->base, SRC_REG, addr64a);                                  \
            if (cpu_state.abrt)                                                                    \
                return 1;                                                                          \
            do_mmut_rl2(es, DEST_REG, addr64a_2);                                                  \
            if (cpu_state.abrt)                                                                    \
                return 1;                                                                          \
            temp = readmeml_n(cpu_state.ea_seg->base, SRC_REG, addr64a);                           \
            if (cpu_state.abrt)                                                                    \
                return 1;                                                                          \
            if (uncached)                                                                          \
                readlookup2[(uint32_t) (es + DEST_REG) >> 12] = old_rl2;                           \
            temp2 = readmeml_n(es, DEST_REG, addr64a_2);                                           \
            if (cpu_state.abrt)                                                                    \
                return 1;                                                                          \
            if (uncached)                                                                          \
                readlookup2[(uint32_t) (es + DEST_REG) >> 12] = (uintptr_t) LOOKUP_INV;            \
                                                                                                   \
            if (cpu_state.flags & D_FLAG) {                                                        \
                DEST_REG -= 4;                                                                     \
                SRC_REG -= 4;                                                                      \
            } else {                                                                               \
                DEST_REG += 4;                                                                     \
                SRC_REG += 4;                                                                      \
            }                                                                                      \
            CNT_REG--;                                                                             \
            cycles -= is486 ? 7 : 9;                                                               \
            reads += 2;                                                                            \
            total_cycles += is486 ? 7 : 9;                                                         \
            setsub32(temp, temp2);                                                                 \
            tempz = (ZF_SET()) ? 1 : 0;                                                            \
        }                                                                                          \
        PREFETCH_RUN(total_cycles, 1, -1, 0, reads, 0, 0, 0);                                      \
        if ((CNT_REG > 0) && (FV == tempz)) {                                                      \
            CPU_BLOCK_END();                                                                       \
            cpu_state.pc = cpu_state.oldpc;                                                        \
            return 1;                                                                              \
        }                                                                                          \
        return cpu_state.abrt;                                                                     \
    }                                                                                              \
                                                                                                   \
    static int opREP_SCASB_##size(uint32_t fetchdat)                                               \
    {                                                                                              \
        int reads = 0, total_cycles = 0, tempz;                                                    \
        int cycles_end = cycles - ((is386 && cpu_use_dynarec) ? 1000 : 100);                       \
        if (trap)                                                                                  \
            cycles_end = cycles + 1; /*Force the instruction to end after only one iteration when trap flag set*/ \
        tempz = FV;                                                                                \
        if ((CNT_REG > 0) && (FV == tempz))                                                        \
            SEG_CHECK_READ(&cpu_state.seg_es);                                                     \
        while ((CNT_REG > 0) && (FV == tempz)) {                                                   \
            CHECK_READ_REP(&cpu_state.seg_es, DEST_REG, DEST_REG);                                 \
            uint8_t temp = readmemb(es, DEST_REG);                                                 \
            if (cpu_state.abrt)                                                                    \
                break;                                                                             \
            setsub8(AL, temp);                                                                     \
            tempz = (ZF_SET()) ? 1 : 0;                                                            \
            if (cpu_state.flags & D_FLAG)                                                          \
                DEST_REG--;                                                                        \
            else                                                                                   \
                DEST_REG++;                                                                        \
            CNT_REG--;                                                                             \
            cycles -= is486 ? 5 : 8;                                                               \
            reads++;                                                                               \
            total_cycles += is486 ? 5 : 8;                                                         \
            if (cycles < cycles_end)                                                               \
                break;                                                                             \
        }                                                                                          \
        PREFETCH_RUN(total_cycles, 1, -1, reads, 0, 0, 0, 0);                                      \
        if ((CNT_REG > 0) && (FV == tempz)) {                                                      \
            CPU_BLOCK_END();                                                                       \
            cpu_state.pc = cpu_state.oldpc;                                                        \
            return 1;                                                                              \
        }                                                                                          \
        return cpu_state.abrt;                                                                     \
    }                                                                                              \
    static int opREP_SCASW_##size(uint32_t fetchdat)                                               \
    {                                                                                              \
        int reads = 0, total_cycles = 0, tempz;                                                    \
        int cycles_end = cycles - ((is386 && cpu_use_dynarec) ? 1000 : 100);                       \
        if (trap)                                                                                  \
            cycles_end = cycles + 1; /*Force the instruction to end after only one iteration when trap flag set*/ \
        tempz = FV;                                                                                \
        if ((CNT_REG > 0) && (FV == tempz))                                                        \
            SEG_CHECK_READ(&cpu_state.seg_es);                                                     \
        while ((CNT_REG > 0) && (FV == tempz)) {                                                   \
            CHECK_READ_REP(&cpu_state.seg_es, DEST_REG, DEST_REG + 1UL);                           \
            uint16_t temp = readmemw(es, DEST_REG);                                                \
            if (cpu_state.abrt)                                                                    \
                break;                                                                             \
            setsub16(AX, temp);                                                                    \
            tempz = (ZF_SET()) ? 1 : 0;                                                            \
            if (cpu_state.flags & D_FLAG)                                                          \
                DEST_REG -= 2;                                                                     \
            else                                                                                   \
                DEST_REG += 2;                                                                     \
            CNT_REG--;                                                                             \
            cycles -= is486 ? 5 : 8;                                                               \
            reads++;                                                                               \
            total_cycles += is486 ? 5 : 8;                                                         \
            if (cycles < cycles_end)                                                               \
                break;                                                                             \
        }                                                                                          \
        PREFETCH_RUN(total_cycles, 1, -1, reads, 0, 0, 0, 0);                                      \
        if ((CNT_REG > 0) && (FV == tempz)) {                                                      \
            CPU_BLOCK_END();                                                                       \
            cpu_state.pc = cpu_state.oldpc;                                                        \
            return 1;                                                                              \
        }                                                                                          \
        return cpu_state.abrt;                                                                     \
    }                                                                                              \
    static int opREP_SCASL_##size(uint32_t fetchdat)                                               \
    {                                                                                              \
        int reads = 0, total_cycles = 0, tempz;                                                    \
        int cycles_end = cycles - ((is386 && cpu_use_dynarec) ? 1000 : 100);                       \
        if (trap)                                                                                  \
            cycles_end = cycles + 1; /*Force the instruction to end after only one iteration when trap flag set*/ \
        tempz = FV;                                                                                \
        if ((CNT_REG > 0) && (FV == tempz))                                                        \
            SEG_CHECK_READ(&cpu_state.seg_es);                                                     \
        while ((CNT_REG > 0) && (FV == tempz)) {                                                   \
            CHECK_READ_REP(&cpu_state.seg_es, DEST_REG, DEST_REG + 3UL);                           \
            uint32_t temp = readmeml(es, DEST_REG);                                                \
            if (cpu_state.abrt)                                                                    \
                break;                                                                             \
            setsub32(EAX, temp);                                                                   \
            tempz = (ZF_SET()) ? 1 : 0;                                                            \
            if (cpu_state.flags & D_FLAG)                                                          \
                DEST_REG -= 4;                                                                     \
            else                                                                                   \
                DEST_REG += 4;                                                                     \
            CNT_REG--;                                                                             \
            cycles -= is486 ? 5 : 8;                                                               \
            reads++;                                                                               \
            total_cycles += is486 ? 5 : 8;                                                         \
            if (cycles < cycles_end)                                                               \
                break;                                                                             \
        }                                                                                          \
        PREFETCH_RUN(total_cycles, 1, -1, 0, reads, 0, 0, 0);                                      \
        if ((CNT_REG > 0) && (FV == tempz)) {                                                      \
            CPU_BLOCK_END();                                                                       \
            cpu_state.pc = cpu_state.oldpc;                                                        \
            return 1;                                                                              \
        }                                                                                          \
        return cpu_state.abrt;                                                                     \
    }
/* Instantiate the REP-prefixed string handlers for 16-bit (a16: CX/SI/DI)
   and 32-bit (a32: ECX/ESI/EDI) address sizes.  CMPS/SCAS variants are
   additionally split by termination sense: _NE (REPNE, FV = 0) and
   _E (REPE, FV = 1). */
REP_OPS(a16, CX, SI, DI)
REP_OPS(a32, ECX, ESI, EDI)
REP_OPS_CMPS_SCAS(a16_NE, CX, SI, DI, 0)
REP_OPS_CMPS_SCAS(a16_E, CX, SI, DI, 1)
REP_OPS_CMPS_SCAS(a32_NE, ECX, ESI, EDI, 0)
REP_OPS_CMPS_SCAS(a32_E, ECX, ESI, EDI, 1)
/* REPNE prefix handler: fetch the following opcode and dispatch it through
   the REPNE opcode table, falling back to the plain table when no
   REPNE-specific handler is registered. */
static int
opREPNE(uint32_t fetchdat)
{
    uint32_t next;

    next = fastreadl(cs + cpu_state.pc);
    if (cpu_state.abrt)
        return 1;
    cpu_state.pc++;
    CLOCK_CYCLES(2);
    PREFETCH_PREFIX();

    {
        const uint32_t idx  = (next & 0xff) | cpu_state.op32;
        const uint32_t rest = next >> 8;

        if (x86_opcodes_REPNE[idx])
            return x86_opcodes_REPNE[idx](rest);
        return x86_opcodes[idx](rest);
    }
}
/* REPE prefix handler: fetch the following opcode and dispatch it through
   the REPE opcode table, falling back to the plain table when no
   REPE-specific handler is registered. */
static int
opREPE(uint32_t fetchdat)
{
    uint32_t next;

    next = fastreadl(cs + cpu_state.pc);
    if (cpu_state.abrt)
        return 1;
    cpu_state.pc++;
    CLOCK_CYCLES(2);
    PREFETCH_PREFIX();

    {
        const uint32_t idx  = (next & 0xff) | cpu_state.op32;
        const uint32_t rest = next >> 8;

        if (x86_opcodes_REPE[idx])
            return x86_opcodes_REPE[idx](rest);
        return x86_opcodes[idx](rest);
    }
}
``` | /content/code_sandbox/src/cpu/x86_ops_rep.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 10,190 |
```c
/*Basic P6 timing model by plant/nerd73. Based on the K6 timing model*/
/*Some cycle timings come from path_to_url*/
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <wchar.h>
#include <86box/86box.h>
#include "cpu.h"
#include <86box/mem.h>
#include <86box/machine.h>
#include <86box/plat_unused.h>
#include "x86.h"
#include "x86_ops.h"
#include "x86seg_common.h"
#include "x87_sf.h"
#include "x87.h"
#include "386_common.h"
#include "codegen.h"
#include "codegen_ops.h"
#include "codegen_timing_common.h"
/* Execution-unit class a decoded micro-op is issued to. */
typedef enum uop_type_t {
    UOP_ALU = 0,   /*Executes in Port 0 or 1 ALU units*/
    UOP_ALUP0,     /*Executes in Port 0 ALU unit*/
    UOP_LOAD,      /*Executes in Load unit*/
    UOP_STORED,    /*Executes in Data Store unit*/
    UOP_STOREA,    /*Executes in Address Store unit*/
    UOP_FLOAD,     /*Executes in Load unit*/
    UOP_FSTORED,   /*Executes in Data Store unit*/
    UOP_FSTOREA,   /*Executes in Address Store unit*/
    UOP_MLOAD,     /*Executes in Load unit*/
    UOP_MSTORED,   /*Executes in Data Store unit*/
    UOP_MSTOREA,   /*Executes in Address Store unit*/
    UOP_FLOAT,     /*Executes in Floating Point unit*/
    UOP_MMX,       /*Executes in Port 0 or 1 ALU units as MMX*/
    UOP_MMX_SHIFT, /*Executes in Port 1 ALU unit. Uses MMX shifter*/
    UOP_MMX_MUL,   /*Executes in Port 0 ALU unit. Uses MMX multiplier*/
    UOP_BRANCH,    /*Executes in Branch unit*/
    UOP_FXCH       /*Does not require an execution unit*/
} uop_type_t;

/* Decoder class of a macro-op.  NOTE(review): presumably DECODE_SIMPLE ops
   can use any decoder while DECODE_COMPLEX ops are restricted — confirm
   against the scheduler code that consumes this field. */
typedef enum decode_type_t {
    DECODE_SIMPLE,
    DECODE_COMPLEX,
} decode_type_t;

/* Maximum number of micro-ops a single macro-op entry may carry. */
#define MAX_UOPS 10

/* One micro-op: target execution unit and its latency. */
typedef struct p6_uop_t {
    uop_type_t type;
    int        latency;
} p6_uop_t;

/* Decode/issue recipe for one x86 instruction form: how many micro-ops it
   breaks into, which decoder class it needs, and the micro-ops themselves. */
typedef struct macro_op_t {
    int           nr_uops;
    decode_type_t decode_type;
    p6_uop_t      uop[MAX_UOPS];
} macro_op_t;
/* --- Integer ALU forms --- */

/* Register ALU op: one uop on either ALU port. */
static const macro_op_t alu_op = {
    .nr_uops     = 1,
    .decode_type = DECODE_SIMPLE,
    .uop[0]      = {.type = UOP_ALU, .latency = 1}
};
/* Register ALU op restricted to the port 0 ALU. */
static const macro_op_t alup0_op = {
    .nr_uops     = 1,
    .decode_type = DECODE_SIMPLE,
    .uop[0]      = {.type = UOP_ALUP0, .latency = 1}
};
/* Memory-source ALU op: load feeding an ALU uop. */
static const macro_op_t load_alu_op = {
    .nr_uops     = 2,
    .decode_type = DECODE_COMPLEX,
    .uop[0]      = {.type = UOP_LOAD, .latency = 1},
    .uop[1]      = { .type = UOP_ALU, .latency = 1}
};
static const macro_op_t load_alup0_op = {
    .nr_uops     = 2,
    .decode_type = DECODE_COMPLEX,
    .uop[0]      = {.type = UOP_LOAD, .latency = 1},
    .uop[1]      = { .type = UOP_ALUP0, .latency = 1}
};
/* Read-modify-write ALU op: load, ALU, then store split into data and
   address uops. */
static const macro_op_t alu_store_op = {
    .nr_uops     = 4,
    .decode_type = DECODE_COMPLEX,
    .uop[0]      = {.type = UOP_LOAD, .latency = 1},
    .uop[1]      = { .type = UOP_ALU, .latency = 1},
    .uop[2]      = { .type = UOP_STORED, .latency = 1},
    .uop[3]      = { .type = UOP_STOREA, .latency = 1}
};
static const macro_op_t alup0_store_op = {
    .nr_uops     = 4,
    .decode_type = DECODE_COMPLEX,
    .uop[0]      = {.type = UOP_LOAD, .latency = 1},
    .uop[1]      = { .type = UOP_ALUP0, .latency = 1},
    .uop[2]      = { .type = UOP_STORED, .latency = 1},
    .uop[3]      = { .type = UOP_STOREA, .latency = 1}
};

/* --- Control flow and moves --- */

static const macro_op_t branch_op = {
    .nr_uops     = 1,
    .decode_type = DECODE_COMPLEX,
    .uop[0]      = {.type = UOP_BRANCH, .latency = 2}
};
/* FXCH: register-rename only, consumes no execution unit. */
static const macro_op_t fxch_op = {
    .nr_uops     = 1,
    .decode_type = DECODE_SIMPLE,
    .uop[0]      = {.type = UOP_FXCH, .latency = 1}
};
static const macro_op_t load_op = {
    .nr_uops     = 1,
    .decode_type = DECODE_SIMPLE,
    .uop[0]      = {.type = UOP_LOAD, .latency = 1}
};
static const macro_op_t store_op = {
    .nr_uops     = 2,
    .decode_type = DECODE_COMPLEX,
    .uop[0]      = {.type = UOP_STORED, .latency = 1},
    .uop[1]      = { .type = UOP_STOREA, .latency = 1}
};
static const macro_op_t bswap_op = {
    .nr_uops     = 2,
    .decode_type = DECODE_COMPLEX,
    .uop[0]      = {.type = UOP_ALU, .latency = 1},
    .uop[1]      = { .type = UOP_ALU, .latency = 1},
};
static const macro_op_t leave_op = {
    .nr_uops     = 3,
    .decode_type = DECODE_COMPLEX,
    .uop[0]      = {.type = UOP_LOAD, .latency = 1},
    .uop[1]      = { .type = UOP_ALU, .latency = 1},
    .uop[2]      = { .type = UOP_ALU, .latency = 1}
};
/* LODS: load plus pointer update. */
static const macro_op_t lods_op = {
    .nr_uops     = 2,
    .decode_type = DECODE_COMPLEX,
    .uop[0]      = {.type = UOP_LOAD, .latency = 1},
    .uop[1]      = { .type = UOP_ALU, .latency = 1}
};
static const macro_op_t loop_op = {
    .nr_uops     = 5,
    .decode_type = DECODE_COMPLEX,
    .uop[0]      = {.type = UOP_ALU, .latency = 1},
    .uop[1]      = { .type = UOP_ALU, .latency = 1},
    .uop[2]      = { .type = UOP_ALU, .latency = 1},
    .uop[3]      = { .type = UOP_ALU, .latency = 1},
    .uop[4]      = { .type = UOP_BRANCH, .latency = 1}
};
/* NOTE(review): a reg<-sreg move modelled as a single LOAD uop — confirm
   this is the intended unit (no memory access is architecturally needed). */
static const macro_op_t mov_reg_seg_op = {
    .nr_uops     = 1,
    .decode_type = DECODE_COMPLEX,
    .uop[0]      = {.type = UOP_LOAD, .latency = 1},
};
/* MOVS: load, split store, pointer update. */
static const macro_op_t movs_op = {
    .nr_uops     = 4,
    .decode_type = DECODE_COMPLEX,
    .uop[0]      = {.type = UOP_LOAD, .latency = 1},
    .uop[1]      = { .type = UOP_STORED, .latency = 1},
    .uop[2]      = { .type = UOP_STOREA, .latency = 1},
    .uop[3]      = { .type = UOP_ALU, .latency = 1}
};
static const macro_op_t pop_reg_op = {
    .nr_uops     = 2,
    .decode_type = DECODE_COMPLEX,
    .uop[0]      = {.type = UOP_LOAD, .latency = 1},
    .uop[1]      = { .type = UOP_ALU, .latency = 1}
};
static const macro_op_t pop_mem_op = {
    .nr_uops     = 4,
    .decode_type = DECODE_COMPLEX,
    .uop[0]      = {.type = UOP_LOAD, .latency = 1},
    .uop[1]      = { .type = UOP_STORED, .latency = 1},
    .uop[2]      = { .type = UOP_STOREA, .latency = 1},
    .uop[3]      = { .type = UOP_ALU, .latency = 1}
};
static const macro_op_t push_imm_op = {
    .nr_uops     = 2,
    .decode_type = DECODE_COMPLEX,
    .uop[0]      = {.type = UOP_STORED, .latency = 1},
    .uop[1]      = { .type = UOP_STOREA, .latency = 1},
};
static const macro_op_t push_mem_op = {
    .nr_uops     = 3,
    .decode_type = DECODE_COMPLEX,
    .uop[0]      = {.type = UOP_LOAD, .latency = 1},
    .uop[1]      = { .type = UOP_STORED, .latency = 1},
    .uop[2]      = { .type = UOP_STOREA, .latency = 1}
};
/* PUSH sreg: read the segment value, store it to the stack (data + address
   uops), and adjust the stack pointer.  Fix: the original declared
   .nr_uops = 3 while initializing uop[0]..uop[3], so the trailing ALU uop
   was initialized but never counted by any walker iterating nr_uops
   entries; count all four initialized uops. */
static const macro_op_t push_seg_op = {
    .nr_uops     = 4,
    .decode_type = DECODE_COMPLEX,
    .uop[0]      = {.type = UOP_LOAD, .latency = 1},
    .uop[1]      = { .type = UOP_STORED, .latency = 1},
    .uop[2]      = { .type = UOP_STOREA, .latency = 1},
    .uop[3]      = { .type = UOP_ALU, .latency = 1}
};
/* STOS: split store (data + address) plus pointer-update ALU uop.
   Fix: the original used designated indices uop[1]..uop[3] with
   .nr_uops = 3, which left uop[0] zero-initialized (type UOP_ALU = 0,
   latency 0) in the counted range and placed the real third uop at
   index 3 where a walker iterating nr_uops entries never reaches it.
   Renumber the three uops to indices 0..2, matching every other table. */
static const macro_op_t stos_op = {
    .nr_uops     = 3,
    .decode_type = DECODE_COMPLEX,
    .uop[0]      = {.type = UOP_STORED, .latency = 1},
    .uop[1]      = { .type = UOP_STOREA, .latency = 1},
    .uop[2]      = { .type = UOP_ALU, .latency = 1}
};
static const macro_op_t test_reg_op = {
.nr_uops = 1,
.decode_type = DECODE_COMPLEX,
.uop[0] = {.type = UOP_ALU, .latency = 1}
};
static const macro_op_t test_reg_b_op = {
.nr_uops = 1,
.decode_type = DECODE_COMPLEX,
.uop[0] = {.type = UOP_ALUP0, .latency = 1}
};
static const macro_op_t test_mem_imm_op = {
.nr_uops = 2,
.decode_type = DECODE_COMPLEX,
.uop[0] = {.type = UOP_LOAD, .latency = 1},
.uop[1] = { .type = UOP_ALU, .latency = 1}
};
static const macro_op_t test_mem_imm_b_op = {
.nr_uops = 2,
.decode_type = DECODE_COMPLEX,
.uop[0] = {.type = UOP_LOAD, .latency = 1},
.uop[1] = { .type = UOP_ALUP0, .latency = 1}
};
static const macro_op_t xchg_op = {
.nr_uops = 3,
.decode_type = DECODE_COMPLEX,
.uop[0] = {.type = UOP_ALU, .latency = 1},
.uop[1] = { .type = UOP_ALU, .latency = 1},
.uop[2] = { .type = UOP_ALU, .latency = 1}
};
static const macro_op_t mmx_op = {
.nr_uops = 1,
.decode_type = DECODE_SIMPLE,
.uop[0] = {.type = UOP_MMX, .latency = 1}
};
static const macro_op_t mmx_mul_op = {
.nr_uops = 1,
.decode_type = DECODE_SIMPLE,
.uop[0] = {.type = UOP_MMX_MUL, .latency = 1}
};
static const macro_op_t mmx_shift_op = {
.nr_uops = 1,
.decode_type = DECODE_SIMPLE,
.uop[0] = {.type = UOP_MMX_SHIFT, .latency = 1}
};
static const macro_op_t load_mmx_op = {
.nr_uops = 2,
.decode_type = DECODE_COMPLEX,
.uop[0] = {.type = UOP_LOAD, .latency = 2},
.uop[1] = { .type = UOP_MMX, .latency = 2}
};
static const macro_op_t load_mmx_mul_op = {
.nr_uops = 2,
.decode_type = DECODE_COMPLEX,
.uop[0] = {.type = UOP_LOAD, .latency = 2},
.uop[1] = { .type = UOP_MMX_MUL, .latency = 2}
};
static const macro_op_t load_mmx_shift_op = {
.nr_uops = 2,
.decode_type = DECODE_COMPLEX,
.uop[0] = {.type = UOP_LOAD, .latency = 2},
.uop[1] = { .type = UOP_MMX_SHIFT, .latency = 2}
};
static const macro_op_t mload_op = {
.nr_uops = 1,
.decode_type = DECODE_COMPLEX,
.uop[0] = {.type = UOP_MLOAD, .latency = 1},
};
static const macro_op_t mstore_op = {
.nr_uops = 2,
.decode_type = DECODE_COMPLEX,
.uop[0] = {.type = UOP_MSTORED, .latency = 1},
.uop[1] = { .type = UOP_MSTOREA, .latency = 1}
};
static const macro_op_t pmul_op = {
.nr_uops = 1,
.decode_type = DECODE_SIMPLE,
.uop[0] = {.type = UOP_MMX_MUL, .latency = 1}
};
static const macro_op_t pmul_mem_op = {
.nr_uops = 2,
.decode_type = DECODE_COMPLEX,
.uop[0] = {.type = UOP_LOAD, .latency = 2},
.uop[1] = { .type = UOP_MMX_MUL, .latency = 2}
};
static const macro_op_t float_op = {
.nr_uops = 1,
.decode_type = DECODE_SIMPLE,
.uop[0] = {.type = UOP_FLOAT, .latency = 1}
};
static const macro_op_t fadd_op = {
.nr_uops = 1,
.decode_type = DECODE_SIMPLE,
.uop[0] = {.type = UOP_FLOAT, .latency = 2}
};
static const macro_op_t fmul_op = {
.nr_uops = 1,
.decode_type = DECODE_SIMPLE,
.uop[0] = {.type = UOP_ALUP0, .latency = 3}
};
static const macro_op_t float2_op = {
.nr_uops = 2,
.decode_type = DECODE_COMPLEX,
.uop[0] = {.type = UOP_FLOAT, .latency = 1},
.uop[1] = { .type = UOP_FLOAT, .latency = 1}
};
static const macro_op_t fchs_op = {
.nr_uops = 3,
.decode_type = DECODE_COMPLEX,
.uop[0] = {.type = UOP_FLOAT, .latency = 2},
.uop[1] = { .type = UOP_FLOAT, .latency = 2},
.uop[2] = { .type = UOP_FLOAT, .latency = 2}
};
static const macro_op_t load_float_op = {
.nr_uops = 2,
.decode_type = DECODE_COMPLEX,
.uop[0] = {.type = UOP_FLOAD, .latency = 1},
.uop[1] = { .type = UOP_FLOAT, .latency = 1}
};
static const macro_op_t load_fadd_op = {
.nr_uops = 2,
.decode_type = DECODE_COMPLEX,
.uop[0] = {.type = UOP_FLOAD, .latency = 1},
.uop[1] = { .type = UOP_FLOAT, .latency = 2}
};
static const macro_op_t load_fmul_op = {
.nr_uops = 2,
.decode_type = DECODE_COMPLEX,
.uop[0] = {.type = UOP_LOAD, .latency = 1},
.uop[1] = { .type = UOP_ALU, .latency = 4}
};
static const macro_op_t fstore_op = {
.nr_uops = 2,
.decode_type = DECODE_COMPLEX,
.uop[0] = {.type = UOP_FSTORED, .latency = 1},
.uop[1] = { .type = UOP_FSTOREA, .latency = 1},
};
static const macro_op_t load_fiadd_op = {
.nr_uops = 7,
.decode_type = DECODE_COMPLEX,
.uop[0] = {.type = UOP_FLOAD, .latency = 1},
.uop[1] = { .type = UOP_FLOAT, .latency = 1},
.uop[2] = { .type = UOP_FLOAT, .latency = 1},
.uop[3] = { .type = UOP_FLOAT, .latency = 1},
.uop[4] = { .type = UOP_FLOAT, .latency = 1},
.uop[5] = { .type = UOP_FLOAT, .latency = 1},
.uop[6] = { .type = UOP_FLOAT, .latency = 1}
};
static const macro_op_t fdiv_op = {
.nr_uops = 1,
.decode_type = DECODE_SIMPLE,
.uop[0] = {.type = UOP_FLOAT, .latency = 37}
};
static const macro_op_t fdiv_mem_op = {
.nr_uops = 2,
.decode_type = DECODE_COMPLEX,
.uop[0] = {.type = UOP_FLOAD, .latency = 1 },
.uop[1] = { .type = UOP_FLOAT, .latency = 37}
};
static const macro_op_t fsin_op = {
.nr_uops = 1,
.decode_type = DECODE_SIMPLE,
.uop[0] = {.type = UOP_FLOAT, .latency = 62}
};
static const macro_op_t fsqrt_op = {
.nr_uops = 1,
.decode_type = DECODE_SIMPLE,
.uop[0] = {.type = UOP_FLOAT, .latency = 69}
};
static const macro_op_t fldcw_op = {
.nr_uops = 1,
.decode_type = DECODE_SIMPLE,
.uop[0] = {.type = UOP_FLOAT, .latency = 10}
};
static const macro_op_t complex_float_op = {
.nr_uops = 1,
.decode_type = DECODE_COMPLEX,
.uop[0] = {.type = UOP_FLOAT, .latency = 1}
};
static const macro_op_t complex_float_l_op = {
.nr_uops = 1,
.decode_type = DECODE_COMPLEX,
.uop[0] = {.type = UOP_FLOAT, .latency = 50}
};
static const macro_op_t flde_op = {
.nr_uops = 3,
.decode_type = DECODE_COMPLEX,
.uop[0] = {.type = UOP_FLOAD, .latency = 1},
.uop[1] = { .type = UOP_FLOAD, .latency = 1},
.uop[2] = { .type = UOP_FLOAT, .latency = 2}
};
static const macro_op_t fste_op = {
.nr_uops = 3,
.decode_type = DECODE_COMPLEX,
.uop[0] = {.type = UOP_FLOAT, .latency = 2},
.uop[1] = { .type = UOP_FSTORED, .latency = 1},
.uop[2] = { .type = UOP_FSTOREA, .latency = 1}
};
/* Single ALU uop forced through the complex decoder. */
static const macro_op_t complex_alu1_op = {
    .nr_uops = 1,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_ALU, .latency = 1}
};
/* Two ALU uops, complex-decoded. */
static const macro_op_t alu2_op = {
    .nr_uops = 2,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_ALU, .latency = 1},
    .uop[1] = { .type = UOP_ALU, .latency = 1}
};
/* Three ALU uops, complex-decoded. */
static const macro_op_t alu3_op = {
    .nr_uops = 3,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_ALU, .latency = 1},
    .uop[1] = { .type = UOP_ALU, .latency = 1},
    .uop[2] = { .type = UOP_ALU, .latency = 1}
};
/* Six ALU uops, complex-decoded. */
static const macro_op_t alu6_op = {
    .nr_uops = 6,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_ALU, .latency = 1},
    .uop[1] = { .type = UOP_ALU, .latency = 1},
    .uop[2] = { .type = UOP_ALU, .latency = 1},
    .uop[3] = { .type = UOP_ALU, .latency = 1},
    .uop[4] = { .type = UOP_ALU, .latency = 1},
    .uop[5] = { .type = UOP_ALU, .latency = 1}
};
/* Single port-0-only ALU uop, complex-decoded. */
static const macro_op_t complex_alup0_1_op = {
    .nr_uops = 1,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_ALUP0, .latency = 1}
};
/* Three port-0-only ALU uops, complex-decoded. */
static const macro_op_t alup0_3_op = {
    .nr_uops = 3,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_ALUP0, .latency = 1},
    .uop[1] = { .type = UOP_ALUP0, .latency = 1},
    .uop[2] = { .type = UOP_ALUP0, .latency = 1}
};
/* Six port-0-only ALU uops, complex-decoded. */
static const macro_op_t alup0_6_op = {
    .nr_uops = 6,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_ALUP0, .latency = 1},
    .uop[1] = { .type = UOP_ALUP0, .latency = 1},
    .uop[2] = { .type = UOP_ALUP0, .latency = 1},
    .uop[3] = { .type = UOP_ALUP0, .latency = 1},
    .uop[4] = { .type = UOP_ALUP0, .latency = 1},
    .uop[5] = { .type = UOP_ALUP0, .latency = 1}
};
/* ARPL: two 3-cycle ALU uops. */
static const macro_op_t arpl_op = {
    .nr_uops = 2,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_ALU, .latency = 3},
    .uop[1] = { .type = UOP_ALU, .latency = 3}
};
/* BOUND: load both limits, then two compares. */
static const macro_op_t bound_op = {
    .nr_uops = 4,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_LOAD, .latency = 1},
    .uop[1] = { .type = UOP_LOAD, .latency = 1},
    .uop[2] = { .type = UOP_ALU, .latency = 1},
    .uop[3] = { .type = UOP_ALU, .latency = 1}
};
/* BSF/BSR: one long ALU uop. */
static const macro_op_t bsx_op = {
    .nr_uops = 1,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_ALU, .latency = 10}
};
/* CALL far: segment load, push of the return address, then the branch. */
static const macro_op_t call_far_op = {
    .nr_uops = 4,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_ALU, .latency = 3},
    .uop[1] = { .type = UOP_STORED, .latency = 1},
    .uop[2] = { .type = UOP_STOREA, .latency = 1},
    .uop[3] = { .type = UOP_BRANCH, .latency = 1}
};
/* CLI/STI: single 7-cycle ALU uop. */
static const macro_op_t cli_sti_op = {
    .nr_uops = 1,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_ALU, .latency = 7}
};
/* CMPSW/CMPSD: load, compare, index update. */
static const macro_op_t cmps_op = {
    .nr_uops = 3,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_LOAD, .latency = 1},
    .uop[1] = { .type = UOP_ALU, .latency = 1},
    .uop[2] = { .type = UOP_ALU, .latency = 1}
};
/* CMPSB: byte compare goes to the port-0-only ALU. */
static const macro_op_t cmpsb_op = {
    .nr_uops = 3,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_LOAD, .latency = 1},
    .uop[1] = { .type = UOP_ALUP0, .latency = 1},
    .uop[2] = { .type = UOP_ALU, .latency = 1}
};
/* CMPXCHG (word/dword): load, compare, store-data/store-address. */
static const macro_op_t cmpxchg_op = {
    .nr_uops = 4,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_LOAD, .latency = 1},
    .uop[1] = { .type = UOP_ALU, .latency = 1},
    .uop[2] = { .type = UOP_STORED, .latency = 1},
    .uop[3] = { .type = UOP_STOREA, .latency = 1}
};
/* CMPXCHG (byte): as above but the compare is port-0-only. */
static const macro_op_t cmpxchg_b_op = {
    .nr_uops = 4,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_LOAD, .latency = 1},
    .uop[1] = { .type = UOP_ALUP0, .latency = 1},
    .uop[2] = { .type = UOP_STORED, .latency = 1},
    .uop[3] = { .type = UOP_STOREA, .latency = 1}
};
/* Complex-decoded push: store-data/store-address pair. */
static const macro_op_t complex_push_mem_op = {
    .nr_uops = 2,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_STORED, .latency = 1},
    .uop[1] = { .type = UOP_STOREA, .latency = 1}
};
/* CPUID: one long ALU uop. */
static const macro_op_t cpuid_op = {
    .nr_uops = 1,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_ALU, .latency = 23}
};
/* 16-bit DIV/IDIV, register form. */
static const macro_op_t div16_op = {
    .nr_uops = 1,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_ALUP0, .latency = 21}
};
/* 16-bit DIV/IDIV, memory form: load plus the divide uop. */
static const macro_op_t div16_mem_op = {
    .nr_uops = 2,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_LOAD, .latency = 1 },
    .uop[1] = { .type = UOP_ALUP0, .latency = 21}
};
/* 32-bit DIV/IDIV, register form. */
static const macro_op_t div32_op = {
    .nr_uops = 1,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_ALUP0, .latency = 37}
};
/* 32-bit DIV/IDIV, memory form: load plus the divide uop. */
static const macro_op_t div32_mem_op = {
    .nr_uops = 2,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_LOAD, .latency = 1 },
    .uop[1] = { .type = UOP_ALUP0, .latency = 37}
};
/* EMMS: one long ALU uop. */
static const macro_op_t emms_op = {
    .nr_uops = 1,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_ALU, .latency = 50}
};
/* ENTER: push of the old frame pointer plus a long frame-setup uop. */
static const macro_op_t enter_op = {
    .nr_uops = 3,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_STORED, .latency = 1 },
    .uop[1] = { .type = UOP_STOREA, .latency = 1 },
    .uop[2] = { .type = UOP_ALU, .latency = 10}
};
/* FEMMS (3DNow!): cheaper EMMS variant. */
static const macro_op_t femms_op = {
    .nr_uops = 1,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_ALU, .latency = 6}
};
/* IN: a single slow I/O load. */
static const macro_op_t in_op = {
    .nr_uops = 1,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_LOAD, .latency = 18}
};
/* INSB/INSW: I/O load, store to memory, index update. */
static const macro_op_t ins_op = {
    .nr_uops = 4,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_LOAD, .latency = 18},
    .uop[1] = { .type = UOP_STORED, .latency = 1 },
    .uop[2] = { .type = UOP_STOREA, .latency = 1 },
    .uop[3] = { .type = UOP_ALU, .latency = 1 }
};
/* INT/INT3/INTO: gate lookup, three stack pushes (flags/CS/EIP), branch. */
static const macro_op_t int_op = {
    .nr_uops = 8,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_ALU, .latency = 20},
    .uop[1] = { .type = UOP_STORED, .latency = 1 },
    .uop[2] = { .type = UOP_STOREA, .latency = 1 },
    .uop[3] = { .type = UOP_STORED, .latency = 1 },
    .uop[4] = { .type = UOP_STOREA, .latency = 1 },
    .uop[5] = { .type = UOP_STORED, .latency = 1 },
    .uop[6] = { .type = UOP_STOREA, .latency = 1 },
    .uop[7] = { .type = UOP_BRANCH, .latency = 1 }
};
/* IRET: three stack pops (EIP/CS/flags), a long flag/segment update, branch. */
static const macro_op_t iret_op = {
    .nr_uops = 5,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_LOAD, .latency = 3 },
    .uop[1] = { .type = UOP_LOAD, .latency = 3 },
    .uop[2] = { .type = UOP_LOAD, .latency = 3 },
    .uop[3] = { .type = UOP_ALU, .latency = 20},
    .uop[4] = { .type = UOP_BRANCH, .latency = 1 }
};
/* INVD: very long single uop. */
static const macro_op_t invd_op = {
    .nr_uops = 1,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_ALU, .latency = 500}
};
/* JMP far: segment load plus branch. */
static const macro_op_t jmp_far_op = {
    .nr_uops = 2,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_ALU, .latency = 3},
    .uop[1] = { .type = UOP_BRANCH, .latency = 1}
};
/* LDS/LES/LFS/LGS/LSS: load offset, load selector, segment update. */
static const macro_op_t lss_op = {
    .nr_uops = 3,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_LOAD, .latency = 1},
    .uop[1] = { .type = UOP_LOAD, .latency = 1},
    .uop[2] = { .type = UOP_ALU, .latency = 3}
};
/* MOV mem,seg: read the selector then store it. */
static const macro_op_t mov_mem_seg_op = {
    .nr_uops = 3,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_LOAD, .latency = 1},
    .uop[1] = { .type = UOP_STORED, .latency = 1},
    .uop[2] = { .type = UOP_STOREA, .latency = 1},
};
/* MOV seg,mem (and POP seg): load selector then segment update. */
static const macro_op_t mov_seg_mem_op = {
    .nr_uops = 2,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_LOAD, .latency = 1},
    .uop[1] = { .type = UOP_ALU, .latency = 3}
};
/* MOV seg,reg: segment update only. */
static const macro_op_t mov_seg_reg_op = {
    .nr_uops = 1,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_ALU, .latency = 3}
};
/* MUL/IMUL with a narrow result: single port-0 uop. */
static const macro_op_t mul_op = {
    .nr_uops = 1,
    .decode_type = DECODE_SIMPLE,
    .uop[0] = {.type = UOP_ALUP0, .latency = 1}
};
/* MUL/IMUL with a memory operand. */
static const macro_op_t mul_mem_op = {
    .nr_uops = 2,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_LOAD, .latency = 1},
    .uop[1] = { .type = UOP_ALUP0, .latency = 1}
};
/* Widening MUL/IMUL (double-width result): three port-0 uops. */
static const macro_op_t mul64_op = {
    .nr_uops = 3,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_ALUP0, .latency = 1},
    .uop[1] = { .type = UOP_ALUP0, .latency = 1},
    .uop[2] = { .type = UOP_ALUP0, .latency = 1}
};
/* Widening MUL/IMUL with a memory operand. */
static const macro_op_t mul64_mem_op = {
    .nr_uops = 4,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_LOAD, .latency = 1},
    .uop[1] = { .type = UOP_ALUP0, .latency = 1},
    .uop[2] = { .type = UOP_ALUP0, .latency = 1},
    .uop[3] = { .type = UOP_ALUP0, .latency = 1}
};
/* OUT: a single slow I/O write uop. */
static const macro_op_t out_op = {
    .nr_uops = 1,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_ALU, .latency = 18}
};
/* OUTSB/OUTSW: memory load, slow I/O write, then the index-register update.
 * Bug fix: the initializer declared nr_uops = 3 but only filled two uops,
 * leaving uop[2] zero-initialized (type 0, latency 0). Add the index-update
 * ALU uop so the breakdown mirrors ins_op. */
static const macro_op_t outs_op = {
    .nr_uops = 3,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_LOAD, .latency = 1 },
    .uop[1] = { .type = UOP_ALU, .latency = 18},
    .uop[2] = { .type = UOP_ALU, .latency = 1 }
};
/* PUSHA: modelled as four store-data/store-address pairs. */
static const macro_op_t pusha_op = {
    .nr_uops = 8,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_STORED, .latency = 2},
    .uop[1] = { .type = UOP_STOREA, .latency = 2},
    .uop[2] = { .type = UOP_STORED, .latency = 2},
    .uop[3] = { .type = UOP_STOREA, .latency = 2},
    .uop[4] = { .type = UOP_STORED, .latency = 2},
    .uop[5] = { .type = UOP_STOREA, .latency = 2},
    .uop[6] = { .type = UOP_STORED, .latency = 2},
    .uop[7] = { .type = UOP_STOREA, .latency = 2}
};
/* POPA: eight loads. */
static const macro_op_t popa_op = {
    .nr_uops = 8,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_LOAD, .latency = 1},
    .uop[1] = { .type = UOP_LOAD, .latency = 1},
    .uop[2] = { .type = UOP_LOAD, .latency = 1},
    .uop[3] = { .type = UOP_LOAD, .latency = 1},
    .uop[4] = { .type = UOP_LOAD, .latency = 1},
    .uop[5] = { .type = UOP_LOAD, .latency = 1},
    .uop[6] = { .type = UOP_LOAD, .latency = 1},
    .uop[7] = { .type = UOP_LOAD, .latency = 1}
};
/* POPF: pop, then two slow flag-update uops. */
static const macro_op_t popf_op = {
    .nr_uops = 3,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_LOAD, .latency = 1 },
    .uop[1] = { .type = UOP_ALU, .latency = 6 },
    .uop[2] = { .type = UOP_ALUP0, .latency = 10}
};
/* PUSHF: read flags, then push. */
static const macro_op_t pushf_op = {
    .nr_uops = 3,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_ALUP0, .latency = 1},
    .uop[1] = { .type = UOP_STORED, .latency = 1},
    .uop[2] = { .type = UOP_STOREA, .latency = 1}
};
/* RET (near): pop return address, branch. */
static const macro_op_t ret_op = {
    .nr_uops = 2,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_LOAD, .latency = 1},
    .uop[1] = { .type = UOP_BRANCH, .latency = 1}
};
/* RETF: pop, segment reload, branch. */
static const macro_op_t retf_op = {
    .nr_uops = 3,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_LOAD, .latency = 1},
    .uop[1] = { .type = UOP_ALU, .latency = 3},
    .uop[2] = { .type = UOP_BRANCH, .latency = 1}
};
/* SCASW/SCASD: load plus compare. */
static const macro_op_t scas_op = {
    .nr_uops = 2,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_LOAD, .latency = 1},
    .uop[1] = { .type = UOP_ALU, .latency = 1}
};
/* SCASB: load plus compare. */
static const macro_op_t scasb_op = {
    .nr_uops = 2,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_LOAD, .latency = 1},
    .uop[1] = { .type = UOP_ALU, .latency = 1}
};
/* SETcc to memory: flag evaluation plus store.
 * NOTE(review): the store is modelled with the FP store uops
 * (UOP_FSTORED/UOP_FSTOREA) even though SETcc is an integer byte store --
 * confirm this is intentional rather than a copy/paste from an FP entry. */
static const macro_op_t setcc_mem_op = {
    .nr_uops = 4,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_ALUP0, .latency = 1},
    .uop[1] = { .type = UOP_ALUP0, .latency = 1},
    .uop[2] = { .type = UOP_FSTORED, .latency = 1},
    .uop[3] = { .type = UOP_FSTOREA, .latency = 1}
};
/* SETcc to register: flag evaluation plus register write. */
static const macro_op_t setcc_reg_op = {
    .nr_uops = 3,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_ALUP0, .latency = 1},
    .uop[1] = { .type = UOP_ALUP0, .latency = 1},
    .uop[2] = { .type = UOP_ALU, .latency = 1}
};
/* TEST mem,reg (word/dword). */
static const macro_op_t test_mem_op = {
    .nr_uops = 2,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_LOAD, .latency = 1},
    .uop[1] = { .type = UOP_ALU, .latency = 1}
};
/* TEST mem,reg (byte): compare is port-0-only. */
static const macro_op_t test_mem_b_op = {
    .nr_uops = 2,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_LOAD, .latency = 1},
    .uop[1] = { .type = UOP_ALUP0, .latency = 1}
};
/* XCHG reg,mem: load, store back, register write. */
static const macro_op_t xchg_mem_op = {
    .nr_uops = 4,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_LOAD, .latency = 1},
    .uop[1] = { .type = UOP_STORED, .latency = 1},
    .uop[2] = { .type = UOP_STOREA, .latency = 1},
    .uop[3] = { .type = UOP_ALU, .latency = 1}
};
/* XLAT: address calculation plus table load. */
static const macro_op_t xlat_op = {
    .nr_uops = 2,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_ALU, .latency = 1},
    .uop[1] = { .type = UOP_LOAD, .latency = 1}
};
/* WBINVD: extremely long single uop. */
static const macro_op_t wbinvd_op = {
    .nr_uops = 1,
    .decode_type = DECODE_COMPLEX,
    .uop[0] = {.type = UOP_ALU, .latency = 10000}
};
#define INVALID NULL
/* One-byte opcode timing table, memory-operand (mod != 3) forms. Indexed by
 * the opcode byte. NOTE(review): INVALID entries appear to be dispatched via
 * the dedicated shift/0x80/0xf6/0xf7/0xff/FPU tables below -- confirm in the
 * (not visible here) lookup code. */
static const macro_op_t *opcode_timings_p6[256] = {
    // clang-format off
/* ADD ADD ADD ADD*/
/*00*/ &alup0_store_op, &alu_store_op, &load_alup0_op, &load_alu_op,
/* ADD ADD PUSH ES POP ES*/
    &alup0_op, &alu_op, &push_seg_op, &mov_seg_mem_op,
/* OR OR OR OR*/
    &alup0_store_op, &alu_store_op, &load_alup0_op, &load_alu_op,
/* OR OR PUSH CS */
    &alup0_op, &alu_op, &push_seg_op, INVALID,
/* ADC ADC ADC ADC*/
/*10*/ &alup0_store_op, &alu_store_op, &load_alup0_op, &load_alu_op,
/* ADC ADC PUSH SS POP SS*/
    &complex_alup0_1_op, &complex_alu1_op, &push_seg_op, &mov_seg_mem_op,
/* SBB SBB SBB SBB*/
/*10*/ &alup0_store_op, &alu_store_op, &load_alup0_op, &load_alu_op,
/* SBB SBB PUSH DS POP DS*/
    &complex_alup0_1_op, &complex_alu1_op, &push_seg_op, &mov_seg_mem_op,
/* AND AND AND AND*/
/*20*/ &alup0_store_op, &alu_store_op, &load_alup0_op, &load_alu_op,
/* AND AND DAA*/
    &alup0_op, &alu_op, INVALID, &complex_alup0_1_op,
/* SUB SUB SUB SUB*/
    &alup0_store_op, &alu_store_op, &load_alup0_op, &load_alu_op,
/* SUB SUB DAS*/
    &alup0_op, &alu_op, INVALID, &complex_alup0_1_op,
/* XOR XOR XOR XOR*/
/*30*/ &alup0_store_op, &alu_store_op, &load_alup0_op, &load_alu_op,
/* XOR XOR AAA*/
    &alup0_op, &alu_op, INVALID, &alup0_6_op,
/* CMP CMP CMP CMP*/
    &load_alup0_op, &load_alu_op, &load_alup0_op, &load_alu_op,
/* CMP CMP AAS*/
    &alup0_op, &alu_op, INVALID, &alup0_6_op,
/* INC EAX INC ECX INC EDX INC EBX*/
/*40*/ &alu_op, &alu_op, &alu_op, &alu_op,
/* INC ESP INC EBP INC ESI INC EDI*/
    &alu_op, &alu_op, &alu_op, &alu_op,
/* DEC EAX DEC ECX DEC EDX DEC EBX*/
    &alu_op, &alu_op, &alu_op, &alu_op,
/* DEC ESP DEC EBP DEC ESI DEC EDI*/
    &alu_op, &alu_op, &alu_op, &alu_op,
/* PUSH EAX PUSH ECX PUSH EDX PUSH EBX*/
/*50*/ &store_op, &store_op, &store_op, &store_op,
/* PUSH ESP PUSH EBP PUSH ESI PUSH EDI*/
    &store_op, &store_op, &store_op, &store_op,
/* POP EAX POP ECX POP EDX POP EBX*/
    &pop_reg_op, &pop_reg_op, &pop_reg_op, &pop_reg_op,
/* POP ESP POP EBP POP ESI POP EDI*/
    &pop_reg_op, &pop_reg_op, &pop_reg_op, &pop_reg_op,
/* PUSHA POPA BOUND ARPL*/
/*60*/ &pusha_op, &popa_op, &bound_op, &arpl_op,
    INVALID, INVALID, INVALID, INVALID,
/* PUSH imm IMUL PUSH imm IMUL*/
    &push_imm_op, &mul_op, &push_imm_op, &mul_op,
/* INSB INSW OUTSB OUTSW*/
    &ins_op, &ins_op, &outs_op, &outs_op,
/* Jxx*/
/*70*/ &branch_op, &branch_op, &branch_op, &branch_op,
    &branch_op, &branch_op, &branch_op, &branch_op,
    &branch_op, &branch_op, &branch_op, &branch_op,
    &branch_op, &branch_op, &branch_op, &branch_op,
/*80*/ INVALID, INVALID, INVALID, INVALID,
/* TEST TEST XCHG XCHG*/
    &test_mem_b_op, &test_mem_op, &xchg_mem_op, &xchg_mem_op,
/* MOV MOV MOV MOV*/
    &store_op, &store_op, &load_op, &load_op,
/* MOV from seg LEA MOV to seg POP*/
    &mov_mem_seg_op, &store_op, &mov_seg_mem_op, &pop_mem_op,
/* NOP XCHG XCHG XCHG*/
/*90*/ &fxch_op, &xchg_op, &xchg_op, &xchg_op,
/* XCHG XCHG XCHG XCHG*/
    &xchg_op, &xchg_op, &xchg_op, &xchg_op,
/* CBW CWD CALL far WAIT*/
    &complex_alu1_op, &complex_alu1_op, &call_far_op, &fxch_op,
/* PUSHF POPF SAHF LAHF*/
    &pushf_op, &popf_op, &complex_alup0_1_op, &complex_alup0_1_op,
/* MOV MOV MOV MOV*/
/*a0*/ &load_op, &load_op, &store_op, &store_op,
/* MOVSB MOVSW CMPSB CMPSW*/
    &movs_op, &movs_op, &cmpsb_op, &cmps_op,
/* TEST TEST STOSB STOSW*/
    &test_reg_b_op, &test_reg_op, &stos_op, &stos_op,
/* LODSB LODSW SCASB SCASW*/
    &lods_op, &lods_op, &scasb_op, &scas_op,
/* MOV*/
/*b0*/ &alu_op, &alu_op, &alu_op, &alu_op,
    &alu_op, &alu_op, &alu_op, &alu_op,
    &alu_op, &alu_op, &alu_op, &alu_op,
    &alu_op, &alu_op, &alu_op, &alu_op,
/* RET imm RET*/
/*c0*/ INVALID, INVALID, &ret_op, &ret_op,
/* LES LDS MOV MOV*/
    &lss_op, &lss_op, &store_op, &store_op,
/* ENTER LEAVE RETF RETF*/
    &enter_op, &leave_op, &retf_op, &retf_op,
/* INT3 INT INTO IRET*/
    &int_op, &int_op, &int_op, &iret_op,
/*d0*/ INVALID, INVALID, INVALID, INVALID,
/* AAM AAD SETALC XLAT*/
    &alup0_6_op, &alup0_3_op, &complex_alup0_1_op, &xlat_op,
    INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
/* LOOPNE LOOPE LOOP JCXZ*/
/*e0*/ &loop_op, &loop_op, &loop_op, &loop_op,
/* IN AL IN AX OUT_AL OUT_AX*/
    &in_op, &in_op, &out_op, &out_op,
/* CALL JMP JMP JMP*/
    &store_op, &branch_op, &jmp_far_op, &branch_op,
/* IN AL IN AX OUT_AL OUT_AX*/
    &in_op, &in_op, &out_op, &out_op,
/* REPNE REPE*/
/*f0*/ INVALID, INVALID, INVALID, INVALID,
/* HLT CMC*/
    &complex_alup0_1_op, &alu2_op, INVALID, INVALID,
/* CLC STC CLI STI*/
    &complex_alu1_op, &complex_alu1_op, &cli_sti_op, &cli_sti_op,
/* CLD STD INCDEC*/
    &complex_alu1_op, &complex_alu1_op, &alup0_store_op, INVALID
    // clang-format on
};
/* One-byte opcode timing table, register (mod == 3) forms. Mirrors
 * opcode_timings_p6 but replaces load/store breakdowns with pure
 * register-to-register timings. */
static const macro_op_t *opcode_timings_p6_mod3[256] = {
    // clang-format off
/* ADD ADD ADD ADD*/
/*00*/ &alup0_op, &alu_op, &alup0_op, &alu_op,
/* ADD ADD PUSH ES POP ES*/
    &alup0_op, &alu_op, &push_seg_op, &mov_seg_mem_op,
/* OR OR OR OR*/
    &alup0_op, &alu_op, &alup0_op, &alu_op,
/* OR OR PUSH CS */
    &alup0_op, &alu_op, &push_seg_op, INVALID,
/* ADC ADC ADC ADC*/
/*10*/ &complex_alup0_1_op, &complex_alu1_op, &complex_alup0_1_op, &complex_alu1_op,
/* ADC ADC PUSH SS POP SS*/
    &complex_alup0_1_op, &complex_alu1_op, &push_seg_op, &mov_seg_mem_op,
/* SBB SBB SBB SBB*/
    &complex_alup0_1_op, &complex_alu1_op, &complex_alup0_1_op, &complex_alu1_op,
/* SBB SBB PUSH DS POP DS*/
    &complex_alup0_1_op, &complex_alu1_op, &push_seg_op, &mov_seg_mem_op,
/* AND AND AND AND*/
/*20*/ &alup0_op, &alu_op, &alup0_op, &alu_op,
/* AND AND DAA*/
    &alup0_op, &alu_op, INVALID, &complex_alup0_1_op,
/* SUB SUB SUB SUB*/
    &alup0_op, &alu_op, &alup0_op, &alu_op,
/* SUB SUB DAS*/
    &alup0_op, &alu_op, INVALID, &complex_alup0_1_op,
/* XOR XOR XOR XOR*/
/*30*/ &alup0_op, &alu_op, &alup0_op, &alu_op,
/* XOR XOR AAA*/
    &alup0_op, &alu_op, INVALID, &alup0_6_op,
/* CMP CMP CMP CMP*/
    &alup0_op, &alu_op, &alup0_op, &alu_op,
/* CMP CMP AAS*/
    &alup0_op, &alu_op, INVALID, &alup0_6_op,
/* INC EAX INC ECX INC EDX INC EBX*/
/*40*/ &alu_op, &alu_op, &alu_op, &alu_op,
/* INC ESP INC EBP INC ESI INC EDI*/
    &alu_op, &alu_op, &alu_op, &alu_op,
/* DEC EAX DEC ECX DEC EDX DEC EBX*/
    &alu_op, &alu_op, &alu_op, &alu_op,
/* DEC ESP DEC EBP DEC ESI DEC EDI*/
    &alu_op, &alu_op, &alu_op, &alu_op,
/* PUSH EAX PUSH ECX PUSH EDX PUSH EBX*/
/*50*/ &store_op, &store_op, &store_op, &store_op,
/* PUSH ESP PUSH EBP PUSH ESI PUSH EDI*/
    &store_op, &store_op, &store_op, &store_op,
/* POP EAX POP ECX POP EDX POP EBX*/
    &pop_reg_op, &pop_reg_op, &pop_reg_op, &pop_reg_op,
/* POP ESP POP EBP POP ESI POP EDI*/
    &pop_reg_op, &pop_reg_op, &pop_reg_op, &pop_reg_op,
/* PUSHA POPA BOUND ARPL*/
/*60*/ &pusha_op, &popa_op, &bound_op, &arpl_op,
    INVALID, INVALID, INVALID, INVALID,
/* PUSH imm IMUL PUSH imm IMUL*/
    &push_imm_op, &mul_op, &push_imm_op, &mul_op,
/* INSB INSW OUTSB OUTSW*/
    &ins_op, &ins_op, &outs_op, &outs_op,
/* Jxx*/
/*70*/ &branch_op, &branch_op, &branch_op, &branch_op,
    &branch_op, &branch_op, &branch_op, &branch_op,
    &branch_op, &branch_op, &branch_op, &branch_op,
    &branch_op, &branch_op, &branch_op, &branch_op,
/*80*/ INVALID, INVALID, INVALID, INVALID,
/* TEST TEST XCHG XCHG*/
    &complex_alu1_op, &complex_alu1_op, &alu3_op, &alu3_op,
/* MOV MOV MOV MOV*/
    &store_op, &store_op, &load_op, &load_op,
/* MOV from seg LEA MOV to seg POP*/
    &mov_reg_seg_op, &store_op, &mov_seg_reg_op, &pop_reg_op,
/* NOP XCHG XCHG XCHG*/
/*90*/ &fxch_op, &xchg_op, &xchg_op, &xchg_op,
/* XCHG XCHG XCHG XCHG*/
    &xchg_op, &xchg_op, &xchg_op, &xchg_op,
/* CBW CWD CALL far WAIT*/
    &complex_alu1_op, &complex_alu1_op, &call_far_op, &fxch_op,
/* PUSHF POPF SAHF LAHF*/
    &pushf_op, &popf_op, &complex_alup0_1_op, &complex_alup0_1_op,
/* MOV MOV MOV MOV*/
/*a0*/ &load_op, &load_op, &store_op, &store_op,
/* MOVSB MOVSW CMPSB CMPSW*/
    &movs_op, &movs_op, &cmpsb_op, &cmps_op,
/* TEST TEST STOSB STOSW*/
    &test_reg_b_op, &test_reg_op, &stos_op, &stos_op,
/* LODSB LODSW SCASB SCASW*/
    &lods_op, &lods_op, &scasb_op, &scas_op,
/* MOV*/
/*b0*/ &alu_op, &alu_op, &alu_op, &alu_op,
    &alu_op, &alu_op, &alu_op, &alu_op,
    &alu_op, &alu_op, &alu_op, &alu_op,
    &alu_op, &alu_op, &alu_op, &alu_op,
/* RET imm RET*/
/*c0*/ INVALID, INVALID, &ret_op, &ret_op,
/* LES LDS MOV MOV*/
    &lss_op, &lss_op, &store_op, &store_op,
/* ENTER LEAVE RETF RETF*/
    &enter_op, &leave_op, &retf_op, &retf_op,
/* INT3 INT INTO IRET*/
    &int_op, &int_op, &int_op, &iret_op,
/*d0*/ INVALID, INVALID, INVALID, INVALID,
/* AAM AAD SETALC XLAT*/
    &alup0_6_op, &alup0_3_op, &complex_alup0_1_op, &xlat_op,
    INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
/* LOOPNE LOOPE LOOP JCXZ*/
/*e0*/ &loop_op, &loop_op, &loop_op, &loop_op,
/* IN AL IN AX OUT_AL OUT_AX*/
    &in_op, &in_op, &out_op, &out_op,
/* CALL JMP JMP JMP*/
    &store_op, &branch_op, &jmp_far_op, &branch_op,
/* IN AL IN AX OUT_AL OUT_AX*/
    &in_op, &in_op, &out_op, &out_op,
/* REPNE REPE*/
/*f0*/ INVALID, INVALID, INVALID, INVALID,
/* HLT CMC*/
    &complex_alup0_1_op, &alu2_op, INVALID, INVALID,
/* CLC STC CLI STI*/
    &complex_alu1_op, &complex_alu1_op, &cli_sti_op, &cli_sti_op,
/* CLD STD INCDEC*/
    &complex_alu1_op, &complex_alu1_op, &complex_alup0_1_op, INVALID
    // clang-format on
};
/* Two-byte (0F-prefixed) opcode timing table, memory-operand forms.
 * Indexed by the second opcode byte. */
static const macro_op_t *opcode_timings_p6_0f[256] = {
    // clang-format off
/*00*/ &alu6_op, &alu6_op, &alu6_op, &alu6_op,
    INVALID, &alu6_op, &alu6_op, INVALID,
    &invd_op, &wbinvd_op, INVALID, INVALID,
    INVALID, &load_op, &femms_op, INVALID,
/*10*/ INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
/*20*/ &alu6_op, &alu6_op, &alu6_op, &alu6_op,
    &alu6_op, &alu6_op, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
/*30*/ &alu6_op, &alu6_op, &alu6_op, INVALID,
    INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
/*40*/ INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
/*50*/ INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
/*60*/ &load_mmx_op, &load_mmx_op, &load_mmx_op, &load_mmx_op,
    &load_mmx_op, &load_mmx_op, &load_mmx_op, &load_mmx_op,
    &load_mmx_op, &load_mmx_op, &load_mmx_op, &load_mmx_op,
    INVALID, INVALID, &mload_op, &mload_op,
/*70*/ INVALID, &load_mmx_shift_op, &load_mmx_shift_op, &load_mmx_shift_op,
    &load_mmx_op, &load_mmx_op, &load_mmx_op, &emms_op,
    INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, &mstore_op, &mstore_op,
/*80*/ &branch_op, &branch_op, &branch_op, &branch_op,
    &branch_op, &branch_op, &branch_op, &branch_op,
    &branch_op, &branch_op, &branch_op, &branch_op,
    &branch_op, &branch_op, &branch_op, &branch_op,
/*90*/ &setcc_reg_op, &setcc_reg_op, &setcc_reg_op, &setcc_reg_op,
    &setcc_reg_op, &setcc_reg_op, &setcc_reg_op, &setcc_reg_op,
    &setcc_reg_op, &setcc_reg_op, &setcc_reg_op, &setcc_reg_op,
    &setcc_reg_op, &setcc_reg_op, &setcc_reg_op, &setcc_reg_op,
/*a0*/ &push_seg_op, &mov_seg_mem_op, &cpuid_op, &load_alu_op,
    &alu_store_op, &alu_store_op, INVALID, INVALID,
    &push_seg_op, &mov_seg_mem_op, INVALID, &load_alu_op,
    &alu_store_op, &alu_store_op, INVALID, &mul_op,
/*b0*/ &cmpxchg_b_op, &cmpxchg_op, &lss_op, &load_alu_op,
    &lss_op, &lss_op, &load_alup0_op, &load_alu_op,
    INVALID, INVALID, &load_alu_op, &load_alu_op,
    &bsx_op, &bsx_op, &load_alup0_op, &load_alu_op,
/*c0*/ &alup0_store_op, &alu_store_op, INVALID, INVALID,
    INVALID, INVALID, INVALID, &cmpxchg_op,
    &bswap_op, &bswap_op, &bswap_op, &bswap_op,
    &bswap_op, &bswap_op, &bswap_op, &bswap_op,
/*d0*/ INVALID, &load_mmx_shift_op, &load_mmx_shift_op, &load_mmx_shift_op,
    INVALID, &load_mmx_mul_op, INVALID, INVALID,
    &load_mmx_op, &load_mmx_op, INVALID, &load_mmx_op,
    &load_mmx_op, &load_mmx_op, INVALID, &load_mmx_op,
/*e0*/ &load_mmx_op, &load_mmx_shift_op, &load_mmx_shift_op, INVALID,
    INVALID, &pmul_mem_op, INVALID, INVALID,
    &load_mmx_op, &load_mmx_op, INVALID, &load_mmx_op,
    &load_mmx_op, &load_mmx_op, INVALID, &load_mmx_op,
/*f0*/ INVALID, &load_mmx_shift_op, &load_mmx_shift_op, &load_mmx_shift_op,
    INVALID, &pmul_mem_op, INVALID, INVALID,
    &load_mmx_op, &load_mmx_op, &load_mmx_op, INVALID,
    &load_mmx_op, &load_mmx_op, &load_mmx_op, INVALID,
    // clang-format on
};
/* Two-byte (0F-prefixed) opcode timing table, register (mod == 3) forms. */
static const macro_op_t *opcode_timings_p6_0f_mod3[256] = {
    // clang-format off
/*00*/ &alu6_op, &alu6_op, &alu6_op, &alu6_op,
    INVALID, &alu6_op, &alu6_op, INVALID,
    &invd_op, &wbinvd_op, INVALID, INVALID,
    INVALID, INVALID, &femms_op, INVALID,
/*10*/ INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
/*20*/ &alu6_op, &alu6_op, &alu6_op, &alu6_op,
    &alu6_op, &alu6_op, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
/*30*/ &alu6_op, &alu6_op, &alu6_op, INVALID,
    INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
/*40*/ INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
/*50*/ INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
/*60*/ &mmx_op, &mmx_op, &mmx_op, &mmx_op,
    &mmx_op, &mmx_op, &mmx_op, &mmx_op,
    &mmx_op, &mmx_op, &mmx_op, &mmx_op,
    INVALID, INVALID, &mmx_op, &mmx_op,
/*70*/ INVALID, &mmx_shift_op, &mmx_shift_op, &mmx_shift_op,
    &mmx_op, &mmx_op, &mmx_op, &emms_op,
    INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, &mmx_op, &mmx_op,
/*80*/ &branch_op, &branch_op, &branch_op, &branch_op,
    &branch_op, &branch_op, &branch_op, &branch_op,
    &branch_op, &branch_op, &branch_op, &branch_op,
    &branch_op, &branch_op, &branch_op, &branch_op,
/*90*/ &setcc_mem_op, &setcc_mem_op, &setcc_mem_op, &setcc_mem_op,
    &setcc_mem_op, &setcc_mem_op, &setcc_mem_op, &setcc_mem_op,
    &setcc_mem_op, &setcc_mem_op, &setcc_mem_op, &setcc_mem_op,
    &setcc_mem_op, &setcc_mem_op, &setcc_mem_op, &setcc_mem_op,
/*a0*/ &push_seg_op, &mov_seg_mem_op, &cpuid_op, &complex_alu1_op,
    &complex_alu1_op, &complex_alu1_op, INVALID, INVALID,
    &push_seg_op, &mov_seg_mem_op, INVALID, &complex_alu1_op,
    &complex_alu1_op, &complex_alu1_op, INVALID, &mul_op,
/*b0*/ &cmpxchg_b_op, &cmpxchg_op, &lss_op, &complex_alu1_op,
    &lss_op, &lss_op, &alup0_op, &alu_op,
    INVALID, INVALID, &complex_alu1_op, &complex_alu1_op,
    &bsx_op, &bsx_op, &alup0_op, &alu_op,
/*c0*/ &complex_alup0_1_op, &complex_alu1_op, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
    &bswap_op, &bswap_op, &bswap_op, &bswap_op,
    &bswap_op, &bswap_op, &bswap_op, &bswap_op,
/*d0*/ INVALID, &mmx_shift_op, &mmx_shift_op, &mmx_shift_op,
    INVALID, &mmx_mul_op, INVALID, INVALID,
    &mmx_op, &mmx_op, INVALID, &mmx_op,
    &mmx_op, &mmx_op, INVALID, &mmx_op,
/*e0*/ &mmx_op, &mmx_shift_op, &mmx_shift_op, INVALID,
    INVALID, &pmul_op, INVALID, INVALID,
    &mmx_op, &mmx_op, INVALID, &mmx_op,
    &mmx_op, &mmx_op, INVALID, &mmx_op,
/*f0*/ INVALID, &mmx_shift_op, &mmx_shift_op, &mmx_shift_op,
    INVALID, &pmul_op, INVALID, INVALID,
    &mmx_op, &mmx_op, &mmx_op, INVALID,
    &mmx_op, &mmx_op, &mmx_op, INVALID,
    // clang-format on
};
/* Shift/rotate group (C0/C1/D0-D3), word/dword memory forms, indexed by /reg. */
static const macro_op_t *opcode_timings_p6_shift[8] =
    {
        // clang-format off
    &alu_store_op, &alu_store_op, &alu_store_op, &alu_store_op,
    &alu_store_op, &alu_store_op, &alu_store_op, &alu_store_op
    // clang-format on
};
/* Shift/rotate group, byte memory forms. */
static const macro_op_t *opcode_timings_p6_shift_b[8] = {
    // clang-format off
    &alup0_store_op, &alup0_store_op, &alup0_store_op, &alup0_store_op,
    &alup0_store_op, &alup0_store_op, &alup0_store_op, &alup0_store_op
    // clang-format on
};
/* Shift/rotate group, word/dword register forms (rotates complex, shifts simple). */
static const macro_op_t *opcode_timings_p6_shift_mod3[8] = {
    // clang-format off
    &complex_alu1_op, &complex_alu1_op, &complex_alu1_op, &complex_alu1_op,
    &alu_op, &alu_op, &alu_op, &alu_op
    // clang-format on
};
/* Shift/rotate group, byte register forms. */
static const macro_op_t *opcode_timings_p6_shift_b_mod3[8] = {
    // clang-format off
    &complex_alup0_1_op, &complex_alup0_1_op, &complex_alup0_1_op, &complex_alup0_1_op,
    &alup0_op, &alup0_op, &alup0_op, &alup0_op
    // clang-format on
};
/* Immediate-ALU group 80 (byte), memory forms, indexed by /reg. */
static const macro_op_t *opcode_timings_p6_80[8] = {
    // clang-format off
    &alup0_store_op, &alup0_store_op, &alup0_store_op, &alup0_store_op,
    &alup0_store_op, &alup0_store_op, &alup0_store_op, &alup0_store_op,
    // clang-format on
};
/* Immediate-ALU group 80 (byte), register forms.
 * NOTE(review): entries 2-3 (/2 ADC, /3 SBB) use the *_store_op timing even
 * though this is the register form -- confirm intentional. */
static const macro_op_t *opcode_timings_p6_80_mod3[8] = {
    // clang-format off
    &alup0_op, &alup0_op, &alup0_store_op, &alup0_store_op,
    &alup0_op, &alup0_op, &alup0_op, &alup0_op,
    // clang-format on
};
/* Immediate-ALU group 81/83 (word/dword), memory forms. */
static const macro_op_t *opcode_timings_p6_8x[8] = {
    // clang-format off
    &alu_store_op, &alu_store_op, &alu_store_op, &alu_store_op,
    &alu_store_op, &alu_store_op, &alu_store_op, &alu_store_op,
    // clang-format on
};
/* Immediate-ALU group 81/83 (word/dword), register forms.
 * NOTE(review): same ADC/SBB *_store_op question as the byte table above. */
static const macro_op_t *opcode_timings_p6_8x_mod3[8] = {
    // clang-format off
    &alu_op, &alu_op, &alu_store_op, &alu_store_op,
    &alu_op, &alu_op, &alu_op, &alu_op,
    // clang-format on
};
/* Group F6 (byte unary ops), memory forms, indexed by /reg. */
static const macro_op_t *opcode_timings_p6_f6[8] = {
    // clang-format off
/* TST NOT NEG*/
    &test_mem_imm_b_op, INVALID, &alup0_store_op, &alup0_store_op,
/* MUL IMUL DIV IDIV*/
    &mul_mem_op, &mul_mem_op, &div16_mem_op, &div16_mem_op,
    // clang-format on
};
/* Group F6 (byte unary ops), register forms. */
static const macro_op_t *opcode_timings_p6_f6_mod3[8] = {
    // clang-format off
/* TST NOT NEG*/
    &test_reg_b_op, INVALID, &alup0_op, &alup0_op,
/* MUL IMUL DIV IDIV*/
    &mul_op, &mul_op, &div16_op, &div16_op,
    // clang-format on
};
/* Group F7 (word/dword unary ops), memory forms. */
static const macro_op_t *opcode_timings_p6_f7[8] = {
    // clang-format off
/* TST NOT NEG*/
    &test_mem_imm_op, INVALID, &alu_store_op, &alu_store_op,
/* MUL IMUL DIV IDIV*/
    &mul64_mem_op, &mul64_mem_op, &div32_mem_op, &div32_mem_op,
    // clang-format on
};
/* Group F7 (word/dword unary ops), register forms. */
static const macro_op_t *opcode_timings_p6_f7_mod3[8] = {
    // clang-format off
/* TST NOT NEG*/
    &test_reg_op, INVALID, &alu_op, &alu_op,
/* MUL IMUL DIV IDIV*/
    &mul64_op, &mul64_op, &div32_op, &div32_op,
    // clang-format on
};
/* Group FF (INC/DEC/CALL/JMP/PUSH), memory forms. */
static const macro_op_t *opcode_timings_p6_ff[8] = {
    // clang-format off
/* INC DEC CALL CALL far*/
    &alu_store_op, &alu_store_op, &store_op, &call_far_op,
/* JMP JMP far PUSH*/
    &branch_op, &jmp_far_op, &push_mem_op, INVALID
    // clang-format on
};
/* Group FF, register forms. */
static const macro_op_t *opcode_timings_p6_ff_mod3[8] = {
    // clang-format off
/* INC DEC CALL CALL far*/
    &complex_alu1_op, &complex_alu1_op, &store_op, &call_far_op,
/* JMP JMP far PUSH*/
    &branch_op, &jmp_far_op, &complex_push_mem_op, INVALID
    // clang-format on
};
/* FPU escape D8 (single-precision memory operands), indexed by /reg. */
static const macro_op_t *opcode_timings_p6_d8[8] = {
    // clang-format off
/* FADDs FMULs FCOMs FCOMPs*/
    &load_fadd_op, &load_fmul_op, &load_float_op, &load_float_op,
/* FSUBs FSUBRs FDIVs FDIVRs*/
    &load_float_op, &load_float_op, &fdiv_mem_op, &fdiv_mem_op,
    // clang-format on
};
/* FPU escape D8, register (ST(i)) forms. */
static const macro_op_t *opcode_timings_p6_d8_mod3[8] = {
    // clang-format off
/* FADD FMUL FCOM FCOMP*/
    &fadd_op, &fmul_op, &float_op, &float_op,
/* FSUB FSUBR FDIV FDIVR*/
    &float_op, &float_op, &fdiv_op, &fdiv_op,
    // clang-format on
};
/* FPU escape D9 (single-precision load/store + environment ops), memory forms. */
static const macro_op_t *opcode_timings_p6_d9[8] = {
    // clang-format off
/* FLDs FSTs FSTPs*/
    &load_float_op, INVALID, &fstore_op, &fstore_op,
/* FLDENV FLDCW FSTENV FSTCW*/
    &complex_float_l_op, &fldcw_op, &complex_float_l_op, &complex_float_op
    // clang-format on
};
/* FPU escape D9, register forms -- indexed by the full low 6 bits of the
 * modrm byte (8 entries per /reg group). */
static const macro_op_t *opcode_timings_p6_d9_mod3[64] = {
    // clang-format off
/*FLD*/
    &float_op, &float_op, &float_op, &float_op,
    &float_op, &float_op, &float_op, &float_op,
/*FXCH*/
    &fxch_op, &fxch_op, &fxch_op, &fxch_op,
    &fxch_op, &fxch_op, &fxch_op, &fxch_op,
/*FNOP*/
    &float_op, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
/*FSTP*/
    &float2_op, &float2_op, &float2_op, &float2_op,
    &float2_op, &float2_op, &float2_op, &float2_op,
/* opFCHS opFABS*/
    &fchs_op, &float_op, INVALID, INVALID,
/* opFTST opFXAM*/
    &float_op, &float_op, INVALID, INVALID,
/* opFLD1 opFLDL2T opFLDL2E opFLDPI*/
    &float_op, &float_op, &float_op, &float_op,
/* opFLDEG2 opFLDLN2 opFLDZ*/
    &float_op, &float_op, &float_op, INVALID,
/* opF2XM1 opFYL2X opFPTAN opFPATAN*/
    &fsin_op, &fsin_op, &fsin_op, &fsin_op,
/* opFDECSTP opFINCSTP,*/
    INVALID, INVALID, &float_op, &float_op,
/* opFPREM opFSQRT opFSINCOS*/
    &fdiv_op, INVALID, &fsqrt_op, &fsin_op,
/* opFRNDINT opFSCALE opFSIN opFCOS*/
    &float_op, &fdiv_op, &fsin_op, &fsin_op
    // clang-format on
};
/* FPU escape DA (32-bit integer memory operands), indexed by /reg. */
static const macro_op_t *opcode_timings_p6_da[8] = {
    // clang-format off
/* FIADDl FIMULl FICOMl FICOMPl*/
    &load_fadd_op, &load_fmul_op, &load_float_op, &load_float_op,
/* FISUBl FISUBRl FIDIVl FIDIVRl*/
    &load_float_op, &load_float_op, &fdiv_mem_op, &fdiv_mem_op,
    // clang-format on
};
/* FPU escape DA, register forms (only FUCOMPP is valid here). */
static const macro_op_t *opcode_timings_p6_da_mod3[8] = {
    // clang-format off
    INVALID, INVALID, INVALID, INVALID,
/* FCOMPP*/
    INVALID, &float_op, INVALID, INVALID
    // clang-format on
};
/* FPU escape DB (32-bit integer + 80-bit extended memory operands). */
static const macro_op_t *opcode_timings_p6_db[8] = {
    // clang-format off
/* FLDil FSTil FSTPil*/
    &load_float_op, INVALID, &fstore_op, &fstore_op,
/* FLDe FSTPe*/
    INVALID, &flde_op, INVALID, &fste_op
    // clang-format on
};
/* FPU escape DB, register forms (FNCLEX/FNINIT family in the /4 group). */
static const macro_op_t *opcode_timings_p6_db_mod3[64] = {
    // clang-format off
    INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
/* opFNOP opFCLEX opFINIT*/
    INVALID, &float_op, &float_op, &float_op,
/* opFNOP opFNOP*/
    &float_op, &float_op, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
    // clang-format on
};
static const macro_op_t *opcode_timings_p6_dc[8] = {
// clang-format off
/* FADDd FMULd FCOMd FCOMPd*/
&load_fadd_op, &load_fmul_op, &load_float_op, &load_float_op,
/* FSUBd FSUBRd FDIVd FDIVRd*/
&load_float_op, &load_float_op, &fdiv_mem_op, &fdiv_mem_op,
// clang-format on
};
static const macro_op_t *opcode_timings_p6_dc_mod3[8] = {
// clang-format off
/* opFADDr opFMULr*/
&fadd_op, &fmul_op, INVALID, INVALID,
/* opFSUBRr opFSUBr opFDIVRr opFDIVr*/
&float_op, &float_op, &fdiv_op, &fdiv_op
// clang-format on
};
static const macro_op_t *opcode_timings_p6_dd[8] = {
// clang-format off
/* FLDd FSTd FSTPd*/
&load_float_op, INVALID, &fstore_op, &fstore_op,
/* FRSTOR FSAVE FSTSW*/
&complex_float_l_op, INVALID, &complex_float_l_op, &complex_float_l_op
// clang-format on
};
static const macro_op_t *opcode_timings_p6_dd_mod3[8] = {
// clang-format off
/* FFFREE FST FSTP*/
&float_op, INVALID, &float_op, &float_op,
/* FUCOM FUCOMP*/
&float_op, &float_op, INVALID, INVALID
// clang-format on
};
static const macro_op_t *opcode_timings_p6_de[8] = {
// clang-format off
/* FIADDw FIMULw FICOMw FICOMPw*/
&load_fiadd_op, &load_fiadd_op, &load_fiadd_op, &load_fiadd_op,
/* FISUBw FISUBRw FIDIVw FIDIVRw*/
&load_fiadd_op, &load_fiadd_op, &load_fiadd_op, &load_fiadd_op,
// clang-format on
};
static const macro_op_t *opcode_timings_p6_de_mod3[8] = {
// clang-format off
/* FADDP FMULP FCOMPP*/
&fadd_op, &fmul_op, INVALID, &float_op,
/* FSUBP FSUBRP FDIVP FDIVRP*/
&float_op, &float_op, &fdiv_op, &fdiv_op,
// clang-format on
};
static const macro_op_t *opcode_timings_p6_df[8] = {
// clang-format off
/* FILDiw FISTiw FISTPiw*/
&load_float_op, INVALID, &fstore_op, &fstore_op,
/* FILDiq FBSTP FISTPiq*/
INVALID, &load_float_op, &complex_float_l_op, &fstore_op,
// clang-format on
};
static const macro_op_t *opcode_timings_p6_df_mod3[8] = {
// clang-format off
INVALID, INVALID, INVALID, INVALID,
/* FSTSW AX*/
&float_op, INVALID, INVALID, INVALID
// clang-format on
};
static uint8_t last_prefix;
static int prefixes;
static int decode_timestamp;
static int last_complete_timestamp;
typedef struct p6_unit_t {
uint32_t uop_mask;
double first_available_cycle;
} p6_unit_t;
static int nr_units;
static p6_unit_t *units;
/*Pentium Pro has no MMX*/
static p6_unit_t ppro_units[] = {
{ .uop_mask = (1 << UOP_ALU) | (1 << UOP_ALUP0) | (1 << UOP_FLOAT) }, /*Port 0*/
{ .uop_mask = (1 << UOP_ALU) | (1 << UOP_BRANCH) }, /*Port 1*/
{ .uop_mask = (1 << UOP_LOAD) | (1 << UOP_FLOAD) }, /*Port 2*/
{ .uop_mask = (1 << UOP_STORED) | (1 << UOP_FSTORED) }, /*Port 3*/
{ .uop_mask = (1 << UOP_STOREA) | (1 << UOP_FSTOREA) }, /*Port 4*/
};
#define NR_PPRO_UNITS (sizeof(ppro_units) / sizeof(p6_unit_t))
/*Pentium II/Celeron assigns the multiplier to port 0, the shifter to port 1, and shares the MMX ALU*/
static p6_unit_t p2_units[] = {
{ .uop_mask = (1 << UOP_ALU) | (1 << UOP_ALUP0) | (1 << UOP_FLOAT) | /*Port 0*/
(1 << UOP_MMX) | (1 << UOP_MMX_MUL) },
{ .uop_mask = (1 << UOP_ALU) | (1 << UOP_BRANCH) | /*Port 1*/
(1 << UOP_MMX) | (1 << UOP_MMX_SHIFT) },
{ .uop_mask = (1 << UOP_LOAD) | (1 << UOP_FLOAD) | (1 << UOP_MLOAD) }, /*Port 2*/
{ .uop_mask = (1 << UOP_STORED) | (1 << UOP_FSTORED) | (1 << UOP_MSTORED) }, /*Port 3*/
{ .uop_mask = (1 << UOP_STOREA) | (1 << UOP_FSTOREA) | (1 << UOP_MSTOREA) }, /*Port 4*/
};
#define NR_P2_UNITS (sizeof(p2_units) / sizeof(p6_unit_t))
static int
uop_run(const p6_uop_t *uop, int decode_time)
{
p6_unit_t *best_unit = NULL;
int best_start_cycle = 99999;
/*UOP_FXCH does not require execution*/
if (uop->type == UOP_FXCH)
return decode_time;
/*Find execution unit for this uOP*/
for (int c = 0; c < nr_units; c++) {
if (units[c].uop_mask & (1 << uop->type)) {
if (units[c].first_available_cycle < best_start_cycle) {
best_unit = &units[c];
best_start_cycle = units[c].first_available_cycle;
}
}
}
if (!best_unit)
fatal("uop_run: can not find execution unit\n");
if (best_start_cycle < decode_time)
best_start_cycle = decode_time;
best_unit->first_available_cycle = best_start_cycle + uop->latency;
return best_start_cycle + uop->latency;
}
/*The P6 decoders can decode, per clock :
- 1 to 3 'simple' instructions, each up to 1 uOP and 7 bytes long
- 1 'complex' instruction, up to 4 uOPs or 3 per cycle for instructions longer than 4 uOPs
*/
static struct {
int nr_uops;
const p6_uop_t *uops[6];
/*Earliest time a uop can start. If the timestamp is -1, then the uop is
part of a dependency chain and the start time is the completion time of
the previous uop*/
int earliest_start[6];
} decode_buffer;
#define NR_OPSEQS 3
/*Timestamps of when the last three op sequences completed. Technically this is incorrect,
as the actual size of the opseq buffer is 20 bytes and not 18, but I'm restricted to multiples of 6*/
static int opseq_completion_timestamp[NR_OPSEQS];
static int next_opseq = 0;
#define NR_REGS 8
/*Timestamp of when last operation on an integer register completed*/
static int reg_available_timestamp[NR_REGS];
/*Timestamp of when last operation on an FPU register completed*/
static int fpu_st_timestamp[8];
/*Completion time of the last uop to be processed. Used to calculate timing of
dependent uop chains*/
static int last_uop_timestamp = 0;
void
decode_flush_p6(void)
{
int start_timestamp;
int uop_timestamp = 0;
/*Decoded opseq can not be submitted if there are no free spaces in the
opseq buffer*/
if (decode_timestamp < opseq_completion_timestamp[next_opseq])
decode_timestamp = opseq_completion_timestamp[next_opseq];
/*Ensure that uops can not be submitted before they have been decoded*/
if (decode_timestamp > last_uop_timestamp)
last_uop_timestamp = decode_timestamp;
/*Submit uops to execution units, and determine the latest completion time*/
for (int c = 0; c < (decode_buffer.nr_uops); c++) {
if (decode_buffer.earliest_start[c] == -1)
start_timestamp = last_uop_timestamp;
else
start_timestamp = decode_buffer.earliest_start[c];
last_uop_timestamp = uop_run(decode_buffer.uops[c], start_timestamp);
if (last_uop_timestamp > uop_timestamp)
uop_timestamp = last_uop_timestamp;
}
/*Calculate opseq completion time. Since opseqs complete in order, it
must be after the last completion.*/
if (uop_timestamp <= last_complete_timestamp)
last_complete_timestamp = last_complete_timestamp + 1;
else
last_complete_timestamp = uop_timestamp;
/*Advance to next opseq in buffer*/
opseq_completion_timestamp[next_opseq] = last_complete_timestamp;
next_opseq++;
if (next_opseq == NR_OPSEQS)
next_opseq = 0;
decode_timestamp++;
decode_buffer.nr_uops = 0;
}
/*The instruction is only of interest here if it's longer than 7 bytes, as that's the
limit on P6 simple decoding*/
static int
codegen_timing_instr_length(uint64_t deps, uint32_t fetchdat, int op_32)
{
int len = prefixes + 1; /*Opcode*/
if (deps & MODRM) {
len++; /*ModR/M*/
if (deps & HAS_IMM8)
len++;
if (deps & HAS_IMM1632)
len += (op_32 & 0x100) ? 4 : 2;
if (op_32 & 0x200) {
if ((fetchdat & 7) == 4 && (fetchdat & 0xc0) != 0xc0) {
/* Has SIB*/
len++;
if ((fetchdat & 0xc0) == 0x40)
len++;
else if ((fetchdat & 0xc0) == 0x80)
len += 4;
else if ((fetchdat & 0x700) == 0x500)
len += 4;
} else {
if ((fetchdat & 0xc0) == 0x40)
len++;
else if ((fetchdat & 0xc0) == 0x80)
len += 4;
else if ((fetchdat & 0xc7) == 0x05)
len += 4;
}
} else {
if ((fetchdat & 0xc0) == 0x40)
len++;
else if ((fetchdat & 0xc0) == 0x80)
len += 2;
else if ((fetchdat & 0xc7) == 0x06)
len += 2;
}
}
return len;
}
static void
decode_instruction(const macro_op_t *ins, uint64_t deps, uint32_t fetchdat, int op_32, int bit8)
{
uint32_t regmask_required;
uint32_t regmask_modified;
int c;
int d = 0; /*Complex decoder uOPs*/
int earliest_start = 0;
decode_type_t decode_type = ins->decode_type;
int instr_length = codegen_timing_instr_length(deps, fetchdat, op_32);
/*Generate input register mask, and determine the earliest time this
instruction can start. This is not accurate, as this is calculated per
x86 instruction when it should be handled per uop*/
regmask_required = get_dstdep_mask(deps, fetchdat, bit8);
regmask_required |= get_addr_regmask(deps, fetchdat, op_32);
for (c = 0; c < 8; c++) {
if (regmask_required & (1 << c)) {
if (reg_available_timestamp[c] > decode_timestamp)
earliest_start = reg_available_timestamp[c];
}
}
if ((deps & FPU_RW_ST0) && fpu_st_timestamp[0] > decode_timestamp)
earliest_start = fpu_st_timestamp[0];
if ((deps & FPU_RW_ST1) && fpu_st_timestamp[1] > decode_timestamp)
earliest_start = fpu_st_timestamp[1];
if (deps & FPU_RW_STREG) {
int reg = fetchdat & 7;
if (fpu_st_timestamp[reg] > decode_timestamp)
earliest_start = fpu_st_timestamp[reg];
}
/*Simple decoders are limited to 7 bytes & 1 uOP*/
if ((decode_type == DECODE_SIMPLE && instr_length > 7) || (decode_type == DECODE_SIMPLE && ins->nr_uops > 1))
decode_type = DECODE_COMPLEX;
switch (decode_type) {
case DECODE_SIMPLE:
if (decode_buffer.nr_uops - d == 2) {
decode_buffer.uops[decode_buffer.nr_uops] = &ins->uop[0];
decode_buffer.earliest_start[decode_buffer.nr_uops] = earliest_start;
decode_buffer.nr_uops = 3;
decode_flush_p6();
} else if (decode_buffer.nr_uops - d == 1) {
decode_buffer.uops[decode_buffer.nr_uops] = &ins->uop[0];
decode_buffer.earliest_start[decode_buffer.nr_uops] = earliest_start;
decode_buffer.nr_uops = 2 + d;
if (d)
decode_flush_p6();
} else if (decode_buffer.nr_uops) {
decode_buffer.uops[decode_buffer.nr_uops] = &ins->uop[0];
decode_buffer.earliest_start[decode_buffer.nr_uops] = earliest_start;
decode_buffer.nr_uops = 1 + d;
} else {
decode_buffer.nr_uops = 1;
decode_buffer.uops[0] = &ins->uop[0];
decode_buffer.earliest_start[0] = earliest_start;
}
break;
case DECODE_COMPLEX:
if (decode_buffer.nr_uops)
decode_flush_p6(); /*The 4-1-1 arrangement implies that a complex ins. can't be decoded after a simple one*/
d = 0;
for (c = 0; c < ins->nr_uops; c++) {
decode_buffer.uops[d] = &ins->uop[c];
if (c == 0)
decode_buffer.earliest_start[d] = earliest_start;
else
decode_buffer.earliest_start[d] = -1;
d++;
if ((d == 3) && (ins->nr_uops > 4)) { /*Ins. with >4 uOPs require the use of special units only present on 3 translate PLAs*/
d = 0;
decode_buffer.nr_uops = 3;
decode_flush_p6(); /*The other two decoders are halted to preserve in-order issue*/
}
}
if (d) {
decode_buffer.nr_uops = d;
}
break;
}
/*Update write timestamps for any output registers*/
regmask_modified = get_dstdep_mask(deps, fetchdat, bit8);
for (c = 0; c < 8; c++) {
if (regmask_modified & (1 << c))
reg_available_timestamp[c] = last_complete_timestamp;
}
if (deps & FPU_POP) {
for (c = 0; c < 7; c++)
fpu_st_timestamp[c] = fpu_st_timestamp[c + 1];
fpu_st_timestamp[7] = 0;
}
if (deps & FPU_POP2) {
for (c = 0; c < 6; c++)
fpu_st_timestamp[c] = fpu_st_timestamp[c + 2];
fpu_st_timestamp[6] = fpu_st_timestamp[7] = 0;
}
if (deps & FPU_PUSH) {
for (c = 0; c < 7; c++)
fpu_st_timestamp[c + 1] = fpu_st_timestamp[c];
fpu_st_timestamp[0] = 0;
}
if (deps & FPU_WRITE_ST0)
fpu_st_timestamp[0] = last_complete_timestamp;
if (deps & FPU_WRITE_ST1)
fpu_st_timestamp[1] = last_complete_timestamp;
if (deps & FPU_WRITE_STREG) {
int reg = fetchdat & 7;
if (deps & FPU_POP)
reg--;
if (reg >= 0 && !(reg == 0 && (deps & FPU_WRITE_ST0)) && !(reg == 1 && (deps & FPU_WRITE_ST1)))
fpu_st_timestamp[reg] = last_complete_timestamp;
}
}
void
codegen_timing_p6_block_start(void)
{
int c;
for (c = 0; c < nr_units; c++)
units[c].first_available_cycle = 0;
decode_timestamp = 0;
last_complete_timestamp = 0;
for (c = 0; c < NR_OPSEQS; c++)
opseq_completion_timestamp[c] = 0;
next_opseq = 0;
for (c = 0; c < NR_REGS; c++)
reg_available_timestamp[c] = 0;
for (c = 0; c < 8; c++)
fpu_st_timestamp[c] = 0;
}
void
codegen_timing_p6_start(void)
{
if (cpu_s->cpu_type == CPU_PENTIUMPRO) {
units = ppro_units;
nr_units = NR_PPRO_UNITS;
} else {
units = p2_units;
nr_units = NR_P2_UNITS;
}
last_prefix = 0;
prefixes = 0;
}
void
codegen_timing_p6_prefix(uint8_t prefix, uint32_t fetchdat)
{
if (prefix != 0x0f)
decode_timestamp++;
last_prefix = prefix;
prefixes++;
}
void
codegen_timing_p6_opcode(uint8_t opcode, uint32_t fetchdat, int op_32, UNUSED(uint32_t op_pc))
{
const macro_op_t **ins_table;
const uint64_t *deps;
int mod3 = ((fetchdat & 0xc0) == 0xc0);
int old_last_complete_timestamp = last_complete_timestamp;
int bit8 = !(opcode & 1);
switch (last_prefix) {
case 0x0f:
ins_table = mod3 ? opcode_timings_p6_0f_mod3 : opcode_timings_p6_0f;
deps = mod3 ? opcode_deps_0f_mod3 : opcode_deps_0f;
break;
case 0xd8:
ins_table = mod3 ? opcode_timings_p6_d8_mod3 : opcode_timings_p6_d8;
deps = mod3 ? opcode_deps_d8_mod3 : opcode_deps_d8;
opcode = (opcode >> 3) & 7;
break;
case 0xd9:
ins_table = mod3 ? opcode_timings_p6_d9_mod3 : opcode_timings_p6_d9;
deps = mod3 ? opcode_deps_d9_mod3 : opcode_deps_d9;
opcode = mod3 ? opcode & 0x3f : (opcode >> 3) & 7;
break;
case 0xda:
ins_table = mod3 ? opcode_timings_p6_da_mod3 : opcode_timings_p6_da;
deps = mod3 ? opcode_deps_da_mod3 : opcode_deps_da;
opcode = (opcode >> 3) & 7;
break;
case 0xdb:
ins_table = mod3 ? opcode_timings_p6_db_mod3 : opcode_timings_p6_db;
deps = mod3 ? opcode_deps_db_mod3 : opcode_deps_db;
opcode = mod3 ? opcode & 0x3f : (opcode >> 3) & 7;
break;
case 0xdc:
ins_table = mod3 ? opcode_timings_p6_dc_mod3 : opcode_timings_p6_dc;
deps = mod3 ? opcode_deps_dc_mod3 : opcode_deps_dc;
opcode = (opcode >> 3) & 7;
break;
case 0xdd:
ins_table = mod3 ? opcode_timings_p6_dd_mod3 : opcode_timings_p6_dd;
deps = mod3 ? opcode_deps_dd_mod3 : opcode_deps_dd;
opcode = (opcode >> 3) & 7;
break;
case 0xde:
ins_table = mod3 ? opcode_timings_p6_de_mod3 : opcode_timings_p6_de;
deps = mod3 ? opcode_deps_de_mod3 : opcode_deps_de;
opcode = (opcode >> 3) & 7;
break;
case 0xdf:
ins_table = mod3 ? opcode_timings_p6_df_mod3 : opcode_timings_p6_df;
deps = mod3 ? opcode_deps_df_mod3 : opcode_deps_df;
opcode = (opcode >> 3) & 7;
break;
default:
switch (opcode) {
case 0x80:
case 0x82:
ins_table = mod3 ? opcode_timings_p6_80_mod3 : opcode_timings_p6_80;
deps = mod3 ? opcode_deps_8x_mod3 : opcode_deps_8x;
opcode = (fetchdat >> 3) & 7;
break;
case 0x81:
case 0x83:
ins_table = mod3 ? opcode_timings_p6_8x_mod3 : opcode_timings_p6_8x;
deps = mod3 ? opcode_deps_8x_mod3 : opcode_deps_8x;
opcode = (fetchdat >> 3) & 7;
break;
case 0xc0:
case 0xd0:
case 0xd2:
ins_table = mod3 ? opcode_timings_p6_shift_b_mod3 : opcode_timings_p6_shift_b;
deps = mod3 ? opcode_deps_shift_mod3 : opcode_deps_shift;
opcode = (fetchdat >> 3) & 7;
break;
case 0xc1:
case 0xd1:
case 0xd3:
ins_table = mod3 ? opcode_timings_p6_shift_mod3 : opcode_timings_p6_shift;
deps = mod3 ? opcode_deps_shift_mod3 : opcode_deps_shift;
opcode = (fetchdat >> 3) & 7;
break;
case 0xf6:
ins_table = mod3 ? opcode_timings_p6_f6_mod3 : opcode_timings_p6_f6;
deps = mod3 ? opcode_deps_f6_mod3 : opcode_deps_f6;
opcode = (fetchdat >> 3) & 7;
break;
case 0xf7:
ins_table = mod3 ? opcode_timings_p6_f7_mod3 : opcode_timings_p6_f7;
deps = mod3 ? opcode_deps_f7_mod3 : opcode_deps_f7;
opcode = (fetchdat >> 3) & 7;
break;
case 0xff:
ins_table = mod3 ? opcode_timings_p6_ff_mod3 : opcode_timings_p6_ff;
deps = mod3 ? opcode_deps_ff_mod3 : opcode_deps_ff;
opcode = (fetchdat >> 3) & 7;
break;
default:
ins_table = mod3 ? opcode_timings_p6_mod3 : opcode_timings_p6;
deps = mod3 ? opcode_deps_mod3 : opcode_deps;
break;
}
}
if (ins_table[opcode])
decode_instruction(ins_table[opcode], deps[opcode], fetchdat, op_32, bit8);
else
decode_instruction(&complex_alu1_op, 0, fetchdat, op_32, bit8);
codegen_block_cycles += (last_complete_timestamp - old_last_complete_timestamp);
}
void
codegen_timing_p6_block_end(void)
{
if (decode_buffer.nr_uops) {
int old_last_complete_timestamp = last_complete_timestamp;
decode_flush_p6();
codegen_block_cycles += (last_complete_timestamp - old_last_complete_timestamp);
}
}
int
codegen_timing_p6_jump_cycles(void)
{
if (decode_buffer.nr_uops)
return 1;
return 0;
}
codegen_timing_t codegen_timing_p6 = {
codegen_timing_p6_start,
codegen_timing_p6_prefix,
codegen_timing_p6_opcode,
codegen_timing_p6_block_start,
codegen_timing_p6_block_end,
codegen_timing_p6_jump_cycles
};
``` | /content/code_sandbox/src/cpu/codegen_timing_p6.c | c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 26,124 |
```objective-c
/*Cyrix-only instructions*/
/*System Management Mode*/
static void
opSVDC_common(uint32_t fetchdat)
{
switch (rmdat & 0x38) {
case 0x00: /*ES*/
cyrix_write_seg_descriptor(easeg + cpu_state.eaaddr, &cpu_state.seg_es);
writememw(0, easeg + cpu_state.eaaddr + 8, ES);
break;
case 0x08: /*CS*/
cyrix_write_seg_descriptor(easeg + cpu_state.eaaddr, &cpu_state.seg_cs);
writememw(0, easeg + cpu_state.eaaddr + 8, CS);
break;
case 0x18: /*DS*/
cyrix_write_seg_descriptor(easeg + cpu_state.eaaddr, &cpu_state.seg_ds);
writememw(0, easeg + cpu_state.eaaddr + 8, DS);
break;
case 0x10: /*SS*/
cyrix_write_seg_descriptor(easeg + cpu_state.eaaddr, &cpu_state.seg_ss);
writememw(0, easeg + cpu_state.eaaddr + 8, SS);
break;
case 0x20: /*FS*/
cyrix_write_seg_descriptor(easeg + cpu_state.eaaddr, &cpu_state.seg_fs);
writememw(0, easeg + cpu_state.eaaddr + 8, FS);
break;
case 0x28: /*GS*/
cyrix_write_seg_descriptor(easeg + cpu_state.eaaddr, &cpu_state.seg_gs);
writememw(0, easeg + cpu_state.eaaddr + 8, GS);
break;
default:
x86illegal();
}
}
static int
opSVDC_a16(uint32_t fetchdat)
{
if (in_smm) {
fetch_ea_16(fetchdat);
SEG_CHECK_WRITE(cpu_state.ea_seg);
opSVDC_common(fetchdat);
} else
x86illegal();
return cpu_state.abrt;
}
static int
opSVDC_a32(uint32_t fetchdat)
{
if (in_smm) {
fetch_ea_32(fetchdat);
SEG_CHECK_WRITE(cpu_state.ea_seg);
opSVDC_common(fetchdat);
} else
x86illegal();
return cpu_state.abrt;
}
static void
opRSDC_common(uint32_t fetchdat)
{
switch (rmdat & 0x38) {
case 0x00: /*ES*/
cyrix_load_seg_descriptor(easeg + cpu_state.eaaddr, &cpu_state.seg_es);
break;
case 0x18: /*DS*/
cyrix_load_seg_descriptor(easeg + cpu_state.eaaddr, &cpu_state.seg_ds);
break;
case 0x10: /*SS*/
cyrix_load_seg_descriptor(easeg + cpu_state.eaaddr, &cpu_state.seg_ss);
break;
case 0x20: /*FS*/
cyrix_load_seg_descriptor(easeg + cpu_state.eaaddr, &cpu_state.seg_fs);
break;
case 0x28: /*GS*/
cyrix_load_seg_descriptor(easeg + cpu_state.eaaddr, &cpu_state.seg_gs);
break;
default:
x86illegal();
}
}
static int
opRSDC_a16(uint32_t fetchdat)
{
if (in_smm) {
fetch_ea_16(fetchdat);
SEG_CHECK_READ(cpu_state.ea_seg);
opRSDC_common(fetchdat);
} else
x86illegal();
return cpu_state.abrt;
}
static int
opRSDC_a32(uint32_t fetchdat)
{
if (in_smm) {
fetch_ea_32(fetchdat);
SEG_CHECK_READ(cpu_state.ea_seg);
opRSDC_common(fetchdat);
} else
x86illegal();
return cpu_state.abrt;
}
static int
opSVLDT_a16(uint32_t fetchdat)
{
if (in_smm) {
fetch_ea_16(fetchdat);
SEG_CHECK_WRITE(cpu_state.ea_seg);
cyrix_write_seg_descriptor(easeg + cpu_state.eaaddr, &ldt);
writememw(0, easeg + cpu_state.eaaddr + 8, ldt.seg);
} else
x86illegal();
return cpu_state.abrt;
}
static int
opSVLDT_a32(uint32_t fetchdat)
{
if (in_smm) {
fetch_ea_32(fetchdat);
SEG_CHECK_WRITE(cpu_state.ea_seg);
cyrix_write_seg_descriptor(easeg + cpu_state.eaaddr, &ldt);
writememw(0, easeg + cpu_state.eaaddr + 8, ldt.seg);
} else
x86illegal();
return cpu_state.abrt;
}
static int
opRSLDT_a16(uint32_t fetchdat)
{
if (in_smm) {
fetch_ea_16(fetchdat);
SEG_CHECK_READ(cpu_state.ea_seg);
cyrix_load_seg_descriptor(easeg + cpu_state.eaaddr, &ldt);
} else
x86illegal();
return cpu_state.abrt;
}
static int
opRSLDT_a32(uint32_t fetchdat)
{
if (in_smm) {
fetch_ea_32(fetchdat);
SEG_CHECK_READ(cpu_state.ea_seg);
cyrix_load_seg_descriptor(easeg + cpu_state.eaaddr, &ldt);
} else
x86illegal();
return cpu_state.abrt;
}
static int
opSVTS_a16(uint32_t fetchdat)
{
if (in_smm) {
fetch_ea_16(fetchdat);
SEG_CHECK_WRITE(cpu_state.ea_seg);
cyrix_write_seg_descriptor(easeg + cpu_state.eaaddr, &tr);
writememw(0, easeg + cpu_state.eaaddr + 8, tr.seg);
} else
x86illegal();
return cpu_state.abrt;
}
static int
opSVTS_a32(uint32_t fetchdat)
{
if (in_smm) {
fetch_ea_32(fetchdat);
SEG_CHECK_WRITE(cpu_state.ea_seg);
cyrix_write_seg_descriptor(easeg + cpu_state.eaaddr, &tr);
writememw(0, easeg + cpu_state.eaaddr + 8, tr.seg);
} else
x86illegal();
return cpu_state.abrt;
}
static int
opRSTS_a16(uint32_t fetchdat)
{
if (in_smm) {
fetch_ea_16(fetchdat);
SEG_CHECK_WRITE(cpu_state.ea_seg);
cyrix_write_seg_descriptor(easeg + cpu_state.eaaddr, &tr);
writememw(0, easeg + cpu_state.eaaddr + 8, tr.seg);
} else
x86illegal();
return cpu_state.abrt;
}
static int
opRSTS_a32(uint32_t fetchdat)
{
if (in_smm) {
fetch_ea_32(fetchdat);
SEG_CHECK_WRITE(cpu_state.ea_seg);
cyrix_write_seg_descriptor(easeg + cpu_state.eaaddr, &tr);
writememw(0, easeg + cpu_state.eaaddr + 8, tr.seg);
} else
x86illegal();
return cpu_state.abrt;
}
static int
opSMINT(uint32_t fetchdat)
{
if (in_smm)
fatal("opSMINT\n");
else
x86illegal();
return 1;
}
static int
opRDSHR_a16(uint32_t fetchdat)
{
if (in_smm)
fatal("opRDSHR_a16\n");
else
x86illegal();
return 1;
}
static int
opRDSHR_a32(uint32_t fetchdat)
{
if (in_smm)
fatal("opRDSHR_a32\n");
else
x86illegal();
return 1;
}
static int
opWRSHR_a16(uint32_t fetchdat)
{
if (in_smm)
fatal("opWRSHR_a16\n");
else
x86illegal();
return 1;
}
static int
opWRSHR_a32(uint32_t fetchdat)
{
if (in_smm)
fatal("opWRSHR_a32\n");
else
x86illegal();
return 1;
}
``` | /content/code_sandbox/src/cpu/x86_ops_cyrix.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 1,887 |
```objective-c
/*
* 86Box A hypervisor and IBM PC system emulator that specializes in
* running old operating systems and software designed for IBM
* PC systems and compatibles from 1981 through fairly recent
* system designs based on the PCI bus.
*
* This file is part of the 86Box distribution.
*
* CPU type handler.
*
*
*
* Authors: Miran Grca, <mgrca8@gmail.com>
*
*/
#ifndef EMU_X87_SF_H
#define EMU_X87_SF_H
#include "softfloat3e/softfloat.h"
typedef struct {
uint16_t cwd;
uint16_t swd;
uint16_t tag;
uint16_t foo;
uint32_t fip;
uint32_t fdp;
uint16_t fcs;
uint16_t fds;
floatx80 st_space[8];
unsigned char tos;
unsigned char align1;
unsigned char align2;
unsigned char align3;
} fpu_state_t;
extern fpu_state_t fpu_state;
#endif /*EMU_X87_SF_H*/
``` | /content/code_sandbox/src/cpu/x87_sf.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 244 |
```objective-c
static int
opXCHG_b_a16(uint32_t fetchdat)
{
uint8_t temp;
fetch_ea_16(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_WRITE(cpu_state.ea_seg);
temp = geteab();
if (cpu_state.abrt)
return 1;
seteab(getr8(cpu_reg));
if (cpu_state.abrt)
return 1;
setr8(cpu_reg, temp);
CLOCK_CYCLES((cpu_mod == 3) ? 3 : 5);
PREFETCH_RUN((cpu_mod == 3) ? 3 : 5, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, 0);
return 0;
}
static int
opXCHG_b_a32(uint32_t fetchdat)
{
uint8_t temp;
fetch_ea_32(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_WRITE(cpu_state.ea_seg);
temp = geteab();
if (cpu_state.abrt)
return 1;
seteab(getr8(cpu_reg));
if (cpu_state.abrt)
return 1;
setr8(cpu_reg, temp);
CLOCK_CYCLES((cpu_mod == 3) ? 3 : 5);
PREFETCH_RUN((cpu_mod == 3) ? 3 : 5, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, 1);
return 0;
}
static int
opXCHG_w_a16(uint32_t fetchdat)
{
uint16_t temp;
fetch_ea_16(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_WRITE(cpu_state.ea_seg);
temp = geteaw();
if (cpu_state.abrt)
return 1;
seteaw(cpu_state.regs[cpu_reg].w);
if (cpu_state.abrt)
return 1;
cpu_state.regs[cpu_reg].w = temp;
CLOCK_CYCLES((cpu_mod == 3) ? 3 : 5);
PREFETCH_RUN((cpu_mod == 3) ? 3 : 5, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, 0);
return 0;
}
static int
opXCHG_w_a32(uint32_t fetchdat)
{
uint16_t temp;
fetch_ea_32(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_WRITE(cpu_state.ea_seg);
temp = geteaw();
if (cpu_state.abrt)
return 1;
seteaw(cpu_state.regs[cpu_reg].w);
if (cpu_state.abrt)
return 1;
cpu_state.regs[cpu_reg].w = temp;
CLOCK_CYCLES((cpu_mod == 3) ? 3 : 5);
PREFETCH_RUN((cpu_mod == 3) ? 3 : 5, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, 1);
return 0;
}
static int
opXCHG_l_a16(uint32_t fetchdat)
{
uint32_t temp;
fetch_ea_16(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_WRITE(cpu_state.ea_seg);
temp = geteal();
if (cpu_state.abrt)
return 1;
seteal(cpu_state.regs[cpu_reg].l);
if (cpu_state.abrt)
return 1;
cpu_state.regs[cpu_reg].l = temp;
CLOCK_CYCLES((cpu_mod == 3) ? 3 : 5);
PREFETCH_RUN((cpu_mod == 3) ? 3 : 5, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0);
return 0;
}
static int
opXCHG_l_a32(uint32_t fetchdat)
{
uint32_t temp;
fetch_ea_32(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_WRITE(cpu_state.ea_seg);
temp = geteal();
if (cpu_state.abrt)
return 1;
seteal(cpu_state.regs[cpu_reg].l);
if (cpu_state.abrt)
return 1;
cpu_state.regs[cpu_reg].l = temp;
CLOCK_CYCLES((cpu_mod == 3) ? 3 : 5);
PREFETCH_RUN((cpu_mod == 3) ? 3 : 5, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 1);
return 0;
}
static int
opXCHG_AX_BX(uint32_t fetchdat)
{
uint16_t temp = AX;
AX = BX;
BX = temp;
CLOCK_CYCLES(3);
PREFETCH_RUN(3, 1, -1, 0, 0, 0, 0, 0);
return 0;
}
static int
opXCHG_AX_CX(uint32_t fetchdat)
{
uint16_t temp = AX;
AX = CX;
CX = temp;
CLOCK_CYCLES(3);
PREFETCH_RUN(3, 1, -1, 0, 0, 0, 0, 0);
return 0;
}
static int
opXCHG_AX_DX(uint32_t fetchdat)
{
uint16_t temp = AX;
AX = DX;
DX = temp;
CLOCK_CYCLES(3);
PREFETCH_RUN(3, 1, -1, 0, 0, 0, 0, 0);
return 0;
}
static int
opXCHG_AX_SI(uint32_t fetchdat)
{
uint16_t temp = AX;
AX = SI;
SI = temp;
CLOCK_CYCLES(3);
PREFETCH_RUN(3, 1, -1, 0, 0, 0, 0, 0);
return 0;
}
static int
opXCHG_AX_DI(uint32_t fetchdat)
{
uint16_t temp = AX;
AX = DI;
DI = temp;
CLOCK_CYCLES(3);
PREFETCH_RUN(3, 1, -1, 0, 0, 0, 0, 0);
return 0;
}
static int
opXCHG_AX_BP(uint32_t fetchdat)
{
uint16_t temp = AX;
AX = BP;
BP = temp;
CLOCK_CYCLES(3);
PREFETCH_RUN(3, 1, -1, 0, 0, 0, 0, 0);
return 0;
}
static int
opXCHG_AX_SP(uint32_t fetchdat)
{
uint16_t temp = AX;
AX = SP;
SP = temp;
CLOCK_CYCLES(3);
PREFETCH_RUN(3, 1, -1, 0, 0, 0, 0, 0);
return 0;
}
static int
opXCHG_EAX_EBX(uint32_t fetchdat)
{
uint32_t temp = EAX;
EAX = EBX;
EBX = temp;
CLOCK_CYCLES(3);
PREFETCH_RUN(3, 1, -1, 0, 0, 0, 0, 0);
return 0;
}
static int
opXCHG_EAX_ECX(uint32_t fetchdat)
{
uint32_t temp = EAX;
EAX = ECX;
ECX = temp;
CLOCK_CYCLES(3);
PREFETCH_RUN(3, 1, -1, 0, 0, 0, 0, 0);
return 0;
}
static int
opXCHG_EAX_EDX(uint32_t fetchdat)
{
uint32_t temp = EAX;
EAX = EDX;
EDX = temp;
CLOCK_CYCLES(3);
PREFETCH_RUN(3, 1, -1, 0, 0, 0, 0, 0);
return 0;
}
static int
opXCHG_EAX_ESI(uint32_t fetchdat)
{
uint32_t temp = EAX;
EAX = ESI;
ESI = temp;
CLOCK_CYCLES(3);
PREFETCH_RUN(3, 1, -1, 0, 0, 0, 0, 0);
return 0;
}
static int
opXCHG_EAX_EDI(uint32_t fetchdat)
{
uint32_t temp = EAX;
EAX = EDI;
EDI = temp;
CLOCK_CYCLES(3);
PREFETCH_RUN(3, 1, -1, 0, 0, 0, 0, 0);
return 0;
}
static int
opXCHG_EAX_EBP(uint32_t fetchdat)
{
uint32_t temp = EAX;
EAX = EBP;
EBP = temp;
CLOCK_CYCLES(3);
PREFETCH_RUN(3, 1, -1, 0, 0, 0, 0, 0);
return 0;
}
static int
opXCHG_EAX_ESP(uint32_t fetchdat)
{
uint32_t temp = EAX;
EAX = ESP;
ESP = temp;
CLOCK_CYCLES(3);
PREFETCH_RUN(3, 1, -1, 0, 0, 0, 0, 0);
return 0;
}
#define opBSWAP(reg) \
static int opBSWAP_##reg(uint32_t fetchdat) \
{ \
reg = (reg >> 24) | ((reg >> 8) & 0xff00) | ((reg << 8) & 0xff0000) | ((reg << 24) & 0xff000000); \
CLOCK_CYCLES(1); \
PREFETCH_RUN(1, 1, -1, 0, 0, 0, 0, 0); \
return 0; \
}
// clang-format off
opBSWAP(EAX)
opBSWAP(EBX)
opBSWAP(ECX)
opBSWAP(EDX)
opBSWAP(ESI)
opBSWAP(EDI)
opBSWAP(EBP)
opBSWAP(ESP)
// clang-format on
``` | /content/code_sandbox/src/cpu/x86_ops_xchg.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 2,505 |
```objective-c
see COPYING for more details
*/
static int
opESCAPE_d8_a16(uint32_t fetchdat)
{
//pclog("D8 A16: fetchdat=%02x.\n", (fetchdat >> 3) & 0x1f);
return x86_opcodes_d8_a16[(fetchdat >> 3) & 0x1f](fetchdat);
}
static int
opESCAPE_d8_a32(uint32_t fetchdat)
{
return x86_opcodes_d8_a32[(fetchdat >> 3) & 0x1f](fetchdat);
}
static int
opESCAPE_d9_a16(uint32_t fetchdat)
{
//pclog("D9 A16: fetchdat=%02x.\n", fetchdat & 0xff);
return x86_opcodes_d9_a16[fetchdat & 0xff](fetchdat);
}
static int
opESCAPE_d9_a32(uint32_t fetchdat)
{
return x86_opcodes_d9_a32[fetchdat & 0xff](fetchdat);
}
static int
opESCAPE_da_a16(uint32_t fetchdat)
{
//pclog("DA A16: fetchdat=%02x.\n", fetchdat & 0xff);
return x86_opcodes_da_a16[fetchdat & 0xff](fetchdat);
}
static int
opESCAPE_da_a32(uint32_t fetchdat)
{
return x86_opcodes_da_a32[fetchdat & 0xff](fetchdat);
}
static int
opESCAPE_db_a16(uint32_t fetchdat)
{
//pclog("DB A16: fetchdat=%02x.\n", fetchdat & 0xff);
return x86_opcodes_db_a16[fetchdat & 0xff](fetchdat);
}
static int
opESCAPE_db_a32(uint32_t fetchdat)
{
return x86_opcodes_db_a32[fetchdat & 0xff](fetchdat);
}
static int
opESCAPE_dc_a16(uint32_t fetchdat)
{
//pclog("DC A16: fetchdat=%02x.\n", (fetchdat >> 3) & 0x1f);
return x86_opcodes_dc_a16[(fetchdat >> 3) & 0x1f](fetchdat);
}
static int
opESCAPE_dc_a32(uint32_t fetchdat)
{
return x86_opcodes_dc_a32[(fetchdat >> 3) & 0x1f](fetchdat);
}
static int
opESCAPE_dd_a16(uint32_t fetchdat)
{
//pclog("DD A16: fetchdat=%02x.\n", fetchdat & 0xff);
return x86_opcodes_dd_a16[fetchdat & 0xff](fetchdat);
}
static int
opESCAPE_dd_a32(uint32_t fetchdat)
{
return x86_opcodes_dd_a32[fetchdat & 0xff](fetchdat);
}
static int
opESCAPE_de_a16(uint32_t fetchdat)
{
//pclog("DE A16: fetchdat=%02x.\n", fetchdat & 0xff);
return x86_opcodes_de_a16[fetchdat & 0xff](fetchdat);
}
static int
opESCAPE_de_a32(uint32_t fetchdat)
{
return x86_opcodes_de_a32[fetchdat & 0xff](fetchdat);
}
static int
opESCAPE_df_a16(uint32_t fetchdat)
{
//pclog("DF A16: fetchdat=%02x.\n", fetchdat & 0xff);
return x86_opcodes_df_a16[fetchdat & 0xff](fetchdat);
}
static int
opESCAPE_df_a32(uint32_t fetchdat)
{
return x86_opcodes_df_a32[fetchdat & 0xff](fetchdat);
}
static int
opWAIT(uint32_t fetchdat)
{
if ((cr0 & 0xa) == 0xa) {
x86_int(7);
return 1;
}
#if 0
if (!cpu_use_dynarec && fpu_softfloat) {
#endif
if (fpu_softfloat) {
if (fpu_state.swd & FPU_SW_Summary) {
if (cr0 & 0x20) {
x86_int(16);
return 1;
}
}
}
CLOCK_CYCLES(4);
return 0;
}
``` | /content/code_sandbox/src/cpu/x86_ops_fpu.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 974 |
```c
#include <stdarg.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <wchar.h>
#include <math.h>
#ifndef INFINITY
# define INFINITY (__builtin_inff())
#endif
#include <86box/86box.h>
#include "cpu.h"
#include <86box/timer.h>
#include "x86.h"
#include "x86_ops.h"
#include "x86seg_common.h"
#include "x86seg.h"
#include "x87_sf.h"
#include "x87.h"
#include "x86_flags.h"
#include <86box/io.h>
#include <86box/mem.h>
#include <86box/nmi.h>
#include <86box/pic.h>
#include <86box/gdbstub.h>
#include "codegen.h"
#include <86box/plat_unused.h>
#include <86box/plat_fallthrough.h>
#define CPU_BLOCK_END() cpu_block_end = 1
#ifndef IS_DYNAREC
# define IS_DYNAREC
#endif
#include "386_common.h"
static __inline void
fetch_ea_32_long(UNUSED(uint32_t rmdat))
{
eal_r = eal_w = NULL;
easeg = cpu_state.ea_seg->base;
if (easeg != 0xFFFFFFFF && ((easeg + cpu_state.eaaddr) & 0xFFF) <= 0xFFC) {
uint32_t addr = easeg + cpu_state.eaaddr;
if (readlookup2[addr >> 12] != (uintptr_t) LOOKUP_INV)
eal_r = (uint32_t *) (readlookup2[addr >> 12] + addr);
if (writelookup2[addr >> 12] != (uintptr_t) LOOKUP_INV)
eal_w = (uint32_t *) (writelookup2[addr >> 12] + addr);
}
}
static __inline void
fetch_ea_16_long(UNUSED(uint32_t rmdat))
{
eal_r = eal_w = NULL;
easeg = cpu_state.ea_seg->base;
if (easeg != 0xFFFFFFFF && ((easeg + cpu_state.eaaddr) & 0xFFF) <= 0xFFC) {
uint32_t addr = easeg + cpu_state.eaaddr;
if (readlookup2[addr >> 12] != (uintptr_t) LOOKUP_INV)
eal_r = (uint32_t *) (readlookup2[addr >> 12] + addr);
if (writelookup2[addr >> 12] != (uintptr_t) LOOKUP_INV)
eal_w = (uint32_t *) (writelookup2[addr >> 12] + addr);
}
}
/* Advance past the ModR/M byte and, for memory operands (mod != 3),
   resolve the effective address with the 16-bit addressing logic. */
#define fetch_ea_16(rmdat)   \
    cpu_state.pc++;          \
    if (cpu_mod != 3)        \
        fetch_ea_16_long(rmdat);
/* Same, using the 32-bit addressing logic. */
#define fetch_ea_32(rmdat)   \
    cpu_state.pc++;          \
    if (cpu_mod != 3)        \
        fetch_ea_32_long(rmdat);

/* Prefetch-queue modelling is compiled out in the dynarec build. */
#define PREFETCH_RUN(instr_cycles, bytes, modrm, reads, read_ls, writes, write_ls, ea32)
#define PREFETCH_PREFIX()
#define PREFETCH_FLUSH()

/* 386_ops.h uses OP_TABLE() to name its dispatch tables; the dynarec
   build gets its own set, prefixed dynarec_ops_. */
#define OP_TABLE(name) dynarec_ops_##name

/* Per-instruction cycle charging is a no-op here (the code generator
   accounts for cycles itself). */
#define CLOCK_CYCLES(c)
#if 0
#    define CLOCK_CYCLES_FPU(c)
#    define CONCURRENCY_CYCLES(c) fpu_cycles = (c)
#else
#    define CLOCK_CYCLES_FPU(c)
#    define CONCURRENCY_CYCLES(c)
#endif
/* Unconditional cycle charge, kept active even in the dynarec build. */
#define CLOCK_CYCLES_ALWAYS(c) cycles -= (c)
#include "386_ops.h"
``` | /content/code_sandbox/src/cpu/386_dynarec_ops.c | c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 798 |
```objective-c
/* IN AL, imm8: read one byte from the immediate-addressed I/O port into AL.
   check_io_perm() validates I/O privilege for the access and aborts the
   instruction on failure (so it must run before the port read). */
static int
opIN_AL_imm(uint32_t fetchdat)
{
    uint16_t port = (uint16_t) getbytef();
    check_io_perm(port, 1);
    AL = inb(port);
    CLOCK_CYCLES(12);
    PREFETCH_RUN(12, 2, -1, 1, 0, 0, 0, 0);
    /* Non-zero return breaks out of the execution loop so a pending,
       unmasked NMI can be serviced. */
    if (nmi && nmi_enable && nmi_mask)
        return 1;
    return 0;
}
/* IN AX, imm8: word-sized variant of the above. */
static int
opIN_AX_imm(uint32_t fetchdat)
{
    uint16_t port = (uint16_t) getbytef();
    check_io_perm(port, 2);
    AX = inw(port);
    CLOCK_CYCLES(12);
    PREFETCH_RUN(12, 2, -1, 1, 0, 0, 0, 0);
    if (nmi && nmi_enable && nmi_mask)
        return 1;
    return 0;
}
/* IN EAX, imm8: dword-sized variant. */
static int
opIN_EAX_imm(uint32_t fetchdat)
{
    uint16_t port = (uint16_t) getbytef();
    check_io_perm(port, 4);
    EAX = inl(port);
    CLOCK_CYCLES(12);
    PREFETCH_RUN(12, 2, -1, 0, 1, 0, 0, 0);
    if (nmi && nmi_enable && nmi_mask)
        return 1;
    return 0;
}
/* OUT imm8, AL: write AL to the immediate-addressed I/O port. */
static int
opOUT_AL_imm(uint32_t fetchdat)
{
    uint16_t port = (uint16_t) getbytef();
    check_io_perm(port, 1);
    outb(port, AL);
    CLOCK_CYCLES(10);
    PREFETCH_RUN(10, 2, -1, 0, 0, 1, 0, 0);
    /* A write to port 0x64 (keyboard controller command port) can reset
       the CPU; propagate x86_was_reset so the loop stops cleanly. */
    if (port == 0x64)
        return x86_was_reset;
    if (nmi && nmi_enable && nmi_mask)
        return 1;
    return 0;
}
/* OUT imm8, AX: word-sized variant. */
static int
opOUT_AX_imm(uint32_t fetchdat)
{
    uint16_t port = (uint16_t) getbytef();
    check_io_perm(port, 2);
    outw(port, AX);
    CLOCK_CYCLES(10);
    PREFETCH_RUN(10, 2, -1, 0, 0, 1, 0, 0);
    if (nmi && nmi_enable && nmi_mask)
        return 1;
    return 0;
}
/* OUT imm8, EAX: dword-sized variant. */
static int
opOUT_EAX_imm(uint32_t fetchdat)
{
    uint16_t port = (uint16_t) getbytef();
    check_io_perm(port, 4);
    outl(port, EAX);
    CLOCK_CYCLES(10);
    PREFETCH_RUN(10, 2, -1, 0, 0, 0, 1, 0);
    if (nmi && nmi_enable && nmi_mask)
        return 1;
    return 0;
}
/* IN AL, DX: read one byte from the port addressed by DX.  The privilege
   check must precede the actual port access. */
static int
opIN_AL_DX(uint32_t fetchdat)
{
    check_io_perm(DX, 1);
    AL = inb(DX);
    CLOCK_CYCLES(12);
    PREFETCH_RUN(12, 1, -1, 1, 0, 0, 0, 0);
    /* Non-zero return lets a pending, unmasked NMI be serviced. */
    if (nmi && nmi_enable && nmi_mask)
        return 1;
    return 0;
}
/* IN AX, DX: word-sized variant. */
static int
opIN_AX_DX(uint32_t fetchdat)
{
    check_io_perm(DX, 2);
    AX = inw(DX);
    CLOCK_CYCLES(12);
    PREFETCH_RUN(12, 1, -1, 1, 0, 0, 0, 0);
    if (nmi && nmi_enable && nmi_mask)
        return 1;
    return 0;
}
/* IN EAX, DX: dword-sized variant. */
static int
opIN_EAX_DX(uint32_t fetchdat)
{
    check_io_perm(DX, 4);
    EAX = inl(DX);
    CLOCK_CYCLES(12);
    PREFETCH_RUN(12, 1, -1, 0, 1, 0, 0, 0);
    if (nmi && nmi_enable && nmi_mask)
        return 1;
    return 0;
}
/* OUT DX, AL: write AL to the port addressed by DX. */
static int
opOUT_AL_DX(uint32_t fetchdat)
{
    check_io_perm(DX, 1);
    outb(DX, AL);
    CLOCK_CYCLES(11);
    PREFETCH_RUN(11, 1, -1, 0, 0, 1, 0, 0);
    if (nmi && nmi_enable && nmi_mask)
        return 1;
    /* DX may address the keyboard controller (0x64), whose command writes
       can reset the CPU — propagate x86_was_reset instead of plain 0. */
    return x86_was_reset;
}
/* OUT DX, AX: word-sized variant. */
static int
opOUT_AX_DX(uint32_t fetchdat)
{
    check_io_perm(DX, 2);
    outw(DX, AX);
    CLOCK_CYCLES(11);
    PREFETCH_RUN(11, 1, -1, 0, 0, 1, 0, 0);
    if (nmi && nmi_enable && nmi_mask)
        return 1;
    return 0;
}
/* OUT DX, EAX: write EAX to the port addressed by DX.
   Fix: this variant was the only OUT handler not charging its cycle cost;
   add CLOCK_CYCLES(11) to match opOUT_AL_DX/opOUT_AX_DX and this
   function's own PREFETCH_RUN(11, ...) accounting. */
static int
opOUT_EAX_DX(uint32_t fetchdat)
{
    check_io_perm(DX, 4);
    outl(DX, EAX);
    CLOCK_CYCLES(11);
    PREFETCH_RUN(11, 1, -1, 0, 0, 0, 1, 0);
    /* Non-zero return lets a pending, unmasked NMI be serviced. */
    if (nmi && nmi_enable && nmi_mask)
        return 1;
    return 0;
}
``` | /content/code_sandbox/src/cpu/x86_ops_io.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 1,214 |
```objective-c
/*
 * Emit the four segment-override prefix handlers for one segment register:
 * 16/32-bit operand size (_w/_l) crossed with 16/32-bit address size
 * (_a16/_a32), selected via the 0x000/0x100/0x200/0x300 table offset.
 * Each handler fetches the following opcode byte, rejects opcodes that are
 * illegal under a LOCK prefix, installs the override segment for the next
 * instruction's memory operand, and dispatches through opcode_table,
 * falling back to normal_opcode_table for entries it does not provide.
 */
#define op_seg(name, seg, opcode_table, normal_opcode_table)             \
    static int op##name##_w_a16(uint32_t fetchdat)                       \
    {                                                                    \
        int legal;                                                       \
        fetchdat = fastreadl(cs + cpu_state.pc);                         \
        if (cpu_state.abrt)                                              \
            return 1;                                                    \
        cpu_state.pc++;                                                  \
                                                                         \
        if (in_lock) {                                                   \
            legal = is_lock_legal(fetchdat);                             \
                                                                         \
            ILLEGAL_ON(legal == 0);                                      \
        }                                                                \
                                                                         \
        cpu_state.ea_seg = &seg;                                         \
        cpu_state.ssegs  = 1;                                            \
        CLOCK_CYCLES(4);                                                 \
        PREFETCH_PREFIX();                                               \
                                                                         \
        if (opcode_table[fetchdat & 0xff])                               \
            return opcode_table[fetchdat & 0xff](fetchdat >> 8);         \
        return normal_opcode_table[fetchdat & 0xff](fetchdat >> 8);      \
    }                                                                    \
                                                                         \
    static int op##name##_l_a16(uint32_t fetchdat)                       \
    {                                                                    \
        int legal;                                                       \
        fetchdat = fastreadl(cs + cpu_state.pc);                         \
        if (cpu_state.abrt)                                              \
            return 1;                                                    \
        cpu_state.pc++;                                                  \
                                                                         \
        if (in_lock) {                                                   \
            legal = is_lock_legal(fetchdat);                             \
                                                                         \
            ILLEGAL_ON(legal == 0);                                      \
        }                                                                \
                                                                         \
        cpu_state.ea_seg = &seg;                                         \
        cpu_state.ssegs  = 1;                                            \
        CLOCK_CYCLES(4);                                                 \
        PREFETCH_PREFIX();                                               \
                                                                         \
        if (opcode_table[(fetchdat & 0xff) | 0x100])                     \
            return opcode_table[(fetchdat & 0xff) | 0x100](fetchdat >> 8); \
        return normal_opcode_table[(fetchdat & 0xff) | 0x100](fetchdat >> 8); \
    }                                                                    \
                                                                         \
    static int op##name##_w_a32(uint32_t fetchdat)                       \
    {                                                                    \
        int legal;                                                       \
        fetchdat = fastreadl(cs + cpu_state.pc);                         \
        if (cpu_state.abrt)                                              \
            return 1;                                                    \
        cpu_state.pc++;                                                  \
                                                                         \
        if (in_lock) {                                                   \
            legal = is_lock_legal(fetchdat);                             \
                                                                         \
            ILLEGAL_ON(legal == 0);                                      \
        }                                                                \
                                                                         \
        cpu_state.ea_seg = &seg;                                         \
        cpu_state.ssegs  = 1;                                            \
        CLOCK_CYCLES(4);                                                 \
        PREFETCH_PREFIX();                                               \
                                                                         \
        if (opcode_table[(fetchdat & 0xff) | 0x200])                     \
            return opcode_table[(fetchdat & 0xff) | 0x200](fetchdat >> 8); \
        return normal_opcode_table[(fetchdat & 0xff) | 0x200](fetchdat >> 8); \
    }                                                                    \
                                                                         \
    static int op##name##_l_a32(uint32_t fetchdat)                       \
    {                                                                    \
        int legal;                                                       \
        fetchdat = fastreadl(cs + cpu_state.pc);                         \
        if (cpu_state.abrt)                                              \
            return 1;                                                    \
        cpu_state.pc++;                                                  \
                                                                         \
        if (in_lock) {                                                   \
            legal = is_lock_legal(fetchdat);                             \
                                                                         \
            ILLEGAL_ON(legal == 0);                                      \
        }                                                                \
                                                                         \
        cpu_state.ea_seg = &seg;                                         \
        cpu_state.ssegs  = 1;                                            \
        CLOCK_CYCLES(4);                                                 \
        PREFETCH_PREFIX();                                               \
                                                                         \
        if (opcode_table[(fetchdat & 0xff) | 0x300])                     \
            return opcode_table[(fetchdat & 0xff) | 0x300](fetchdat >> 8); \
        return normal_opcode_table[(fetchdat & 0xff) | 0x300](fetchdat >> 8); \
    }
/* Instantiate the handlers for each of the six segment registers. */
// clang-format off
op_seg(CS, cpu_state.seg_cs, x86_2386_opcodes, x86_2386_opcodes)
op_seg(DS, cpu_state.seg_ds, x86_2386_opcodes, x86_2386_opcodes)
op_seg(ES, cpu_state.seg_es, x86_2386_opcodes, x86_2386_opcodes)
op_seg(FS, cpu_state.seg_fs, x86_2386_opcodes, x86_2386_opcodes)
op_seg(GS, cpu_state.seg_gs, x86_2386_opcodes, x86_2386_opcodes)
op_seg(SS, cpu_state.seg_ss, x86_2386_opcodes, x86_2386_opcodes)
// clang-format on
/*
 * Same as op_seg(), but for a segment override that follows a REPE/REPNE
 * prefix: dispatch goes through the REP-specific opcode table first, and —
 * unlike op_seg() — no LOCK-legality check is performed here.
 */
#define op_srp(name, seg, opcode_table, normal_opcode_table)             \
    static int op##name##_w_a16(uint32_t fetchdat)                       \
    {                                                                    \
        fetchdat = fastreadl(cs + cpu_state.pc);                         \
        if (cpu_state.abrt)                                              \
            return 1;                                                    \
        cpu_state.pc++;                                                  \
                                                                         \
        cpu_state.ea_seg = &seg;                                         \
        cpu_state.ssegs  = 1;                                            \
        CLOCK_CYCLES(4);                                                 \
        PREFETCH_PREFIX();                                               \
                                                                         \
        if (opcode_table[fetchdat & 0xff])                               \
            return opcode_table[fetchdat & 0xff](fetchdat >> 8);         \
        return normal_opcode_table[fetchdat & 0xff](fetchdat >> 8);      \
    }                                                                    \
                                                                         \
    static int op##name##_l_a16(uint32_t fetchdat)                       \
    {                                                                    \
        fetchdat = fastreadl(cs + cpu_state.pc);                         \
        if (cpu_state.abrt)                                              \
            return 1;                                                    \
        cpu_state.pc++;                                                  \
                                                                         \
        cpu_state.ea_seg = &seg;                                         \
        cpu_state.ssegs  = 1;                                            \
        CLOCK_CYCLES(4);                                                 \
        PREFETCH_PREFIX();                                               \
                                                                         \
        if (opcode_table[(fetchdat & 0xff) | 0x100])                     \
            return opcode_table[(fetchdat & 0xff) | 0x100](fetchdat >> 8); \
        return normal_opcode_table[(fetchdat & 0xff) | 0x100](fetchdat >> 8); \
    }                                                                    \
                                                                         \
    static int op##name##_w_a32(uint32_t fetchdat)                       \
    {                                                                    \
        fetchdat = fastreadl(cs + cpu_state.pc);                         \
        if (cpu_state.abrt)                                              \
            return 1;                                                    \
        cpu_state.pc++;                                                  \
                                                                         \
        cpu_state.ea_seg = &seg;                                         \
        cpu_state.ssegs  = 1;                                            \
        CLOCK_CYCLES(4);                                                 \
        PREFETCH_PREFIX();                                               \
                                                                         \
        if (opcode_table[(fetchdat & 0xff) | 0x200])                     \
            return opcode_table[(fetchdat & 0xff) | 0x200](fetchdat >> 8); \
        return normal_opcode_table[(fetchdat & 0xff) | 0x200](fetchdat >> 8); \
    }                                                                    \
                                                                         \
    static int op##name##_l_a32(uint32_t fetchdat)                       \
    {                                                                    \
        fetchdat = fastreadl(cs + cpu_state.pc);                         \
        if (cpu_state.abrt)                                              \
            return 1;                                                    \
        cpu_state.pc++;                                                  \
                                                                         \
        cpu_state.ea_seg = &seg;                                         \
        cpu_state.ssegs  = 1;                                            \
        CLOCK_CYCLES(4);                                                 \
        PREFETCH_PREFIX();                                               \
                                                                         \
        if (opcode_table[(fetchdat & 0xff) | 0x300])                     \
            return opcode_table[(fetchdat & 0xff) | 0x300](fetchdat >> 8); \
        return normal_opcode_table[(fetchdat & 0xff) | 0x300](fetchdat >> 8); \
    }
/* Instantiate per segment register for both REPE and REPNE. */
// clang-format off
op_srp(CS_REPE, cpu_state.seg_cs, x86_2386_opcodes_REPE, x86_2386_opcodes)
op_srp(DS_REPE, cpu_state.seg_ds, x86_2386_opcodes_REPE, x86_2386_opcodes)
op_srp(ES_REPE, cpu_state.seg_es, x86_2386_opcodes_REPE, x86_2386_opcodes)
op_srp(FS_REPE, cpu_state.seg_fs, x86_2386_opcodes_REPE, x86_2386_opcodes)
op_srp(GS_REPE, cpu_state.seg_gs, x86_2386_opcodes_REPE, x86_2386_opcodes)
op_srp(SS_REPE, cpu_state.seg_ss, x86_2386_opcodes_REPE, x86_2386_opcodes)
op_srp(CS_REPNE, cpu_state.seg_cs, x86_2386_opcodes_REPNE, x86_2386_opcodes)
op_srp(DS_REPNE, cpu_state.seg_ds, x86_2386_opcodes_REPNE, x86_2386_opcodes)
op_srp(ES_REPNE, cpu_state.seg_es, x86_2386_opcodes_REPNE, x86_2386_opcodes)
op_srp(FS_REPNE, cpu_state.seg_fs, x86_2386_opcodes_REPNE, x86_2386_opcodes)
op_srp(GS_REPNE, cpu_state.seg_gs, x86_2386_opcodes_REPNE, x86_2386_opcodes)
op_srp(SS_REPNE, cpu_state.seg_ss, x86_2386_opcodes_REPNE, x86_2386_opcodes)
// clang-format on
/* 0x66 prefix: flip the operand size for the next opcode.  ILLEGAL_ON()
   can flag the instruction as illegal and alter control flow, so the
   LOCK-legality result is computed first. */
static int
op_66(uint32_t fetchdat) /*Data size select*/
{
    int legal;
    fetchdat = fastreadl(cs + cpu_state.pc);
    if (cpu_state.abrt)
        return 1;
    cpu_state.pc++;
    if (in_lock) {
        legal = is_lock_legal(fetchdat);
        ILLEGAL_ON(legal == 0);
    }
    /* Bit 8 of op32 selects the 32-bit-operand table half; XOR against
       use32 inverts the segment default for this one instruction. */
    cpu_state.op32 = ((use32 & 0x100) ^ 0x100) | (cpu_state.op32 & 0x200);
    CLOCK_CYCLES(2);
    PREFETCH_PREFIX();
    return x86_2386_opcodes[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
}
/* 0x67 prefix: flip the address size (bit 9 of op32) for the next opcode. */
static int
op_67(uint32_t fetchdat) /*Address size select*/
{
    int legal;
    fetchdat = fastreadl(cs + cpu_state.pc);
    if (cpu_state.abrt)
        return 1;
    cpu_state.pc++;
    if (in_lock) {
        legal = is_lock_legal(fetchdat);
        ILLEGAL_ON(legal == 0);
    }
    cpu_state.op32 = ((use32 & 0x200) ^ 0x200) | (cpu_state.op32 & 0x100);
    CLOCK_CYCLES(2);
    PREFETCH_PREFIX();
    return x86_2386_opcodes[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
}
/* 0x66 prefix following REPE: flip operand size, then dispatch through the
   REPE table, falling back to the normal table.  No LOCK check here, in
   contrast to the bare op_66(). */
static int
op_66_REPE(uint32_t fetchdat) /*Data size select*/
{
    fetchdat = fastreadl(cs + cpu_state.pc);
    if (cpu_state.abrt)
        return 1;
    cpu_state.pc++;
    cpu_state.op32 = ((use32 & 0x100) ^ 0x100) | (cpu_state.op32 & 0x200);
    CLOCK_CYCLES(2);
    PREFETCH_PREFIX();
    if (x86_2386_opcodes_REPE[(fetchdat & 0xff) | cpu_state.op32])
        return x86_2386_opcodes_REPE[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
    return x86_2386_opcodes[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
}
/* 0x67 prefix following REPE: flip address size, REPE-table dispatch. */
static int
op_67_REPE(uint32_t fetchdat) /*Address size select*/
{
    fetchdat = fastreadl(cs + cpu_state.pc);
    if (cpu_state.abrt)
        return 1;
    cpu_state.pc++;
    cpu_state.op32 = ((use32 & 0x200) ^ 0x200) | (cpu_state.op32 & 0x100);
    CLOCK_CYCLES(2);
    PREFETCH_PREFIX();
    if (x86_2386_opcodes_REPE[(fetchdat & 0xff) | cpu_state.op32])
        return x86_2386_opcodes_REPE[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
    return x86_2386_opcodes[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
}
/* 0x66 prefix following REPNE: flip operand size, REPNE-table dispatch. */
static int
op_66_REPNE(uint32_t fetchdat) /*Data size select*/
{
    fetchdat = fastreadl(cs + cpu_state.pc);
    if (cpu_state.abrt)
        return 1;
    cpu_state.pc++;
    cpu_state.op32 = ((use32 & 0x100) ^ 0x100) | (cpu_state.op32 & 0x200);
    CLOCK_CYCLES(2);
    PREFETCH_PREFIX();
    if (x86_2386_opcodes_REPNE[(fetchdat & 0xff) | cpu_state.op32])
        return x86_2386_opcodes_REPNE[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
    return x86_2386_opcodes[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
}
/* 0x67 prefix following REPNE: flip address size, REPNE-table dispatch. */
static int
op_67_REPNE(uint32_t fetchdat) /*Address size select*/
{
    fetchdat = fastreadl(cs + cpu_state.pc);
    if (cpu_state.abrt)
        return 1;
    cpu_state.pc++;
    cpu_state.op32 = ((use32 & 0x200) ^ 0x200) | (cpu_state.op32 & 0x100);
    CLOCK_CYCLES(2);
    PREFETCH_PREFIX();
    if (x86_2386_opcodes_REPNE[(fetchdat & 0xff) | cpu_state.op32])
        return x86_2386_opcodes_REPNE[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
    return x86_2386_opcodes[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
}
``` | /content/code_sandbox/src/cpu/x86_ops_prefix_2386.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 3,297 |
```c
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <wchar.h>
#include <86box/86box.h>
#include "cpu.h"
#include <86box/mem.h>
#include <86box/plat_unused.h>
#include "x86.h"
#include "x86_ops.h"
#include "x87_sf.h"
#include "x87.h"
#include "codegen.h"
#include "codegen_ops.h"
#include "codegen_timing_common.h"
/* Encode a constant cycle count directly in the table pointer (decoded by
   COUNT(): values <= 10000 are treated as immediates, not pointers). */
#define CYCLES(c) (int *) c
/* Encode separate 16-bit (low byte) and 32-bit (next byte) operand-size
   cycle counts, with all upper bits set as the CYCLES2 tag. */
#define CYCLES2(c16, c32) (int *) ((-1 & ~0xffff) | c16 | (c32 << 8))
static int *opcode_timings_486[256] = {
// clang-format off
/*00*/ &timing_mr, &timing_mr, &timing_rm, &timing_rm, &timing_rr, &timing_rr, CYCLES(2), CYCLES(3), &timing_mr, &timing_mr, &timing_rm, &timing_rm, &timing_rr, &timing_rr, CYCLES(2), NULL,
/*10*/ &timing_mr, &timing_mr, &timing_rm, &timing_rm, &timing_rr, &timing_rr, CYCLES(2), CYCLES(3), &timing_mr, &timing_mr, &timing_rm, &timing_rm, &timing_rr, &timing_rr, CYCLES(2), CYCLES(3),
/*20*/ &timing_mr, &timing_mr, &timing_rm, &timing_rm, &timing_rr, &timing_rr, CYCLES(4), CYCLES(3), &timing_mr, &timing_mr, &timing_rm, &timing_rm, &timing_rr, &timing_rr, CYCLES(4), CYCLES(3),
/*30*/ &timing_mr, &timing_mr, &timing_rm, &timing_rm, &timing_rr, &timing_rr, CYCLES(4), CYCLES(2), &timing_mr, &timing_mr, &timing_rm, &timing_rm, &timing_rr, &timing_rr, CYCLES(4), CYCLES(2),
/*40*/ &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr,
/*50*/ CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1),
/*60*/ CYCLES(11), CYCLES(9), CYCLES(7), CYCLES(9), CYCLES(4), CYCLES(4), CYCLES(2), CYCLES(2), CYCLES(1), CYCLES2(17,25), CYCLES(1), CYCLES2(17,20), CYCLES(17), CYCLES(17), CYCLES(17), CYCLES(17),
/*70*/ &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt,
/*80*/ &timing_mr, &timing_mr, &timing_mr, &timing_mr, &timing_rm, &timing_rm, CYCLES(5), CYCLES(5), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(3), CYCLES(1), CYCLES(5), CYCLES(6),
/*90*/ CYCLES(1), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(0), CYCLES(4), CYCLES(4), CYCLES(5), CYCLES(2), CYCLES(3),
/*a0*/ CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(7), CYCLES(7), CYCLES(8), CYCLES(8), CYCLES(1), CYCLES(1), CYCLES(5), CYCLES(5), CYCLES(5), CYCLES(5), CYCLES(6), CYCLES(6),
/*b0*/ &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr,
/*c0*/ CYCLES(4), CYCLES(4), CYCLES(5), CYCLES(5), CYCLES(6), CYCLES(6), CYCLES(1), CYCLES(1), CYCLES(14), CYCLES(5), CYCLES(0), CYCLES(0), &timing_int, &timing_int, CYCLES(3), CYCLES(0),
/*d0*/ CYCLES(4), CYCLES(4), CYCLES(4), CYCLES(4), CYCLES(15), CYCLES(14), CYCLES(2), CYCLES(4), NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*e0*/ CYCLES(6), CYCLES(6), CYCLES(6), CYCLES(5), CYCLES(14), CYCLES(14), CYCLES(16), CYCLES(16), CYCLES(3), CYCLES(3), CYCLES(17), CYCLES(3), CYCLES(14), CYCLES(14), CYCLES(14), CYCLES(14),
/*f0*/ CYCLES(4), CYCLES(0), CYCLES(0), CYCLES(0), CYCLES(4), CYCLES(2), NULL, NULL, CYCLES(2), CYCLES(2), CYCLES(3), CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(3), NULL
// clang-format on
};
static int *opcode_timings_486_mod3[256] = {
// clang-format off
/*00*/ &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, CYCLES(2), CYCLES(3), &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, CYCLES(2), NULL,
/*10*/ &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, CYCLES(2), CYCLES(3), &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, CYCLES(2), CYCLES(3),
/*20*/ &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, CYCLES(4), CYCLES(3), &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, CYCLES(4), CYCLES(3),
/*30*/ &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, CYCLES(4), CYCLES(2), &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, CYCLES(4), CYCLES(2),
/*40*/ &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr,
/*50*/ CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1),
/*60*/ CYCLES(11), CYCLES(9), CYCLES(7), CYCLES(9), CYCLES(4), CYCLES(4), CYCLES(2), CYCLES(2), CYCLES(1), CYCLES2(14,25), CYCLES(1), CYCLES2(17,20), CYCLES(17), CYCLES(17), CYCLES(17), CYCLES(17),
/*70*/ &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt,
/*80*/ &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, CYCLES(5), CYCLES(5), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(2), CYCLES(1), CYCLES(2), CYCLES(1),
/*90*/ CYCLES(1), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(0), CYCLES(4), CYCLES(4), CYCLES(5), CYCLES(2), CYCLES(3),
/*a0*/ CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(7), CYCLES(7), CYCLES(8), CYCLES(8), CYCLES(1), CYCLES(1), CYCLES(5), CYCLES(5), CYCLES(5), CYCLES(5), CYCLES(6), CYCLES(6),
/*b0*/ &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr,
/*c0*/ CYCLES(4), CYCLES(4), CYCLES(5), CYCLES(5), CYCLES(6), CYCLES(6), CYCLES(1), CYCLES(1), CYCLES(14), CYCLES(5), CYCLES(0), CYCLES(0), &timing_int, &timing_int, CYCLES(3), CYCLES(0),
/*d0*/ CYCLES(4), CYCLES(4), CYCLES(4), CYCLES(4), CYCLES(15), CYCLES(14), CYCLES(2), CYCLES(4), NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*e0*/ CYCLES(6), CYCLES(6), CYCLES(6), CYCLES(5), CYCLES(14), CYCLES(14), CYCLES(16), CYCLES(16), CYCLES(3), CYCLES(3), CYCLES(17), CYCLES(3), CYCLES(14), CYCLES(14), CYCLES(14), CYCLES(14),
/*f0*/ CYCLES(4), CYCLES(0), CYCLES(0), CYCLES(0), CYCLES(4), CYCLES(2), NULL, NULL, CYCLES(2), CYCLES(2), CYCLES(3), CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(3), NULL
// clang-format on
};
static int *opcode_timings_486_0f[256] = {
// clang-format off
/*00*/ CYCLES(20), CYCLES(11), CYCLES(11), CYCLES(10), NULL, CYCLES(195), CYCLES(7), NULL, CYCLES(1000), CYCLES(10000), NULL, NULL, NULL, NULL, NULL, NULL,
/*10*/ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*20*/ CYCLES(6), CYCLES(6), CYCLES(6), CYCLES(6), CYCLES(6), CYCLES(6), NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*30*/ CYCLES(9), CYCLES(1), CYCLES(9), NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*40*/ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*50*/ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*60*/ &timing_rm, &timing_rm, &timing_rm, &timing_rm, &timing_rm, &timing_rm, &timing_rm, &timing_rm, &timing_rm, &timing_rm, &timing_rm, &timing_rm, NULL, NULL, &timing_rm, &timing_rm,
/*70*/ NULL, &timing_rm, &timing_rm, &timing_rm, &timing_rm, &timing_rm, &timing_rm, CYCLES(100), NULL, NULL, NULL, NULL, NULL, NULL, &timing_rm, &timing_rm,
/*80*/ &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt,
/*90*/ CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3),
/*a0*/ CYCLES(3), CYCLES(3), CYCLES(14), CYCLES(8), CYCLES(3), CYCLES(4), NULL, NULL, CYCLES(3), CYCLES(3), NULL, CYCLES(13), CYCLES(3), CYCLES(3), NULL, CYCLES2(18,30),
/*b0*/ CYCLES(10), CYCLES(10), CYCLES(6), CYCLES(13), CYCLES(6), CYCLES(6), CYCLES(3), CYCLES(3), NULL, NULL, CYCLES(6), CYCLES(13), CYCLES(7), CYCLES(7), CYCLES(3), CYCLES(3),
/*c0*/ CYCLES(4), CYCLES(4), NULL, NULL, NULL, NULL, NULL, NULL, CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1),
/*d0*/ NULL, &timing_rm, &timing_rm, &timing_rm, NULL, &timing_rm, NULL, NULL, &timing_rm, &timing_rm, NULL, &timing_rm, &timing_rm, &timing_rm, NULL, &timing_rm,
/*e0*/ NULL, &timing_rm, &timing_rm, NULL, NULL, &timing_rm, NULL, NULL, &timing_rm, &timing_rm, NULL, &timing_rm, &timing_rm, &timing_rm, NULL, &timing_rm,
/*f0*/ NULL, &timing_rm, &timing_rm, &timing_rm, NULL, &timing_rm, NULL, NULL, &timing_rm, &timing_rm, &timing_rm, NULL, &timing_rm, &timing_rm, &timing_rm, NULL,
// clang-format on
};
static int *opcode_timings_486_0f_mod3[256] = {
// clang-format off
/*00*/ CYCLES(20), CYCLES(11), CYCLES(11), CYCLES(10), NULL, CYCLES(195), CYCLES(7), NULL, CYCLES(1000), CYCLES(10000), NULL, NULL, NULL, NULL, NULL, NULL,
/*10*/ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*20*/ CYCLES(6), CYCLES(6), CYCLES(6), CYCLES(6), CYCLES(6), CYCLES(6), NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*30*/ CYCLES(9), CYCLES(1), CYCLES(9), NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*40*/ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*50*/ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/*60*/ &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, NULL, NULL, &timing_rr, &timing_rr,
/*70*/ NULL, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, &timing_rr, CYCLES(100), NULL, NULL, NULL, NULL, NULL, NULL, &timing_rr, &timing_rr,
/*80*/ &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt, &timing_bnt,
/*90*/ CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3),
/*a0*/ CYCLES(3), CYCLES(3), CYCLES(14), CYCLES(8), CYCLES(3), CYCLES(4), NULL, NULL, CYCLES(3), CYCLES(3), NULL, CYCLES(13), CYCLES(3), CYCLES(3), NULL, CYCLES2(18,30),
/*b0*/ CYCLES(10), CYCLES(10), CYCLES(6), CYCLES(13), CYCLES(6), CYCLES(6), CYCLES(3), CYCLES(3), NULL, NULL, CYCLES(6), CYCLES(13), CYCLES(7), CYCLES(7), CYCLES(3), CYCLES(3),
/*c0*/ CYCLES(4), CYCLES(4), NULL, NULL, NULL, NULL, NULL, NULL, CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1),
/*d0*/ NULL, &timing_rr, &timing_rr, &timing_rr, NULL, &timing_rr, NULL, NULL, &timing_rr, &timing_rr, NULL, &timing_rr, &timing_rr, &timing_rr, NULL, &timing_rr,
/*e0*/ NULL, &timing_rr, &timing_rr, NULL, NULL, &timing_rr, NULL, NULL, &timing_rr, &timing_rr, NULL, &timing_rr, &timing_rr, &timing_rr, NULL, &timing_rr,
/*f0*/ NULL, &timing_rr, &timing_rr, &timing_rr, NULL, &timing_rr, NULL, NULL, &timing_rr, &timing_rr, &timing_rr, NULL, &timing_rr, &timing_rr, &timing_rr, NULL,
// clang-format on
};
/* Group 2 shift/rotate timings, indexed by the ModR/M reg field;
   memory-operand form. */
static int *opcode_timings_486_shift[8] = {
    // clang-format off
    CYCLES(7), CYCLES(7), CYCLES(10), CYCLES(10), CYCLES(7), CYCLES(7), CYCLES(7), CYCLES(7)
    // clang-format on
};
/* Register-operand form of the above. */
static int *opcode_timings_486_shift_mod3[8] = {
    // clang-format off
    CYCLES(3), CYCLES(3), CYCLES(9), CYCLES(9), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3)
    // clang-format on
};
/* Group 3, byte form (F6 /r): TEST, (reserved), NOT, NEG, MUL, IMUL, DIV, IDIV. */
static int *opcode_timings_486_f6[8] = {
    // clang-format off
    &timing_rm, NULL, &timing_mm, &timing_mm, CYCLES(13), CYCLES(14), CYCLES(16), CYCLES(19)
    // clang-format on
};
static int *opcode_timings_486_f6_mod3[8] = {
    // clang-format off
    &timing_rr, NULL, &timing_rr, &timing_rr, CYCLES(13), CYCLES(14), CYCLES(16), CYCLES(19)
    // clang-format on
};
/* Group 3, word/dword form (F7 /r); CYCLES2() holds 16/32-bit variants. */
static int *opcode_timings_486_f7[8] = {
    // clang-format off
    &timing_rm, NULL, &timing_mm, &timing_mm, CYCLES(21), CYCLES2(22,38), CYCLES2(24,40), CYCLES2(27,43)
    // clang-format on
};
static int *opcode_timings_486_f7_mod3[8] = {
    // clang-format off
    &timing_rr, NULL, &timing_rr, &timing_rr, CYCLES(21), CYCLES2(22,38), CYCLES2(24,40), CYCLES2(27,43)
    // clang-format on
};
/* Group 5 (FF /r): INC, DEC, CALL, CALL far, JMP, JMP far, PUSH, (reserved). */
static int *opcode_timings_486_ff[8] = {
    // clang-format off
    &timing_mm, &timing_mm, CYCLES(5), CYCLES(0), CYCLES(5), CYCLES(0), CYCLES(5), NULL
    // clang-format on
};
static int *opcode_timings_486_ff_mod3[8] = {
    // clang-format off
    &timing_rr, &timing_rr, CYCLES(5), CYCLES(0), CYCLES(5), CYCLES(0), CYCLES(5), NULL
    // clang-format on
};
/* D8 escape, memory operand: indexed by the ModR/M reg field. */
static int *opcode_timings_486_d8[8] = {
    // clang-format off
/*      FADDil      FMULil     FCOMil    FCOMPil      FSUBil     FSUBRil      FDIVil     FDIVRil*/
    CYCLES(8), CYCLES(11), CYCLES(4), CYCLES(4), CYCLES(8), CYCLES(8), CYCLES(73), CYCLES(73)
    // clang-format on
};
/* D8 escape, register operand. */
static int *opcode_timings_486_d8_mod3[8] = {
    // clang-format off
/*        FADD        FMUL       FCOM      FCOMP        FSUB       FSUBR        FDIV       FDIVR*/
    CYCLES(8), CYCLES(16), CYCLES(4), CYCLES(4), CYCLES(8), CYCLES(8), CYCLES(73), CYCLES(73)
    // clang-format on
};
/* D9 escape, memory operand (slot 1 is reserved). */
static int *opcode_timings_486_d9[8] = {
    // clang-format off
/*        FLDs                  FSTs      FSTPs      FLDENV      FLDCW      FSTENV      FSTCW*/
    CYCLES(3), NULL, CYCLES(7), CYCLES(7), CYCLES(34), CYCLES(4), CYCLES(67), CYCLES(3)
    // clang-format on
};
/* D9 escape, register operand: indexed by the full low 6 opcode bits. */
static int *opcode_timings_486_d9_mod3[64] = {
    // clang-format off
    /*FLD*/
    CYCLES(4), CYCLES(4), CYCLES(4), CYCLES(4), CYCLES(4), CYCLES(4), CYCLES(4), CYCLES(4),
    /*FXCH*/
    CYCLES(4), CYCLES(4), CYCLES(4), CYCLES(4), CYCLES(4), CYCLES(4), CYCLES(4), CYCLES(4),
    /*FNOP*/
    CYCLES(3), NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /*FSTP*/
    CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3),
    /*      opFCHS     opFABS                        opFTST     opFXAM*/
    CYCLES(6), CYCLES(3), NULL, NULL, CYCLES(4), CYCLES(8), NULL, NULL,
    /*      opFLD1   opFLDL2T   opFLDL2E    opFLDPI   opFLDEG2   opFLDLN2     opFLDZ*/
    CYCLES(4), CYCLES(8), CYCLES(8), CYCLES(8), CYCLES(8), CYCLES(8), CYCLES(4), NULL,
    /*     opF2XM1      opFYL2X      opFPTAN     opFPATAN                  opFDECSTP  opFINCSTP*/
    CYCLES(140), CYCLES(196), CYCLES(200), CYCLES(218), NULL, NULL, CYCLES(3), CYCLES(3),
    /*     opFPREM              opFSQRT    opFSINCOS   opFRNDINT    opFSCALE      opFSIN      opFCOS*/
    CYCLES(70), NULL, CYCLES(83), CYCLES(292), CYCLES(21), CYCLES(30), CYCLES(257), CYCLES(257)
    // clang-format on
};
/* DA escape, memory operand. */
static int *opcode_timings_486_da[8] = {
    // clang-format off
/*      FADDil      FMULil     FCOMil    FCOMPil      FSUBil     FSUBRil      FDIVil     FDIVRil*/
    CYCLES(8), CYCLES(11), CYCLES(4), CYCLES(4), CYCLES(8), CYCLES(8), CYCLES(73), CYCLES(73)
    // clang-format on
};
/* DA escape, register operand (only /5, e.g. FUCOMPP, is timed). */
static int *opcode_timings_486_da_mod3[8] = {
    // clang-format off
    NULL, NULL, NULL, NULL, NULL, CYCLES(5), NULL, NULL
    // clang-format on
};
/* DB escape, memory operand. */
static int *opcode_timings_486_db[8] = {
    // clang-format off
/*       FLDil                  FSTil     FSTPil                   FLDe              FSTPe*/
    CYCLES(9), NULL, CYCLES(28), CYCLES(28), NULL, CYCLES(5), NULL, CYCLES(6)
    // clang-format on
};
/* DB escape, register operand, indexed by the low 6 opcode bits. */
static int *opcode_timings_486_db_mod3[64] = {
    // clang-format off
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* opFNOP opFCLEX opFINIT opFNOP opFNOP*/
    /* NOTE(review): this row has only 7 initializers, so every entry after
       it shifts down one slot and the final element is implicitly NULL —
       confirm against upstream whether an 8th entry is missing here. */
    NULL, CYCLES(3), CYCLES(7), CYCLES(17), CYCLES(3), CYCLES(3), NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    // clang-format on
};
/* DC escape, memory operand (double-precision forms). */
static int *opcode_timings_486_dc[8] = {
    // clang-format off
/* opFADDd_a16 opFMULd_a16 opFCOMd_a16 opFCOMPd_a16 opFSUBd_a16 opFSUBRd_a16 opFDIVd_a16 opFDIVRd_a16*/
    CYCLES(8), CYCLES(11), CYCLES(4), CYCLES(4), CYCLES(8), CYCLES(8), CYCLES(73), CYCLES(73)
    // clang-format on
};
/* DC escape, register operand. */
static int *opcode_timings_486_dc_mod3[8] = {
    // clang-format off
/*     opFADDr     opFMULr                      opFSUBRr     opFSUBr    opFDIVRr     opFDIVr*/
    CYCLES(8), CYCLES(16), NULL, NULL, CYCLES(8), CYCLES(8), CYCLES(73), CYCLES(73)
    // clang-format on
};
/* DD escape, memory operand. */
static int *opcode_timings_486_dd[8] = {
    // clang-format off
/*        FLDd                  FSTd       FSTPd       FRSTOR              FSAVE      FSTSW*/
    CYCLES(3), NULL, CYCLES(8), CYCLES(8), CYCLES(131), NULL, CYCLES(154), CYCLES(3)
    // clang-format on
};
/* DD escape, register operand. */
static int *opcode_timings_486_dd_mod3[8] = {
    // clang-format off
/*       FFREE                   FST       FSTP       FUCOM     FUCOMP*/
    CYCLES(3), NULL, CYCLES(3), CYCLES(3), CYCLES(4), CYCLES(4), NULL, NULL
    // clang-format on
};
/* DE escape, memory operand (word-integer forms). */
static int *opcode_timings_486_de[8] = {
    // clang-format off
/*      FADDiw      FMULiw     FCOMiw    FCOMPiw      FSUBil     FSUBRil      FDIVil     FDIVRil*/
    CYCLES(8), CYCLES(11), CYCLES(4), CYCLES(4), CYCLES(8), CYCLES(8), CYCLES(73), CYCLES(73)
    // clang-format on
};
/* DE escape, register operand. */
static int *opcode_timings_486_de_mod3[8] = {
    // clang-format off
/*        FADD        FMUL            FCOMPP        FSUB       FSUBR        FDIV       FDIVR*/
    CYCLES(8), CYCLES(16), NULL, CYCLES(5), CYCLES(8), CYCLES(8), CYCLES(73), CYCLES(73)
    // clang-format on
};
/* DF escape, memory operand. */
static int *opcode_timings_486_df[8] = {
    // clang-format off
/*      FILDiw                FISTiw     FISTPiw                 FILDiq        FBSTP     FISTPiq*/
    CYCLES(13), NULL, CYCLES(29), CYCLES(29), NULL, CYCLES(10), CYCLES(172), CYCLES(28)
    // clang-format on
};
/* DF escape, register operand. */
static int *opcode_timings_486_df_mod3[8] = {
    // clang-format off
/*       FFREE                   FST       FSTP       FUCOM     FUCOMP*/
    CYCLES(3), NULL, CYCLES(3), CYCLES(3), CYCLES(4), CYCLES(4), NULL, NULL
    // clang-format on
};
/* Group 1 immediate ALU ops (80/82/83), indexed by the reg field; slot 7
   (CMP) reads but does not write its destination, hence timing_rm. */
static int *opcode_timings_486_8x[8] = {
    // clang-format off
    &timing_mr, &timing_mr, &timing_mr, &timing_mr, &timing_mr, &timing_mr, &timing_mr, &timing_rm
    // clang-format on
};
/* NOTE(review): the mod3 variants below duplicate the memory-form timings
   (timing_mr/timing_rm rather than timing_rr) — confirm against upstream
   whether that is intentional. */
static int *opcode_timings_486_8x_mod3[8] = {
    // clang-format off
    &timing_mr, &timing_mr, &timing_mr, &timing_mr, &timing_mr, &timing_mr, &timing_mr, &timing_rm
    // clang-format on
};
/* Group 1 with imm16/32 (opcode 81). */
static int *opcode_timings_486_81[8] = {
    // clang-format off
    &timing_mr, &timing_mr, &timing_mr, &timing_mr, &timing_mr, &timing_mr, &timing_mr, &timing_rm
    // clang-format on
};
static int *opcode_timings_486_81_mod3[8] = {
    // clang-format off
    &timing_mr, &timing_mr, &timing_mr, &timing_mr, &timing_mr, &timing_mr, &timing_mr, &timing_rm
    // clang-format on
};
/* Cycle total accumulated for the instruction currently being timed. */
static int timing_count;
/* Prefix byte (0x0f, 0xd8-0xdf, ...) last seen before the main opcode. */
static uint8_t last_prefix;
/* Per-block register-modification mask; cleared at block start.  (Its
   consumers lie outside this chunk.) */
static uint32_t regmask_modified;
/*
 * Decode a timing-table entry.  An entry is one of:
 *   - CYCLES(n) with n <= 10000: the count encoded directly in the pointer;
 *   - CYCLES2(c16, c32): a 16/32-bit operand-size pair in the low 16 bits,
 *     tagged by all upper bits being set — op_32 bit 8 picks the half;
 *   - a real pointer to a shared timing variable (timing_rr etc.).
 */
static inline int
COUNT(int *c, int op_32)
{
    const uintptr_t enc = (uintptr_t) c;

    /* Plain CYCLES(n) immediate. */
    if (enc <= 10000)
        return (int) enc;

    /* CYCLES2(c16, c32): select by operand size. */
    if ((enc & ~0xffff) == (-1 & ~0xffff))
        return (op_32 & 0x100) ? (int) ((enc >> 8) & 0xff) : (int) (enc & 0xff);

    /* A genuine pointer to a timing value. */
    return *c;
}
/* Called at the start of a translated block: no registers have been
   modified by the block yet. */
void
codegen_timing_486_block_start(void)
{
    regmask_modified = 0;
}
/* Called at the start of each instruction: reset the running cycle total
   and forget any previously recorded prefix byte. */
void
codegen_timing_486_start(void)
{
    timing_count = 0;
    last_prefix  = 0;
}
/* Account for a prefix byte: charge it like a one-byte opcode and remember
   it so the main opcode handler can pick the matching timing table.
   NOTE(review): fetchdat is unused here — presumably kept for interface
   parity with the other timing backends; confirm before removing. */
void
codegen_timing_486_prefix(uint8_t prefix, uint32_t fetchdat)
{
    timing_count += COUNT(opcode_timings_486[prefix], 0);
    last_prefix = prefix;
}
/* Account for the opcode byte of one instruction: select the timing and
   dependency tables based on any preceding prefix (0x0f or an FPU escape)
   or on group opcodes that dispatch on the modrm reg field, add the cycle
   count, and model the 486's one-cycle AGI (address generation interlock)
   stall when the previous instruction wrote a register used for address
   generation here. */
void
codegen_timing_486_opcode(uint8_t opcode, uint32_t fetchdat, int op_32, UNUSED(uint32_t op_pc))
{
    int **timings;
    const uint64_t *deps;
    int mod3 = ((fetchdat & 0xc0) == 0xc0); /* register-operand form of modrm */
    int bit8 = !(opcode & 1);               /* even opcodes are the 8-bit forms */

    switch (last_prefix) {
        case 0x0f:
            timings = mod3 ? opcode_timings_486_0f_mod3 : opcode_timings_486_0f;
            deps    = mod3 ? opcode_deps_0f_mod3 : opcode_deps_0f;
            break;

        /* FPU escapes: the memory forms index by the modrm reg field, the
           register (mod3) forms of D9/DB index by the full low 6 bits. */
        case 0xd8:
            timings = mod3 ? opcode_timings_486_d8_mod3 : opcode_timings_486_d8;
            deps    = mod3 ? opcode_deps_d8_mod3 : opcode_deps_d8;
            opcode  = (opcode >> 3) & 7;
            break;
        case 0xd9:
            timings = mod3 ? opcode_timings_486_d9_mod3 : opcode_timings_486_d9;
            deps    = mod3 ? opcode_deps_d9_mod3 : opcode_deps_d9;
            opcode  = mod3 ? opcode & 0x3f : (opcode >> 3) & 7;
            break;
        case 0xda:
            timings = mod3 ? opcode_timings_486_da_mod3 : opcode_timings_486_da;
            deps    = mod3 ? opcode_deps_da_mod3 : opcode_deps_da;
            opcode  = (opcode >> 3) & 7;
            break;
        case 0xdb:
            timings = mod3 ? opcode_timings_486_db_mod3 : opcode_timings_486_db;
            deps    = mod3 ? opcode_deps_db_mod3 : opcode_deps_db;
            opcode  = mod3 ? opcode & 0x3f : (opcode >> 3) & 7;
            break;
        case 0xdc:
            timings = mod3 ? opcode_timings_486_dc_mod3 : opcode_timings_486_dc;
            deps    = mod3 ? opcode_deps_dc_mod3 : opcode_deps_dc;
            opcode  = (opcode >> 3) & 7;
            break;
        case 0xdd:
            timings = mod3 ? opcode_timings_486_dd_mod3 : opcode_timings_486_dd;
            deps    = mod3 ? opcode_deps_dd_mod3 : opcode_deps_dd;
            opcode  = (opcode >> 3) & 7;
            break;
        case 0xde:
            timings = mod3 ? opcode_timings_486_de_mod3 : opcode_timings_486_de;
            deps    = mod3 ? opcode_deps_de_mod3 : opcode_deps_de;
            opcode  = (opcode >> 3) & 7;
            break;
        case 0xdf:
            timings = mod3 ? opcode_timings_486_df_mod3 : opcode_timings_486_df;
            deps    = mod3 ? opcode_deps_df_mod3 : opcode_deps_df;
            opcode  = (opcode >> 3) & 7;
            break;

        default:
            /* No FPU escape: check for the group opcodes that dispatch on
               the modrm reg field (in fetchdat), else use the plain tables. */
            switch (opcode) {
                case 0x80:
                case 0x82:
                case 0x83:
                    timings = mod3 ? opcode_timings_486_8x_mod3 : opcode_timings_486_8x;
                    deps    = mod3 ? opcode_deps_8x_mod3 : opcode_deps_8x;
                    opcode  = (fetchdat >> 3) & 7;
                    break;
                case 0x81:
                    timings = mod3 ? opcode_timings_486_81_mod3 : opcode_timings_486_81;
                    deps    = mod3 ? opcode_deps_81_mod3 : opcode_deps_81;
                    opcode  = (fetchdat >> 3) & 7;
                    break;
                case 0xc0:
                case 0xc1:
                case 0xd0:
                case 0xd1:
                case 0xd2:
                case 0xd3:
                    timings = mod3 ? opcode_timings_486_shift_mod3 : opcode_timings_486_shift;
                    deps    = mod3 ? opcode_deps_shift_mod3 : opcode_deps_shift;
                    opcode  = (fetchdat >> 3) & 7;
                    break;
                case 0xf6:
                    timings = mod3 ? opcode_timings_486_f6_mod3 : opcode_timings_486_f6;
                    deps    = mod3 ? opcode_deps_f6_mod3 : opcode_deps_f6;
                    opcode  = (fetchdat >> 3) & 7;
                    break;
                case 0xf7:
                    timings = mod3 ? opcode_timings_486_f7_mod3 : opcode_timings_486_f7;
                    deps    = mod3 ? opcode_deps_f7_mod3 : opcode_deps_f7;
                    opcode  = (fetchdat >> 3) & 7;
                    break;
                case 0xff:
                    timings = mod3 ? opcode_timings_486_ff_mod3 : opcode_timings_486_ff;
                    deps    = mod3 ? opcode_deps_ff_mod3 : opcode_deps_ff;
                    opcode  = (fetchdat >> 3) & 7;
                    break;

                default:
                    timings = mod3 ? opcode_timings_486_mod3 : opcode_timings_486;
                    deps    = mod3 ? opcode_deps_mod3 : opcode_deps;
                    break;
            }
    }

    timing_count += COUNT(timings[opcode], op_32);
    /* One extra cycle if this instruction's address generation reads a
       register the previous instruction wrote. */
    if (regmask_modified & get_addr_regmask(deps[opcode], fetchdat, op_32))
        timing_count++; /*AGI stall*/
    codegen_block_cycles += timing_count;

    regmask_modified = get_dstdep_mask(deps[opcode], fetchdat, bit8);
}
/* Called at the end of a translated block; the 486 model keeps no
   cross-block state beyond regmask_modified, so nothing to do. */
void
codegen_timing_486_block_end(void)
{
    //
}
/* Timing-model vtable for the 486, consumed by the dynarec core.
   Fields (in order): start, prefix, opcode, block_start, block_end,
   plus one unused slot (NULL) — see codegen_timing_t for its meaning. */
codegen_timing_t codegen_timing_486 = {
    codegen_timing_486_start,
    codegen_timing_486_prefix,
    codegen_timing_486_opcode,
    codegen_timing_486_block_start,
    codegen_timing_486_block_end,
    NULL
};
``` | /content/code_sandbox/src/cpu/codegen_timing_486.c | c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 10,735 |
```objective-c
/*
* 86Box A hypervisor and IBM PC system emulator that specializes in
* running old operating systems and software designed for IBM
* PC systems and compatibles from 1981 through fairly recent
* system designs based on the PCI bus.
*
* This file is part of the 86Box distribution.
*
* x87 FPU instructions core.
*
*
*
* Authors: Fred N. van Kempen, <decwiz@yahoo.com>
* Sarah Walker, <path_to_url
* leilei,
* Miran Grca, <mgrca8@gmail.com>
*
*/
#include <math.h>
#include <fenv.h>
#include "x87_timings.h"
#ifdef _MSC_VER
# include <intrin.h>
#endif
#include "x87_ops_conv.h"
#ifdef ENABLE_FPU_LOG
extern void fpu_log(const char *fmt, ...);
#else
# ifndef fpu_log
# define fpu_log(fmt, ...)
# endif
#endif
/* Host fenv rounding modes indexed by the x87 RC field (bits 11:10 of the
   control word): nearest, down, up, chop. */
static int rounding_modes[4] = { FE_TONEAREST, FE_DOWNWARD, FE_UPWARD, FE_TOWARDZERO };

/* Access x87 stack register st(x) relative to the current top-of-stack. */
#define ST(x) cpu_state.ST[((cpu_state.TOP + (x)) & 7)]

/* x87 status-word condition code bits. */
#define FPU_SW_C3 (0x4000) /* condition bit 3 */
#define FPU_SW_C2 (0x0400) /* condition bit 2 */
#define FPU_SW_C1 (0x0200) /* condition bit 1 */
#define FPU_SW_C0 (0x0100) /* condition bit 0 */

/* Two-bit x87 tag-word encodings. */
#define X87_TAG_VALID   0
#define X87_TAG_ZERO    1
#define X87_TAG_INVALID 2
#define X87_TAG_EMPTY   3

#define FPU_SW_Zero_Div (0x0004) /* divide by zero */
/* View of an IEEE-754 double as its sign/exponent/mantissa fields.
   Bit-field layout assumes a little-endian host. */
typedef union {
    double d;
    struct {
        uint64_t mantissa : 52;
        uint64_t exponent : 11;
        uint64_t negative : 1;
    };
} double_decompose_t;
#if defined(_MSC_VER) && !defined(__clang__)
# if defined i386 || defined __i386 || defined __i386__ || defined _X86_ || defined _M_IX86
# define X87_INLINE_ASM
# endif
#else
# if defined i386 || defined __i386 || defined __i386__ || defined _X86_ || defined _M_IX86 || defined _M_X64 || defined __amd64__
# define X87_INLINE_ASM
# endif
#endif
/* Emulated FPU divide with divide-by-zero detection: if the exception is
   masked in the control word, perform the (infinite-result) divide; if
   unmasked, signal the error and abort the instruction (return 1 from the
   enclosing handler). The 8087 signals via NMI gated by control-word bit 7;
   later FPUs raise IRQ 13. */
#ifdef FPU_8087
#    define x87_div(dst, src1, src2)                   \
        do {                                           \
            if (((double) src2) == 0.0) {              \
                cpu_state.npxs |= FPU_SW_Zero_Div;     \
                if (cpu_state.npxc & FPU_SW_Zero_Div)  \
                    dst = src1 / (double) src2;        \
                else {                                 \
                    fpu_log("FPU : divide by zero\n"); \
                    if (!(cpu_state.npxc & 0x80)) {    \
                        cpu_state.npxs |= 0x80;        \
                        nmi = 1;                       \
                    }                                  \
                    return 1;                          \
                }                                      \
            } else                                     \
                dst = src1 / (double) src2;            \
        } while (0)
#else
#    define x87_div(dst, src1, src2)                   \
        do {                                           \
            if (((double) src2) == 0.0) {              \
                cpu_state.npxs |= FPU_SW_Zero_Div;     \
                if (cpu_state.npxc & FPU_SW_Zero_Div)  \
                    dst = src1 / (double) src2;        \
                else {                                 \
                    fpu_log("FPU : divide by zero\n"); \
                    picint(1 << 13);                   \
                    return 1;                          \
                }                                      \
            } else                                     \
                dst = src1 / (double) src2;            \
        } while (0)
#endif
/* Push a double onto the x87 register stack: decrement TOP and store,
   updating the tag for the new st(0). The old dynarec masks TOP at each
   use instead of on decrement. */
static __inline void
x87_push(double i)
{
#ifdef USE_NEW_DYNAREC
    cpu_state.TOP--;
#else
    cpu_state.TOP = (cpu_state.TOP - 1) & 7;
#endif
    cpu_state.ST[cpu_state.TOP & 7] = i;
#ifdef USE_NEW_DYNAREC
    cpu_state.tag[cpu_state.TOP & 7] = TAG_VALID;
#else
    /* NOTE(review): old-dynarec tag is TAG_VALID only for 0.0, else 0 —
       looks intentional but inverted vs. the name; confirm against users. */
    cpu_state.tag[cpu_state.TOP & 7] = (i == 0.0) ? TAG_VALID : 0;
#endif
}
/* Push the raw 64-bit pattern of a double onto the x87 stack (used when the
   caller already has the IEEE-754 bits, e.g. from memory). */
static __inline void
x87_push_u64(uint64_t i)
{
    union {
        double   d;
        uint64_t ll;
    } td;

    td.ll = i; /* type-pun via union, not a pointer cast */

#ifdef USE_NEW_DYNAREC
    cpu_state.TOP--;
#else
    cpu_state.TOP = (cpu_state.TOP - 1) & 7;
#endif
    cpu_state.ST[cpu_state.TOP & 7] = td.d;
#ifdef USE_NEW_DYNAREC
    cpu_state.tag[cpu_state.TOP & 7] = TAG_VALID;
#else
    cpu_state.tag[cpu_state.TOP & 7] = (td.d == 0.0) ? TAG_VALID : 0;
#endif
}
/* Pop st(0) off the x87 stack: mark the slot empty, advance TOP, and return
   the popped value. */
static __inline double
x87_pop(void)
{
    double t = cpu_state.ST[cpu_state.TOP & 7];

    cpu_state.tag[cpu_state.TOP & 7] = TAG_EMPTY;
#ifdef USE_NEW_DYNAREC
    cpu_state.TOP++;
#else
    /* NOTE(review): sets TAG_UINT64 on the just-emptied slot — presumably so
       FSAVE round-trips the 64-bit image; confirm against x87_st_fsave. */
    cpu_state.tag[cpu_state.TOP & 7] |= TAG_UINT64;
    cpu_state.TOP = (cpu_state.TOP + 1) & 7;
#endif
    return t;
}
/* Round a double to int16_t honouring the x87 rounding-control field
   (control word bits 11:10): 0 nearest-even, 1 down, 2 up, 3 truncate. */
static __inline int16_t
x87_fround16(double b)
{
    int16_t lo;
    int16_t hi;

    switch ((cpu_state.npxc >> 10) & 3) {
        case 0: /* round to nearest, ties to even */
            lo = (int16_t) floor(b);
            hi = (int16_t) floor(b + 1.0);
            if ((b - lo) < (hi - b))
                return lo;
            if ((b - lo) > (hi - b))
                return hi;
            return (lo & 1) ? hi : lo; /* exact tie: even candidate wins */
        case 1: /* round toward -inf */
            return (int16_t) floor(b);
        case 2: /* round toward +inf */
            return (int16_t) ceil(b);
        case 3: /* round toward zero */
            return (int16_t) b;
    }

    return 0;
}
/* As x87_fround16, but widened to int64_t for callers that store 64 bits. */
static __inline int64_t
x87_fround16_64(double b)
{
    return (int64_t) x87_fround16(b);
}
/* Round a double to int32_t honouring the x87 rounding-control field
   (control word bits 11:10): 0 nearest-even, 1 down, 2 up, 3 truncate. */
static __inline int32_t
x87_fround32(double b)
{
    int32_t lo;
    int32_t hi;

    switch ((cpu_state.npxc >> 10) & 3) {
        case 0: /* round to nearest, ties to even */
            lo = (int32_t) floor(b);
            hi = (int32_t) floor(b + 1.0);
            if ((b - lo) < (hi - b))
                return lo;
            if ((b - lo) > (hi - b))
                return hi;
            return (lo & 1) ? hi : lo; /* exact tie: even candidate wins */
        case 1: /* round toward -inf */
            return (int32_t) floor(b);
        case 2: /* round toward +inf */
            return (int32_t) ceil(b);
        case 3: /* round toward zero */
            return (int32_t) b;
    }

    return 0;
}
/* As x87_fround32, but widened to int64_t for callers that store 64 bits. */
static __inline int64_t
x87_fround32_64(double b)
{
    return (int64_t) x87_fround32(b);
}
/* Round a double to int64_t honouring the x87 rounding-control field
   (control word bits 11:10): 0 nearest-even, 1 down, 2 up, 3 truncate. */
static __inline int64_t
x87_fround(double b)
{
    int64_t lo;
    int64_t hi;

    switch ((cpu_state.npxc >> 10) & 3) {
        case 0: /* round to nearest, ties to even */
            lo = (int64_t) floor(b);
            hi = (int64_t) floor(b + 1.0);
            if ((b - lo) < (hi - b))
                return lo;
            if ((b - lo) > (hi - b))
                return hi;
            return (lo & 1) ? hi : lo; /* exact tie: even candidate wins */
        case 1: /* round toward -inf */
            return (int64_t) floor(b);
        case 2: /* round toward +inf */
            return (int64_t) ceil(b);
        case 3: /* round toward zero */
            return (int64_t) b;
    }

    return 0LL;
}
/* Load an 80-bit extended-precision value from guest memory at
   easeg:cpu_state.eaaddr (8-byte mantissa then 2-byte sign/exponent) and
   convert it to a host double. */
static __inline double
x87_ld80(void)
{
    x87_conv_t test;
    test.eind.ll = readmeml(easeg, cpu_state.eaaddr);
    test.eind.ll |= (uint64_t) readmeml(easeg, cpu_state.eaaddr + 4) << 32;
    test.begin = readmemw(easeg, cpu_state.eaaddr + 8);
    return x87_from80(&test);
}
/* Convert a host double to 80-bit extended precision and store it to guest
   memory at easeg:cpu_state.eaaddr. */
static __inline void
x87_st80(double d)
{
    x87_conv_t test;
    x87_to80(d, &test);
    writememl(easeg, cpu_state.eaaddr, test.eind.ll & 0xffffffff);
    writememl(easeg, cpu_state.eaaddr + 4, test.eind.ll >> 32);
    writememw(easeg, cpu_state.eaaddr + 8, test.begin);
}
/* Store one stack register for FSAVE. If the register still holds an exact
   64-bit integer image (TAG_UINT64), write the raw bits with the 0x5555
   marker word so FRSTOR can restore it losslessly; otherwise store the
   value in 80-bit format. */
static __inline void
x87_st_fsave(int reg)
{
    reg = (cpu_state.TOP + reg) & 7; /* logical st(reg) -> physical slot */

    if (cpu_state.tag[reg] & TAG_UINT64) {
        writememl(easeg, cpu_state.eaaddr, cpu_state.MM[reg].q & 0xffffffff);
        writememl(easeg, cpu_state.eaaddr + 4, cpu_state.MM[reg].q >> 32);
        writememw(easeg, cpu_state.eaaddr + 8, 0x5555);
    } else
        x87_st80(cpu_state.ST[reg]);
}
/* Load one stack register for FRSTOR, the inverse of x87_st_fsave: if the
   saved image carries the 0x5555 marker and the tag says the slot held a
   64-bit integer, restore the raw MMX/integer image; otherwise reload the
   value as 80-bit extended precision. */
static __inline void
x87_ld_frstor(int reg)
{
    reg = (cpu_state.TOP + reg) & 7; /* logical st(reg) -> physical slot */

    cpu_state.MM[reg].q  = readmemq(easeg, cpu_state.eaaddr);
    cpu_state.MM_w4[reg] = readmemw(easeg, cpu_state.eaaddr + 8);

#ifdef USE_NEW_DYNAREC
    if ((cpu_state.MM_w4[reg] == 0x5555) && (cpu_state.tag[reg] & TAG_UINT64))
#else
    if ((cpu_state.MM_w4[reg] == 0x5555) && (cpu_state.tag[reg] == 2))
#endif
    {
#ifndef USE_NEW_DYNAREC
        cpu_state.tag[reg] = TAG_UINT64;
#endif
        cpu_state.ST[reg] = (double) cpu_state.MM[reg].q;
    } else {
#ifdef USE_NEW_DYNAREC
        cpu_state.tag[reg] &= ~TAG_UINT64;
#endif
        cpu_state.ST[reg] = x87_ld80();
    }
}
/* Load an MMX register image (8 data bytes + the w4 marker word) from guest
   memory at easeg:cpu_state.eaaddr. */
static __inline void
x87_ldmmx(MMX_REG *r, uint16_t *w4)
{
    r->l[0] = readmeml(easeg, cpu_state.eaaddr);
    r->l[1] = readmeml(easeg, cpu_state.eaaddr + 4);
    *w4     = readmemw(easeg, cpu_state.eaaddr + 8);
}
/* Store an MMX register image to guest memory; the exponent word is forced
   to 0xffff, matching how MMX writes appear in the x87 register file. */
static __inline void
x87_stmmx(MMX_REG r)
{
    writememl(easeg, cpu_state.eaaddr, r.l[0]);
    writememl(easeg, cpu_state.eaaddr + 4, r.l[1]);
    writememw(easeg, cpu_state.eaaddr + 8, 0xffff);
}
#include <inttypes.h>
/* Ordered compare of a against b, returning the x87 condition bits
   (C0/C2/C3) as FCOM would set them. On x86 hosts the compare is done on
   the host FPU via inline asm for exact semantics; elsewhere a C fallback
   is used (known to be imperfect, e.g. for infinities). */
static __inline uint16_t
x87_compare(double a, double b)
{
#ifdef X87_INLINE_ASM
    uint32_t       result;
    double         ea = a;
    double         eb = b;
    const uint64_t ia = 0x3fec1a6ff866a936ULL;
    const uint64_t ib = 0x3fec1a6ff866a938ULL;

    /* Hack to make CHKCOP happy. */
    if (!memcmp(&ea, &ia, 8) && !memcmp(&eb, &ib, 8))
        return FPU_SW_C3;

    /* Pre-287XL parts with projective infinity (control bit 12 clear)
       treat +inf and -inf as equal. */
    if ((fpu_type < FPU_287XL) && !(cpu_state.npxc & 0x1000) && ((a == INFINITY) || (a == -INFINITY)) && ((b == INFINITY) || (b == -INFINITY)))
        eb = ea;

#    if !defined(_MSC_VER) || defined(__clang__)
    /* Memory barrier, to force GCC to write to the input parameters
     * before the compare rather than after */
    __asm volatile(""
                   :
                   :
                   : "memory");
    __asm(
        "fldl %2\n"
        "fldl %1\n"
        "fclex\n"
        "fcompp\n"
        "fnstsw %0\n"
        : "=m"(result)
        : "m"(ea), "m"(eb));
#    else
    _ReadWriteBarrier();
    _asm
    {
        fld eb
        fld ea
        fclex
        fcompp
        fnstsw result
    }
#    endif
    return result & (FPU_SW_C0 | FPU_SW_C2 | FPU_SW_C3);
#else
    /* Generic C version is known to give incorrect results in some
     * situations, eg comparison of infinity (Unreal) */
    uint32_t result = 0;
    double   ea = a, eb = b;

    if ((fpu_type < FPU_287XL) && !(cpu_state.npxc & 0x1000) && ((a == INFINITY) || (a == -INFINITY)) && ((b == INFINITY) || (b == -INFINITY)))
        eb = ea;

    if (ea == eb)
        result |= FPU_SW_C3;
    else if (ea < eb)
        result |= FPU_SW_C0;

    return result;
#endif
}
/* Unordered compare of a against b, returning the x87 condition bits
   (C0/C2/C3) as FUCOM would set them. On x86 hosts the compare runs on the
   host FPU via inline asm; elsewhere a C fallback is used (known to be
   imperfect, e.g. for infinities).
   Fix: the MSVC branch previously used FCOMPP (ordered compare), which —
   unlike the FUCOMPP used in the GCC branch — signals invalid-operation on
   QNaN operands on the host FPU. Both branches now use FUCOMPP. */
static __inline uint16_t
x87_ucompare(double a, double b)
{
#ifdef X87_INLINE_ASM
    uint32_t result;
#    if !defined(_MSC_VER) || defined(__clang__)
    /* Memory barrier, to force GCC to write to the input parameters
     * before the compare rather than after */
    __asm volatile(""
                   :
                   :
                   : "memory");
    __asm(
        "fldl %2\n"
        "fldl %1\n"
        "fclex\n"
        "fucompp\n"
        "fnstsw %0\n"
        : "=m"(result)
        : "m"(a), "m"(b));
#    else
    _ReadWriteBarrier();
    _asm
    {
        fld b
        fld a
        fclex
        fucompp
        fnstsw result
    }
#    endif
    return result & (FPU_SW_C0 | FPU_SW_C2 | FPU_SW_C3);
#else
    /* Generic C version is known to give incorrect results in some
     * situations, eg comparison of infinity (Unreal) */
    uint32_t result = 0;

    if (a == b)
        result |= FPU_SW_C3;
    else if (a < b)
        result |= FPU_SW_C0;

    return result;
#endif
}
/* Bit-pattern views of a 32-bit float and a 64-bit double, used for loads
   and stores of raw IEEE-754 images. */
typedef union {
    float    s;
    uint32_t i;
} x87_ts;

typedef union {
    double   d;
    uint64_t i;
} x87_td;
/* FP_ENTER: common entry check for FPU instructions. On the 8087 there is
   no CR0, so it is a no-op; on later CPUs, raise #NM (int 7) and abort if
   CR0.EM or CR0.TS is set. */
#ifdef FPU_8087
#    define FP_ENTER() \
        {              \
        }
#else
#    define FP_ENTER()          \
        do {                    \
            if (cr0 & 0xc) {    \
                x86_int(7);     \
                return 1;       \
            }                   \
        } while (0)
#endif

/* Tag-word helpers; the two dynarec generations encode tags differently:
   the new one assigns TAG_VALID outright, the old one toggles TAG_UINT64
   on the existing tag.
     FP_TAG_VALID   - tag st(0)
     FP_TAG_VALID_F - tag st(fetchdat)
     FP_TAG_DEFAULT - tag st(0) as holding a 64-bit integer image
     FP_TAG_VALID_N - tag st(1) */
#ifdef USE_NEW_DYNAREC
#    define FP_TAG_VALID   cpu_state.tag[cpu_state.TOP & 7] = TAG_VALID
#    define FP_TAG_VALID_F cpu_state.tag[(cpu_state.TOP + fetchdat) & 7] = TAG_VALID
#    define FP_TAG_DEFAULT cpu_state.tag[cpu_state.TOP & 7] = TAG_VALID | TAG_UINT64
#    define FP_TAG_VALID_N cpu_state.tag[(cpu_state.TOP + 1) & 7] = TAG_VALID
#else
#    define FP_TAG_VALID   cpu_state.tag[cpu_state.TOP] &= ~TAG_UINT64
#    define FP_TAG_VALID_F cpu_state.tag[(cpu_state.TOP + fetchdat) & 7] &= ~TAG_UINT64
#    define FP_TAG_DEFAULT cpu_state.tag[cpu_state.TOP] |= TAG_UINT64;
#    define FP_TAG_VALID_N cpu_state.tag[(cpu_state.TOP + 1) & 7] &= ~TAG_UINT64
#endif
#include "softfloat3e/softfloat-specialize.h"
#include "softfloat3e/fpu_trans.h"
#include "x87_ops_sf_arith.h"
#include "x87_ops_sf_compare.h"
#include "x87_ops_sf_const.h"
#include "x87_ops_sf_load_store.h"
#include "x87_ops_sf_misc.h"
#include "x87_ops_sf_trans.h"
#include "x87_ops_sf.h"
#include "x87_ops_arith.h"
#include "x87_ops_misc.h"
#include "x87_ops_loadstore.h"
#ifndef FPU_8087
/* 16-bit-addressing FPU dispatch guard: raise #NM and abort if CR0.EM or
   CR0.TS is set, otherwise decode the effective address and continue. */
static int
op_nofpu_a16(uint32_t fetchdat)
{
    if (cr0 & 0xc) {
        x86_int(7);
        return 1;
    } else {
        fetch_ea_16(fetchdat);
        return 0;
    }
}
/* 32-bit-addressing variant of op_nofpu_a16. */
static int
op_nofpu_a32(uint32_t fetchdat)
{
    if (cr0 & 0xc) {
        x86_int(7);
        return 1;
    } else {
        fetch_ea_32(fetchdat);
        return 0;
    }
}
#endif
#ifdef FPU_8087
/* 8087 build: reserved FPU encodings execute as a no-op (the 8087 has no
   illegal-opcode trap); consume the modrm operand and charge cycles. */
static int
FPU_ILLEGAL_a16(uint32_t fetchdat)
{
    geteaw();
    wait(timing_rr, 0);
    return 0;
}
#else
/* Reserved FPU encoding, 16-bit addressing: decode the EA and charge
   cycles, but perform no operation. */
static int
FPU_ILLEGAL_a16(uint32_t fetchdat)
{
    fetch_ea_16(fetchdat);
    CLOCK_CYCLES(timing_rr);
    PREFETCH_RUN(timing_rr, 2, rmdat, 0, 0, 0, 0, 0);
    return 0;
}
/* Reserved FPU encoding, 32-bit addressing variant. */
static int
FPU_ILLEGAL_a32(uint32_t fetchdat)
{
    fetch_ea_32(fetchdat);
    CLOCK_CYCLES(timing_rr);
    PREFETCH_RUN(timing_rr, 2, rmdat, 0, 0, 0, 0, 0);
    return 0;
}
#endif
#define ILLEGAL_a16 FPU_ILLEGAL_a16
#ifdef FPU_8087
/* 8087 D8 escape dispatch: rows are modrm mod 00/01/10 (memory forms,
   indexed by reg field) and mod 11 (register forms). */
const OpFn OP_TABLE(sf_fpu_8087_d8)[32] = {
// clang-format off
        sf_FADDs_a16, sf_FMULs_a16, sf_FCOMs_a16, sf_FCOMPs_a16, sf_FSUBs_a16, sf_FSUBRs_a16, sf_FDIVs_a16, sf_FDIVRs_a16,
        sf_FADDs_a16, sf_FMULs_a16, sf_FCOMs_a16, sf_FCOMPs_a16, sf_FSUBs_a16, sf_FSUBRs_a16, sf_FDIVs_a16, sf_FDIVRs_a16,
        sf_FADDs_a16, sf_FMULs_a16, sf_FCOMs_a16, sf_FCOMPs_a16, sf_FSUBs_a16, sf_FSUBRs_a16, sf_FDIVs_a16, sf_FDIVRs_a16,
        sf_FADD_st0_stj, sf_FMUL_st0_stj, sf_FCOM_sti, sf_FCOMP_sti, sf_FSUB_st0_stj, sf_FSUBR_st0_stj, sf_FDIV_st0_stj, sf_FDIVR_st0_stj,
// clang-format on
};
/* 8087 D9 escape dispatch, indexed by the full modrm byte: memory forms
   (mod 00/01/10) repeat per reg field; mod 11 decodes the low 6 bits
   (FLD/FXCH sti, constants, transcendentals). */
const OpFn OP_TABLE(sf_fpu_8087_d9)[256] = {
// clang-format off
        sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16,
        sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16,
        sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16,
        sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16,
        sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16,
        sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16,
        sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16,
        sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16,
        sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16,
        sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16,
        sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16,
        sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16,
        sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16,
        sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16,
        sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16,
        sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16,
        sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16,
        sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16,
        sf_FLD_sti, sf_FLD_sti, sf_FLD_sti, sf_FLD_sti, sf_FLD_sti, sf_FLD_sti, sf_FLD_sti, sf_FLD_sti,
        sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti,
        sf_FNOP, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, /*Invalid*/
        sf_FCHS, sf_FABS, ILLEGAL_a16, ILLEGAL_a16, sf_FTST, sf_FXAM, ILLEGAL_a16, ILLEGAL_a16,
        sf_FLD1, sf_FLDL2T, sf_FLDL2E, sf_FLDPI, sf_FLDEG2, sf_FLDLN2, sf_FLDZ, ILLEGAL_a16,
        sf_F2XM1, sf_FYL2X, sf_FPTAN, sf_FPATAN, sf_FXTRACT, sf_FPREM1, sf_FDECSTP, sf_FINCSTP,
        sf_FPREM, sf_FYL2XP1, sf_FSQRT, ILLEGAL_a16, sf_FRNDINT, sf_FSCALE, ILLEGAL_a16, ILLEGAL_a16
// clang-format on
};
/* 8087 DA escape dispatch, indexed by the full modrm byte: integer (32-bit)
   arithmetic memory forms; all mod 11 encodings are reserved on the 8087. */
const OpFn OP_TABLE(sf_fpu_8087_da)[256] = {
// clang-format off
        sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16,
        sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16,
        sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16,
        sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16,
        sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16,
        sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16,
        sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16,
        sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16,
        sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16,
        sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16,
        sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16,
        sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16,
        sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16,
        sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16,
        sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16,
        sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16,
        sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16,
        sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16,
        sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16,
        sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16,
        sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16,
        sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16,
        sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16,
        sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
// clang-format on
};
/* 8087 DB escape dispatch, indexed by the full modrm byte: 32-bit integer
   load/store and 80-bit load/store memory forms; the mod 11 row at 0xE0
   holds FENI/FDISI (sf_FI), FNCLEX, FNINIT and FNOP. */
const OpFn OP_TABLE(sf_fpu_8087_db)[256] = {
// clang-format off
        sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16,
        sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16,
        sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16,
        sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16,
        sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16,
        sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        sf_FI, sf_FI, sf_FNCLEX, sf_FNINIT, ILLEGAL_a16, sf_FNOP, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
// clang-format on
};
/* 8087 DC escape dispatch: double-precision memory arithmetic (mod 00/01/10,
   by reg field) and sti-destination register forms (mod 11); note FSUB/FSUBR
   and FDIV/FDIVR swap in the register row, per the x87 encoding. */
const OpFn OP_TABLE(sf_fpu_8087_dc)[32] = {
// clang-format off
        sf_FADDd_a16, sf_FMULd_a16, sf_FCOMd_a16, sf_FCOMPd_a16, sf_FSUBd_a16, sf_FSUBRd_a16, sf_FDIVd_a16, sf_FDIVRd_a16,
        sf_FADDd_a16, sf_FMULd_a16, sf_FCOMd_a16, sf_FCOMPd_a16, sf_FSUBd_a16, sf_FSUBRd_a16, sf_FDIVd_a16, sf_FDIVRd_a16,
        sf_FADDd_a16, sf_FMULd_a16, sf_FCOMd_a16, sf_FCOMPd_a16, sf_FSUBd_a16, sf_FSUBRd_a16, sf_FDIVd_a16, sf_FDIVRd_a16,
        sf_FADD_sti_st0, sf_FMUL_sti_st0, ILLEGAL_a16, ILLEGAL_a16, sf_FSUBR_sti_st0, sf_FSUB_sti_st0, sf_FDIVR_sti_st0, sf_FDIV_sti_st0,
// clang-format on
};
/* 8087 DD escape dispatch, indexed by the full modrm byte: double-precision
   load/store, FRSTOR/FNSAVE/FNSTSW memory forms; mod 11 rows decode
   FFREE/FST/FSTP sti. */
const OpFn OP_TABLE(sf_fpu_8087_dd)[256] = {
// clang-format off
        sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16,
        sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16,
        sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16,
        sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16,
        sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16,
        sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16,
        sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16,
        sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16,
        sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16,
        sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16,
        sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16,
        sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16,
        sf_FFREE_sti, sf_FFREE_sti, sf_FFREE_sti, sf_FFREE_sti, sf_FFREE_sti, sf_FFREE_sti, sf_FFREE_sti, sf_FFREE_sti,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        sf_FST_sti, sf_FST_sti, sf_FST_sti, sf_FST_sti, sf_FST_sti, sf_FST_sti, sf_FST_sti, sf_FST_sti,
        sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
// clang-format on
};
/*
 * Dispatch table for the DE escape opcode (softfloat 8087 FPU path),
 * indexed by the full mod/rm byte (one entry per value 0x00-0xFF).
 * Rows of 8 entries: for mod=00/01/10 each row is one reg field
 * (the FPU operation) repeated for every rm value (memory operand,
 * 16-bit address size); mod=11 rows are the register st(i) forms.
 */
const OpFn OP_TABLE(sf_fpu_8087_de)[256] = {
// clang-format off
/* mod=00: 16-bit integer memory operand, reg selects the operation */
sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16,
sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16,
sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16,
sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16,
sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16,
sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16,
sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16,
sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16,
/* mod=01: same handlers (displacement decoded by the handler) */
sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16,
sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16,
sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16,
sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16,
sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16,
sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16,
sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16,
sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16,
/* mod=10 */
sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16,
sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16,
sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16,
sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16,
sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16,
sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16,
sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16,
sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16,
/* mod=11: register forms, pop variants; DE D9 is FCOMPP */
sf_FADDP_sti_st0, sf_FADDP_sti_st0, sf_FADDP_sti_st0, sf_FADDP_sti_st0, sf_FADDP_sti_st0, sf_FADDP_sti_st0, sf_FADDP_sti_st0, sf_FADDP_sti_st0,
sf_FMULP_sti_st0, sf_FMULP_sti_st0, sf_FMULP_sti_st0, sf_FMULP_sti_st0, sf_FMULP_sti_st0, sf_FMULP_sti_st0, sf_FMULP_sti_st0, sf_FMULP_sti_st0,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
ILLEGAL_a16, sf_FCOMPP, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
sf_FSUBRP_sti_st0, sf_FSUBRP_sti_st0, sf_FSUBRP_sti_st0, sf_FSUBRP_sti_st0, sf_FSUBRP_sti_st0, sf_FSUBRP_sti_st0, sf_FSUBRP_sti_st0, sf_FSUBRP_sti_st0,
sf_FSUBP_sti_st0, sf_FSUBP_sti_st0, sf_FSUBP_sti_st0, sf_FSUBP_sti_st0, sf_FSUBP_sti_st0, sf_FSUBP_sti_st0, sf_FSUBP_sti_st0, sf_FSUBP_sti_st0,
sf_FDIVRP_sti_st0, sf_FDIVRP_sti_st0, sf_FDIVRP_sti_st0, sf_FDIVRP_sti_st0, sf_FDIVRP_sti_st0, sf_FDIVRP_sti_st0, sf_FDIVRP_sti_st0, sf_FDIVRP_sti_st0,
sf_FDIVP_sti_st0, sf_FDIVP_sti_st0, sf_FDIVP_sti_st0, sf_FDIVP_sti_st0, sf_FDIVP_sti_st0, sf_FDIVP_sti_st0, sf_FDIVP_sti_st0, sf_FDIVP_sti_st0,
// clang-format on
};
/*
 * Dispatch table for the DF escape opcode (softfloat 8087 FPU path),
 * indexed by the full mod/rm byte. mod=00/01/10 rows select the
 * 16-bit-address memory-operand handler by reg (16-bit int, packed BCD,
 * 64-bit int loads/stores); mod=11 rows are the register forms.
 */
const OpFn OP_TABLE(sf_fpu_8087_df)[256] = {
// clang-format off
/* mod=00 */
sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16,
sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16,
sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16,
sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16,
sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16,
sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16,
/* mod=01 */
sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16,
sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16,
sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16,
sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16,
sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16,
sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16,
/* mod=10 */
sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16,
sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16,
sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16,
sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16,
sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16,
sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16,
/* mod=11: only reg=0 (FFREEP) is valid here */
sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
// clang-format on
};
/*
 * Dispatch table for the D8 escape opcode (legacy 8087 FPU path),
 * 32 entries indexed by (mod << 3) | reg. Rows are mod=00/01/10
 * (32-bit real memory operand, 16-bit addressing) and mod=11
 * (st(0) op st(i) register forms).
 */
const OpFn OP_TABLE(fpu_8087_d8)[32] = {
// clang-format off
opFADDs_a16, opFMULs_a16, opFCOMs_a16, opFCOMPs_a16, opFSUBs_a16, opFSUBRs_a16, opFDIVs_a16, opFDIVRs_a16,  /* mod=00 */
opFADDs_a16, opFMULs_a16, opFCOMs_a16, opFCOMPs_a16, opFSUBs_a16, opFSUBRs_a16, opFDIVs_a16, opFDIVRs_a16,  /* mod=01 */
opFADDs_a16, opFMULs_a16, opFCOMs_a16, opFCOMPs_a16, opFSUBs_a16, opFSUBRs_a16, opFDIVs_a16, opFDIVRs_a16,  /* mod=10 */
opFADD, opFMUL, opFCOM, opFCOMP, opFSUB, opFSUBR, opFDIV, opFDIVR  /* mod=11 */
// clang-format on
};
/*
 * Dispatch table for the D9 escape opcode (legacy 8087 FPU path),
 * indexed by the full mod/rm byte. mod=00/01/10 rows select the
 * 16-bit-address memory handler by reg (FLD/FST/FSTP single,
 * FLDENV/FLDCW/FSTENV/FSTCW); mod=11 entries are the register,
 * constant-load and transcendental forms, keyed by the low 6 bits.
 */
const OpFn OP_TABLE(fpu_8087_d9)[256] = {
// clang-format off
/* mod=00 */
opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16,
opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16,
opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16,
opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16,
opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16,
opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16,
/* mod=01 */
opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16,
opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16,
opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16,
opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16,
opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16,
opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16,
/* mod=10 */
opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16,
opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16,
opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16,
opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16,
opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16,
opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16,
/* mod=11: FLD st(i) / FXCH / FNOP / constants / transcendentals */
opFLD, opFLD, opFLD, opFLD, opFLD, opFLD, opFLD, opFLD,
opFXCH, opFXCH, opFXCH, opFXCH, opFXCH, opFXCH, opFXCH, opFXCH,
opFNOP, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, /*Invalid*/
opFCHS, opFABS, ILLEGAL_a16, ILLEGAL_a16, opFTST, opFXAM, ILLEGAL_a16, ILLEGAL_a16,
opFLD1, opFLDL2T, opFLDL2E, opFLDPI, opFLDEG2, opFLDLN2, opFLDZ, ILLEGAL_a16,
opF2XM1, opFYL2X, opFPTAN, opFPATAN, opFXTRACT, opFPREM1, opFDECSTP, opFINCSTP,
opFPREM, opFYL2XP1, opFSQRT, ILLEGAL_a16, opFRNDINT, opFSCALE, ILLEGAL_a16, ILLEGAL_a16
// clang-format on
};
/*
 * Dispatch table for the DA escape opcode (legacy 8087 FPU path),
 * indexed by the full mod/rm byte. mod=00/01/10 rows select the
 * 32-bit integer memory arithmetic handler by reg (16-bit addressing);
 * all mod=11 encodings are illegal on the 8087.
 */
const OpFn OP_TABLE(fpu_8087_da)[256] = {
// clang-format off
/* mod=00 */
opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16,
opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16,
opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16,
opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16,
opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16,
opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16,
opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16,
opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16,
/* mod=01 */
opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16,
opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16,
opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16,
opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16,
opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16,
opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16,
opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16,
opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16,
/* mod=10 */
opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16,
opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16,
opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16,
opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16,
opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16,
opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16,
opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16,
opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16,
/* mod=11: no register forms on the 8087 */
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
// clang-format on
};
/*
 * Dispatch table for the DB escape opcode (legacy 8087 FPU path),
 * indexed by the full mod/rm byte. mod=00/01/10 rows select the
 * 16-bit-address memory handler by reg (32-bit int load/store,
 * 80-bit extended load/store); mod=11 row with reg=4 holds the
 * administrative encodings (opFI presumably covers the 8087
 * FENI/FDISI slots — confirm against the handler).
 */
const OpFn OP_TABLE(fpu_8087_db)[256] = {
// clang-format off
/* mod=00 */
opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16,
opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16,
/* mod=01 */
opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16,
opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16,
/* mod=10 */
opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16,
opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16,
/* mod=11: reg=4 row carries opFI/opFCLEX/opFINIT/opFNOP */
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
opFI, opFI, opFCLEX, opFINIT, ILLEGAL_a16, opFNOP, ILLEGAL_a16, ILLEGAL_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
// clang-format on
};
/*
 * Dispatch table for the DC escape opcode (legacy 8087 FPU path),
 * 32 entries indexed by (mod << 3) | reg. Rows are mod=00/01/10
 * (64-bit real memory operand, 16-bit addressing) and mod=11
 * (st(i) op st(0) "r" register forms; note FSUB/FSUBR and
 * FDIV/FDIVR swap places in the register encoding).
 */
const OpFn OP_TABLE(fpu_8087_dc)[32] = {
// clang-format off
opFADDd_a16, opFMULd_a16, opFCOMd_a16, opFCOMPd_a16, opFSUBd_a16, opFSUBRd_a16, opFDIVd_a16, opFDIVRd_a16,  /* mod=00 */
opFADDd_a16, opFMULd_a16, opFCOMd_a16, opFCOMPd_a16, opFSUBd_a16, opFSUBRd_a16, opFDIVd_a16, opFDIVRd_a16,  /* mod=01 */
opFADDd_a16, opFMULd_a16, opFCOMd_a16, opFCOMPd_a16, opFSUBd_a16, opFSUBRd_a16, opFDIVd_a16, opFDIVRd_a16,  /* mod=10 */
opFADDr, opFMULr, ILLEGAL_a16, ILLEGAL_a16, opFSUBRr, opFSUBr, opFDIVRr, opFDIVr  /* mod=11 */
// clang-format on
};
/*
 * Dispatch table for the DD escape opcode (legacy 8087 FPU path),
 * indexed by the full mod/rm byte. mod=00/01/10 rows select the
 * 16-bit-address memory handler by reg (64-bit real load/store,
 * FRSTOR/FSAVE/FSTSW); mod=11 rows are FFREE/FST/FSTP st(i).
 */
const OpFn OP_TABLE(fpu_8087_dd)[256] = {
// clang-format off
/* mod=00 */
opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16,
opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16,
opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16,
opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16,
/* mod=01 */
opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16,
opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16,
opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16,
opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16,
/* mod=10 */
opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16,
opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16,
opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16,
opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16,
/* mod=11 */
opFFREE, opFFREE, opFFREE, opFFREE, opFFREE, opFFREE, opFFREE, opFFREE,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
opFST, opFST, opFST, opFST, opFST, opFST, opFST, opFST,
opFSTP, opFSTP, opFSTP, opFSTP, opFSTP, opFSTP, opFSTP, opFSTP,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
// clang-format on
};
/*
 * Dispatch table for the DE escape opcode (legacy 8087 FPU path),
 * indexed by the full mod/rm byte. mod=00/01/10 rows select the
 * 16-bit integer memory arithmetic handler by reg (16-bit
 * addressing); mod=11 rows are the pop variants, DE D9 is FCOMPP.
 */
const OpFn OP_TABLE(fpu_8087_de)[256] = {
// clang-format off
/* mod=00 */
opFADDiw_a16, opFADDiw_a16, opFADDiw_a16, opFADDiw_a16, opFADDiw_a16, opFADDiw_a16, opFADDiw_a16, opFADDiw_a16,
opFMULiw_a16, opFMULiw_a16, opFMULiw_a16, opFMULiw_a16, opFMULiw_a16, opFMULiw_a16, opFMULiw_a16, opFMULiw_a16,
opFCOMiw_a16, opFCOMiw_a16, opFCOMiw_a16, opFCOMiw_a16, opFCOMiw_a16, opFCOMiw_a16, opFCOMiw_a16, opFCOMiw_a16,
opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16,
opFSUBiw_a16, opFSUBiw_a16, opFSUBiw_a16, opFSUBiw_a16, opFSUBiw_a16, opFSUBiw_a16, opFSUBiw_a16, opFSUBiw_a16,
opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16,
opFDIViw_a16, opFDIViw_a16, opFDIViw_a16, opFDIViw_a16, opFDIViw_a16, opFDIViw_a16, opFDIViw_a16, opFDIViw_a16,
opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16,
/* mod=01 */
opFADDiw_a16, opFADDiw_a16, opFADDiw_a16, opFADDiw_a16, opFADDiw_a16, opFADDiw_a16, opFADDiw_a16, opFADDiw_a16,
opFMULiw_a16, opFMULiw_a16, opFMULiw_a16, opFMULiw_a16, opFMULiw_a16, opFMULiw_a16, opFMULiw_a16, opFMULiw_a16,
opFCOMiw_a16, opFCOMiw_a16, opFCOMiw_a16, opFCOMiw_a16, opFCOMiw_a16, opFCOMiw_a16, opFCOMiw_a16, opFCOMiw_a16,
opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16,
opFSUBiw_a16, opFSUBiw_a16, opFSUBiw_a16, opFSUBiw_a16, opFSUBiw_a16, opFSUBiw_a16, opFSUBiw_a16, opFSUBiw_a16,
opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16,
opFDIViw_a16, opFDIViw_a16, opFDIViw_a16, opFDIViw_a16, opFDIViw_a16, opFDIViw_a16, opFDIViw_a16, opFDIViw_a16,
opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16,
/* mod=10 */
opFADDiw_a16, opFADDiw_a16, opFADDiw_a16, opFADDiw_a16, opFADDiw_a16, opFADDiw_a16, opFADDiw_a16, opFADDiw_a16,
opFMULiw_a16, opFMULiw_a16, opFMULiw_a16, opFMULiw_a16, opFMULiw_a16, opFMULiw_a16, opFMULiw_a16, opFMULiw_a16,
opFCOMiw_a16, opFCOMiw_a16, opFCOMiw_a16, opFCOMiw_a16, opFCOMiw_a16, opFCOMiw_a16, opFCOMiw_a16, opFCOMiw_a16,
opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16,
opFSUBiw_a16, opFSUBiw_a16, opFSUBiw_a16, opFSUBiw_a16, opFSUBiw_a16, opFSUBiw_a16, opFSUBiw_a16, opFSUBiw_a16,
opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16,
opFDIViw_a16, opFDIViw_a16, opFDIViw_a16, opFDIViw_a16, opFDIViw_a16, opFDIViw_a16, opFDIViw_a16, opFDIViw_a16,
opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16,
/* mod=11 */
opFADDP, opFADDP, opFADDP, opFADDP, opFADDP, opFADDP, opFADDP, opFADDP,
opFMULP, opFMULP, opFMULP, opFMULP, opFMULP, opFMULP, opFMULP, opFMULP,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
ILLEGAL_a16, opFCOMPP, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
opFSUBRP, opFSUBRP, opFSUBRP, opFSUBRP, opFSUBRP, opFSUBRP, opFSUBRP, opFSUBRP,
opFSUBP, opFSUBP, opFSUBP, opFSUBP, opFSUBP, opFSUBP, opFSUBP, opFSUBP,
opFDIVRP, opFDIVRP, opFDIVRP, opFDIVRP, opFDIVRP, opFDIVRP, opFDIVRP, opFDIVRP,
opFDIVP, opFDIVP, opFDIVP, opFDIVP, opFDIVP, opFDIVP, opFDIVP, opFDIVP,
// clang-format on
};
/*
 * Dispatch table for the DF escape opcode (legacy 8087 FPU path),
 * indexed by the full mod/rm byte. mod=00/01/10 rows select the
 * 16-bit-address memory handler by reg (16-bit int, packed BCD,
 * 64-bit int loads/stores); mod=11 reg=0 is FFREEP st(i),
 * everything else is illegal.
 */
const OpFn OP_TABLE(fpu_8087_df)[256] = {
// clang-format off
/* mod=00 */
opFILDiw_a16, opFILDiw_a16, opFILDiw_a16, opFILDiw_a16, opFILDiw_a16, opFILDiw_a16, opFILDiw_a16, opFILDiw_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
opFISTiw_a16, opFISTiw_a16, opFISTiw_a16, opFISTiw_a16, opFISTiw_a16, opFISTiw_a16, opFISTiw_a16, opFISTiw_a16,
opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16,
FBLD_a16, FBLD_a16, FBLD_a16, FBLD_a16, FBLD_a16, FBLD_a16, FBLD_a16, FBLD_a16,
opFILDiq_a16, opFILDiq_a16, opFILDiq_a16, opFILDiq_a16, opFILDiq_a16, opFILDiq_a16, opFILDiq_a16, opFILDiq_a16,
FBSTP_a16, FBSTP_a16, FBSTP_a16, FBSTP_a16, FBSTP_a16, FBSTP_a16, FBSTP_a16, FBSTP_a16,
FISTPiq_a16, FISTPiq_a16, FISTPiq_a16, FISTPiq_a16, FISTPiq_a16, FISTPiq_a16, FISTPiq_a16, FISTPiq_a16,
/* mod=01 */
opFILDiw_a16, opFILDiw_a16, opFILDiw_a16, opFILDiw_a16, opFILDiw_a16, opFILDiw_a16, opFILDiw_a16, opFILDiw_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
opFISTiw_a16, opFISTiw_a16, opFISTiw_a16, opFISTiw_a16, opFISTiw_a16, opFISTiw_a16, opFISTiw_a16, opFISTiw_a16,
opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16,
FBLD_a16, FBLD_a16, FBLD_a16, FBLD_a16, FBLD_a16, FBLD_a16, FBLD_a16, FBLD_a16,
opFILDiq_a16, opFILDiq_a16, opFILDiq_a16, opFILDiq_a16, opFILDiq_a16, opFILDiq_a16, opFILDiq_a16, opFILDiq_a16,
FBSTP_a16, FBSTP_a16, FBSTP_a16, FBSTP_a16, FBSTP_a16, FBSTP_a16, FBSTP_a16, FBSTP_a16,
FISTPiq_a16, FISTPiq_a16, FISTPiq_a16, FISTPiq_a16, FISTPiq_a16, FISTPiq_a16, FISTPiq_a16, FISTPiq_a16,
/* mod=10 */
opFILDiw_a16, opFILDiw_a16, opFILDiw_a16, opFILDiw_a16, opFILDiw_a16, opFILDiw_a16, opFILDiw_a16, opFILDiw_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
opFISTiw_a16, opFISTiw_a16, opFISTiw_a16, opFISTiw_a16, opFISTiw_a16, opFISTiw_a16, opFISTiw_a16, opFISTiw_a16,
opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16,
FBLD_a16, FBLD_a16, FBLD_a16, FBLD_a16, FBLD_a16, FBLD_a16, FBLD_a16, FBLD_a16,
opFILDiq_a16, opFILDiq_a16, opFILDiq_a16, opFILDiq_a16, opFILDiq_a16, opFILDiq_a16, opFILDiq_a16, opFILDiq_a16,
FBSTP_a16, FBSTP_a16, FBSTP_a16, FBSTP_a16, FBSTP_a16, FBSTP_a16, FBSTP_a16, FBSTP_a16,
FISTPiq_a16, FISTPiq_a16, FISTPiq_a16, FISTPiq_a16, FISTPiq_a16, FISTPiq_a16, FISTPiq_a16, FISTPiq_a16,
/* mod=11 */
opFFREEP, opFFREEP, opFFREEP, opFFREEP, opFFREEP, opFFREEP, opFFREEP, opFFREEP,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
// clang-format on
};
#else
# define ILLEGAL_a32 FPU_ILLEGAL_a32
/*
 * Dispatch table for the D8 escape opcode (softfloat FPU path,
 * 16-bit address size), 32 entries indexed by (mod << 3) | reg.
 * Rows are mod=00/01/10 (32-bit real memory operand) and
 * mod=11 (st(0) op st(j) register forms).
 */
const OpFn OP_TABLE(sf_fpu_d8_a16)[32] = {
// clang-format off
/*0x00*/ sf_FADDs_a16, sf_FMULs_a16, sf_FCOMs_a16, sf_FCOMPs_a16, sf_FSUBs_a16, sf_FSUBRs_a16, sf_FDIVs_a16, sf_FDIVRs_a16,
/*0x08*/ sf_FADDs_a16, sf_FMULs_a16, sf_FCOMs_a16, sf_FCOMPs_a16, sf_FSUBs_a16, sf_FSUBRs_a16, sf_FDIVs_a16, sf_FDIVRs_a16,
/*0x10*/ sf_FADDs_a16, sf_FMULs_a16, sf_FCOMs_a16, sf_FCOMPs_a16, sf_FSUBs_a16, sf_FSUBRs_a16, sf_FDIVs_a16, sf_FDIVRs_a16,
/*0x18*/ sf_FADD_st0_stj, sf_FMUL_st0_stj, sf_FCOM_sti, sf_FCOMP_sti, sf_FSUB_st0_stj, sf_FSUBR_st0_stj, sf_FDIV_st0_stj, sf_FDIVR_st0_stj,
// clang-format on
};
/*
 * Dispatch table for the D8 escape opcode (softfloat FPU path,
 * 32-bit address size), 32 entries indexed by (mod << 3) | reg.
 * Same layout as sf_fpu_d8_a16 but with _a32 memory handlers;
 * the mod=11 register row is shared between both address sizes.
 */
const OpFn OP_TABLE(sf_fpu_d8_a32)[32] = {
// clang-format off
/*0x00*/ sf_FADDs_a32, sf_FMULs_a32, sf_FCOMs_a32, sf_FCOMPs_a32, sf_FSUBs_a32, sf_FSUBRs_a32, sf_FDIVs_a32, sf_FDIVRs_a32,
/*0x08*/ sf_FADDs_a32, sf_FMULs_a32, sf_FCOMs_a32, sf_FCOMPs_a32, sf_FSUBs_a32, sf_FSUBRs_a32, sf_FDIVs_a32, sf_FDIVRs_a32,
/*0x10*/ sf_FADDs_a32, sf_FMULs_a32, sf_FCOMs_a32, sf_FCOMPs_a32, sf_FSUBs_a32, sf_FSUBRs_a32, sf_FDIVs_a32, sf_FDIVRs_a32,
/*0x18*/ sf_FADD_st0_stj, sf_FMUL_st0_stj, sf_FCOM_sti, sf_FCOMP_sti, sf_FSUB_st0_stj, sf_FSUBR_st0_stj, sf_FDIV_st0_stj, sf_FDIVR_st0_stj,
// clang-format on
};
/*
 * Softfloat 287 FPU opcode 0xD9 dispatch table, 16-bit addressing.
 * 256 entries indexed by the instruction's ModRM byte: 0x00-0xBF (mod 0-2)
 * are the memory forms, repeated once per mod value; 0xC0-0xFF (mod 3) are
 * the register, load-constant, and transcendental forms.
 * Unlike sf_fpu_d9_a16, the 0xD8-0xDF row (FSTP ST(i) alias on later FPUs)
 * is marked invalid in this 287 variant.
 */
const OpFn OP_TABLE(sf_fpu_287_d9_a16)[256] = {
    // clang-format off
    /* mod = 0: memory operand */
    sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16,
    sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16,
    sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16,
    sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16,
    sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16,
    sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16,
    /* mod = 1: memory operand + disp8 */
    sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16,
    sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16,
    sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16,
    sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16,
    sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16,
    sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16,
    /* mod = 2: memory operand + disp16 */
    sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16,
    sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16,
    sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16,
    sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16,
    sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16,
    sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16,
    /* mod = 3: register forms (0xC0-0xFF) */
    sf_FLD_sti, sf_FLD_sti, sf_FLD_sti, sf_FLD_sti, sf_FLD_sti, sf_FLD_sti, sf_FLD_sti, sf_FLD_sti,
    sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti,
    sf_FNOP, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, /*Invalid*/
    sf_FCHS, sf_FABS, ILLEGAL_a16, ILLEGAL_a16, sf_FTST, sf_FXAM, ILLEGAL_a16, ILLEGAL_a16,
    sf_FLD1, sf_FLDL2T, sf_FLDL2E, sf_FLDPI, sf_FLDEG2, sf_FLDLN2, sf_FLDZ, ILLEGAL_a16,
    sf_F2XM1, sf_FYL2X, sf_FPTAN, sf_FPATAN, sf_FXTRACT, sf_FPREM1, sf_FDECSTP, sf_FINCSTP,
    sf_FPREM, sf_FYL2XP1, sf_FSQRT, sf_FSINCOS, sf_FRNDINT, sf_FSCALE, sf_FSIN, sf_FCOS,
    // clang-format on
};
/*
 * Softfloat 287 FPU opcode 0xD9 dispatch table, 32-bit addressing.
 * 32-bit addressing counterpart of sf_fpu_287_d9_a16; indexed by the ModRM
 * byte, with 0x00-0xBF (mod 0-2) memory forms and 0xC0-0xFF (mod 3)
 * register/constant/transcendental forms.
 */
const OpFn OP_TABLE(sf_fpu_287_d9_a32)[256] = {
    // clang-format off
    /* mod = 0: memory operand */
    sf_FLDs_a32, sf_FLDs_a32, sf_FLDs_a32, sf_FLDs_a32, sf_FLDs_a32, sf_FLDs_a32, sf_FLDs_a32, sf_FLDs_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    sf_FSTs_a32, sf_FSTs_a32, sf_FSTs_a32, sf_FSTs_a32, sf_FSTs_a32, sf_FSTs_a32, sf_FSTs_a32, sf_FSTs_a32,
    sf_FSTPs_a32, sf_FSTPs_a32, sf_FSTPs_a32, sf_FSTPs_a32, sf_FSTPs_a32, sf_FSTPs_a32, sf_FSTPs_a32, sf_FSTPs_a32,
    sf_FLDENV_a32, sf_FLDENV_a32, sf_FLDENV_a32, sf_FLDENV_a32, sf_FLDENV_a32, sf_FLDENV_a32, sf_FLDENV_a32, sf_FLDENV_a32,
    sf_FLDCW_a32, sf_FLDCW_a32, sf_FLDCW_a32, sf_FLDCW_a32, sf_FLDCW_a32, sf_FLDCW_a32, sf_FLDCW_a32, sf_FLDCW_a32,
    sf_FNSTENV_a32, sf_FNSTENV_a32, sf_FNSTENV_a32, sf_FNSTENV_a32, sf_FNSTENV_a32, sf_FNSTENV_a32, sf_FNSTENV_a32, sf_FNSTENV_a32,
    sf_FNSTCW_a32, sf_FNSTCW_a32, sf_FNSTCW_a32, sf_FNSTCW_a32, sf_FNSTCW_a32, sf_FNSTCW_a32, sf_FNSTCW_a32, sf_FNSTCW_a32,
    /* mod = 1: memory operand + disp8 */
    sf_FLDs_a32, sf_FLDs_a32, sf_FLDs_a32, sf_FLDs_a32, sf_FLDs_a32, sf_FLDs_a32, sf_FLDs_a32, sf_FLDs_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    sf_FSTs_a32, sf_FSTs_a32, sf_FSTs_a32, sf_FSTs_a32, sf_FSTs_a32, sf_FSTs_a32, sf_FSTs_a32, sf_FSTs_a32,
    sf_FSTPs_a32, sf_FSTPs_a32, sf_FSTPs_a32, sf_FSTPs_a32, sf_FSTPs_a32, sf_FSTPs_a32, sf_FSTPs_a32, sf_FSTPs_a32,
    sf_FLDENV_a32, sf_FLDENV_a32, sf_FLDENV_a32, sf_FLDENV_a32, sf_FLDENV_a32, sf_FLDENV_a32, sf_FLDENV_a32, sf_FLDENV_a32,
    sf_FLDCW_a32, sf_FLDCW_a32, sf_FLDCW_a32, sf_FLDCW_a32, sf_FLDCW_a32, sf_FLDCW_a32, sf_FLDCW_a32, sf_FLDCW_a32,
    sf_FNSTENV_a32, sf_FNSTENV_a32, sf_FNSTENV_a32, sf_FNSTENV_a32, sf_FNSTENV_a32, sf_FNSTENV_a32, sf_FNSTENV_a32, sf_FNSTENV_a32,
    sf_FNSTCW_a32, sf_FNSTCW_a32, sf_FNSTCW_a32, sf_FNSTCW_a32, sf_FNSTCW_a32, sf_FNSTCW_a32, sf_FNSTCW_a32, sf_FNSTCW_a32,
    /* mod = 2: memory operand + disp32 */
    sf_FLDs_a32, sf_FLDs_a32, sf_FLDs_a32, sf_FLDs_a32, sf_FLDs_a32, sf_FLDs_a32, sf_FLDs_a32, sf_FLDs_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    sf_FSTs_a32, sf_FSTs_a32, sf_FSTs_a32, sf_FSTs_a32, sf_FSTs_a32, sf_FSTs_a32, sf_FSTs_a32, sf_FSTs_a32,
    sf_FSTPs_a32, sf_FSTPs_a32, sf_FSTPs_a32, sf_FSTPs_a32, sf_FSTPs_a32, sf_FSTPs_a32, sf_FSTPs_a32, sf_FSTPs_a32,
    sf_FLDENV_a32, sf_FLDENV_a32, sf_FLDENV_a32, sf_FLDENV_a32, sf_FLDENV_a32, sf_FLDENV_a32, sf_FLDENV_a32, sf_FLDENV_a32,
    sf_FLDCW_a32, sf_FLDCW_a32, sf_FLDCW_a32, sf_FLDCW_a32, sf_FLDCW_a32, sf_FLDCW_a32, sf_FLDCW_a32, sf_FLDCW_a32,
    sf_FNSTENV_a32, sf_FNSTENV_a32, sf_FNSTENV_a32, sf_FNSTENV_a32, sf_FNSTENV_a32, sf_FNSTENV_a32, sf_FNSTENV_a32, sf_FNSTENV_a32,
    sf_FNSTCW_a32, sf_FNSTCW_a32, sf_FNSTCW_a32, sf_FNSTCW_a32, sf_FNSTCW_a32, sf_FNSTCW_a32, sf_FNSTCW_a32, sf_FNSTCW_a32,
    /* mod = 3: register forms (0xC0-0xFF) */
    sf_FLD_sti, sf_FLD_sti, sf_FLD_sti, sf_FLD_sti, sf_FLD_sti, sf_FLD_sti, sf_FLD_sti, sf_FLD_sti,
    sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti,
    sf_FNOP, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, /*Invalid*/
    sf_FCHS, sf_FABS, ILLEGAL_a32, ILLEGAL_a32, sf_FTST, sf_FXAM, ILLEGAL_a32, ILLEGAL_a32,
    sf_FLD1, sf_FLDL2T, sf_FLDL2E, sf_FLDPI, sf_FLDEG2, sf_FLDLN2, sf_FLDZ, ILLEGAL_a32,
    sf_F2XM1, sf_FYL2X, sf_FPTAN, sf_FPATAN, sf_FXTRACT, sf_FPREM1, sf_FDECSTP, sf_FINCSTP,
    sf_FPREM, sf_FYL2XP1, sf_FSQRT, sf_FSINCOS, sf_FRNDINT, sf_FSCALE, sf_FSIN, sf_FCOS,
    // clang-format on
};
/*
 * Softfloat FPU opcode 0xD9 dispatch table, 16-bit addressing (387+ style).
 * 256 entries indexed by the ModRM byte (offsets in the /*0xNN*\/ markers):
 * 0x00-0xBF (mod 0-2) are the memory forms, repeated once per mod value;
 * 0xC0-0xFF (mod 3) register/constant/transcendental forms. Differs from the
 * 287 variant in accepting 0xD8-0xDF as an FSTP ST(i) alias.
 */
const OpFn OP_TABLE(sf_fpu_d9_a16)[256] = {
    // clang-format off
    /*0x00*/ sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16,
    /*0x08*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    /*0x10*/ sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16,
    /*0x18*/ sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16,
    /*0x20*/ sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16,
    /*0x28*/ sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16,
    /*0x30*/ sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16,
    /*0x38*/ sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16,
    /*0x40*/ sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16,
    /*0x48*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    /*0x50*/ sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16,
    /*0x58*/ sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16,
    /*0x60*/ sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16,
    /*0x68*/ sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16,
    /*0x70*/ sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16,
    /*0x78*/ sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16,
    /*0x80*/ sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16, sf_FLDs_a16,
    /*0x88*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    /*0x90*/ sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16, sf_FSTs_a16,
    /*0x98*/ sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16, sf_FSTPs_a16,
    /*0xa0*/ sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16, sf_FLDENV_a16,
    /*0xa8*/ sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16, sf_FLDCW_a16,
    /*0xb0*/ sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16, sf_FNSTENV_a16,
    /*0xb8*/ sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16, sf_FNSTCW_a16,
    /*0xc0*/ sf_FLD_sti, sf_FLD_sti, sf_FLD_sti, sf_FLD_sti, sf_FLD_sti, sf_FLD_sti, sf_FLD_sti, sf_FLD_sti,
    /*0xc8*/ sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti,
    /*0xd0*/ sf_FNOP, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    /*0xd8*/ sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, /*Invalid*/
    /*0xe0*/ sf_FCHS, sf_FABS, ILLEGAL_a16, ILLEGAL_a16, sf_FTST, sf_FXAM, ILLEGAL_a16, ILLEGAL_a16,
    /*0xe8*/ sf_FLD1, sf_FLDL2T, sf_FLDL2E, sf_FLDPI, sf_FLDEG2, sf_FLDLN2, sf_FLDZ, ILLEGAL_a16,
    /*0xf0*/ sf_F2XM1, sf_FYL2X, sf_FPTAN, sf_FPATAN, sf_FXTRACT, sf_FPREM1, sf_FDECSTP, sf_FINCSTP,
    /*0xf8*/ sf_FPREM, sf_FYL2XP1, sf_FSQRT, sf_FSINCOS, sf_FRNDINT, sf_FSCALE, sf_FSIN, sf_FCOS,
    // clang-format on
};
/*
 * Softfloat FPU opcode 0xD9 dispatch table, 32-bit addressing.
 * 32-bit addressing counterpart of sf_fpu_d9_a16: indexed by the ModRM byte,
 * memory forms at 0x00-0xBF (mod 0-2), register forms at 0xC0-0xFF (mod 3),
 * with 0xD8-0xDF accepted as an FSTP ST(i) alias.
 */
const OpFn OP_TABLE(sf_fpu_d9_a32)[256] = {
    // clang-format off
    /* mod = 0: memory operand */
    sf_FLDs_a32, sf_FLDs_a32, sf_FLDs_a32, sf_FLDs_a32, sf_FLDs_a32, sf_FLDs_a32, sf_FLDs_a32, sf_FLDs_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    sf_FSTs_a32, sf_FSTs_a32, sf_FSTs_a32, sf_FSTs_a32, sf_FSTs_a32, sf_FSTs_a32, sf_FSTs_a32, sf_FSTs_a32,
    sf_FSTPs_a32, sf_FSTPs_a32, sf_FSTPs_a32, sf_FSTPs_a32, sf_FSTPs_a32, sf_FSTPs_a32, sf_FSTPs_a32, sf_FSTPs_a32,
    sf_FLDENV_a32, sf_FLDENV_a32, sf_FLDENV_a32, sf_FLDENV_a32, sf_FLDENV_a32, sf_FLDENV_a32, sf_FLDENV_a32, sf_FLDENV_a32,
    sf_FLDCW_a32, sf_FLDCW_a32, sf_FLDCW_a32, sf_FLDCW_a32, sf_FLDCW_a32, sf_FLDCW_a32, sf_FLDCW_a32, sf_FLDCW_a32,
    sf_FNSTENV_a32, sf_FNSTENV_a32, sf_FNSTENV_a32, sf_FNSTENV_a32, sf_FNSTENV_a32, sf_FNSTENV_a32, sf_FNSTENV_a32, sf_FNSTENV_a32,
    sf_FNSTCW_a32, sf_FNSTCW_a32, sf_FNSTCW_a32, sf_FNSTCW_a32, sf_FNSTCW_a32, sf_FNSTCW_a32, sf_FNSTCW_a32, sf_FNSTCW_a32,
    /* mod = 1: memory operand + disp8 */
    sf_FLDs_a32, sf_FLDs_a32, sf_FLDs_a32, sf_FLDs_a32, sf_FLDs_a32, sf_FLDs_a32, sf_FLDs_a32, sf_FLDs_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    sf_FSTs_a32, sf_FSTs_a32, sf_FSTs_a32, sf_FSTs_a32, sf_FSTs_a32, sf_FSTs_a32, sf_FSTs_a32, sf_FSTs_a32,
    sf_FSTPs_a32, sf_FSTPs_a32, sf_FSTPs_a32, sf_FSTPs_a32, sf_FSTPs_a32, sf_FSTPs_a32, sf_FSTPs_a32, sf_FSTPs_a32,
    sf_FLDENV_a32, sf_FLDENV_a32, sf_FLDENV_a32, sf_FLDENV_a32, sf_FLDENV_a32, sf_FLDENV_a32, sf_FLDENV_a32, sf_FLDENV_a32,
    sf_FLDCW_a32, sf_FLDCW_a32, sf_FLDCW_a32, sf_FLDCW_a32, sf_FLDCW_a32, sf_FLDCW_a32, sf_FLDCW_a32, sf_FLDCW_a32,
    sf_FNSTENV_a32, sf_FNSTENV_a32, sf_FNSTENV_a32, sf_FNSTENV_a32, sf_FNSTENV_a32, sf_FNSTENV_a32, sf_FNSTENV_a32, sf_FNSTENV_a32,
    sf_FNSTCW_a32, sf_FNSTCW_a32, sf_FNSTCW_a32, sf_FNSTCW_a32, sf_FNSTCW_a32, sf_FNSTCW_a32, sf_FNSTCW_a32, sf_FNSTCW_a32,
    /* mod = 2: memory operand + disp32 */
    sf_FLDs_a32, sf_FLDs_a32, sf_FLDs_a32, sf_FLDs_a32, sf_FLDs_a32, sf_FLDs_a32, sf_FLDs_a32, sf_FLDs_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    sf_FSTs_a32, sf_FSTs_a32, sf_FSTs_a32, sf_FSTs_a32, sf_FSTs_a32, sf_FSTs_a32, sf_FSTs_a32, sf_FSTs_a32,
    sf_FSTPs_a32, sf_FSTPs_a32, sf_FSTPs_a32, sf_FSTPs_a32, sf_FSTPs_a32, sf_FSTPs_a32, sf_FSTPs_a32, sf_FSTPs_a32,
    sf_FLDENV_a32, sf_FLDENV_a32, sf_FLDENV_a32, sf_FLDENV_a32, sf_FLDENV_a32, sf_FLDENV_a32, sf_FLDENV_a32, sf_FLDENV_a32,
    sf_FLDCW_a32, sf_FLDCW_a32, sf_FLDCW_a32, sf_FLDCW_a32, sf_FLDCW_a32, sf_FLDCW_a32, sf_FLDCW_a32, sf_FLDCW_a32,
    sf_FNSTENV_a32, sf_FNSTENV_a32, sf_FNSTENV_a32, sf_FNSTENV_a32, sf_FNSTENV_a32, sf_FNSTENV_a32, sf_FNSTENV_a32, sf_FNSTENV_a32,
    sf_FNSTCW_a32, sf_FNSTCW_a32, sf_FNSTCW_a32, sf_FNSTCW_a32, sf_FNSTCW_a32, sf_FNSTCW_a32, sf_FNSTCW_a32, sf_FNSTCW_a32,
    /* mod = 3: register forms (0xC0-0xFF) */
    sf_FLD_sti, sf_FLD_sti, sf_FLD_sti, sf_FLD_sti, sf_FLD_sti, sf_FLD_sti, sf_FLD_sti, sf_FLD_sti,
    sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti,
    sf_FNOP, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, /*Invalid*/
    sf_FCHS, sf_FABS, ILLEGAL_a32, ILLEGAL_a32, sf_FTST, sf_FXAM, ILLEGAL_a32, ILLEGAL_a32,
    sf_FLD1, sf_FLDL2T, sf_FLDL2E, sf_FLDPI, sf_FLDEG2, sf_FLDLN2, sf_FLDZ, ILLEGAL_a32,
    sf_F2XM1, sf_FYL2X, sf_FPTAN, sf_FPATAN, sf_FXTRACT, sf_FPREM1, sf_FDECSTP, sf_FINCSTP,
    sf_FPREM, sf_FYL2XP1, sf_FSQRT, sf_FSINCOS, sf_FRNDINT, sf_FSCALE, sf_FSIN, sf_FCOS,
    // clang-format on
};
/*
 * Softfloat 287 FPU opcode 0xDA dispatch table, 16-bit addressing.
 * Indexed by the ModRM byte. 0x00-0xBF (mod 0-2) are the 32-bit-integer
 * ("il") memory forms; all mod 3 encodings (0xC0-0xFF) are invalid in this
 * 287 variant (no FUCOMPP, unlike sf_fpu_da_a16).
 */
const OpFn OP_TABLE(sf_fpu_287_da_a16)[256] = {
    // clang-format off
    /* mod = 0: memory operand */
    sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16,
    sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16,
    sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16,
    sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16,
    sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16,
    sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16,
    sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16,
    sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16,
    /* mod = 1: memory operand + disp8 */
    sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16,
    sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16,
    sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16,
    sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16,
    sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16,
    sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16,
    sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16,
    sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16,
    /* mod = 2: memory operand + disp16 */
    sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16,
    sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16,
    sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16,
    sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16,
    sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16,
    sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16,
    sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16,
    sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16,
    /* mod = 3: all invalid on the 287 */
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    // clang-format on
};
/*
 * Softfloat 287 FPU opcode 0xDA dispatch table, 32-bit addressing.
 * 32-bit addressing counterpart of sf_fpu_287_da_a16: 32-bit-integer memory
 * forms at 0x00-0xBF (mod 0-2); every mod 3 encoding is invalid.
 */
const OpFn OP_TABLE(sf_fpu_287_da_a32)[256] = {
    // clang-format off
    /* mod = 0: memory operand */
    sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32,
    sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32,
    sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32,
    sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32,
    sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32,
    sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32,
    sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32,
    sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32,
    /* mod = 1: memory operand + disp8 */
    sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32,
    sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32,
    sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32,
    sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32,
    sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32,
    sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32,
    sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32,
    sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32,
    /* mod = 2: memory operand + disp32 */
    sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32,
    sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32,
    sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32,
    sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32,
    sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32,
    sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32,
    sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32,
    sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32,
    /* mod = 3: all invalid on the 287 */
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    // clang-format on
};
/*
 * Softfloat FPU opcode 0xDA dispatch table, 16-bit addressing (387+ style).
 * Indexed by the ModRM byte: 0x00-0xBF (mod 0-2) are the 32-bit-integer
 * ("il") memory forms; of the mod 3 encodings only 0xE9 (FUCOMPP) is valid.
 */
const OpFn OP_TABLE(sf_fpu_da_a16)[256] = {
    // clang-format off
    /* mod = 0: memory operand */
    sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16,
    sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16,
    sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16,
    sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16,
    sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16,
    sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16,
    sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16,
    sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16,
    /* mod = 1: memory operand + disp8 */
    sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16,
    sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16,
    sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16,
    sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16,
    sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16,
    sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16,
    sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16,
    sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16,
    /* mod = 2: memory operand + disp16 */
    sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16,
    sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16,
    sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16,
    sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16,
    sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16,
    sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16,
    sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16,
    sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16,
    /* mod = 3: only 0xE9 (FUCOMPP) is valid */
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    ILLEGAL_a16, sf_FUCOMPP, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    // clang-format on
};
/*
 * Softfloat FPU opcode 0xDA dispatch table, 32-bit addressing.
 * 32-bit addressing counterpart of sf_fpu_da_a16: 32-bit-integer memory forms
 * at 0x00-0xBF (mod 0-2); of the mod 3 encodings only 0xE9 (FUCOMPP) is valid.
 */
const OpFn OP_TABLE(sf_fpu_da_a32)[256] = {
    // clang-format off
    /* mod = 0: memory operand */
    sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32,
    sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32,
    sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32,
    sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32,
    sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32,
    sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32,
    sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32,
    sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32,
    /* mod = 1: memory operand + disp8 */
    sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32,
    sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32,
    sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32,
    sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32,
    sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32,
    sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32,
    sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32,
    sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32,
    /* mod = 2: memory operand + disp32 */
    sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32,
    sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32,
    sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32,
    sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32,
    sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32,
    sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32,
    sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32,
    sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32,
    /* mod = 3: only 0xE9 (FUCOMPP) is valid */
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    ILLEGAL_a32, sf_FUCOMPP, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    // clang-format on
};
# ifndef OPS_286_386
/*
 * Softfloat 686-class FPU opcode 0xDA dispatch table, 16-bit addressing.
 * Like sf_fpu_da_a16 but with the conditional-move register forms added:
 * 0xC0-0xDF (mod 3) dispatch to FCMOVB/FCMOVE/FCMOVBE/FCMOVU; 0xE9 remains
 * FUCOMPP and the remaining mod 3 encodings are invalid.
 */
const OpFn OP_TABLE(sf_fpu_686_da_a16)[256] = {
    // clang-format off
    /* mod = 0: memory operand */
    sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16,
    sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16,
    sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16,
    sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16,
    sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16,
    sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16,
    sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16,
    sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16,
    /* mod = 1: memory operand + disp8 */
    sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16,
    sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16,
    sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16,
    sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16,
    sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16,
    sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16,
    sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16,
    sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16,
    /* mod = 2: memory operand + disp16 */
    sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16, sf_FADDil_a16,
    sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16, sf_FMULil_a16,
    sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16, sf_FCOMil_a16,
    sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16, sf_FCOMPil_a16,
    sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16, sf_FSUBil_a16,
    sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16, sf_FSUBRil_a16,
    sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16, sf_FDIVil_a16,
    sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16, sf_FDIVRil_a16,
    /* mod = 3: FCMOVcc (0xC0-0xDF), FUCOMPP (0xE9), rest invalid */
    sf_FCMOVB, sf_FCMOVB, sf_FCMOVB, sf_FCMOVB, sf_FCMOVB, sf_FCMOVB, sf_FCMOVB, sf_FCMOVB,
    sf_FCMOVE, sf_FCMOVE, sf_FCMOVE, sf_FCMOVE, sf_FCMOVE, sf_FCMOVE, sf_FCMOVE, sf_FCMOVE,
    sf_FCMOVBE, sf_FCMOVBE, sf_FCMOVBE, sf_FCMOVBE, sf_FCMOVBE, sf_FCMOVBE, sf_FCMOVBE, sf_FCMOVBE,
    sf_FCMOVU, sf_FCMOVU, sf_FCMOVU, sf_FCMOVU, sf_FCMOVU, sf_FCMOVU, sf_FCMOVU, sf_FCMOVU,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    ILLEGAL_a16, sf_FUCOMPP, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    // clang-format on
};
/* x87 DA-escape dispatch table for a 686-class FPU, 32-bit addressing forms.
 * 256 entries, presumably indexed by the ModRM byte following the DA escape
 * opcode -- TODO confirm against the dispatcher that consumes OP_TABLE().
 * Rows 0x00-0xBF repeat each handler 8x (only the reg field of ModRM selects
 * the operation for the memory forms); rows 0xC0-0xFF are register forms:
 * FCMOVcc at 0xC0-0xDF and FUCOMPP at index 0xE9. */
const OpFn OP_TABLE(sf_fpu_686_da_a32)[256] = {
    // clang-format off
/*0x00*/    sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32,
/*0x08*/    sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32,
/*0x10*/    sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32,
/*0x18*/    sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32,
/*0x20*/    sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32,
/*0x28*/    sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32,
/*0x30*/    sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32,
/*0x38*/    sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32,
/*0x40*/    sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32,
/*0x48*/    sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32,
/*0x50*/    sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32,
/*0x58*/    sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32,
/*0x60*/    sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32,
/*0x68*/    sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32,
/*0x70*/    sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32,
/*0x78*/    sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32,
/*0x80*/    sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32, sf_FADDil_a32,
/*0x88*/    sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32, sf_FMULil_a32,
/*0x90*/    sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32, sf_FCOMil_a32,
/*0x98*/    sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32, sf_FCOMPil_a32,
/*0xa0*/    sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32, sf_FSUBil_a32,
/*0xa8*/    sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32, sf_FSUBRil_a32,
/*0xb0*/    sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32, sf_FDIVil_a32,
/*0xb8*/    sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32, sf_FDIVRil_a32,
/*0xc0*/    sf_FCMOVB, sf_FCMOVB, sf_FCMOVB, sf_FCMOVB, sf_FCMOVB, sf_FCMOVB, sf_FCMOVB, sf_FCMOVB,
/*0xc8*/    sf_FCMOVE, sf_FCMOVE, sf_FCMOVE, sf_FCMOVE, sf_FCMOVE, sf_FCMOVE, sf_FCMOVE, sf_FCMOVE,
/*0xd0*/    sf_FCMOVBE, sf_FCMOVBE, sf_FCMOVBE, sf_FCMOVBE, sf_FCMOVBE, sf_FCMOVBE, sf_FCMOVBE, sf_FCMOVBE,
/*0xd8*/    sf_FCMOVU, sf_FCMOVU, sf_FCMOVU, sf_FCMOVU, sf_FCMOVU, sf_FCMOVU, sf_FCMOVU, sf_FCMOVU,
/*0xe0*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0xe8*/    ILLEGAL_a32, sf_FUCOMPP, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,  /* 0xe9 = FUCOMPP */
/*0xf0*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0xf8*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    // clang-format on
};
# endif
/* x87 DB-escape dispatch table for the 287 FPU, 16-bit addressing forms.
 * Presumably indexed by the ModRM byte -- TODO confirm against the consumer.
 * Rows 0x00-0xBF: memory forms (32-bit integer load/store, 80-bit extended
 * load/store-pop), each repeated 8x. Row 0xE0 carries the no-operand admin
 * ops (FNOP/FNCLEX/FNINIT); 686-only forms (FCMOVNcc, F(U)COMI) are ILLEGAL
 * here. */
const OpFn OP_TABLE(sf_fpu_287_db_a16)[256] = {
    // clang-format off
/*0x00*/    sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16,
/*0x08*/    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0x10*/    sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16,
/*0x18*/    sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16,
/*0x20*/    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0x28*/    sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16,
/*0x30*/    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0x38*/    sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16,
/*0x40*/    sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16,
/*0x48*/    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0x50*/    sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16,
/*0x58*/    sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16,
/*0x60*/    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0x68*/    sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16,
/*0x70*/    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0x78*/    sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16,
/*0x80*/    sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16,
/*0x88*/    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0x90*/    sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16,
/*0x98*/    sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16,
/*0xa0*/    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0xa8*/    sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16,
/*0xb0*/    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0xb8*/    sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16,
/*0xc0*/    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0xc8*/    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0xd0*/    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0xd8*/    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0xe0*/    sf_FNOP, sf_FNOP, sf_FNCLEX, sf_FNINIT, sf_FNOP, sf_FNOP, ILLEGAL_a16, ILLEGAL_a16,
/*0xe8*/    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0xf0*/    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0xf8*/    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    // clang-format on
};
/* x87 DB-escape dispatch table for the 287 FPU, 32-bit addressing forms.
 * Same layout as the _a16 variant above, with 32-bit effective-address
 * handlers. Presumably indexed by the ModRM byte -- TODO confirm. */
const OpFn OP_TABLE(sf_fpu_287_db_a32)[256] = {
    // clang-format off
/*0x00*/    sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32,
/*0x08*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0x10*/    sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32,
/*0x18*/    sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32,
/*0x20*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0x28*/    sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32,
/*0x30*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0x38*/    sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32,
/*0x40*/    sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32,
/*0x48*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0x50*/    sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32,
/*0x58*/    sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32,
/*0x60*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0x68*/    sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32,
/*0x70*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0x78*/    sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32,
/*0x80*/    sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32,
/*0x88*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0x90*/    sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32,
/*0x98*/    sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32,
/*0xa0*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0xa8*/    sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32,
/*0xb0*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0xb8*/    sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32,
/*0xc0*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0xc8*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0xd0*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0xd8*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0xe0*/    sf_FNOP, sf_FNOP, sf_FNCLEX, sf_FNINIT, sf_FNOP, sf_FNOP, ILLEGAL_a32, ILLEGAL_a32,
/*0xe8*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0xf0*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0xf8*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    // clang-format on
};
/* x87 DB-escape dispatch table (generic FPU), 16-bit addressing forms.
 * Presumably indexed by the ModRM byte -- TODO confirm against the
 * dispatcher. Entries 0x00-0xBF are the memory forms (FILD/FIST(P) 32-bit
 * integer, FLD/FSTP 80-bit extended), repeated 8x per operation; 0xC0-0xFF
 * are register/no-operand forms with only the 0xE0 admin row populated. */
const OpFn OP_TABLE(sf_fpu_db_a16)[256] = {
    // clang-format off
    /*0x00*/ sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16,
    /*0x08*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    /*0x10*/ sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16,
    /*0x18*/ sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16,
    /*0x20*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    /*0x28*/ sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16,
    /*0x30*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    /*0x38*/ sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16,
    /*0x40*/ sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16,
    /*0x48*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    /*0x50*/ sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16,
    /*0x58*/ sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16,
    /*0x60*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    /*0x68*/ sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16,
    /*0x70*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    /*0x78*/ sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16,
    /*0x80*/ sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16,
    /*0x88*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    /*0x90*/ sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16,
    /*0x98*/ sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16,
    /*0xa0*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    /*0xa8*/ sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16,
    /*0xb0*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    /*0xb8*/ sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16,
    /*0xc0*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    /*0xc8*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    /*0xd0*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    /*0xd8*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    /*0xe0*/ sf_FNOP, sf_FNOP, sf_FNCLEX, sf_FNINIT, sf_FNOP, sf_FNOP, ILLEGAL_a16, ILLEGAL_a16,
    /*0xe8*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    /*0xf0*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    /*0xf8*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    // clang-format on
};
/* x87 DB-escape dispatch table (generic FPU), 32-bit addressing forms.
 * Same layout as sf_fpu_db_a16 above, with 32-bit effective-address
 * handlers. Presumably indexed by the ModRM byte -- TODO confirm. */
const OpFn OP_TABLE(sf_fpu_db_a32)[256] = {
    // clang-format off
/*0x00*/    sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32,
/*0x08*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0x10*/    sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32,
/*0x18*/    sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32,
/*0x20*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0x28*/    sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32,
/*0x30*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0x38*/    sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32,
/*0x40*/    sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32,
/*0x48*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0x50*/    sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32,
/*0x58*/    sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32,
/*0x60*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0x68*/    sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32,
/*0x70*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0x78*/    sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32,
/*0x80*/    sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32,
/*0x88*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0x90*/    sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32,
/*0x98*/    sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32,
/*0xa0*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0xa8*/    sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32,
/*0xb0*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0xb8*/    sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32,
/*0xc0*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0xc8*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0xd0*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0xd8*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0xe0*/    sf_FNOP, sf_FNOP, sf_FNCLEX, sf_FNINIT, sf_FNOP, sf_FNOP, ILLEGAL_a32, ILLEGAL_a32,
/*0xe8*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0xf0*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0xf8*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    // clang-format on
};
# ifndef OPS_286_386
/* x87 DB-escape dispatch table for 686-class CPUs, 16-bit addressing forms.
 * Same memory-form layout (0x00-0xBF) as the generic DB tables, but the
 * register rows add the P6 conditional moves/compares: FCMOVNcc at
 * 0xC0-0xDF, FUCOMI at 0xE8-0xEF, FCOMI at 0xF0-0xF7. Presumably indexed
 * by the ModRM byte -- TODO confirm against the dispatcher. */
const OpFn OP_TABLE(sf_fpu_686_db_a16)[256] = {
    // clang-format off
/*0x00*/    sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16,
/*0x08*/    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0x10*/    sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16,
/*0x18*/    sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16,
/*0x20*/    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0x28*/    sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16,
/*0x30*/    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0x38*/    sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16,
/*0x40*/    sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16,
/*0x48*/    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0x50*/    sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16,
/*0x58*/    sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16,
/*0x60*/    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0x68*/    sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16,
/*0x70*/    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0x78*/    sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16,
/*0x80*/    sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16, sf_FILDil_a16,
/*0x88*/    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0x90*/    sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16, sf_FISTil_a16,
/*0x98*/    sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16, sf_FISTPil_a16,
/*0xa0*/    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0xa8*/    sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16, sf_FLDe_a16,
/*0xb0*/    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0xb8*/    sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16, sf_FSTPe_a16,
/*0xc0*/    sf_FCMOVNB, sf_FCMOVNB, sf_FCMOVNB, sf_FCMOVNB, sf_FCMOVNB, sf_FCMOVNB, sf_FCMOVNB, sf_FCMOVNB,
/*0xc8*/    sf_FCMOVNE, sf_FCMOVNE, sf_FCMOVNE, sf_FCMOVNE, sf_FCMOVNE, sf_FCMOVNE, sf_FCMOVNE, sf_FCMOVNE,
/*0xd0*/    sf_FCMOVNBE, sf_FCMOVNBE, sf_FCMOVNBE, sf_FCMOVNBE, sf_FCMOVNBE, sf_FCMOVNBE, sf_FCMOVNBE, sf_FCMOVNBE,
/*0xd8*/    sf_FCMOVNU, sf_FCMOVNU, sf_FCMOVNU, sf_FCMOVNU, sf_FCMOVNU, sf_FCMOVNU, sf_FCMOVNU, sf_FCMOVNU,
/*0xe0*/    sf_FNOP, sf_FNOP, sf_FNCLEX, sf_FNINIT, sf_FNOP, sf_FNOP, ILLEGAL_a16, ILLEGAL_a16,
/*0xe8*/    sf_FUCOMI_st0_stj, sf_FUCOMI_st0_stj, sf_FUCOMI_st0_stj, sf_FUCOMI_st0_stj, sf_FUCOMI_st0_stj, sf_FUCOMI_st0_stj, sf_FUCOMI_st0_stj, sf_FUCOMI_st0_stj,
/*0xf0*/    sf_FCOMI_st0_stj, sf_FCOMI_st0_stj, sf_FCOMI_st0_stj, sf_FCOMI_st0_stj, sf_FCOMI_st0_stj, sf_FCOMI_st0_stj, sf_FCOMI_st0_stj, sf_FCOMI_st0_stj,
/*0xf8*/    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    // clang-format on
};
/* x87 DB-escape dispatch table for 686-class CPUs, 32-bit addressing forms.
 * Same layout as the _a16 variant above, with 32-bit effective-address
 * handlers. Presumably indexed by the ModRM byte -- TODO confirm. */
const OpFn OP_TABLE(sf_fpu_686_db_a32)[256] = {
    // clang-format off
/*0x00*/    sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32,
/*0x08*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0x10*/    sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32,
/*0x18*/    sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32,
/*0x20*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0x28*/    sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32,
/*0x30*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0x38*/    sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32,
/*0x40*/    sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32,
/*0x48*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0x50*/    sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32,
/*0x58*/    sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32,
/*0x60*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0x68*/    sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32,
/*0x70*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0x78*/    sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32,
/*0x80*/    sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32, sf_FILDil_a32,
/*0x88*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0x90*/    sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32, sf_FISTil_a32,
/*0x98*/    sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32, sf_FISTPil_a32,
/*0xa0*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0xa8*/    sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32, sf_FLDe_a32,
/*0xb0*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0xb8*/    sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32, sf_FSTPe_a32,
/*0xc0*/    sf_FCMOVNB, sf_FCMOVNB, sf_FCMOVNB, sf_FCMOVNB, sf_FCMOVNB, sf_FCMOVNB, sf_FCMOVNB, sf_FCMOVNB,
/*0xc8*/    sf_FCMOVNE, sf_FCMOVNE, sf_FCMOVNE, sf_FCMOVNE, sf_FCMOVNE, sf_FCMOVNE, sf_FCMOVNE, sf_FCMOVNE,
/*0xd0*/    sf_FCMOVNBE, sf_FCMOVNBE, sf_FCMOVNBE, sf_FCMOVNBE, sf_FCMOVNBE, sf_FCMOVNBE, sf_FCMOVNBE, sf_FCMOVNBE,
/*0xd8*/    sf_FCMOVNU, sf_FCMOVNU, sf_FCMOVNU, sf_FCMOVNU, sf_FCMOVNU, sf_FCMOVNU, sf_FCMOVNU, sf_FCMOVNU,
/*0xe0*/    sf_FNOP, sf_FNOP, sf_FNCLEX, sf_FNINIT, sf_FNOP, sf_FNOP, ILLEGAL_a32, ILLEGAL_a32,
/*0xe8*/    sf_FUCOMI_st0_stj, sf_FUCOMI_st0_stj, sf_FUCOMI_st0_stj, sf_FUCOMI_st0_stj, sf_FUCOMI_st0_stj, sf_FUCOMI_st0_stj, sf_FUCOMI_st0_stj, sf_FUCOMI_st0_stj,
/*0xf0*/    sf_FCOMI_st0_stj, sf_FCOMI_st0_stj, sf_FCOMI_st0_stj, sf_FCOMI_st0_stj, sf_FCOMI_st0_stj, sf_FCOMI_st0_stj, sf_FCOMI_st0_stj, sf_FCOMI_st0_stj,
/*0xf8*/    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    // clang-format on
};
# endif
/* x87 DC-escape dispatch table for the 287 FPU, 16-bit addressing forms.
 * Only 32 entries: looks like mod (rows 0-2 = memory double-precision
 * forms, row 3 = register ST(i) forms) x reg -- TODO confirm the exact
 * index computed by the consumer. Note the register row swaps FSUBR/FSUB
 * and FDIVR/FDIV relative to the memory rows, and leaves the FCOM/FCOMP
 * register slots ILLEGAL on the 287. */
const OpFn OP_TABLE(sf_fpu_287_dc_a16)[32] = {
    // clang-format off
/*0x00*/    sf_FADDd_a16, sf_FMULd_a16, sf_FCOMd_a16, sf_FCOMPd_a16, sf_FSUBd_a16, sf_FSUBRd_a16, sf_FDIVd_a16, sf_FDIVRd_a16,
/*0x08*/    sf_FADDd_a16, sf_FMULd_a16, sf_FCOMd_a16, sf_FCOMPd_a16, sf_FSUBd_a16, sf_FSUBRd_a16, sf_FDIVd_a16, sf_FDIVRd_a16,
/*0x10*/    sf_FADDd_a16, sf_FMULd_a16, sf_FCOMd_a16, sf_FCOMPd_a16, sf_FSUBd_a16, sf_FSUBRd_a16, sf_FDIVd_a16, sf_FDIVRd_a16,
/*0x18*/    sf_FADD_sti_st0, sf_FMUL_sti_st0, ILLEGAL_a16, ILLEGAL_a16, sf_FSUBR_sti_st0, sf_FSUB_sti_st0, sf_FDIVR_sti_st0, sf_FDIV_sti_st0,
    // clang-format on
};
/* x87 DC-escape dispatch table for the 287 FPU, 32-bit addressing forms.
 * Same layout as the _a16 variant above, with 32-bit effective-address
 * handlers; FCOM/FCOMP register slots remain ILLEGAL. */
const OpFn OP_TABLE(sf_fpu_287_dc_a32)[32] = {
    // clang-format off
/*0x00*/    sf_FADDd_a32, sf_FMULd_a32, sf_FCOMd_a32, sf_FCOMPd_a32, sf_FSUBd_a32, sf_FSUBRd_a32, sf_FDIVd_a32, sf_FDIVRd_a32,
/*0x08*/    sf_FADDd_a32, sf_FMULd_a32, sf_FCOMd_a32, sf_FCOMPd_a32, sf_FSUBd_a32, sf_FSUBRd_a32, sf_FDIVd_a32, sf_FDIVRd_a32,
/*0x10*/    sf_FADDd_a32, sf_FMULd_a32, sf_FCOMd_a32, sf_FCOMPd_a32, sf_FSUBd_a32, sf_FSUBRd_a32, sf_FDIVd_a32, sf_FDIVRd_a32,
/*0x18*/    sf_FADD_sti_st0, sf_FMUL_sti_st0, ILLEGAL_a32, ILLEGAL_a32, sf_FSUBR_sti_st0, sf_FSUB_sti_st0, sf_FDIVR_sti_st0, sf_FDIV_sti_st0,
    // clang-format on
};
/* x87 DC-escape dispatch table (generic FPU), 16-bit addressing forms.
 * Only 32 entries: looks like mod (rows 0-2 = memory double-precision
 * forms, row 3 = register ST(i) forms) x reg -- TODO confirm the exact
 * index computed by the consumer. Unlike the 287 variant, the FCOM/FCOMP
 * register forms are implemented; the register row still swaps FSUBR/FSUB
 * and FDIVR/FDIV relative to the memory rows. */
const OpFn OP_TABLE(sf_fpu_dc_a16)[32] = {
    // clang-format off
/*0x00*/    sf_FADDd_a16, sf_FMULd_a16, sf_FCOMd_a16, sf_FCOMPd_a16, sf_FSUBd_a16, sf_FSUBRd_a16, sf_FDIVd_a16, sf_FDIVRd_a16,
/*0x08*/    sf_FADDd_a16, sf_FMULd_a16, sf_FCOMd_a16, sf_FCOMPd_a16, sf_FSUBd_a16, sf_FSUBRd_a16, sf_FDIVd_a16, sf_FDIVRd_a16,
/*0x10*/    sf_FADDd_a16, sf_FMULd_a16, sf_FCOMd_a16, sf_FCOMPd_a16, sf_FSUBd_a16, sf_FSUBRd_a16, sf_FDIVd_a16, sf_FDIVRd_a16,
/*0x18*/    sf_FADD_sti_st0, sf_FMUL_sti_st0, sf_FCOM_sti, sf_FCOMP_sti, sf_FSUBR_sti_st0, sf_FSUB_sti_st0, sf_FDIVR_sti_st0, sf_FDIV_sti_st0,
    // clang-format on
};
/* x87 DC-escape dispatch table (generic FPU), 32-bit addressing forms.
 * Same layout as the _a16 variant above, with 32-bit effective-address
 * handlers. */
const OpFn OP_TABLE(sf_fpu_dc_a32)[32] = {
    // clang-format off
/*0x00*/    sf_FADDd_a32, sf_FMULd_a32, sf_FCOMd_a32, sf_FCOMPd_a32, sf_FSUBd_a32, sf_FSUBRd_a32, sf_FDIVd_a32, sf_FDIVRd_a32,
/*0x08*/    sf_FADDd_a32, sf_FMULd_a32, sf_FCOMd_a32, sf_FCOMPd_a32, sf_FSUBd_a32, sf_FSUBRd_a32, sf_FDIVd_a32, sf_FDIVRd_a32,
/*0x10*/    sf_FADDd_a32, sf_FMULd_a32, sf_FCOMd_a32, sf_FCOMPd_a32, sf_FSUBd_a32, sf_FSUBRd_a32, sf_FDIVd_a32, sf_FDIVRd_a32,
/*0x18*/    sf_FADD_sti_st0, sf_FMUL_sti_st0, sf_FCOM_sti, sf_FCOMP_sti, sf_FSUBR_sti_st0, sf_FSUB_sti_st0, sf_FDIVR_sti_st0, sf_FDIV_sti_st0,
    // clang-format on
};
/* x87 DD-escape dispatch table for the 287 FPU, 16-bit addressing forms.
 * Presumably indexed by the ModRM byte -- TODO confirm against the
 * dispatcher. Rows 0x00-0xBF: memory forms (double-precision load/store,
 * FRSTOR/FNSAVE state save-restore, FNSTSW status-word store), each
 * repeated 8x. Rows 0xC0-0xFF: register forms (FFREE/FST/FSTP ST(i)). */
const OpFn OP_TABLE(sf_fpu_287_dd_a16)[256] = {
    // clang-format off
/*0x00*/    sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16,
/*0x08*/    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0x10*/    sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16,
/*0x18*/    sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16,
/*0x20*/    sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16,
/*0x28*/    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0x30*/    sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16,
/*0x38*/    sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16,
/*0x40*/    sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16,
/*0x48*/    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0x50*/    sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16,
/*0x58*/    sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16,
/*0x60*/    sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16,
/*0x68*/    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0x70*/    sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16,
/*0x78*/    sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16,
/*0x80*/    sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16,
/*0x88*/    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0x90*/    sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16,
/*0x98*/    sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16,
/*0xa0*/    sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16,
/*0xa8*/    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0xb0*/    sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16,
/*0xb8*/    sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16,
/*0xc0*/    sf_FFREE_sti, sf_FFREE_sti, sf_FFREE_sti, sf_FFREE_sti, sf_FFREE_sti, sf_FFREE_sti, sf_FFREE_sti, sf_FFREE_sti,
/*0xc8*/    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0xd0*/    sf_FST_sti, sf_FST_sti, sf_FST_sti, sf_FST_sti, sf_FST_sti, sf_FST_sti, sf_FST_sti, sf_FST_sti,
/*0xd8*/    sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti,
/*0xe0*/    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0xe8*/    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0xf0*/    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0xf8*/    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    // clang-format on
};
/* 287 FPU: dispatch table for opcode escape DD, 32-bit addressing.
 * 256 entries — presumably indexed by the instruction's ModRM byte (TODO
 * confirm against the dispatcher); each run of 8 entries shares one handler.
 * NOTE(review): rows 0xc8 (FXCH) and 0xe0/0xe8 (FUCOM/FUCOMP) of the
 * non-287 DD table are ILLEGAL here — looks like an intentional 287
 * feature gap, confirm. */
const OpFn OP_TABLE(sf_fpu_287_dd_a32)[256] = {
    // clang-format off
    /*0x00*/ sf_FLDd_a32, sf_FLDd_a32, sf_FLDd_a32, sf_FLDd_a32, sf_FLDd_a32, sf_FLDd_a32, sf_FLDd_a32, sf_FLDd_a32,
    /*0x08*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    /*0x10*/ sf_FSTd_a32, sf_FSTd_a32, sf_FSTd_a32, sf_FSTd_a32, sf_FSTd_a32, sf_FSTd_a32, sf_FSTd_a32, sf_FSTd_a32,
    /*0x18*/ sf_FSTPd_a32, sf_FSTPd_a32, sf_FSTPd_a32, sf_FSTPd_a32, sf_FSTPd_a32, sf_FSTPd_a32, sf_FSTPd_a32, sf_FSTPd_a32,
    /*0x20*/ sf_FRSTOR_a32, sf_FRSTOR_a32, sf_FRSTOR_a32, sf_FRSTOR_a32, sf_FRSTOR_a32, sf_FRSTOR_a32, sf_FRSTOR_a32, sf_FRSTOR_a32,
    /*0x28*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    /*0x30*/ sf_FNSAVE_a32, sf_FNSAVE_a32, sf_FNSAVE_a32, sf_FNSAVE_a32, sf_FNSAVE_a32, sf_FNSAVE_a32, sf_FNSAVE_a32, sf_FNSAVE_a32,
    /*0x38*/ sf_FNSTSW_a32, sf_FNSTSW_a32, sf_FNSTSW_a32, sf_FNSTSW_a32, sf_FNSTSW_a32, sf_FNSTSW_a32, sf_FNSTSW_a32, sf_FNSTSW_a32,
    /*0x40*/ sf_FLDd_a32, sf_FLDd_a32, sf_FLDd_a32, sf_FLDd_a32, sf_FLDd_a32, sf_FLDd_a32, sf_FLDd_a32, sf_FLDd_a32,
    /*0x48*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    /*0x50*/ sf_FSTd_a32, sf_FSTd_a32, sf_FSTd_a32, sf_FSTd_a32, sf_FSTd_a32, sf_FSTd_a32, sf_FSTd_a32, sf_FSTd_a32,
    /*0x58*/ sf_FSTPd_a32, sf_FSTPd_a32, sf_FSTPd_a32, sf_FSTPd_a32, sf_FSTPd_a32, sf_FSTPd_a32, sf_FSTPd_a32, sf_FSTPd_a32,
    /*0x60*/ sf_FRSTOR_a32, sf_FRSTOR_a32, sf_FRSTOR_a32, sf_FRSTOR_a32, sf_FRSTOR_a32, sf_FRSTOR_a32, sf_FRSTOR_a32, sf_FRSTOR_a32,
    /*0x68*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    /*0x70*/ sf_FNSAVE_a32, sf_FNSAVE_a32, sf_FNSAVE_a32, sf_FNSAVE_a32, sf_FNSAVE_a32, sf_FNSAVE_a32, sf_FNSAVE_a32, sf_FNSAVE_a32,
    /*0x78*/ sf_FNSTSW_a32, sf_FNSTSW_a32, sf_FNSTSW_a32, sf_FNSTSW_a32, sf_FNSTSW_a32, sf_FNSTSW_a32, sf_FNSTSW_a32, sf_FNSTSW_a32,
    /*0x80*/ sf_FLDd_a32, sf_FLDd_a32, sf_FLDd_a32, sf_FLDd_a32, sf_FLDd_a32, sf_FLDd_a32, sf_FLDd_a32, sf_FLDd_a32,
    /*0x88*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    /*0x90*/ sf_FSTd_a32, sf_FSTd_a32, sf_FSTd_a32, sf_FSTd_a32, sf_FSTd_a32, sf_FSTd_a32, sf_FSTd_a32, sf_FSTd_a32,
    /*0x98*/ sf_FSTPd_a32, sf_FSTPd_a32, sf_FSTPd_a32, sf_FSTPd_a32, sf_FSTPd_a32, sf_FSTPd_a32, sf_FSTPd_a32, sf_FSTPd_a32,
    /*0xa0*/ sf_FRSTOR_a32, sf_FRSTOR_a32, sf_FRSTOR_a32, sf_FRSTOR_a32, sf_FRSTOR_a32, sf_FRSTOR_a32, sf_FRSTOR_a32, sf_FRSTOR_a32,
    /*0xa8*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    /*0xb0*/ sf_FNSAVE_a32, sf_FNSAVE_a32, sf_FNSAVE_a32, sf_FNSAVE_a32, sf_FNSAVE_a32, sf_FNSAVE_a32, sf_FNSAVE_a32, sf_FNSAVE_a32,
    /*0xb8*/ sf_FNSTSW_a32, sf_FNSTSW_a32, sf_FNSTSW_a32, sf_FNSTSW_a32, sf_FNSTSW_a32, sf_FNSTSW_a32, sf_FNSTSW_a32, sf_FNSTSW_a32,
    /*0xc0*/ sf_FFREE_sti, sf_FFREE_sti, sf_FFREE_sti, sf_FFREE_sti, sf_FFREE_sti, sf_FFREE_sti, sf_FFREE_sti, sf_FFREE_sti,
    /*0xc8*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    /*0xd0*/ sf_FST_sti, sf_FST_sti, sf_FST_sti, sf_FST_sti, sf_FST_sti, sf_FST_sti, sf_FST_sti, sf_FST_sti,
    /*0xd8*/ sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti,
    /*0xe0*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    /*0xe8*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    /*0xf0*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    /*0xf8*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    // clang-format on
};
/* FPU dispatch table for opcode escape DD, 16-bit addressing.
 * 256 entries — presumably indexed by the instruction's ModRM byte (TODO
 * confirm against the dispatcher); each run of 8 entries shares one handler.
 * 0xc0+ rows are the ST(i) register forms (FFREE/FXCH/FST/FSTP/FUCOM/
 * FUCOMP); 0xf0-0xff are ILLEGAL. */
const OpFn OP_TABLE(sf_fpu_dd_a16)[256] = {
    // clang-format off
    /*0x00*/ sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16,
    /*0x08*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    /*0x10*/ sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16,
    /*0x18*/ sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16,
    /*0x20*/ sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16,
    /*0x28*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    /*0x30*/ sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16,
    /*0x38*/ sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16,
    /*0x40*/ sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16,
    /*0x48*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    /*0x50*/ sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16,
    /*0x58*/ sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16,
    /*0x60*/ sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16,
    /*0x68*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    /*0x70*/ sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16,
    /*0x78*/ sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16,
    /*0x80*/ sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16, sf_FLDd_a16,
    /*0x88*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    /*0x90*/ sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16, sf_FSTd_a16,
    /*0x98*/ sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16, sf_FSTPd_a16,
    /*0xa0*/ sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16, sf_FRSTOR_a16,
    /*0xa8*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    /*0xb0*/ sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16, sf_FNSAVE_a16,
    /*0xb8*/ sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16, sf_FNSTSW_a16,
    /*0xc0*/ sf_FFREE_sti, sf_FFREE_sti, sf_FFREE_sti, sf_FFREE_sti, sf_FFREE_sti, sf_FFREE_sti, sf_FFREE_sti, sf_FFREE_sti,
    /*0xc8*/ sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti,
    /*0xd0*/ sf_FST_sti, sf_FST_sti, sf_FST_sti, sf_FST_sti, sf_FST_sti, sf_FST_sti, sf_FST_sti, sf_FST_sti,
    /*0xd8*/ sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti,
    /*0xe0*/ sf_FUCOM_sti, sf_FUCOM_sti, sf_FUCOM_sti, sf_FUCOM_sti, sf_FUCOM_sti, sf_FUCOM_sti, sf_FUCOM_sti, sf_FUCOM_sti,
    /*0xe8*/ sf_FUCOMP_sti, sf_FUCOMP_sti, sf_FUCOMP_sti, sf_FUCOMP_sti, sf_FUCOMP_sti, sf_FUCOMP_sti, sf_FUCOMP_sti, sf_FUCOMP_sti,
    /*0xf0*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    /*0xf8*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    // clang-format on
};
/* FPU dispatch table for opcode escape DD, 32-bit addressing.
 * 256 entries — presumably indexed by the instruction's ModRM byte (TODO
 * confirm against the dispatcher); each run of 8 entries shares one handler.
 * Mirrors sf_fpu_dd_a16 with 32-bit address-size handlers. */
const OpFn OP_TABLE(sf_fpu_dd_a32)[256] = {
    // clang-format off
    /*0x00*/ sf_FLDd_a32, sf_FLDd_a32, sf_FLDd_a32, sf_FLDd_a32, sf_FLDd_a32, sf_FLDd_a32, sf_FLDd_a32, sf_FLDd_a32,
    /*0x08*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    /*0x10*/ sf_FSTd_a32, sf_FSTd_a32, sf_FSTd_a32, sf_FSTd_a32, sf_FSTd_a32, sf_FSTd_a32, sf_FSTd_a32, sf_FSTd_a32,
    /*0x18*/ sf_FSTPd_a32, sf_FSTPd_a32, sf_FSTPd_a32, sf_FSTPd_a32, sf_FSTPd_a32, sf_FSTPd_a32, sf_FSTPd_a32, sf_FSTPd_a32,
    /*0x20*/ sf_FRSTOR_a32, sf_FRSTOR_a32, sf_FRSTOR_a32, sf_FRSTOR_a32, sf_FRSTOR_a32, sf_FRSTOR_a32, sf_FRSTOR_a32, sf_FRSTOR_a32,
    /*0x28*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    /*0x30*/ sf_FNSAVE_a32, sf_FNSAVE_a32, sf_FNSAVE_a32, sf_FNSAVE_a32, sf_FNSAVE_a32, sf_FNSAVE_a32, sf_FNSAVE_a32, sf_FNSAVE_a32,
    /*0x38*/ sf_FNSTSW_a32, sf_FNSTSW_a32, sf_FNSTSW_a32, sf_FNSTSW_a32, sf_FNSTSW_a32, sf_FNSTSW_a32, sf_FNSTSW_a32, sf_FNSTSW_a32,
    /*0x40*/ sf_FLDd_a32, sf_FLDd_a32, sf_FLDd_a32, sf_FLDd_a32, sf_FLDd_a32, sf_FLDd_a32, sf_FLDd_a32, sf_FLDd_a32,
    /*0x48*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    /*0x50*/ sf_FSTd_a32, sf_FSTd_a32, sf_FSTd_a32, sf_FSTd_a32, sf_FSTd_a32, sf_FSTd_a32, sf_FSTd_a32, sf_FSTd_a32,
    /*0x58*/ sf_FSTPd_a32, sf_FSTPd_a32, sf_FSTPd_a32, sf_FSTPd_a32, sf_FSTPd_a32, sf_FSTPd_a32, sf_FSTPd_a32, sf_FSTPd_a32,
    /*0x60*/ sf_FRSTOR_a32, sf_FRSTOR_a32, sf_FRSTOR_a32, sf_FRSTOR_a32, sf_FRSTOR_a32, sf_FRSTOR_a32, sf_FRSTOR_a32, sf_FRSTOR_a32,
    /*0x68*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    /*0x70*/ sf_FNSAVE_a32, sf_FNSAVE_a32, sf_FNSAVE_a32, sf_FNSAVE_a32, sf_FNSAVE_a32, sf_FNSAVE_a32, sf_FNSAVE_a32, sf_FNSAVE_a32,
    /*0x78*/ sf_FNSTSW_a32, sf_FNSTSW_a32, sf_FNSTSW_a32, sf_FNSTSW_a32, sf_FNSTSW_a32, sf_FNSTSW_a32, sf_FNSTSW_a32, sf_FNSTSW_a32,
    /*0x80*/ sf_FLDd_a32, sf_FLDd_a32, sf_FLDd_a32, sf_FLDd_a32, sf_FLDd_a32, sf_FLDd_a32, sf_FLDd_a32, sf_FLDd_a32,
    /*0x88*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    /*0x90*/ sf_FSTd_a32, sf_FSTd_a32, sf_FSTd_a32, sf_FSTd_a32, sf_FSTd_a32, sf_FSTd_a32, sf_FSTd_a32, sf_FSTd_a32,
    /*0x98*/ sf_FSTPd_a32, sf_FSTPd_a32, sf_FSTPd_a32, sf_FSTPd_a32, sf_FSTPd_a32, sf_FSTPd_a32, sf_FSTPd_a32, sf_FSTPd_a32,
    /*0xa0*/ sf_FRSTOR_a32, sf_FRSTOR_a32, sf_FRSTOR_a32, sf_FRSTOR_a32, sf_FRSTOR_a32, sf_FRSTOR_a32, sf_FRSTOR_a32, sf_FRSTOR_a32,
    /*0xa8*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    /*0xb0*/ sf_FNSAVE_a32, sf_FNSAVE_a32, sf_FNSAVE_a32, sf_FNSAVE_a32, sf_FNSAVE_a32, sf_FNSAVE_a32, sf_FNSAVE_a32, sf_FNSAVE_a32,
    /*0xb8*/ sf_FNSTSW_a32, sf_FNSTSW_a32, sf_FNSTSW_a32, sf_FNSTSW_a32, sf_FNSTSW_a32, sf_FNSTSW_a32, sf_FNSTSW_a32, sf_FNSTSW_a32,
    /*0xc0*/ sf_FFREE_sti, sf_FFREE_sti, sf_FFREE_sti, sf_FFREE_sti, sf_FFREE_sti, sf_FFREE_sti, sf_FFREE_sti, sf_FFREE_sti,
    /*0xc8*/ sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti,
    /*0xd0*/ sf_FST_sti, sf_FST_sti, sf_FST_sti, sf_FST_sti, sf_FST_sti, sf_FST_sti, sf_FST_sti, sf_FST_sti,
    /*0xd8*/ sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti,
    /*0xe0*/ sf_FUCOM_sti, sf_FUCOM_sti, sf_FUCOM_sti, sf_FUCOM_sti, sf_FUCOM_sti, sf_FUCOM_sti, sf_FUCOM_sti, sf_FUCOM_sti,
    /*0xe8*/ sf_FUCOMP_sti, sf_FUCOMP_sti, sf_FUCOMP_sti, sf_FUCOMP_sti, sf_FUCOMP_sti, sf_FUCOMP_sti, sf_FUCOMP_sti, sf_FUCOMP_sti,
    /*0xf0*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    /*0xf8*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    // clang-format on
};
/* 287 FPU: dispatch table for opcode escape DE, 16-bit addressing.
 * 256 entries — presumably indexed by the instruction's ModRM byte (TODO
 * confirm against the dispatcher); each run of 8 entries shares one handler.
 * 0xd9 is FCOMPP.  NOTE(review): row 0xd0 is ILLEGAL here but FCOMP_sti
 * in the non-287 DE table — confirm intentional. */
const OpFn OP_TABLE(sf_fpu_287_de_a16)[256] = {
    // clang-format off
    /*0x00*/ sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16,
    /*0x08*/ sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16,
    /*0x10*/ sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16,
    /*0x18*/ sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16,
    /*0x20*/ sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16,
    /*0x28*/ sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16,
    /*0x30*/ sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16,
    /*0x38*/ sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16,
    /*0x40*/ sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16,
    /*0x48*/ sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16,
    /*0x50*/ sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16,
    /*0x58*/ sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16,
    /*0x60*/ sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16,
    /*0x68*/ sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16,
    /*0x70*/ sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16,
    /*0x78*/ sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16,
    /*0x80*/ sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16,
    /*0x88*/ sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16,
    /*0x90*/ sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16,
    /*0x98*/ sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16,
    /*0xa0*/ sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16,
    /*0xa8*/ sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16,
    /*0xb0*/ sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16,
    /*0xb8*/ sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16,
    /*0xc0*/ sf_FADDP_sti_st0, sf_FADDP_sti_st0, sf_FADDP_sti_st0, sf_FADDP_sti_st0, sf_FADDP_sti_st0, sf_FADDP_sti_st0, sf_FADDP_sti_st0, sf_FADDP_sti_st0,
    /*0xc8*/ sf_FMULP_sti_st0, sf_FMULP_sti_st0, sf_FMULP_sti_st0, sf_FMULP_sti_st0, sf_FMULP_sti_st0, sf_FMULP_sti_st0, sf_FMULP_sti_st0, sf_FMULP_sti_st0,
    /*0xd0*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    /*0xd8*/ ILLEGAL_a16, sf_FCOMPP, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    /*0xe0*/ sf_FSUBRP_sti_st0, sf_FSUBRP_sti_st0, sf_FSUBRP_sti_st0, sf_FSUBRP_sti_st0, sf_FSUBRP_sti_st0, sf_FSUBRP_sti_st0, sf_FSUBRP_sti_st0, sf_FSUBRP_sti_st0,
    /*0xe8*/ sf_FSUBP_sti_st0, sf_FSUBP_sti_st0, sf_FSUBP_sti_st0, sf_FSUBP_sti_st0, sf_FSUBP_sti_st0, sf_FSUBP_sti_st0, sf_FSUBP_sti_st0, sf_FSUBP_sti_st0,
    /*0xf0*/ sf_FDIVRP_sti_st0, sf_FDIVRP_sti_st0, sf_FDIVRP_sti_st0, sf_FDIVRP_sti_st0, sf_FDIVRP_sti_st0, sf_FDIVRP_sti_st0, sf_FDIVRP_sti_st0, sf_FDIVRP_sti_st0,
    /*0xf8*/ sf_FDIVP_sti_st0, sf_FDIVP_sti_st0, sf_FDIVP_sti_st0, sf_FDIVP_sti_st0, sf_FDIVP_sti_st0, sf_FDIVP_sti_st0, sf_FDIVP_sti_st0, sf_FDIVP_sti_st0,
    // clang-format on
};
/* 287 FPU: dispatch table for opcode escape DE, 32-bit addressing.
 * 256 entries — presumably indexed by the instruction's ModRM byte (TODO
 * confirm against the dispatcher); each run of 8 entries shares one handler.
 * 0xd9 is FCOMPP.  NOTE(review): row 0xd0 is ILLEGAL here but FCOMP_sti
 * in the non-287 DE table — confirm intentional. */
const OpFn OP_TABLE(sf_fpu_287_de_a32)[256] = {
    // clang-format off
    /*0x00*/ sf_FADDiw_a32, sf_FADDiw_a32, sf_FADDiw_a32, sf_FADDiw_a32, sf_FADDiw_a32, sf_FADDiw_a32, sf_FADDiw_a32, sf_FADDiw_a32,
    /*0x08*/ sf_FMULiw_a32, sf_FMULiw_a32, sf_FMULiw_a32, sf_FMULiw_a32, sf_FMULiw_a32, sf_FMULiw_a32, sf_FMULiw_a32, sf_FMULiw_a32,
    /*0x10*/ sf_FCOMiw_a32, sf_FCOMiw_a32, sf_FCOMiw_a32, sf_FCOMiw_a32, sf_FCOMiw_a32, sf_FCOMiw_a32, sf_FCOMiw_a32, sf_FCOMiw_a32,
    /*0x18*/ sf_FCOMPiw_a32, sf_FCOMPiw_a32, sf_FCOMPiw_a32, sf_FCOMPiw_a32, sf_FCOMPiw_a32, sf_FCOMPiw_a32, sf_FCOMPiw_a32, sf_FCOMPiw_a32,
    /*0x20*/ sf_FSUBiw_a32, sf_FSUBiw_a32, sf_FSUBiw_a32, sf_FSUBiw_a32, sf_FSUBiw_a32, sf_FSUBiw_a32, sf_FSUBiw_a32, sf_FSUBiw_a32,
    /*0x28*/ sf_FSUBRiw_a32, sf_FSUBRiw_a32, sf_FSUBRiw_a32, sf_FSUBRiw_a32, sf_FSUBRiw_a32, sf_FSUBRiw_a32, sf_FSUBRiw_a32, sf_FSUBRiw_a32,
    /*0x30*/ sf_FDIViw_a32, sf_FDIViw_a32, sf_FDIViw_a32, sf_FDIViw_a32, sf_FDIViw_a32, sf_FDIViw_a32, sf_FDIViw_a32, sf_FDIViw_a32,
    /*0x38*/ sf_FDIVRiw_a32, sf_FDIVRiw_a32, sf_FDIVRiw_a32, sf_FDIVRiw_a32, sf_FDIVRiw_a32, sf_FDIVRiw_a32, sf_FDIVRiw_a32, sf_FDIVRiw_a32,
    /*0x40*/ sf_FADDiw_a32, sf_FADDiw_a32, sf_FADDiw_a32, sf_FADDiw_a32, sf_FADDiw_a32, sf_FADDiw_a32, sf_FADDiw_a32, sf_FADDiw_a32,
    /*0x48*/ sf_FMULiw_a32, sf_FMULiw_a32, sf_FMULiw_a32, sf_FMULiw_a32, sf_FMULiw_a32, sf_FMULiw_a32, sf_FMULiw_a32, sf_FMULiw_a32,
    /*0x50*/ sf_FCOMiw_a32, sf_FCOMiw_a32, sf_FCOMiw_a32, sf_FCOMiw_a32, sf_FCOMiw_a32, sf_FCOMiw_a32, sf_FCOMiw_a32, sf_FCOMiw_a32,
    /*0x58*/ sf_FCOMPiw_a32, sf_FCOMPiw_a32, sf_FCOMPiw_a32, sf_FCOMPiw_a32, sf_FCOMPiw_a32, sf_FCOMPiw_a32, sf_FCOMPiw_a32, sf_FCOMPiw_a32,
    /*0x60*/ sf_FSUBiw_a32, sf_FSUBiw_a32, sf_FSUBiw_a32, sf_FSUBiw_a32, sf_FSUBiw_a32, sf_FSUBiw_a32, sf_FSUBiw_a32, sf_FSUBiw_a32,
    /*0x68*/ sf_FSUBRiw_a32, sf_FSUBRiw_a32, sf_FSUBRiw_a32, sf_FSUBRiw_a32, sf_FSUBRiw_a32, sf_FSUBRiw_a32, sf_FSUBRiw_a32, sf_FSUBRiw_a32,
    /*0x70*/ sf_FDIViw_a32, sf_FDIViw_a32, sf_FDIViw_a32, sf_FDIViw_a32, sf_FDIViw_a32, sf_FDIViw_a32, sf_FDIViw_a32, sf_FDIViw_a32,
    /*0x78*/ sf_FDIVRiw_a32, sf_FDIVRiw_a32, sf_FDIVRiw_a32, sf_FDIVRiw_a32, sf_FDIVRiw_a32, sf_FDIVRiw_a32, sf_FDIVRiw_a32, sf_FDIVRiw_a32,
    /*0x80*/ sf_FADDiw_a32, sf_FADDiw_a32, sf_FADDiw_a32, sf_FADDiw_a32, sf_FADDiw_a32, sf_FADDiw_a32, sf_FADDiw_a32, sf_FADDiw_a32,
    /*0x88*/ sf_FMULiw_a32, sf_FMULiw_a32, sf_FMULiw_a32, sf_FMULiw_a32, sf_FMULiw_a32, sf_FMULiw_a32, sf_FMULiw_a32, sf_FMULiw_a32,
    /*0x90*/ sf_FCOMiw_a32, sf_FCOMiw_a32, sf_FCOMiw_a32, sf_FCOMiw_a32, sf_FCOMiw_a32, sf_FCOMiw_a32, sf_FCOMiw_a32, sf_FCOMiw_a32,
    /*0x98*/ sf_FCOMPiw_a32, sf_FCOMPiw_a32, sf_FCOMPiw_a32, sf_FCOMPiw_a32, sf_FCOMPiw_a32, sf_FCOMPiw_a32, sf_FCOMPiw_a32, sf_FCOMPiw_a32,
    /*0xa0*/ sf_FSUBiw_a32, sf_FSUBiw_a32, sf_FSUBiw_a32, sf_FSUBiw_a32, sf_FSUBiw_a32, sf_FSUBiw_a32, sf_FSUBiw_a32, sf_FSUBiw_a32,
    /*0xa8*/ sf_FSUBRiw_a32, sf_FSUBRiw_a32, sf_FSUBRiw_a32, sf_FSUBRiw_a32, sf_FSUBRiw_a32, sf_FSUBRiw_a32, sf_FSUBRiw_a32, sf_FSUBRiw_a32,
    /*0xb0*/ sf_FDIViw_a32, sf_FDIViw_a32, sf_FDIViw_a32, sf_FDIViw_a32, sf_FDIViw_a32, sf_FDIViw_a32, sf_FDIViw_a32, sf_FDIViw_a32,
    /*0xb8*/ sf_FDIVRiw_a32, sf_FDIVRiw_a32, sf_FDIVRiw_a32, sf_FDIVRiw_a32, sf_FDIVRiw_a32, sf_FDIVRiw_a32, sf_FDIVRiw_a32, sf_FDIVRiw_a32,
    /*0xc0*/ sf_FADDP_sti_st0, sf_FADDP_sti_st0, sf_FADDP_sti_st0, sf_FADDP_sti_st0, sf_FADDP_sti_st0, sf_FADDP_sti_st0, sf_FADDP_sti_st0, sf_FADDP_sti_st0,
    /*0xc8*/ sf_FMULP_sti_st0, sf_FMULP_sti_st0, sf_FMULP_sti_st0, sf_FMULP_sti_st0, sf_FMULP_sti_st0, sf_FMULP_sti_st0, sf_FMULP_sti_st0, sf_FMULP_sti_st0,
    /*0xd0*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    /*0xd8*/ ILLEGAL_a32, sf_FCOMPP, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    /*0xe0*/ sf_FSUBRP_sti_st0, sf_FSUBRP_sti_st0, sf_FSUBRP_sti_st0, sf_FSUBRP_sti_st0, sf_FSUBRP_sti_st0, sf_FSUBRP_sti_st0, sf_FSUBRP_sti_st0, sf_FSUBRP_sti_st0,
    /*0xe8*/ sf_FSUBP_sti_st0, sf_FSUBP_sti_st0, sf_FSUBP_sti_st0, sf_FSUBP_sti_st0, sf_FSUBP_sti_st0, sf_FSUBP_sti_st0, sf_FSUBP_sti_st0, sf_FSUBP_sti_st0,
    /*0xf0*/ sf_FDIVRP_sti_st0, sf_FDIVRP_sti_st0, sf_FDIVRP_sti_st0, sf_FDIVRP_sti_st0, sf_FDIVRP_sti_st0, sf_FDIVRP_sti_st0, sf_FDIVRP_sti_st0, sf_FDIVRP_sti_st0,
    /*0xf8*/ sf_FDIVP_sti_st0, sf_FDIVP_sti_st0, sf_FDIVP_sti_st0, sf_FDIVP_sti_st0, sf_FDIVP_sti_st0, sf_FDIVP_sti_st0, sf_FDIVP_sti_st0, sf_FDIVP_sti_st0,
    // clang-format on
};
/* FPU dispatch table for opcode escape DE, 16-bit addressing.
 * 256 entries — presumably indexed by the instruction's ModRM byte (TODO
 * confirm against the dispatcher); each run of 8 entries shares one handler.
 * 0xc0+ rows are the ST(i),ST(0) pop forms; 0xd9 is FCOMPP. */
const OpFn OP_TABLE(sf_fpu_de_a16)[256] = {
    // clang-format off
    /*0x00*/ sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16,
    /*0x08*/ sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16,
    /*0x10*/ sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16,
    /*0x18*/ sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16,
    /*0x20*/ sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16,
    /*0x28*/ sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16,
    /*0x30*/ sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16,
    /*0x38*/ sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16,
    /*0x40*/ sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16,
    /*0x48*/ sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16,
    /*0x50*/ sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16,
    /*0x58*/ sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16,
    /*0x60*/ sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16,
    /*0x68*/ sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16,
    /*0x70*/ sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16,
    /*0x78*/ sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16,
    /*0x80*/ sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16, sf_FADDiw_a16,
    /*0x88*/ sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16, sf_FMULiw_a16,
    /*0x90*/ sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16, sf_FCOMiw_a16,
    /*0x98*/ sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16, sf_FCOMPiw_a16,
    /*0xa0*/ sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16, sf_FSUBiw_a16,
    /*0xa8*/ sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16, sf_FSUBRiw_a16,
    /*0xb0*/ sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16, sf_FDIViw_a16,
    /*0xb8*/ sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16, sf_FDIVRiw_a16,
    /*0xc0*/ sf_FADDP_sti_st0, sf_FADDP_sti_st0, sf_FADDP_sti_st0, sf_FADDP_sti_st0, sf_FADDP_sti_st0, sf_FADDP_sti_st0, sf_FADDP_sti_st0, sf_FADDP_sti_st0,
    /*0xc8*/ sf_FMULP_sti_st0, sf_FMULP_sti_st0, sf_FMULP_sti_st0, sf_FMULP_sti_st0, sf_FMULP_sti_st0, sf_FMULP_sti_st0, sf_FMULP_sti_st0, sf_FMULP_sti_st0,
    /*0xd0*/ sf_FCOMP_sti, sf_FCOMP_sti, sf_FCOMP_sti, sf_FCOMP_sti, sf_FCOMP_sti, sf_FCOMP_sti, sf_FCOMP_sti, sf_FCOMP_sti,
    /*0xd8*/ ILLEGAL_a16, sf_FCOMPP, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    /*0xe0*/ sf_FSUBRP_sti_st0, sf_FSUBRP_sti_st0, sf_FSUBRP_sti_st0, sf_FSUBRP_sti_st0, sf_FSUBRP_sti_st0, sf_FSUBRP_sti_st0, sf_FSUBRP_sti_st0, sf_FSUBRP_sti_st0,
    /*0xe8*/ sf_FSUBP_sti_st0, sf_FSUBP_sti_st0, sf_FSUBP_sti_st0, sf_FSUBP_sti_st0, sf_FSUBP_sti_st0, sf_FSUBP_sti_st0, sf_FSUBP_sti_st0, sf_FSUBP_sti_st0,
    /*0xf0*/ sf_FDIVRP_sti_st0, sf_FDIVRP_sti_st0, sf_FDIVRP_sti_st0, sf_FDIVRP_sti_st0, sf_FDIVRP_sti_st0, sf_FDIVRP_sti_st0, sf_FDIVRP_sti_st0, sf_FDIVRP_sti_st0,
    /*0xf8*/ sf_FDIVP_sti_st0, sf_FDIVP_sti_st0, sf_FDIVP_sti_st0, sf_FDIVP_sti_st0, sf_FDIVP_sti_st0, sf_FDIVP_sti_st0, sf_FDIVP_sti_st0, sf_FDIVP_sti_st0,
    // clang-format on
};
/* FPU dispatch table for opcode escape DE, 32-bit addressing.
 * 256 entries — presumably indexed by the instruction's ModRM byte (TODO
 * confirm against the dispatcher); each run of 8 entries shares one handler.
 * Mirrors sf_fpu_de_a16 with 32-bit address-size handlers. */
const OpFn OP_TABLE(sf_fpu_de_a32)[256] = {
    // clang-format off
    /*0x00*/ sf_FADDiw_a32, sf_FADDiw_a32, sf_FADDiw_a32, sf_FADDiw_a32, sf_FADDiw_a32, sf_FADDiw_a32, sf_FADDiw_a32, sf_FADDiw_a32,
    /*0x08*/ sf_FMULiw_a32, sf_FMULiw_a32, sf_FMULiw_a32, sf_FMULiw_a32, sf_FMULiw_a32, sf_FMULiw_a32, sf_FMULiw_a32, sf_FMULiw_a32,
    /*0x10*/ sf_FCOMiw_a32, sf_FCOMiw_a32, sf_FCOMiw_a32, sf_FCOMiw_a32, sf_FCOMiw_a32, sf_FCOMiw_a32, sf_FCOMiw_a32, sf_FCOMiw_a32,
    /*0x18*/ sf_FCOMPiw_a32, sf_FCOMPiw_a32, sf_FCOMPiw_a32, sf_FCOMPiw_a32, sf_FCOMPiw_a32, sf_FCOMPiw_a32, sf_FCOMPiw_a32, sf_FCOMPiw_a32,
    /*0x20*/ sf_FSUBiw_a32, sf_FSUBiw_a32, sf_FSUBiw_a32, sf_FSUBiw_a32, sf_FSUBiw_a32, sf_FSUBiw_a32, sf_FSUBiw_a32, sf_FSUBiw_a32,
    /*0x28*/ sf_FSUBRiw_a32, sf_FSUBRiw_a32, sf_FSUBRiw_a32, sf_FSUBRiw_a32, sf_FSUBRiw_a32, sf_FSUBRiw_a32, sf_FSUBRiw_a32, sf_FSUBRiw_a32,
    /*0x30*/ sf_FDIViw_a32, sf_FDIViw_a32, sf_FDIViw_a32, sf_FDIViw_a32, sf_FDIViw_a32, sf_FDIViw_a32, sf_FDIViw_a32, sf_FDIViw_a32,
    /*0x38*/ sf_FDIVRiw_a32, sf_FDIVRiw_a32, sf_FDIVRiw_a32, sf_FDIVRiw_a32, sf_FDIVRiw_a32, sf_FDIVRiw_a32, sf_FDIVRiw_a32, sf_FDIVRiw_a32,
    /*0x40*/ sf_FADDiw_a32, sf_FADDiw_a32, sf_FADDiw_a32, sf_FADDiw_a32, sf_FADDiw_a32, sf_FADDiw_a32, sf_FADDiw_a32, sf_FADDiw_a32,
    /*0x48*/ sf_FMULiw_a32, sf_FMULiw_a32, sf_FMULiw_a32, sf_FMULiw_a32, sf_FMULiw_a32, sf_FMULiw_a32, sf_FMULiw_a32, sf_FMULiw_a32,
    /*0x50*/ sf_FCOMiw_a32, sf_FCOMiw_a32, sf_FCOMiw_a32, sf_FCOMiw_a32, sf_FCOMiw_a32, sf_FCOMiw_a32, sf_FCOMiw_a32, sf_FCOMiw_a32,
    /*0x58*/ sf_FCOMPiw_a32, sf_FCOMPiw_a32, sf_FCOMPiw_a32, sf_FCOMPiw_a32, sf_FCOMPiw_a32, sf_FCOMPiw_a32, sf_FCOMPiw_a32, sf_FCOMPiw_a32,
    /*0x60*/ sf_FSUBiw_a32, sf_FSUBiw_a32, sf_FSUBiw_a32, sf_FSUBiw_a32, sf_FSUBiw_a32, sf_FSUBiw_a32, sf_FSUBiw_a32, sf_FSUBiw_a32,
    /*0x68*/ sf_FSUBRiw_a32, sf_FSUBRiw_a32, sf_FSUBRiw_a32, sf_FSUBRiw_a32, sf_FSUBRiw_a32, sf_FSUBRiw_a32, sf_FSUBRiw_a32, sf_FSUBRiw_a32,
    /*0x70*/ sf_FDIViw_a32, sf_FDIViw_a32, sf_FDIViw_a32, sf_FDIViw_a32, sf_FDIViw_a32, sf_FDIViw_a32, sf_FDIViw_a32, sf_FDIViw_a32,
    /*0x78*/ sf_FDIVRiw_a32, sf_FDIVRiw_a32, sf_FDIVRiw_a32, sf_FDIVRiw_a32, sf_FDIVRiw_a32, sf_FDIVRiw_a32, sf_FDIVRiw_a32, sf_FDIVRiw_a32,
    /*0x80*/ sf_FADDiw_a32, sf_FADDiw_a32, sf_FADDiw_a32, sf_FADDiw_a32, sf_FADDiw_a32, sf_FADDiw_a32, sf_FADDiw_a32, sf_FADDiw_a32,
    /*0x88*/ sf_FMULiw_a32, sf_FMULiw_a32, sf_FMULiw_a32, sf_FMULiw_a32, sf_FMULiw_a32, sf_FMULiw_a32, sf_FMULiw_a32, sf_FMULiw_a32,
    /*0x90*/ sf_FCOMiw_a32, sf_FCOMiw_a32, sf_FCOMiw_a32, sf_FCOMiw_a32, sf_FCOMiw_a32, sf_FCOMiw_a32, sf_FCOMiw_a32, sf_FCOMiw_a32,
    /*0x98*/ sf_FCOMPiw_a32, sf_FCOMPiw_a32, sf_FCOMPiw_a32, sf_FCOMPiw_a32, sf_FCOMPiw_a32, sf_FCOMPiw_a32, sf_FCOMPiw_a32, sf_FCOMPiw_a32,
    /*0xa0*/ sf_FSUBiw_a32, sf_FSUBiw_a32, sf_FSUBiw_a32, sf_FSUBiw_a32, sf_FSUBiw_a32, sf_FSUBiw_a32, sf_FSUBiw_a32, sf_FSUBiw_a32,
    /*0xa8*/ sf_FSUBRiw_a32, sf_FSUBRiw_a32, sf_FSUBRiw_a32, sf_FSUBRiw_a32, sf_FSUBRiw_a32, sf_FSUBRiw_a32, sf_FSUBRiw_a32, sf_FSUBRiw_a32,
    /*0xb0*/ sf_FDIViw_a32, sf_FDIViw_a32, sf_FDIViw_a32, sf_FDIViw_a32, sf_FDIViw_a32, sf_FDIViw_a32, sf_FDIViw_a32, sf_FDIViw_a32,
    /*0xb8*/ sf_FDIVRiw_a32, sf_FDIVRiw_a32, sf_FDIVRiw_a32, sf_FDIVRiw_a32, sf_FDIVRiw_a32, sf_FDIVRiw_a32, sf_FDIVRiw_a32, sf_FDIVRiw_a32,
    /*0xc0*/ sf_FADDP_sti_st0, sf_FADDP_sti_st0, sf_FADDP_sti_st0, sf_FADDP_sti_st0, sf_FADDP_sti_st0, sf_FADDP_sti_st0, sf_FADDP_sti_st0, sf_FADDP_sti_st0,
    /*0xc8*/ sf_FMULP_sti_st0, sf_FMULP_sti_st0, sf_FMULP_sti_st0, sf_FMULP_sti_st0, sf_FMULP_sti_st0, sf_FMULP_sti_st0, sf_FMULP_sti_st0, sf_FMULP_sti_st0,
    /*0xd0*/ sf_FCOMP_sti, sf_FCOMP_sti, sf_FCOMP_sti, sf_FCOMP_sti, sf_FCOMP_sti, sf_FCOMP_sti, sf_FCOMP_sti, sf_FCOMP_sti,
    /*0xd8*/ ILLEGAL_a32, sf_FCOMPP, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    /*0xe0*/ sf_FSUBRP_sti_st0, sf_FSUBRP_sti_st0, sf_FSUBRP_sti_st0, sf_FSUBRP_sti_st0, sf_FSUBRP_sti_st0, sf_FSUBRP_sti_st0, sf_FSUBRP_sti_st0, sf_FSUBRP_sti_st0,
    /*0xe8*/ sf_FSUBP_sti_st0, sf_FSUBP_sti_st0, sf_FSUBP_sti_st0, sf_FSUBP_sti_st0, sf_FSUBP_sti_st0, sf_FSUBP_sti_st0, sf_FSUBP_sti_st0, sf_FSUBP_sti_st0,
    /*0xf0*/ sf_FDIVRP_sti_st0, sf_FDIVRP_sti_st0, sf_FDIVRP_sti_st0, sf_FDIVRP_sti_st0, sf_FDIVRP_sti_st0, sf_FDIVRP_sti_st0, sf_FDIVRP_sti_st0, sf_FDIVRP_sti_st0,
    /*0xf8*/ sf_FDIVP_sti_st0, sf_FDIVP_sti_st0, sf_FDIVP_sti_st0, sf_FDIVP_sti_st0, sf_FDIVP_sti_st0, sf_FDIVP_sti_st0, sf_FDIVP_sti_st0, sf_FDIVP_sti_st0,
    // clang-format on
};
/* 287 FPU: dispatch table for opcode escape DF, 16-bit addressing.
 * 256 entries — presumably indexed by the instruction's ModRM byte (TODO
 * confirm against the dispatcher); each run of 8 entries shares one handler.
 * Register forms: FFREEP ST(i) at 0xc0 and FNSTSW AX at 0xe0; all other
 * 0xc0+ rows are ILLEGAL. */
const OpFn OP_TABLE(sf_fpu_287_df_a16)[256] = {
    // clang-format off
    /*0x00*/ sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16,
    /*0x08*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    /*0x10*/ sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16,
    /*0x18*/ sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16,
    /*0x20*/ sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16,
    /*0x28*/ sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16,
    /*0x30*/ sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16,
    /*0x38*/ sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16,
    /*0x40*/ sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16,
    /*0x48*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    /*0x50*/ sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16,
    /*0x58*/ sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16,
    /*0x60*/ sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16,
    /*0x68*/ sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16,
    /*0x70*/ sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16,
    /*0x78*/ sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16,
    /*0x80*/ sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16,
    /*0x88*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    /*0x90*/ sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16,
    /*0x98*/ sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16,
    /*0xa0*/ sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16,
    /*0xa8*/ sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16,
    /*0xb0*/ sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16,
    /*0xb8*/ sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16,
    /*0xc0*/ sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti,
    /*0xc8*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    /*0xd0*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    /*0xd8*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    /*0xe0*/ sf_FNSTSW_AX, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    /*0xe8*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    /*0xf0*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    /*0xf8*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    // clang-format on
};
/* 287 FPU, opcode DF, 32-bit addressing: dispatch indexed by the full ModR/M
   byte.  Rows 0x00-0xbf (mod != 3, memory operands) repeat every 0x40 because
   only the reg field selects the handler; rows 0xc0-0xff are the register
   forms.  Register rows 0xc8-0xd8 and 0xe8-0xf8 are ILLEGAL here, unlike the
   later sf_fpu_df_* / sf_fpu_686_df_* tables. */
const OpFn OP_TABLE(sf_fpu_287_df_a32)[256] = {
  // clang-format off
/*0x00*/ sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32,
/*0x08*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0x10*/ sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32,
/*0x18*/ sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32,
/*0x20*/ sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32,
/*0x28*/ sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32,
/*0x30*/ sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32,
/*0x38*/ sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32,
/*0x40*/ sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32,
/*0x48*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0x50*/ sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32,
/*0x58*/ sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32,
/*0x60*/ sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32,
/*0x68*/ sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32,
/*0x70*/ sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32,
/*0x78*/ sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32,
/*0x80*/ sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32,
/*0x88*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0x90*/ sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32,
/*0x98*/ sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32,
/*0xa0*/ sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32,
/*0xa8*/ sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32,
/*0xb0*/ sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32,
/*0xb8*/ sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32,
/*0xc0*/ sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti,
/*0xc8*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0xd0*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0xd8*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0xe0*/ sf_FNSTSW_AX, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0xe8*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0xf0*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0xf8*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
  // clang-format on
};
/* FPU, opcode DF, 16-bit addressing: dispatch indexed by the full ModR/M
   byte.  Rows 0x00-0xbf (mod != 3, memory operands) repeat every 0x40 because
   only the reg field selects the handler; rows 0xc0-0xff are the register
   forms (FFREEP/FXCH/FSTP aliases, FNSTSW AX at 0xe0). */
const OpFn OP_TABLE(sf_fpu_df_a16)[256] = {
  // clang-format off
/*0x00*/ sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16,
/*0x08*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0x10*/ sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16,
/*0x18*/ sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16,
/*0x20*/ sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16,
/*0x28*/ sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16,
/*0x30*/ sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16,
/*0x38*/ sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16,
/*0x40*/ sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16,
/*0x48*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0x50*/ sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16,
/*0x58*/ sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16,
/*0x60*/ sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16,
/*0x68*/ sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16,
/*0x70*/ sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16,
/*0x78*/ sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16,
/*0x80*/ sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16,
/*0x88*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0x90*/ sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16,
/*0x98*/ sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16,
/*0xa0*/ sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16,
/*0xa8*/ sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16,
/*0xb0*/ sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16,
/*0xb8*/ sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16,
/*0xc0*/ sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti,
/*0xc8*/ sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti,
/*0xd0*/ sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti,
/*0xd8*/ sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti,
/*0xe0*/ sf_FNSTSW_AX, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0xe8*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0xf0*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0xf8*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
  // clang-format on
};
/* FPU, opcode DF, 32-bit addressing: same layout as sf_fpu_df_a16 but with
   the 32-bit effective-address handlers.  Indexed by the full ModR/M byte;
   rows 0x00-0xbf (mod != 3) repeat every 0x40, rows 0xc0-0xff are the
   register forms. */
const OpFn OP_TABLE(sf_fpu_df_a32)[256] = {
  // clang-format off
/*0x00*/ sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32,
/*0x08*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0x10*/ sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32,
/*0x18*/ sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32,
/*0x20*/ sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32,
/*0x28*/ sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32,
/*0x30*/ sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32,
/*0x38*/ sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32,
/*0x40*/ sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32,
/*0x48*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0x50*/ sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32,
/*0x58*/ sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32,
/*0x60*/ sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32,
/*0x68*/ sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32,
/*0x70*/ sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32,
/*0x78*/ sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32,
/*0x80*/ sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32,
/*0x88*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0x90*/ sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32,
/*0x98*/ sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32,
/*0xa0*/ sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32,
/*0xa8*/ sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32,
/*0xb0*/ sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32,
/*0xb8*/ sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32,
/*0xc0*/ sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti,
/*0xc8*/ sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti,
/*0xd0*/ sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti,
/*0xd8*/ sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti,
/*0xe0*/ sf_FNSTSW_AX, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0xe8*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0xf0*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0xf8*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
  // clang-format on
};
# ifndef OPS_286_386
/* 686-class FPU, opcode DF, 16-bit addressing (guarded by !OPS_286_386).
   Same layout as sf_fpu_df_a16, except register rows 0xe8/0xf0 dispatch to
   sf_FUCOMIP_st0_stj / sf_FCOMIP_st0_stj instead of being illegal. */
const OpFn OP_TABLE(sf_fpu_686_df_a16)[256] = {
  // clang-format off
/*0x00*/ sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16,
/*0x08*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0x10*/ sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16,
/*0x18*/ sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16,
/*0x20*/ sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16,
/*0x28*/ sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16,
/*0x30*/ sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16,
/*0x38*/ sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16,
/*0x40*/ sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16,
/*0x48*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0x50*/ sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16,
/*0x58*/ sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16,
/*0x60*/ sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16,
/*0x68*/ sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16,
/*0x70*/ sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16,
/*0x78*/ sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16,
/*0x80*/ sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16, sf_FILDiw_a16,
/*0x88*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0x90*/ sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16, sf_FISTiw_a16,
/*0x98*/ sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16, sf_FISTPiw_a16,
/*0xa0*/ sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16, sf_FBLD_PACKED_BCD_a16,
/*0xa8*/ sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16, sf_FILDiq_a16,
/*0xb0*/ sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16, sf_FBSTP_PACKED_BCD_a16,
/*0xb8*/ sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16, sf_FISTPiq_a16,
/*0xc0*/ sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti,
/*0xc8*/ sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti,
/*0xd0*/ sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti,
/*0xd8*/ sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti,
/*0xe0*/ sf_FNSTSW_AX, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0xe8*/ sf_FUCOMIP_st0_stj, sf_FUCOMIP_st0_stj, sf_FUCOMIP_st0_stj, sf_FUCOMIP_st0_stj, sf_FUCOMIP_st0_stj, sf_FUCOMIP_st0_stj, sf_FUCOMIP_st0_stj, sf_FUCOMIP_st0_stj,
/*0xf0*/ sf_FCOMIP_st0_stj, sf_FCOMIP_st0_stj, sf_FCOMIP_st0_stj, sf_FCOMIP_st0_stj, sf_FCOMIP_st0_stj, sf_FCOMIP_st0_stj, sf_FCOMIP_st0_stj, sf_FCOMIP_st0_stj,
/*0xf8*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
  // clang-format on
};
/* 686-class FPU, opcode DF, 32-bit addressing (guarded by !OPS_286_386).
   Same layout as sf_fpu_df_a32, except register rows 0xe8/0xf0 dispatch to
   sf_FUCOMIP_st0_stj / sf_FCOMIP_st0_stj instead of being illegal. */
const OpFn OP_TABLE(sf_fpu_686_df_a32)[256] = {
  // clang-format off
/*0x00*/ sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32,
/*0x08*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0x10*/ sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32,
/*0x18*/ sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32,
/*0x20*/ sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32,
/*0x28*/ sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32,
/*0x30*/ sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32,
/*0x38*/ sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32,
/*0x40*/ sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32,
/*0x48*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0x50*/ sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32,
/*0x58*/ sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32,
/*0x60*/ sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32,
/*0x68*/ sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32,
/*0x70*/ sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32,
/*0x78*/ sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32,
/*0x80*/ sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32, sf_FILDiw_a32,
/*0x88*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0x90*/ sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32, sf_FISTiw_a32,
/*0x98*/ sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32, sf_FISTPiw_a32,
/*0xa0*/ sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32, sf_FBLD_PACKED_BCD_a32,
/*0xa8*/ sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32, sf_FILDiq_a32,
/*0xb0*/ sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32, sf_FBSTP_PACKED_BCD_a32,
/*0xb8*/ sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32, sf_FISTPiq_a32,
/*0xc0*/ sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti, sf_FFREEP_sti,
/*0xc8*/ sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti, sf_FXCH_sti,
/*0xd0*/ sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti,
/*0xd8*/ sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti, sf_FSTP_sti,
/*0xe0*/ sf_FNSTSW_AX, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0xe8*/ sf_FUCOMIP_st0_stj, sf_FUCOMIP_st0_stj, sf_FUCOMIP_st0_stj, sf_FUCOMIP_st0_stj, sf_FUCOMIP_st0_stj, sf_FUCOMIP_st0_stj, sf_FUCOMIP_st0_stj, sf_FUCOMIP_st0_stj,
/*0xf0*/ sf_FCOMIP_st0_stj, sf_FCOMIP_st0_stj, sf_FCOMIP_st0_stj, sf_FCOMIP_st0_stj, sf_FCOMIP_st0_stj, sf_FCOMIP_st0_stj, sf_FCOMIP_st0_stj, sf_FCOMIP_st0_stj,
/*0xf8*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
  // clang-format on
};
# endif
/* FPU, opcode D8, 16-bit addressing: 32 entries, so indexed by the mod and
   reg fields of the ModR/M byte (not the full byte).  The first three rows
   (mod = 0/1/2) use the memory-operand "..s" handlers; the last row
   (mod = 3) uses the register-operand forms. */
const OpFn OP_TABLE(fpu_d8_a16)[32] = {
  // clang-format off
/*mod 0*/ opFADDs_a16, opFMULs_a16, opFCOMs_a16, opFCOMPs_a16, opFSUBs_a16, opFSUBRs_a16, opFDIVs_a16, opFDIVRs_a16,
/*mod 1*/ opFADDs_a16, opFMULs_a16, opFCOMs_a16, opFCOMPs_a16, opFSUBs_a16, opFSUBRs_a16, opFDIVs_a16, opFDIVRs_a16,
/*mod 2*/ opFADDs_a16, opFMULs_a16, opFCOMs_a16, opFCOMPs_a16, opFSUBs_a16, opFSUBRs_a16, opFDIVs_a16, opFDIVRs_a16,
/*mod 3*/ opFADD, opFMUL, opFCOM, opFCOMP, opFSUB, opFSUBR, opFDIV, opFDIVR
  // clang-format on
};
/* FPU, opcode D8, 32-bit addressing: same layout as fpu_d8_a16 with the
   32-bit effective-address handlers; the register row (mod = 3) is shared. */
const OpFn OP_TABLE(fpu_d8_a32)[32] = {
  // clang-format off
/*mod 0*/ opFADDs_a32, opFMULs_a32, opFCOMs_a32, opFCOMPs_a32, opFSUBs_a32, opFSUBRs_a32, opFDIVs_a32, opFDIVRs_a32,
/*mod 1*/ opFADDs_a32, opFMULs_a32, opFCOMs_a32, opFCOMPs_a32, opFSUBs_a32, opFSUBRs_a32, opFDIVs_a32, opFDIVRs_a32,
/*mod 2*/ opFADDs_a32, opFMULs_a32, opFCOMs_a32, opFCOMPs_a32, opFSUBs_a32, opFSUBRs_a32, opFDIVs_a32, opFDIVRs_a32,
/*mod 3*/ opFADD, opFMUL, opFCOM, opFCOMP, opFSUB, opFSUBR, opFDIV, opFDIVR
  // clang-format on
};
/* 287 FPU, opcode D9, 16-bit addressing: dispatch indexed by the full ModR/M
   byte.  Memory rows (0x00-0xbf, repeating every 0x40): FLD/FST/FSTP m32 and
   the FLDENV/FLDCW/FSTENV/FSTCW environment/control-word ops.  Register rows
   hold FLD/FXCH/FNOP, the constant loads (FLD1..FLDZ) and the
   transcendental/arithmetic group; row 0xd8 is invalid on this table (the
   plain fpu_d9_* tables alias it to FSTP instead). */
const OpFn OP_TABLE(fpu_287_d9_a16)[256] = {
  // clang-format off
/*0x00*/ opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16,
/*0x08*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0x10*/ opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16,
/*0x18*/ opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16,
/*0x20*/ opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16,
/*0x28*/ opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16,
/*0x30*/ opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16,
/*0x38*/ opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16,
/*0x40*/ opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16,
/*0x48*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0x50*/ opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16,
/*0x58*/ opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16,
/*0x60*/ opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16,
/*0x68*/ opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16,
/*0x70*/ opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16,
/*0x78*/ opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16,
/*0x80*/ opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16,
/*0x88*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0x90*/ opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16,
/*0x98*/ opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16,
/*0xa0*/ opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16,
/*0xa8*/ opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16,
/*0xb0*/ opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16,
/*0xb8*/ opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16,
/*0xc0*/ opFLD, opFLD, opFLD, opFLD, opFLD, opFLD, opFLD, opFLD,
/*0xc8*/ opFXCH, opFXCH, opFXCH, opFXCH, opFXCH, opFXCH, opFXCH, opFXCH,
/*0xd0*/ opFNOP, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0xd8*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, /*Invalid*/
/*0xe0*/ opFCHS, opFABS, ILLEGAL_a16, ILLEGAL_a16, opFTST, opFXAM, ILLEGAL_a16, ILLEGAL_a16,
/*0xe8*/ opFLD1, opFLDL2T, opFLDL2E, opFLDPI, opFLDEG2, opFLDLN2, opFLDZ, ILLEGAL_a16,
/*0xf0*/ opF2XM1, opFYL2X, opFPTAN, opFPATAN, opFXTRACT, opFPREM1, opFDECSTP, opFINCSTP,
/*0xf8*/ opFPREM, opFYL2XP1, opFSQRT, opFSINCOS, opFRNDINT, opFSCALE, opFSIN, opFCOS
  // clang-format on
};
/* 287 FPU, opcode D9, 32-bit addressing: same layout as fpu_287_d9_a16 with
   the 32-bit effective-address handlers; register rows (0xc0-0xff) are
   shared.  Row 0xd8 is invalid on this table. */
const OpFn OP_TABLE(fpu_287_d9_a32)[256] = {
  // clang-format off
/*0x00*/ opFLDs_a32, opFLDs_a32, opFLDs_a32, opFLDs_a32, opFLDs_a32, opFLDs_a32, opFLDs_a32, opFLDs_a32,
/*0x08*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0x10*/ opFSTs_a32, opFSTs_a32, opFSTs_a32, opFSTs_a32, opFSTs_a32, opFSTs_a32, opFSTs_a32, opFSTs_a32,
/*0x18*/ opFSTPs_a32, opFSTPs_a32, opFSTPs_a32, opFSTPs_a32, opFSTPs_a32, opFSTPs_a32, opFSTPs_a32, opFSTPs_a32,
/*0x20*/ opFLDENV_a32, opFLDENV_a32, opFLDENV_a32, opFLDENV_a32, opFLDENV_a32, opFLDENV_a32, opFLDENV_a32, opFLDENV_a32,
/*0x28*/ opFLDCW_a32, opFLDCW_a32, opFLDCW_a32, opFLDCW_a32, opFLDCW_a32, opFLDCW_a32, opFLDCW_a32, opFLDCW_a32,
/*0x30*/ opFSTENV_a32, opFSTENV_a32, opFSTENV_a32, opFSTENV_a32, opFSTENV_a32, opFSTENV_a32, opFSTENV_a32, opFSTENV_a32,
/*0x38*/ opFSTCW_a32, opFSTCW_a32, opFSTCW_a32, opFSTCW_a32, opFSTCW_a32, opFSTCW_a32, opFSTCW_a32, opFSTCW_a32,
/*0x40*/ opFLDs_a32, opFLDs_a32, opFLDs_a32, opFLDs_a32, opFLDs_a32, opFLDs_a32, opFLDs_a32, opFLDs_a32,
/*0x48*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0x50*/ opFSTs_a32, opFSTs_a32, opFSTs_a32, opFSTs_a32, opFSTs_a32, opFSTs_a32, opFSTs_a32, opFSTs_a32,
/*0x58*/ opFSTPs_a32, opFSTPs_a32, opFSTPs_a32, opFSTPs_a32, opFSTPs_a32, opFSTPs_a32, opFSTPs_a32, opFSTPs_a32,
/*0x60*/ opFLDENV_a32, opFLDENV_a32, opFLDENV_a32, opFLDENV_a32, opFLDENV_a32, opFLDENV_a32, opFLDENV_a32, opFLDENV_a32,
/*0x68*/ opFLDCW_a32, opFLDCW_a32, opFLDCW_a32, opFLDCW_a32, opFLDCW_a32, opFLDCW_a32, opFLDCW_a32, opFLDCW_a32,
/*0x70*/ opFSTENV_a32, opFSTENV_a32, opFSTENV_a32, opFSTENV_a32, opFSTENV_a32, opFSTENV_a32, opFSTENV_a32, opFSTENV_a32,
/*0x78*/ opFSTCW_a32, opFSTCW_a32, opFSTCW_a32, opFSTCW_a32, opFSTCW_a32, opFSTCW_a32, opFSTCW_a32, opFSTCW_a32,
/*0x80*/ opFLDs_a32, opFLDs_a32, opFLDs_a32, opFLDs_a32, opFLDs_a32, opFLDs_a32, opFLDs_a32, opFLDs_a32,
/*0x88*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0x90*/ opFSTs_a32, opFSTs_a32, opFSTs_a32, opFSTs_a32, opFSTs_a32, opFSTs_a32, opFSTs_a32, opFSTs_a32,
/*0x98*/ opFSTPs_a32, opFSTPs_a32, opFSTPs_a32, opFSTPs_a32, opFSTPs_a32, opFSTPs_a32, opFSTPs_a32, opFSTPs_a32,
/*0xa0*/ opFLDENV_a32, opFLDENV_a32, opFLDENV_a32, opFLDENV_a32, opFLDENV_a32, opFLDENV_a32, opFLDENV_a32, opFLDENV_a32,
/*0xa8*/ opFLDCW_a32, opFLDCW_a32, opFLDCW_a32, opFLDCW_a32, opFLDCW_a32, opFLDCW_a32, opFLDCW_a32, opFLDCW_a32,
/*0xb0*/ opFSTENV_a32, opFSTENV_a32, opFSTENV_a32, opFSTENV_a32, opFSTENV_a32, opFSTENV_a32, opFSTENV_a32, opFSTENV_a32,
/*0xb8*/ opFSTCW_a32, opFSTCW_a32, opFSTCW_a32, opFSTCW_a32, opFSTCW_a32, opFSTCW_a32, opFSTCW_a32, opFSTCW_a32,
/*0xc0*/ opFLD, opFLD, opFLD, opFLD, opFLD, opFLD, opFLD, opFLD,
/*0xc8*/ opFXCH, opFXCH, opFXCH, opFXCH, opFXCH, opFXCH, opFXCH, opFXCH,
/*0xd0*/ opFNOP, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
/*0xd8*/ ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, /*Invalid*/
/*0xe0*/ opFCHS, opFABS, ILLEGAL_a32, ILLEGAL_a32, opFTST, opFXAM, ILLEGAL_a32, ILLEGAL_a32,
/*0xe8*/ opFLD1, opFLDL2T, opFLDL2E, opFLDPI, opFLDEG2, opFLDLN2, opFLDZ, ILLEGAL_a32,
/*0xf0*/ opF2XM1, opFYL2X, opFPTAN, opFPATAN, opFXTRACT, opFPREM1, opFDECSTP, opFINCSTP,
/*0xf8*/ opFPREM, opFYL2XP1, opFSQRT, opFSINCOS, opFRNDINT, opFSCALE, opFSIN, opFCOS
  // clang-format on
};
/* FPU, opcode D9, 16-bit addressing: same layout as fpu_287_d9_a16, except
   register row 0xd8 dispatches to opFSTP (marked invalid/alias by the
   original author) instead of being illegal. */
const OpFn OP_TABLE(fpu_d9_a16)[256] = {
  // clang-format off
/*0x00*/ opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16,
/*0x08*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0x10*/ opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16,
/*0x18*/ opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16,
/*0x20*/ opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16,
/*0x28*/ opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16,
/*0x30*/ opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16,
/*0x38*/ opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16,
/*0x40*/ opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16,
/*0x48*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0x50*/ opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16,
/*0x58*/ opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16,
/*0x60*/ opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16,
/*0x68*/ opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16,
/*0x70*/ opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16,
/*0x78*/ opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16,
/*0x80*/ opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16, opFLDs_a16,
/*0x88*/ ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0x90*/ opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16, opFSTs_a16,
/*0x98*/ opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16, opFSTPs_a16,
/*0xa0*/ opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16, opFLDENV_a16,
/*0xa8*/ opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16, opFLDCW_a16,
/*0xb0*/ opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16, opFSTENV_a16,
/*0xb8*/ opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16, opFSTCW_a16,
/*0xc0*/ opFLD, opFLD, opFLD, opFLD, opFLD, opFLD, opFLD, opFLD,
/*0xc8*/ opFXCH, opFXCH, opFXCH, opFXCH, opFXCH, opFXCH, opFXCH, opFXCH,
/*0xd0*/ opFNOP, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
/*0xd8*/ opFSTP, opFSTP, opFSTP, opFSTP, opFSTP, opFSTP, opFSTP, opFSTP, /*Invalid*/
/*0xe0*/ opFCHS, opFABS, ILLEGAL_a16, ILLEGAL_a16, opFTST, opFXAM, ILLEGAL_a16, ILLEGAL_a16,
/*0xe8*/ opFLD1, opFLDL2T, opFLDL2E, opFLDPI, opFLDEG2, opFLDLN2, opFLDZ, ILLEGAL_a16,
/*0xf0*/ opF2XM1, opFYL2X, opFPTAN, opFPATAN, opFXTRACT, opFPREM1, opFDECSTP, opFINCSTP,
/*0xf8*/ opFPREM, opFYL2XP1, opFSQRT, opFSINCOS, opFRNDINT, opFSCALE, opFSIN, opFCOS
  // clang-format on
};
/*
 * fpu_d9_a32: dispatch table for the D9 FPU escape opcode, 32-bit address
 * size.  One handler per value of the ModR/M byte that follows D9.
 * NOTE(review): structure inferred from the data itself — the first three
 * 64-entry groups are identical (memory-operand forms for mod = 00/01/10;
 * the rm bits only change the address decode) and the final 64 entries are
 * the mod = 11 register forms.  Confirm against the dispatcher that indexes
 * OP_TABLE().
 */
const OpFn OP_TABLE(fpu_d9_a32)[256] = {
// clang-format off
        /* mod = 00: memory forms (reg field selects FLD/FST/FSTP/FLDENV/FLDCW/FSTENV/FSTCW) */
        opFLDs_a32, opFLDs_a32, opFLDs_a32, opFLDs_a32, opFLDs_a32, opFLDs_a32, opFLDs_a32, opFLDs_a32,
        ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
        opFSTs_a32, opFSTs_a32, opFSTs_a32, opFSTs_a32, opFSTs_a32, opFSTs_a32, opFSTs_a32, opFSTs_a32,
        opFSTPs_a32, opFSTPs_a32, opFSTPs_a32, opFSTPs_a32, opFSTPs_a32, opFSTPs_a32, opFSTPs_a32, opFSTPs_a32,
        opFLDENV_a32, opFLDENV_a32, opFLDENV_a32, opFLDENV_a32, opFLDENV_a32, opFLDENV_a32, opFLDENV_a32, opFLDENV_a32,
        opFLDCW_a32, opFLDCW_a32, opFLDCW_a32, opFLDCW_a32, opFLDCW_a32, opFLDCW_a32, opFLDCW_a32, opFLDCW_a32,
        opFSTENV_a32, opFSTENV_a32, opFSTENV_a32, opFSTENV_a32, opFSTENV_a32, opFSTENV_a32, opFSTENV_a32, opFSTENV_a32,
        opFSTCW_a32, opFSTCW_a32, opFSTCW_a32, opFSTCW_a32, opFSTCW_a32, opFSTCW_a32, opFSTCW_a32, opFSTCW_a32,
        /* mod = 01: same memory handlers (displacement-only difference) */
        opFLDs_a32, opFLDs_a32, opFLDs_a32, opFLDs_a32, opFLDs_a32, opFLDs_a32, opFLDs_a32, opFLDs_a32,
        ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
        opFSTs_a32, opFSTs_a32, opFSTs_a32, opFSTs_a32, opFSTs_a32, opFSTs_a32, opFSTs_a32, opFSTs_a32,
        opFSTPs_a32, opFSTPs_a32, opFSTPs_a32, opFSTPs_a32, opFSTPs_a32, opFSTPs_a32, opFSTPs_a32, opFSTPs_a32,
        opFLDENV_a32, opFLDENV_a32, opFLDENV_a32, opFLDENV_a32, opFLDENV_a32, opFLDENV_a32, opFLDENV_a32, opFLDENV_a32,
        opFLDCW_a32, opFLDCW_a32, opFLDCW_a32, opFLDCW_a32, opFLDCW_a32, opFLDCW_a32, opFLDCW_a32, opFLDCW_a32,
        opFSTENV_a32, opFSTENV_a32, opFSTENV_a32, opFSTENV_a32, opFSTENV_a32, opFSTENV_a32, opFSTENV_a32, opFSTENV_a32,
        opFSTCW_a32, opFSTCW_a32, opFSTCW_a32, opFSTCW_a32, opFSTCW_a32, opFSTCW_a32, opFSTCW_a32, opFSTCW_a32,
        /* mod = 10: same memory handlers again */
        opFLDs_a32, opFLDs_a32, opFLDs_a32, opFLDs_a32, opFLDs_a32, opFLDs_a32, opFLDs_a32, opFLDs_a32,
        ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
        opFSTs_a32, opFSTs_a32, opFSTs_a32, opFSTs_a32, opFSTs_a32, opFSTs_a32, opFSTs_a32, opFSTs_a32,
        opFSTPs_a32, opFSTPs_a32, opFSTPs_a32, opFSTPs_a32, opFSTPs_a32, opFSTPs_a32, opFSTPs_a32, opFSTPs_a32,
        opFLDENV_a32, opFLDENV_a32, opFLDENV_a32, opFLDENV_a32, opFLDENV_a32, opFLDENV_a32, opFLDENV_a32, opFLDENV_a32,
        opFLDCW_a32, opFLDCW_a32, opFLDCW_a32, opFLDCW_a32, opFLDCW_a32, opFLDCW_a32, opFLDCW_a32, opFLDCW_a32,
        opFSTENV_a32, opFSTENV_a32, opFSTENV_a32, opFSTENV_a32, opFSTENV_a32, opFSTENV_a32, opFSTENV_a32, opFSTENV_a32,
        opFSTCW_a32, opFSTCW_a32, opFSTCW_a32, opFSTCW_a32, opFSTCW_a32, opFSTCW_a32, opFSTCW_a32, opFSTCW_a32,
        /* mod = 11: register forms (D9 C0..FF — FLD st(i), FXCH, constants, transcendentals) */
        opFLD, opFLD, opFLD, opFLD, opFLD, opFLD, opFLD, opFLD,
        opFXCH, opFXCH, opFXCH, opFXCH, opFXCH, opFXCH, opFXCH, opFXCH,
        opFNOP, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
        opFSTP, opFSTP, opFSTP, opFSTP, opFSTP, opFSTP, opFSTP, opFSTP, /*Invalid*/
        opFCHS, opFABS, ILLEGAL_a32, ILLEGAL_a32, opFTST, opFXAM, ILLEGAL_a32, ILLEGAL_a32,
        opFLD1, opFLDL2T, opFLDL2E, opFLDPI, opFLDEG2, opFLDLN2, opFLDZ, ILLEGAL_a32,
        opF2XM1, opFYL2X, opFPTAN, opFPATAN, opFXTRACT, opFPREM1, opFDECSTP, opFINCSTP,
        opFPREM, opFYL2XP1, opFSQRT, opFSINCOS, opFRNDINT, opFSCALE, opFSIN, opFCOS
// clang-format on
};
/*
 * fpu_287_da_a16: DA-escape dispatch for the 287 FPU, 16-bit address size;
 * indexed by the ModR/M byte.  Memory forms are the "il" handlers
 * (presumably integer-dword operands — inferred from the naming only).
 * All mod = 11 register forms are illegal here; the 287 table deliberately
 * lacks the FUCOMPP entry present in the later fpu_da_a16 table.
 */
const OpFn OP_TABLE(fpu_287_da_a16)[256] = {
// clang-format off
        /* mod = 00 */
        opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16,
        opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16,
        opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16,
        opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16,
        opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16,
        opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16,
        opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16,
        opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16,
        /* mod = 01 */
        opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16,
        opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16,
        opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16,
        opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16,
        opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16,
        opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16,
        opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16,
        opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16,
        /* mod = 10 */
        opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16,
        opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16,
        opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16,
        opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16,
        opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16,
        opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16,
        opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16,
        opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16,
        /* mod = 11: no register forms on the 287 — everything illegal */
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
// clang-format on
};
/*
 * fpu_287_da_a32: 32-bit address-size twin of fpu_287_da_a16 above —
 * identical layout, _a32 handler variants.  Indexed by the ModR/M byte;
 * three identical 64-entry memory groups (mod = 00/01/10), then all-illegal
 * mod = 11 register forms (no FUCOMPP on the 287).
 */
const OpFn OP_TABLE(fpu_287_da_a32)[256] = {
// clang-format off
        /* mod = 00 */
        opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32,
        opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32,
        opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32,
        opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32,
        opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32,
        opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32,
        opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32,
        opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32,
        /* mod = 01 */
        opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32,
        opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32,
        opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32,
        opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32,
        opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32,
        opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32,
        opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32,
        opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32,
        /* mod = 10 */
        opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32,
        opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32,
        opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32,
        opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32,
        opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32,
        opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32,
        opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32,
        opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32,
        /* mod = 11: no register forms on the 287 — everything illegal */
        ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
        ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
        ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
        ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
        ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
        ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
        ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
        ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
// clang-format on
};
/*
 * fpu_da_a16: DA-escape dispatch (387+ class), 16-bit address size; indexed
 * by the ModR/M byte.  Same memory layout as the 287 table above, but the
 * mod = 11 group gains a single valid entry: opFUCOMPP at offset 0xE9
 * (DA E9 = FUCOMPP).  No FCMOV here — that belongs to the fpu_686 tables.
 */
const OpFn OP_TABLE(fpu_da_a16)[256] = {
// clang-format off
        /* mod = 00 */
        opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16,
        opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16,
        opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16,
        opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16,
        opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16,
        opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16,
        opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16,
        opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16,
        /* mod = 01 */
        opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16,
        opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16,
        opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16,
        opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16,
        opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16,
        opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16,
        opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16,
        opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16,
        /* mod = 10 */
        opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16,
        opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16,
        opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16,
        opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16,
        opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16,
        opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16,
        opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16,
        opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16,
        /* mod = 11: only DA E9 (FUCOMPP) is valid */
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, opFUCOMPP, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
// clang-format on
};
/*
 * fpu_da_a32: 32-bit address-size twin of fpu_da_a16 — identical layout with
 * _a32 handler variants; the only valid mod = 11 entry is FUCOMPP (DA E9).
 */
const OpFn OP_TABLE(fpu_da_a32)[256] = {
// clang-format off
        /* mod = 00 */
        opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32,
        opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32,
        opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32,
        opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32,
        opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32,
        opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32,
        opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32,
        opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32,
        /* mod = 01 */
        opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32,
        opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32,
        opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32,
        opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32,
        opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32,
        opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32,
        opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32,
        opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32,
        /* mod = 10 */
        opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32,
        opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32,
        opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32,
        opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32,
        opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32,
        opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32,
        opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32,
        opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32,
        /* mod = 11: only DA E9 (FUCOMPP) is valid */
        ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
        ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
        ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
        ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
        ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
        ILLEGAL_a32, opFUCOMPP, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
        ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
        ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
// clang-format on
};
# ifndef OPS_286_386
/*
 * fpu_686_da_a16: DA-escape dispatch for P6-class CPUs, 16-bit address size;
 * indexed by the ModR/M byte.  Memory groups match the plain fpu_da table;
 * the mod = 11 group adds the FCMOVcc family at DA C0..DF
 * (FCMOVB / FCMOVE / FCMOVBE / FCMOVU, 8 st(i) encodings each) plus
 * FUCOMPP at DA E9.
 */
const OpFn OP_TABLE(fpu_686_da_a16)[256] = {
// clang-format off
        /* mod = 00 */
        opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16,
        opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16,
        opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16,
        opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16,
        opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16,
        opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16,
        opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16,
        opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16,
        /* mod = 01 */
        opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16,
        opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16,
        opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16,
        opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16,
        opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16,
        opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16,
        opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16,
        opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16,
        /* mod = 10 */
        opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16, opFADDil_a16,
        opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16, opFMULil_a16,
        opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16, opFCOMil_a16,
        opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16, opFCOMPil_a16,
        opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16, opFSUBil_a16,
        opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16, opFSUBRil_a16,
        opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16, opFDIVil_a16,
        opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16, opFDIVRil_a16,
        /* mod = 11: FCMOVcc (DA C0..DF) and FUCOMPP (DA E9) */
        opFCMOVB, opFCMOVB, opFCMOVB, opFCMOVB, opFCMOVB, opFCMOVB, opFCMOVB, opFCMOVB,
        opFCMOVE, opFCMOVE, opFCMOVE, opFCMOVE, opFCMOVE, opFCMOVE, opFCMOVE, opFCMOVE,
        opFCMOVBE, opFCMOVBE, opFCMOVBE, opFCMOVBE, opFCMOVBE, opFCMOVBE, opFCMOVBE, opFCMOVBE,
        opFCMOVU, opFCMOVU, opFCMOVU, opFCMOVU, opFCMOVU, opFCMOVU, opFCMOVU, opFCMOVU,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, opFUCOMPP, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
// clang-format on
};
/*
 * fpu_686_da_a32: 32-bit address-size twin of fpu_686_da_a16 — identical
 * layout with _a32 handler variants; register forms are FCMOVcc (DA C0..DF)
 * and FUCOMPP (DA E9).
 */
const OpFn OP_TABLE(fpu_686_da_a32)[256] = {
// clang-format off
        /* mod = 00 */
        opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32,
        opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32,
        opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32,
        opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32,
        opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32,
        opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32,
        opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32,
        opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32,
        /* mod = 01 */
        opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32,
        opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32,
        opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32,
        opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32,
        opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32,
        opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32,
        opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32,
        opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32,
        /* mod = 10 */
        opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32, opFADDil_a32,
        opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32, opFMULil_a32,
        opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32, opFCOMil_a32,
        opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32, opFCOMPil_a32,
        opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32, opFSUBil_a32,
        opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32, opFSUBRil_a32,
        opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32, opFDIVil_a32,
        opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32, opFDIVRil_a32,
        /* mod = 11: FCMOVcc (DA C0..DF) and FUCOMPP (DA E9) */
        opFCMOVB, opFCMOVB, opFCMOVB, opFCMOVB, opFCMOVB, opFCMOVB, opFCMOVB, opFCMOVB,
        opFCMOVE, opFCMOVE, opFCMOVE, opFCMOVE, opFCMOVE, opFCMOVE, opFCMOVE, opFCMOVE,
        opFCMOVBE, opFCMOVBE, opFCMOVBE, opFCMOVBE, opFCMOVBE, opFCMOVBE, opFCMOVBE, opFCMOVBE,
        opFCMOVU, opFCMOVU, opFCMOVU, opFCMOVU, opFCMOVU, opFCMOVU, opFCMOVU, opFCMOVU,
        ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
        ILLEGAL_a32, opFUCOMPP, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
        ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
        ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
// clang-format on
};
# endif
/*
 * fpu_287_db_a16: DB-escape dispatch for the 287 FPU, 16-bit address size;
 * indexed by the ModR/M byte.  Memory forms: FILD/FIST/FISTP (the "il"
 * handlers — presumably dword integers, inferred from naming) and the
 * extended-precision FLD/FSTP ("e" handlers).  In the mod = 11 group only
 * the DB E0..E5 control row is populated: FCLEX and FINIT dispatch to real
 * handlers while the surrounding slots go to opFNOP — presumably covering
 * FENI/FDISI/FSETPM-style no-op control instructions (TODO confirm).
 */
const OpFn OP_TABLE(fpu_287_db_a16)[256] = {
// clang-format off
        /* mod = 00 */
        opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16,
        opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16,
        /* mod = 01 */
        opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16,
        opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16,
        /* mod = 10 */
        opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16,
        opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16,
        /* mod = 11: only the DB E0..E5 control row is valid */
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        opFNOP, opFNOP, opFCLEX, opFINIT, opFNOP, opFNOP, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
        ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
// clang-format on
};
/*
 * fpu_287_db_a32: 32-bit address-size twin of fpu_287_db_a16 — identical
 * layout with _a32 handler variants.  Memory forms: FILD/FIST/FISTP integer
 * and FLD/FSTP extended; the only valid mod = 11 entries are the DB E0..E5
 * control row (FNOP/FNOP/FCLEX/FINIT/FNOP/FNOP).
 */
const OpFn OP_TABLE(fpu_287_db_a32)[256] = {
// clang-format off
        /* mod = 00 */
        opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32,
        ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
        opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32,
        opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32,
        ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
        opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32,
        ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
        opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32,
        /* mod = 01 */
        opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32,
        ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
        opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32,
        opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32,
        ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
        opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32,
        ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
        opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32,
        /* mod = 10 */
        opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32,
        ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
        opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32,
        opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32,
        ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
        opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32,
        ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
        opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32,
        /* mod = 11: only the DB E0..E5 control row is valid */
        ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
        ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
        ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
        ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
        opFNOP, opFNOP, opFCLEX, opFINIT, opFNOP, opFNOP, ILLEGAL_a32, ILLEGAL_a32,
        ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
        ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
        ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
// clang-format on
};
const OpFn OP_TABLE(fpu_db_a16)[256] = {
// clang-format off
opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16,
opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16,
opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16,
opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16,
opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16,
opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
opFNOP, opFNOP, opFCLEX, opFINIT, opFNOP, opFNOP, ILLEGAL_a16, ILLEGAL_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
// clang-format on
};
/*
 * 32-bit-addressing twin of OP_TABLE(fpu_db_a16): identical ModRM layout
 * (rows of 8 rm values; mod = 0/1/2 memory forms then mod = 3 register
 * forms), but dispatching to the _a32 effective-address handlers.
 */
const OpFn OP_TABLE(fpu_db_a32)[256] = {
    // clang-format off
    opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32,
    opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32,
    opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32,
    opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32,
    opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32,
    opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32,
    /* mod = 3: register forms (DB C0-FF) */
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    opFNOP, opFNOP, opFCLEX, opFINIT, opFNOP, opFNOP, ILLEGAL_a32, ILLEGAL_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    // clang-format on
};
# ifndef OPS_286_386
/*
 * DB-escape table for 686-class (P6) CPUs, 16-bit addressing.  Memory
 * forms (mod = 0/1/2) match the base fpu_db_a16 table; the mod = 3 rows
 * additionally implement the P6 conditional moves FCMOVNB/FCMOVNE/
 * FCMOVNBE/FCMOVNU (DB C0-DF) and the flag-setting compares FUCOMI
 * (DB E8-EF) and FCOMI (DB F0-F7).
 */
const OpFn OP_TABLE(fpu_686_db_a16)[256] = {
    // clang-format off
    opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16,
    opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16,
    opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16,
    opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16,
    opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16, opFILDil_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16, opFISTil_a16,
    opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16, opFISTPil_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16, opFLDe_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16, opFSTPe_a16,
    /* mod = 3: register forms (DB C0-FF), P6 extensions */
    opFCMOVNB, opFCMOVNB, opFCMOVNB, opFCMOVNB, opFCMOVNB, opFCMOVNB, opFCMOVNB, opFCMOVNB,
    opFCMOVNE, opFCMOVNE, opFCMOVNE, opFCMOVNE, opFCMOVNE, opFCMOVNE, opFCMOVNE, opFCMOVNE,
    opFCMOVNBE, opFCMOVNBE, opFCMOVNBE, opFCMOVNBE, opFCMOVNBE, opFCMOVNBE, opFCMOVNBE, opFCMOVNBE,
    opFCMOVNU, opFCMOVNU, opFCMOVNU, opFCMOVNU, opFCMOVNU, opFCMOVNU, opFCMOVNU, opFCMOVNU,
    opFNOP, opFNOP, opFCLEX, opFINIT, opFNOP, opFNOP, ILLEGAL_a16, ILLEGAL_a16,
    opFUCOMI, opFUCOMI, opFUCOMI, opFUCOMI, opFUCOMI, opFUCOMI, opFUCOMI, opFUCOMI,
    opFCOMI, opFCOMI, opFCOMI, opFCOMI, opFCOMI, opFCOMI, opFCOMI, opFCOMI,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    // clang-format on
};
/*
 * 32-bit-addressing twin of OP_TABLE(fpu_686_db_a16): identical layout,
 * dispatching memory forms to the _a32 handlers.  The mod = 3 register
 * forms (FCMOV*, FUCOMI, FCOMI, FCLEX/FINIT group) take no memory operand
 * and are shared with the a16 table.
 */
const OpFn OP_TABLE(fpu_686_db_a32)[256] = {
    // clang-format off
    opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32,
    opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32,
    opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32,
    opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32,
    opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32, opFILDil_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32, opFISTil_a32,
    opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32, opFISTPil_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32, opFLDe_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32, opFSTPe_a32,
    /* mod = 3: register forms (DB C0-FF), P6 extensions */
    opFCMOVNB, opFCMOVNB, opFCMOVNB, opFCMOVNB, opFCMOVNB, opFCMOVNB, opFCMOVNB, opFCMOVNB,
    opFCMOVNE, opFCMOVNE, opFCMOVNE, opFCMOVNE, opFCMOVNE, opFCMOVNE, opFCMOVNE, opFCMOVNE,
    opFCMOVNBE, opFCMOVNBE, opFCMOVNBE, opFCMOVNBE, opFCMOVNBE, opFCMOVNBE, opFCMOVNBE, opFCMOVNBE,
    opFCMOVNU, opFCMOVNU, opFCMOVNU, opFCMOVNU, opFCMOVNU, opFCMOVNU, opFCMOVNU, opFCMOVNU,
    opFNOP, opFNOP, opFCLEX, opFINIT, opFNOP, opFNOP, ILLEGAL_a32, ILLEGAL_a32,
    opFUCOMI, opFUCOMI, opFUCOMI, opFUCOMI, opFUCOMI, opFUCOMI, opFUCOMI, opFUCOMI,
    opFCOMI, opFCOMI, opFCOMI, opFCOMI, opFCOMI, opFCOMI, opFCOMI, opFCOMI,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    // clang-format on
};
# endif
/*
 * DC-escape table for the 287, 16-bit addressing, 32 entries -- indexed by
 * ModRM bits 7:3 (mod * 8 + reg).  Rows 0-2 (mod = 0/1/2) are the
 * m64real arithmetic forms; row 3 (mod = 3) the ST(i) register forms.
 * Note: for mod = 3 the SUB/SUBR and DIV/DIVR handlers are swapped
 * relative to the memory rows, matching the x87 DC-encoding quirk, and
 * the FCOM/FCOMP slots are illegal on this FPU variant (contrast with
 * OP_TABLE(fpu_dc_a16) below).
 */
const OpFn OP_TABLE(fpu_287_dc_a16)[32] = {
    // clang-format off
    opFADDd_a16, opFMULd_a16, opFCOMd_a16, opFCOMPd_a16, opFSUBd_a16, opFSUBRd_a16, opFDIVd_a16, opFDIVRd_a16,
    opFADDd_a16, opFMULd_a16, opFCOMd_a16, opFCOMPd_a16, opFSUBd_a16, opFSUBRd_a16, opFDIVd_a16, opFDIVRd_a16,
    opFADDd_a16, opFMULd_a16, opFCOMd_a16, opFCOMPd_a16, opFSUBd_a16, opFSUBRd_a16, opFDIVd_a16, opFDIVRd_a16,
    opFADDr, opFMULr, ILLEGAL_a16, ILLEGAL_a16, opFSUBRr, opFSUBr, opFDIVRr, opFDIVr
    // clang-format on
};
/*
 * 32-bit-addressing twin of OP_TABLE(fpu_287_dc_a16); same mod*8+reg
 * layout with the _a32 memory handlers.
 */
const OpFn OP_TABLE(fpu_287_dc_a32)[32] = {
    // clang-format off
    opFADDd_a32, opFMULd_a32, opFCOMd_a32, opFCOMPd_a32, opFSUBd_a32, opFSUBRd_a32, opFDIVd_a32, opFDIVRd_a32,
    opFADDd_a32, opFMULd_a32, opFCOMd_a32, opFCOMPd_a32, opFSUBd_a32, opFSUBRd_a32, opFDIVd_a32, opFDIVRd_a32,
    opFADDd_a32, opFMULd_a32, opFCOMd_a32, opFCOMPd_a32, opFSUBd_a32, opFSUBRd_a32, opFDIVd_a32, opFDIVRd_a32,
    opFADDr, opFMULr, ILLEGAL_a32, ILLEGAL_a32, opFSUBRr, opFSUBr, opFDIVRr, opFDIVr
    // clang-format on
};
/*
 * DC-escape table (387+), 16-bit addressing, indexed by ModRM bits 7:3
 * (mod * 8 + reg).  Same layout as the 287 variant, but the mod = 3
 * FCOM/FCOMP register forms are valid here.  The mod = 3 SUB/SUBR and
 * DIV/DIVR handlers are intentionally swapped relative to the memory rows
 * (x87 DC-encoding quirk).
 */
const OpFn OP_TABLE(fpu_dc_a16)[32] = {
    // clang-format off
    opFADDd_a16, opFMULd_a16, opFCOMd_a16, opFCOMPd_a16, opFSUBd_a16, opFSUBRd_a16, opFDIVd_a16, opFDIVRd_a16,
    opFADDd_a16, opFMULd_a16, opFCOMd_a16, opFCOMPd_a16, opFSUBd_a16, opFSUBRd_a16, opFDIVd_a16, opFDIVRd_a16,
    opFADDd_a16, opFMULd_a16, opFCOMd_a16, opFCOMPd_a16, opFSUBd_a16, opFSUBRd_a16, opFDIVd_a16, opFDIVRd_a16,
    opFADDr, opFMULr, opFCOM, opFCOMP, opFSUBRr, opFSUBr, opFDIVRr, opFDIVr
    // clang-format on
};
/*
 * 32-bit-addressing twin of OP_TABLE(fpu_dc_a16); same mod*8+reg layout
 * with the _a32 memory handlers.
 */
const OpFn OP_TABLE(fpu_dc_a32)[32] = {
    // clang-format off
    opFADDd_a32, opFMULd_a32, opFCOMd_a32, opFCOMPd_a32, opFSUBd_a32, opFSUBRd_a32, opFDIVd_a32, opFDIVRd_a32,
    opFADDd_a32, opFMULd_a32, opFCOMd_a32, opFCOMPd_a32, opFSUBd_a32, opFSUBRd_a32, opFDIVd_a32, opFDIVRd_a32,
    opFADDd_a32, opFMULd_a32, opFCOMd_a32, opFCOMPd_a32, opFSUBd_a32, opFSUBRd_a32, opFDIVd_a32, opFDIVRd_a32,
    opFADDr, opFMULr, opFCOM, opFCOMP, opFSUBRr, opFSUBr, opFDIVRr, opFDIVr
    // clang-format on
};
/*
 * DD-escape table for the 287, 16-bit addressing, indexed by the full
 * ModRM byte.  Memory forms (mod = 0/1/2): FLD/FST/FSTP m64real
 * (reg = 0/2/3), opFSTOR (reg = 4, FRSTOR), opFSAVE (reg = 6, FNSAVE),
 * opFSTSW (reg = 7, FNSTSW m16).  Register forms (mod = 3): only FFREE
 * and the FST/FSTP ST(i) rows are valid -- the FXCH alias (DD C8) and
 * FUCOM/FUCOMP rows present in the 387+ table below are illegal here.
 */
const OpFn OP_TABLE(fpu_287_dd_a16)[256] = {
    // clang-format off
    opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16,
    opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16,
    opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16,
    opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16,
    opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16,
    opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16,
    opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16,
    opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16,
    opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16,
    opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16,
    opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16,
    opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16,
    /* mod = 3: register forms (DD C0-FF) */
    opFFREE, opFFREE, opFFREE, opFFREE, opFFREE, opFFREE, opFFREE, opFFREE,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    opFST, opFST, opFST, opFST, opFST, opFST, opFST, opFST,
    opFSTP, opFSTP, opFSTP, opFSTP, opFSTP, opFSTP, opFSTP, opFSTP,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    // clang-format on
};
/*
 * 32-bit-addressing twin of OP_TABLE(fpu_287_dd_a16): identical ModRM
 * layout, dispatching memory forms to the _a32 handlers.
 */
const OpFn OP_TABLE(fpu_287_dd_a32)[256] = {
    // clang-format off
    opFLDd_a32, opFLDd_a32, opFLDd_a32, opFLDd_a32, opFLDd_a32, opFLDd_a32, opFLDd_a32, opFLDd_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    opFSTd_a32, opFSTd_a32, opFSTd_a32, opFSTd_a32, opFSTd_a32, opFSTd_a32, opFSTd_a32, opFSTd_a32,
    opFSTPd_a32, opFSTPd_a32, opFSTPd_a32, opFSTPd_a32, opFSTPd_a32, opFSTPd_a32, opFSTPd_a32, opFSTPd_a32,
    opFSTOR_a32, opFSTOR_a32, opFSTOR_a32, opFSTOR_a32, opFSTOR_a32, opFSTOR_a32, opFSTOR_a32, opFSTOR_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    opFSAVE_a32, opFSAVE_a32, opFSAVE_a32, opFSAVE_a32, opFSAVE_a32, opFSAVE_a32, opFSAVE_a32, opFSAVE_a32,
    opFSTSW_a32, opFSTSW_a32, opFSTSW_a32, opFSTSW_a32, opFSTSW_a32, opFSTSW_a32, opFSTSW_a32, opFSTSW_a32,
    opFLDd_a32, opFLDd_a32, opFLDd_a32, opFLDd_a32, opFLDd_a32, opFLDd_a32, opFLDd_a32, opFLDd_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    opFSTd_a32, opFSTd_a32, opFSTd_a32, opFSTd_a32, opFSTd_a32, opFSTd_a32, opFSTd_a32, opFSTd_a32,
    opFSTPd_a32, opFSTPd_a32, opFSTPd_a32, opFSTPd_a32, opFSTPd_a32, opFSTPd_a32, opFSTPd_a32, opFSTPd_a32,
    opFSTOR_a32, opFSTOR_a32, opFSTOR_a32, opFSTOR_a32, opFSTOR_a32, opFSTOR_a32, opFSTOR_a32, opFSTOR_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    opFSAVE_a32, opFSAVE_a32, opFSAVE_a32, opFSAVE_a32, opFSAVE_a32, opFSAVE_a32, opFSAVE_a32, opFSAVE_a32,
    opFSTSW_a32, opFSTSW_a32, opFSTSW_a32, opFSTSW_a32, opFSTSW_a32, opFSTSW_a32, opFSTSW_a32, opFSTSW_a32,
    opFLDd_a32, opFLDd_a32, opFLDd_a32, opFLDd_a32, opFLDd_a32, opFLDd_a32, opFLDd_a32, opFLDd_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    opFSTd_a32, opFSTd_a32, opFSTd_a32, opFSTd_a32, opFSTd_a32, opFSTd_a32, opFSTd_a32, opFSTd_a32,
    opFSTPd_a32, opFSTPd_a32, opFSTPd_a32, opFSTPd_a32, opFSTPd_a32, opFSTPd_a32, opFSTPd_a32, opFSTPd_a32,
    opFSTOR_a32, opFSTOR_a32, opFSTOR_a32, opFSTOR_a32, opFSTOR_a32, opFSTOR_a32, opFSTOR_a32, opFSTOR_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    opFSAVE_a32, opFSAVE_a32, opFSAVE_a32, opFSAVE_a32, opFSAVE_a32, opFSAVE_a32, opFSAVE_a32, opFSAVE_a32,
    opFSTSW_a32, opFSTSW_a32, opFSTSW_a32, opFSTSW_a32, opFSTSW_a32, opFSTSW_a32, opFSTSW_a32, opFSTSW_a32,
    /* mod = 3: register forms (DD C0-FF) */
    opFFREE, opFFREE, opFFREE, opFFREE, opFFREE, opFFREE, opFFREE, opFFREE,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    opFST, opFST, opFST, opFST, opFST, opFST, opFST, opFST,
    opFSTP, opFSTP, opFSTP, opFSTP, opFSTP, opFSTP, opFSTP, opFSTP,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    // clang-format on
};
/*
 * DD-escape table (387+), 16-bit addressing, indexed by the full ModRM
 * byte.  Memory forms match the 287 variant (FLD/FST/FSTP m64real,
 * opFSTOR/opFSAVE/opFSTSW).  The mod = 3 rows add the DD C8 row (routed
 * to opFXCH) and the FUCOM/FUCOMP rows (DD E0-EF) on top of FFREE and
 * FST/FSTP ST(i).
 */
const OpFn OP_TABLE(fpu_dd_a16)[256] = {
    // clang-format off
    opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16,
    opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16,
    opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16,
    opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16,
    opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16,
    opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16,
    opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16,
    opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16,
    opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16, opFLDd_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16, opFSTd_a16,
    opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16, opFSTPd_a16,
    opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16, opFSTOR_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16, opFSAVE_a16,
    opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16, opFSTSW_a16,
    /* mod = 3: register forms (DD C0-FF) */
    opFFREE, opFFREE, opFFREE, opFFREE, opFFREE, opFFREE, opFFREE, opFFREE,
    opFXCH, opFXCH, opFXCH, opFXCH, opFXCH, opFXCH, opFXCH, opFXCH,
    opFST, opFST, opFST, opFST, opFST, opFST, opFST, opFST,
    opFSTP, opFSTP, opFSTP, opFSTP, opFSTP, opFSTP, opFSTP, opFSTP,
    opFUCOM, opFUCOM, opFUCOM, opFUCOM, opFUCOM, opFUCOM, opFUCOM, opFUCOM,
    opFUCOMP, opFUCOMP, opFUCOMP, opFUCOMP, opFUCOMP, opFUCOMP, opFUCOMP, opFUCOMP,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    // clang-format on
};
/*
 * 32-bit-addressing twin of OP_TABLE(fpu_dd_a16): identical ModRM layout,
 * dispatching memory forms to the _a32 handlers.
 */
const OpFn OP_TABLE(fpu_dd_a32)[256] = {
    // clang-format off
    opFLDd_a32, opFLDd_a32, opFLDd_a32, opFLDd_a32, opFLDd_a32, opFLDd_a32, opFLDd_a32, opFLDd_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    opFSTd_a32, opFSTd_a32, opFSTd_a32, opFSTd_a32, opFSTd_a32, opFSTd_a32, opFSTd_a32, opFSTd_a32,
    opFSTPd_a32, opFSTPd_a32, opFSTPd_a32, opFSTPd_a32, opFSTPd_a32, opFSTPd_a32, opFSTPd_a32, opFSTPd_a32,
    opFSTOR_a32, opFSTOR_a32, opFSTOR_a32, opFSTOR_a32, opFSTOR_a32, opFSTOR_a32, opFSTOR_a32, opFSTOR_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    opFSAVE_a32, opFSAVE_a32, opFSAVE_a32, opFSAVE_a32, opFSAVE_a32, opFSAVE_a32, opFSAVE_a32, opFSAVE_a32,
    opFSTSW_a32, opFSTSW_a32, opFSTSW_a32, opFSTSW_a32, opFSTSW_a32, opFSTSW_a32, opFSTSW_a32, opFSTSW_a32,
    opFLDd_a32, opFLDd_a32, opFLDd_a32, opFLDd_a32, opFLDd_a32, opFLDd_a32, opFLDd_a32, opFLDd_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    opFSTd_a32, opFSTd_a32, opFSTd_a32, opFSTd_a32, opFSTd_a32, opFSTd_a32, opFSTd_a32, opFSTd_a32,
    opFSTPd_a32, opFSTPd_a32, opFSTPd_a32, opFSTPd_a32, opFSTPd_a32, opFSTPd_a32, opFSTPd_a32, opFSTPd_a32,
    opFSTOR_a32, opFSTOR_a32, opFSTOR_a32, opFSTOR_a32, opFSTOR_a32, opFSTOR_a32, opFSTOR_a32, opFSTOR_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    opFSAVE_a32, opFSAVE_a32, opFSAVE_a32, opFSAVE_a32, opFSAVE_a32, opFSAVE_a32, opFSAVE_a32, opFSAVE_a32,
    opFSTSW_a32, opFSTSW_a32, opFSTSW_a32, opFSTSW_a32, opFSTSW_a32, opFSTSW_a32, opFSTSW_a32, opFSTSW_a32,
    opFLDd_a32, opFLDd_a32, opFLDd_a32, opFLDd_a32, opFLDd_a32, opFLDd_a32, opFLDd_a32, opFLDd_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    opFSTd_a32, opFSTd_a32, opFSTd_a32, opFSTd_a32, opFSTd_a32, opFSTd_a32, opFSTd_a32, opFSTd_a32,
    opFSTPd_a32, opFSTPd_a32, opFSTPd_a32, opFSTPd_a32, opFSTPd_a32, opFSTPd_a32, opFSTPd_a32, opFSTPd_a32,
    opFSTOR_a32, opFSTOR_a32, opFSTOR_a32, opFSTOR_a32, opFSTOR_a32, opFSTOR_a32, opFSTOR_a32, opFSTOR_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    opFSAVE_a32, opFSAVE_a32, opFSAVE_a32, opFSAVE_a32, opFSAVE_a32, opFSAVE_a32, opFSAVE_a32, opFSAVE_a32,
    opFSTSW_a32, opFSTSW_a32, opFSTSW_a32, opFSTSW_a32, opFSTSW_a32, opFSTSW_a32, opFSTSW_a32, opFSTSW_a32,
    /* mod = 3: register forms (DD C0-FF) */
    opFFREE, opFFREE, opFFREE, opFFREE, opFFREE, opFFREE, opFFREE, opFFREE,
    opFXCH, opFXCH, opFXCH, opFXCH, opFXCH, opFXCH, opFXCH, opFXCH,
    opFST, opFST, opFST, opFST, opFST, opFST, opFST, opFST,
    opFSTP, opFSTP, opFSTP, opFSTP, opFSTP, opFSTP, opFSTP, opFSTP,
    opFUCOM, opFUCOM, opFUCOM, opFUCOM, opFUCOM, opFUCOM, opFUCOM, opFUCOM,
    opFUCOMP, opFUCOMP, opFUCOMP, opFUCOMP, opFUCOMP, opFUCOMP, opFUCOMP, opFUCOMP,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32, ILLEGAL_a32,
    // clang-format on
};
/*
 * DE-escape table for the 287, 16-bit addressing, indexed by the full
 * ModRM byte.  Memory forms (mod = 0/1/2): word-integer arithmetic
 * FIADD/FIMUL/FICOM/FICOMP/FISUB/FISUBR/FIDIV/FIDIVR m16int, one row of
 * 8 rm values per reg, repeated per mod value.  Register forms (mod = 3):
 * the pop-variant arithmetic FADDP/FMULP/.../FDIVP; DE D9 alone is
 * FCOMPP, the rest of that row and the DE D0-D7 row are illegal.  As in
 * the DC tables, the mod = 3 SUBR/SUB and DIVR/DIV rows are swapped
 * relative to the memory forms (x87 encoding quirk).
 */
const OpFn OP_TABLE(fpu_287_de_a16)[256] = {
    // clang-format off
    opFADDiw_a16, opFADDiw_a16, opFADDiw_a16, opFADDiw_a16, opFADDiw_a16, opFADDiw_a16, opFADDiw_a16, opFADDiw_a16,
    opFMULiw_a16, opFMULiw_a16, opFMULiw_a16, opFMULiw_a16, opFMULiw_a16, opFMULiw_a16, opFMULiw_a16, opFMULiw_a16,
    opFCOMiw_a16, opFCOMiw_a16, opFCOMiw_a16, opFCOMiw_a16, opFCOMiw_a16, opFCOMiw_a16, opFCOMiw_a16, opFCOMiw_a16,
    opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16,
    opFSUBiw_a16, opFSUBiw_a16, opFSUBiw_a16, opFSUBiw_a16, opFSUBiw_a16, opFSUBiw_a16, opFSUBiw_a16, opFSUBiw_a16,
    opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16,
    opFDIViw_a16, opFDIViw_a16, opFDIViw_a16, opFDIViw_a16, opFDIViw_a16, opFDIViw_a16, opFDIViw_a16, opFDIViw_a16,
    opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16,
    opFADDiw_a16, opFADDiw_a16, opFADDiw_a16, opFADDiw_a16, opFADDiw_a16, opFADDiw_a16, opFADDiw_a16, opFADDiw_a16,
    opFMULiw_a16, opFMULiw_a16, opFMULiw_a16, opFMULiw_a16, opFMULiw_a16, opFMULiw_a16, opFMULiw_a16, opFMULiw_a16,
    opFCOMiw_a16, opFCOMiw_a16, opFCOMiw_a16, opFCOMiw_a16, opFCOMiw_a16, opFCOMiw_a16, opFCOMiw_a16, opFCOMiw_a16,
    opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16,
    opFSUBiw_a16, opFSUBiw_a16, opFSUBiw_a16, opFSUBiw_a16, opFSUBiw_a16, opFSUBiw_a16, opFSUBiw_a16, opFSUBiw_a16,
    opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16,
    opFDIViw_a16, opFDIViw_a16, opFDIViw_a16, opFDIViw_a16, opFDIViw_a16, opFDIViw_a16, opFDIViw_a16, opFDIViw_a16,
    opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16,
    opFADDiw_a16, opFADDiw_a16, opFADDiw_a16, opFADDiw_a16, opFADDiw_a16, opFADDiw_a16, opFADDiw_a16, opFADDiw_a16,
    opFMULiw_a16, opFMULiw_a16, opFMULiw_a16, opFMULiw_a16, opFMULiw_a16, opFMULiw_a16, opFMULiw_a16, opFMULiw_a16,
    opFCOMiw_a16, opFCOMiw_a16, opFCOMiw_a16, opFCOMiw_a16, opFCOMiw_a16, opFCOMiw_a16, opFCOMiw_a16, opFCOMiw_a16,
    opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16,
    opFSUBiw_a16, opFSUBiw_a16, opFSUBiw_a16, opFSUBiw_a16, opFSUBiw_a16, opFSUBiw_a16, opFSUBiw_a16, opFSUBiw_a16,
    opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16,
    opFDIViw_a16, opFDIViw_a16, opFDIViw_a16, opFDIViw_a16, opFDIViw_a16, opFDIViw_a16, opFDIViw_a16, opFDIViw_a16,
    opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16,
    /* mod = 3: register (pop) forms (DE C0-FF) */
    opFADDP, opFADDP, opFADDP, opFADDP, opFADDP, opFADDP, opFADDP, opFADDP,
    opFMULP, opFMULP, opFMULP, opFMULP, opFMULP, opFMULP, opFMULP, opFMULP,
    ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    ILLEGAL_a16, opFCOMPP, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16, ILLEGAL_a16,
    opFSUBRP, opFSUBRP, opFSUBRP, opFSUBRP, opFSUBRP, opFSUBRP, opFSUBRP, opFSUBRP,
    opFSUBP, opFSUBP, opFSUBP, opFSUBP, opFSUBP, opFSUBP, opFSUBP, opFSUBP,
    opFDIVRP, opFDIVRP, opFDIVRP, opFDIVRP, opFDIVRP, opFDIVRP, opFDIVRP, opFDIVRP,
    opFDIVP, opFDIVP, opFDIVP, opFDIVP, opFDIVP, opFDIVP, opFDIVP, opFDIVP,
    // clang-format on
};
/*
 * 287-era dispatch for FPU escape opcode DE, 32-bit addressing.
 * Indexed by the byte following the DE escape (presumably the ModRM byte --
 * TODO confirm against the decoder).  The first 0xC0 entries form three
 * identical 64-entry groups (one per mod value 0/1/2); within a group, each
 * run of 8 identical handlers covers one reg-field opcode over all 8 rm
 * values.  Entries 0xC0-0xFF are the register (mod == 3) forms.
 * 287 difference: DE D0-D7 are rejected here, whereas OP_TABLE(fpu_de_a32)
 * maps them to opFCOMP.
 */
const OpFn OP_TABLE(fpu_287_de_a32)[256] = {
    // clang-format off
    /* 0x00-0xBF: memory-operand forms (word-integer arithmetic) */
    opFADDiw_a32,  opFADDiw_a32,  opFADDiw_a32,  opFADDiw_a32,  opFADDiw_a32,  opFADDiw_a32,  opFADDiw_a32,  opFADDiw_a32,
    opFMULiw_a32,  opFMULiw_a32,  opFMULiw_a32,  opFMULiw_a32,  opFMULiw_a32,  opFMULiw_a32,  opFMULiw_a32,  opFMULiw_a32,
    opFCOMiw_a32,  opFCOMiw_a32,  opFCOMiw_a32,  opFCOMiw_a32,  opFCOMiw_a32,  opFCOMiw_a32,  opFCOMiw_a32,  opFCOMiw_a32,
    opFCOMPiw_a32, opFCOMPiw_a32, opFCOMPiw_a32, opFCOMPiw_a32, opFCOMPiw_a32, opFCOMPiw_a32, opFCOMPiw_a32, opFCOMPiw_a32,
    opFSUBiw_a32,  opFSUBiw_a32,  opFSUBiw_a32,  opFSUBiw_a32,  opFSUBiw_a32,  opFSUBiw_a32,  opFSUBiw_a32,  opFSUBiw_a32,
    opFSUBRiw_a32, opFSUBRiw_a32, opFSUBRiw_a32, opFSUBRiw_a32, opFSUBRiw_a32, opFSUBRiw_a32, opFSUBRiw_a32, opFSUBRiw_a32,
    opFDIViw_a32,  opFDIViw_a32,  opFDIViw_a32,  opFDIViw_a32,  opFDIViw_a32,  opFDIViw_a32,  opFDIViw_a32,  opFDIViw_a32,
    opFDIVRiw_a32, opFDIVRiw_a32, opFDIVRiw_a32, opFDIVRiw_a32, opFDIVRiw_a32, opFDIVRiw_a32, opFDIVRiw_a32, opFDIVRiw_a32,
    opFADDiw_a32,  opFADDiw_a32,  opFADDiw_a32,  opFADDiw_a32,  opFADDiw_a32,  opFADDiw_a32,  opFADDiw_a32,  opFADDiw_a32,
    opFMULiw_a32,  opFMULiw_a32,  opFMULiw_a32,  opFMULiw_a32,  opFMULiw_a32,  opFMULiw_a32,  opFMULiw_a32,  opFMULiw_a32,
    opFCOMiw_a32,  opFCOMiw_a32,  opFCOMiw_a32,  opFCOMiw_a32,  opFCOMiw_a32,  opFCOMiw_a32,  opFCOMiw_a32,  opFCOMiw_a32,
    opFCOMPiw_a32, opFCOMPiw_a32, opFCOMPiw_a32, opFCOMPiw_a32, opFCOMPiw_a32, opFCOMPiw_a32, opFCOMPiw_a32, opFCOMPiw_a32,
    opFSUBiw_a32,  opFSUBiw_a32,  opFSUBiw_a32,  opFSUBiw_a32,  opFSUBiw_a32,  opFSUBiw_a32,  opFSUBiw_a32,  opFSUBiw_a32,
    opFSUBRiw_a32, opFSUBRiw_a32, opFSUBRiw_a32, opFSUBRiw_a32, opFSUBRiw_a32, opFSUBRiw_a32, opFSUBRiw_a32, opFSUBRiw_a32,
    opFDIViw_a32,  opFDIViw_a32,  opFDIViw_a32,  opFDIViw_a32,  opFDIViw_a32,  opFDIViw_a32,  opFDIViw_a32,  opFDIViw_a32,
    opFDIVRiw_a32, opFDIVRiw_a32, opFDIVRiw_a32, opFDIVRiw_a32, opFDIVRiw_a32, opFDIVRiw_a32, opFDIVRiw_a32, opFDIVRiw_a32,
    opFADDiw_a32,  opFADDiw_a32,  opFADDiw_a32,  opFADDiw_a32,  opFADDiw_a32,  opFADDiw_a32,  opFADDiw_a32,  opFADDiw_a32,
    opFMULiw_a32,  opFMULiw_a32,  opFMULiw_a32,  opFMULiw_a32,  opFMULiw_a32,  opFMULiw_a32,  opFMULiw_a32,  opFMULiw_a32,
    opFCOMiw_a32,  opFCOMiw_a32,  opFCOMiw_a32,  opFCOMiw_a32,  opFCOMiw_a32,  opFCOMiw_a32,  opFCOMiw_a32,  opFCOMiw_a32,
    opFCOMPiw_a32, opFCOMPiw_a32, opFCOMPiw_a32, opFCOMPiw_a32, opFCOMPiw_a32, opFCOMPiw_a32, opFCOMPiw_a32, opFCOMPiw_a32,
    opFSUBiw_a32,  opFSUBiw_a32,  opFSUBiw_a32,  opFSUBiw_a32,  opFSUBiw_a32,  opFSUBiw_a32,  opFSUBiw_a32,  opFSUBiw_a32,
    opFSUBRiw_a32, opFSUBRiw_a32, opFSUBRiw_a32, opFSUBRiw_a32, opFSUBRiw_a32, opFSUBRiw_a32, opFSUBRiw_a32, opFSUBRiw_a32,
    opFDIViw_a32,  opFDIViw_a32,  opFDIViw_a32,  opFDIViw_a32,  opFDIViw_a32,  opFDIViw_a32,  opFDIViw_a32,  opFDIViw_a32,
    opFDIVRiw_a32, opFDIVRiw_a32, opFDIVRiw_a32, opFDIVRiw_a32, opFDIVRiw_a32, opFDIVRiw_a32, opFDIVRiw_a32, opFDIVRiw_a32,
    /* 0xC0-0xFF: register forms; 0xD0-0xD7 illegal on the 287 variant */
    opFADDP,       opFADDP,       opFADDP,       opFADDP,       opFADDP,       opFADDP,       opFADDP,       opFADDP,
    opFMULP,       opFMULP,       opFMULP,       opFMULP,       opFMULP,       opFMULP,       opFMULP,       opFMULP,
    ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,
    ILLEGAL_a32,   opFCOMPP,      ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,
    opFSUBRP,      opFSUBRP,      opFSUBRP,      opFSUBRP,      opFSUBRP,      opFSUBRP,      opFSUBRP,      opFSUBRP,
    opFSUBP,       opFSUBP,       opFSUBP,       opFSUBP,       opFSUBP,       opFSUBP,       opFSUBP,       opFSUBP,
    opFDIVRP,      opFDIVRP,      opFDIVRP,      opFDIVRP,      opFDIVRP,      opFDIVRP,      opFDIVRP,      opFDIVRP,
    opFDIVP,       opFDIVP,       opFDIVP,       opFDIVP,       opFDIVP,       opFDIVP,       opFDIVP,       opFDIVP,
    // clang-format on
};
/*
 * Standard (387+) dispatch for FPU escape opcode DE, 16-bit addressing.
 * Indexed by the byte following the DE escape (presumably the ModRM byte --
 * TODO confirm against the decoder).  Entries 0x00-0xBF are three identical
 * 64-entry groups (one per mod value 0/1/2), with each run of 8 handlers
 * covering one reg-field opcode over all rm values; 0xC0-0xFF are the
 * register forms.  Unlike the 287 variant, DE D0-D7 dispatch to opFCOMP.
 */
const OpFn OP_TABLE(fpu_de_a16)[256] = {
    // clang-format off
    /* 0x00-0xBF: memory-operand forms (word-integer arithmetic) */
    opFADDiw_a16,  opFADDiw_a16,  opFADDiw_a16,  opFADDiw_a16,  opFADDiw_a16,  opFADDiw_a16,  opFADDiw_a16,  opFADDiw_a16,
    opFMULiw_a16,  opFMULiw_a16,  opFMULiw_a16,  opFMULiw_a16,  opFMULiw_a16,  opFMULiw_a16,  opFMULiw_a16,  opFMULiw_a16,
    opFCOMiw_a16,  opFCOMiw_a16,  opFCOMiw_a16,  opFCOMiw_a16,  opFCOMiw_a16,  opFCOMiw_a16,  opFCOMiw_a16,  opFCOMiw_a16,
    opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16,
    opFSUBiw_a16,  opFSUBiw_a16,  opFSUBiw_a16,  opFSUBiw_a16,  opFSUBiw_a16,  opFSUBiw_a16,  opFSUBiw_a16,  opFSUBiw_a16,
    opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16,
    opFDIViw_a16,  opFDIViw_a16,  opFDIViw_a16,  opFDIViw_a16,  opFDIViw_a16,  opFDIViw_a16,  opFDIViw_a16,  opFDIViw_a16,
    opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16,
    opFADDiw_a16,  opFADDiw_a16,  opFADDiw_a16,  opFADDiw_a16,  opFADDiw_a16,  opFADDiw_a16,  opFADDiw_a16,  opFADDiw_a16,
    opFMULiw_a16,  opFMULiw_a16,  opFMULiw_a16,  opFMULiw_a16,  opFMULiw_a16,  opFMULiw_a16,  opFMULiw_a16,  opFMULiw_a16,
    opFCOMiw_a16,  opFCOMiw_a16,  opFCOMiw_a16,  opFCOMiw_a16,  opFCOMiw_a16,  opFCOMiw_a16,  opFCOMiw_a16,  opFCOMiw_a16,
    opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16,
    opFSUBiw_a16,  opFSUBiw_a16,  opFSUBiw_a16,  opFSUBiw_a16,  opFSUBiw_a16,  opFSUBiw_a16,  opFSUBiw_a16,  opFSUBiw_a16,
    opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16,
    opFDIViw_a16,  opFDIViw_a16,  opFDIViw_a16,  opFDIViw_a16,  opFDIViw_a16,  opFDIViw_a16,  opFDIViw_a16,  opFDIViw_a16,
    opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16,
    opFADDiw_a16,  opFADDiw_a16,  opFADDiw_a16,  opFADDiw_a16,  opFADDiw_a16,  opFADDiw_a16,  opFADDiw_a16,  opFADDiw_a16,
    opFMULiw_a16,  opFMULiw_a16,  opFMULiw_a16,  opFMULiw_a16,  opFMULiw_a16,  opFMULiw_a16,  opFMULiw_a16,  opFMULiw_a16,
    opFCOMiw_a16,  opFCOMiw_a16,  opFCOMiw_a16,  opFCOMiw_a16,  opFCOMiw_a16,  opFCOMiw_a16,  opFCOMiw_a16,  opFCOMiw_a16,
    opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16, opFCOMPiw_a16,
    opFSUBiw_a16,  opFSUBiw_a16,  opFSUBiw_a16,  opFSUBiw_a16,  opFSUBiw_a16,  opFSUBiw_a16,  opFSUBiw_a16,  opFSUBiw_a16,
    opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16, opFSUBRiw_a16,
    opFDIViw_a16,  opFDIViw_a16,  opFDIViw_a16,  opFDIViw_a16,  opFDIViw_a16,  opFDIViw_a16,  opFDIViw_a16,  opFDIViw_a16,
    opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16, opFDIVRiw_a16,
    /* 0xC0-0xFF: register forms */
    opFADDP,       opFADDP,       opFADDP,       opFADDP,       opFADDP,       opFADDP,       opFADDP,       opFADDP,
    opFMULP,       opFMULP,       opFMULP,       opFMULP,       opFMULP,       opFMULP,       opFMULP,       opFMULP,
    opFCOMP,       opFCOMP,       opFCOMP,       opFCOMP,       opFCOMP,       opFCOMP,       opFCOMP,       opFCOMP,
    ILLEGAL_a16,   opFCOMPP,      ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,
    opFSUBRP,      opFSUBRP,      opFSUBRP,      opFSUBRP,      opFSUBRP,      opFSUBRP,      opFSUBRP,      opFSUBRP,
    opFSUBP,       opFSUBP,       opFSUBP,       opFSUBP,       opFSUBP,       opFSUBP,       opFSUBP,       opFSUBP,
    opFDIVRP,      opFDIVRP,      opFDIVRP,      opFDIVRP,      opFDIVRP,      opFDIVRP,      opFDIVRP,      opFDIVRP,
    opFDIVP,       opFDIVP,       opFDIVP,       opFDIVP,       opFDIVP,       opFDIVP,       opFDIVP,       opFDIVP,
    // clang-format on
};
/*
 * Standard (387+) dispatch for FPU escape opcode DE, 32-bit addressing.
 * Mirrors OP_TABLE(fpu_de_a16) with _a32 handlers.  Indexed by the byte
 * following the DE escape (presumably the ModRM byte -- TODO confirm):
 * 0x00-0xBF are three identical 64-entry memory-form groups (mod 0/1/2),
 * 0xC0-0xFF are the register forms, including opFCOMP at D0-D7 which the
 * 287 variant rejects.
 */
const OpFn OP_TABLE(fpu_de_a32)[256] = {
    // clang-format off
    /* 0x00-0xBF: memory-operand forms (word-integer arithmetic) */
    opFADDiw_a32,  opFADDiw_a32,  opFADDiw_a32,  opFADDiw_a32,  opFADDiw_a32,  opFADDiw_a32,  opFADDiw_a32,  opFADDiw_a32,
    opFMULiw_a32,  opFMULiw_a32,  opFMULiw_a32,  opFMULiw_a32,  opFMULiw_a32,  opFMULiw_a32,  opFMULiw_a32,  opFMULiw_a32,
    opFCOMiw_a32,  opFCOMiw_a32,  opFCOMiw_a32,  opFCOMiw_a32,  opFCOMiw_a32,  opFCOMiw_a32,  opFCOMiw_a32,  opFCOMiw_a32,
    opFCOMPiw_a32, opFCOMPiw_a32, opFCOMPiw_a32, opFCOMPiw_a32, opFCOMPiw_a32, opFCOMPiw_a32, opFCOMPiw_a32, opFCOMPiw_a32,
    opFSUBiw_a32,  opFSUBiw_a32,  opFSUBiw_a32,  opFSUBiw_a32,  opFSUBiw_a32,  opFSUBiw_a32,  opFSUBiw_a32,  opFSUBiw_a32,
    opFSUBRiw_a32, opFSUBRiw_a32, opFSUBRiw_a32, opFSUBRiw_a32, opFSUBRiw_a32, opFSUBRiw_a32, opFSUBRiw_a32, opFSUBRiw_a32,
    opFDIViw_a32,  opFDIViw_a32,  opFDIViw_a32,  opFDIViw_a32,  opFDIViw_a32,  opFDIViw_a32,  opFDIViw_a32,  opFDIViw_a32,
    opFDIVRiw_a32, opFDIVRiw_a32, opFDIVRiw_a32, opFDIVRiw_a32, opFDIVRiw_a32, opFDIVRiw_a32, opFDIVRiw_a32, opFDIVRiw_a32,
    opFADDiw_a32,  opFADDiw_a32,  opFADDiw_a32,  opFADDiw_a32,  opFADDiw_a32,  opFADDiw_a32,  opFADDiw_a32,  opFADDiw_a32,
    opFMULiw_a32,  opFMULiw_a32,  opFMULiw_a32,  opFMULiw_a32,  opFMULiw_a32,  opFMULiw_a32,  opFMULiw_a32,  opFMULiw_a32,
    opFCOMiw_a32,  opFCOMiw_a32,  opFCOMiw_a32,  opFCOMiw_a32,  opFCOMiw_a32,  opFCOMiw_a32,  opFCOMiw_a32,  opFCOMiw_a32,
    opFCOMPiw_a32, opFCOMPiw_a32, opFCOMPiw_a32, opFCOMPiw_a32, opFCOMPiw_a32, opFCOMPiw_a32, opFCOMPiw_a32, opFCOMPiw_a32,
    opFSUBiw_a32,  opFSUBiw_a32,  opFSUBiw_a32,  opFSUBiw_a32,  opFSUBiw_a32,  opFSUBiw_a32,  opFSUBiw_a32,  opFSUBiw_a32,
    opFSUBRiw_a32, opFSUBRiw_a32, opFSUBRiw_a32, opFSUBRiw_a32, opFSUBRiw_a32, opFSUBRiw_a32, opFSUBRiw_a32, opFSUBRiw_a32,
    opFDIViw_a32,  opFDIViw_a32,  opFDIViw_a32,  opFDIViw_a32,  opFDIViw_a32,  opFDIViw_a32,  opFDIViw_a32,  opFDIViw_a32,
    opFDIVRiw_a32, opFDIVRiw_a32, opFDIVRiw_a32, opFDIVRiw_a32, opFDIVRiw_a32, opFDIVRiw_a32, opFDIVRiw_a32, opFDIVRiw_a32,
    opFADDiw_a32,  opFADDiw_a32,  opFADDiw_a32,  opFADDiw_a32,  opFADDiw_a32,  opFADDiw_a32,  opFADDiw_a32,  opFADDiw_a32,
    opFMULiw_a32,  opFMULiw_a32,  opFMULiw_a32,  opFMULiw_a32,  opFMULiw_a32,  opFMULiw_a32,  opFMULiw_a32,  opFMULiw_a32,
    opFCOMiw_a32,  opFCOMiw_a32,  opFCOMiw_a32,  opFCOMiw_a32,  opFCOMiw_a32,  opFCOMiw_a32,  opFCOMiw_a32,  opFCOMiw_a32,
    opFCOMPiw_a32, opFCOMPiw_a32, opFCOMPiw_a32, opFCOMPiw_a32, opFCOMPiw_a32, opFCOMPiw_a32, opFCOMPiw_a32, opFCOMPiw_a32,
    opFSUBiw_a32,  opFSUBiw_a32,  opFSUBiw_a32,  opFSUBiw_a32,  opFSUBiw_a32,  opFSUBiw_a32,  opFSUBiw_a32,  opFSUBiw_a32,
    opFSUBRiw_a32, opFSUBRiw_a32, opFSUBRiw_a32, opFSUBRiw_a32, opFSUBRiw_a32, opFSUBRiw_a32, opFSUBRiw_a32, opFSUBRiw_a32,
    opFDIViw_a32,  opFDIViw_a32,  opFDIViw_a32,  opFDIViw_a32,  opFDIViw_a32,  opFDIViw_a32,  opFDIViw_a32,  opFDIViw_a32,
    opFDIVRiw_a32, opFDIVRiw_a32, opFDIVRiw_a32, opFDIVRiw_a32, opFDIVRiw_a32, opFDIVRiw_a32, opFDIVRiw_a32, opFDIVRiw_a32,
    /* 0xC0-0xFF: register forms */
    opFADDP,       opFADDP,       opFADDP,       opFADDP,       opFADDP,       opFADDP,       opFADDP,       opFADDP,
    opFMULP,       opFMULP,       opFMULP,       opFMULP,       opFMULP,       opFMULP,       opFMULP,       opFMULP,
    opFCOMP,       opFCOMP,       opFCOMP,       opFCOMP,       opFCOMP,       opFCOMP,       opFCOMP,       opFCOMP,
    ILLEGAL_a32,   opFCOMPP,      ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,
    opFSUBRP,      opFSUBRP,      opFSUBRP,      opFSUBRP,      opFSUBRP,      opFSUBRP,      opFSUBRP,      opFSUBRP,
    opFSUBP,       opFSUBP,       opFSUBP,       opFSUBP,       opFSUBP,       opFSUBP,       opFSUBP,       opFSUBP,
    opFDIVRP,      opFDIVRP,      opFDIVRP,      opFDIVRP,      opFDIVRP,      opFDIVRP,      opFDIVRP,      opFDIVRP,
    opFDIVP,       opFDIVP,       opFDIVP,       opFDIVP,       opFDIVP,       opFDIVP,       opFDIVP,       opFDIVP,
    // clang-format on
};
/*
 * 287-era dispatch for FPU escape opcode DF, 16-bit addressing.
 * Indexed by the byte following the DF escape (presumably the ModRM byte --
 * TODO confirm against the decoder).  Entries 0x00-0xBF are three identical
 * 64-entry memory-form groups (mod 0/1/2); 0xC0-0xFF are the register forms.
 * 287 difference: only FFREEP (C0-C7) and FSTSW AX (E0) are accepted among
 * the register encodings; the DF C8-DF aliases present in the plain fpu_df
 * tables are rejected here.
 */
const OpFn OP_TABLE(fpu_287_df_a16)[256] = {
    // clang-format off
    /* 0x00-0xBF: memory-operand forms (integer/BCD load-store) */
    opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,
    ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,
    opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,
    opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16,
    FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,
    opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,
    FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,
    FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,
    opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,
    ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,
    opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,
    opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16,
    FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,
    opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,
    FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,
    FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,
    opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,
    ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,
    opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,
    opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16,
    FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,
    opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,
    FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,
    FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,
    /* 0xC0-0xFF: register forms (287: only FFREEP and FSTSW AX valid) */
    opFFREEP,      opFFREEP,      opFFREEP,      opFFREEP,      opFFREEP,      opFFREEP,      opFFREEP,      opFFREEP,
    ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,
    ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,
    ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,
    opFSTSW_AX,    ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,
    ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,
    ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,
    ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,
    // clang-format on
};
/*
 * 287-era dispatch for FPU escape opcode DF, 32-bit addressing.
 * Mirrors OP_TABLE(fpu_287_df_a16) with _a32 handlers: three identical
 * 64-entry memory-form groups at 0x00-0xBF (mod 0/1/2), register forms at
 * 0xC0-0xFF with only FFREEP (C0-C7) and FSTSW AX (E0) accepted.
 * (Index is presumably the ModRM byte -- TODO confirm against the decoder.)
 */
const OpFn OP_TABLE(fpu_287_df_a32)[256] = {
    // clang-format off
    /* 0x00-0xBF: memory-operand forms (integer/BCD load-store) */
    opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,
    ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,
    opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,
    opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32,
    FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,
    opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,
    FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,
    FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,
    opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,
    ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,
    opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,
    opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32,
    FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,
    opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,
    FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,
    FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,
    opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,
    ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,
    opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,
    opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32,
    FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,
    opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,
    FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,
    FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,
    /* 0xC0-0xFF: register forms (287: only FFREEP and FSTSW AX valid) */
    opFFREEP,      opFFREEP,      opFFREEP,      opFFREEP,      opFFREEP,      opFFREEP,      opFFREEP,      opFFREEP,
    ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,
    ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,
    ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,
    opFSTSW_AX,    ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,
    ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,
    ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,
    ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,
    // clang-format on
};
/*
 * Standard (387+) dispatch for FPU escape opcode DF, 16-bit addressing.
 * Indexed by the byte following the DF escape (presumably the ModRM byte --
 * TODO confirm against the decoder).  0x00-0xBF are three identical 64-entry
 * memory-form groups (mod 0/1/2); 0xC0-0xFF register forms.  Unlike the 287
 * variant, C8-CF map to opFXCH and D0-DF to opFSTP (undocumented aliases --
 * presumably intentional; verify against the target FPU's behavior).
 */
const OpFn OP_TABLE(fpu_df_a16)[256] = {
    // clang-format off
    /* 0x00-0xBF: memory-operand forms (integer/BCD load-store) */
    opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,
    ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,
    opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,
    opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16,
    FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,
    opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,
    FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,
    FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,
    opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,
    ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,
    opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,
    opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16,
    FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,
    opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,
    FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,
    FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,
    opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,
    ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,
    opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,
    opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16,
    FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,
    opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,
    FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,
    FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,
    /* 0xC0-0xFF: register forms (incl. FXCH/FSTP aliases at C8-DF) */
    opFFREEP,      opFFREEP,      opFFREEP,      opFFREEP,      opFFREEP,      opFFREEP,      opFFREEP,      opFFREEP,
    opFXCH,        opFXCH,        opFXCH,        opFXCH,        opFXCH,        opFXCH,        opFXCH,        opFXCH,
    opFSTP,        opFSTP,        opFSTP,        opFSTP,        opFSTP,        opFSTP,        opFSTP,        opFSTP,
    opFSTP,        opFSTP,        opFSTP,        opFSTP,        opFSTP,        opFSTP,        opFSTP,        opFSTP,
    opFSTSW_AX,    ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,
    ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,
    ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,
    ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,
    // clang-format on
};
/*
 * Standard (387+) dispatch for FPU escape opcode DF, 32-bit addressing.
 * Mirrors OP_TABLE(fpu_df_a16) with _a32 handlers: three identical 64-entry
 * memory-form groups at 0x00-0xBF (mod 0/1/2); register forms at 0xC0-0xFF
 * including the FXCH/FSTP aliases at C8-DF that the 287 variant rejects.
 * (Index is presumably the ModRM byte -- TODO confirm against the decoder.)
 */
const OpFn OP_TABLE(fpu_df_a32)[256] = {
    // clang-format off
    /* 0x00-0xBF: memory-operand forms (integer/BCD load-store) */
    opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,
    ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,
    opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,
    opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32,
    FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,
    opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,
    FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,
    FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,
    opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,
    ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,
    opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,
    opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32,
    FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,
    opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,
    FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,
    FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,
    opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,
    ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,
    opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,
    opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32,
    FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,
    opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,
    FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,
    FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,
    /* 0xC0-0xFF: register forms (incl. FXCH/FSTP aliases at C8-DF) */
    opFFREEP,      opFFREEP,      opFFREEP,      opFFREEP,      opFFREEP,      opFFREEP,      opFFREEP,      opFFREEP,
    opFXCH,        opFXCH,        opFXCH,        opFXCH,        opFXCH,        opFXCH,        opFXCH,        opFXCH,
    opFSTP,        opFSTP,        opFSTP,        opFSTP,        opFSTP,        opFSTP,        opFSTP,        opFSTP,
    opFSTP,        opFSTP,        opFSTP,        opFSTP,        opFSTP,        opFSTP,        opFSTP,        opFSTP,
    opFSTSW_AX,    ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,
    ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,
    ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,
    ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,
    // clang-format on
};
# ifndef OPS_286_386
/*
 * 686-class dispatch for FPU escape opcode DF, 16-bit addressing.
 * Same layout as OP_TABLE(fpu_df_a16) -- three identical 64-entry
 * memory-form groups at 0x00-0xBF (mod 0/1/2), register forms at 0xC0-0xFF
 * -- but additionally accepts the P6-generation FUCOMIP (E8-EF) and
 * FCOMIP (F0-F7) register encodings, which the plain table rejects.
 * (Index is presumably the ModRM byte -- TODO confirm against the decoder.)
 */
const OpFn OP_TABLE(fpu_686_df_a16)[256] = {
    // clang-format off
    /* 0x00-0xBF: memory-operand forms (integer/BCD load-store) */
    opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,
    ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,
    opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,
    opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16,
    FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,
    opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,
    FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,
    FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,
    opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,
    ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,
    opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,
    opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16,
    FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,
    opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,
    FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,
    FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,
    opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,  opFILDiw_a16,
    ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,
    opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,  opFISTiw_a16,
    opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16, opFISTPiw_a16,
    FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,      FBLD_a16,
    opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,  opFILDiq_a16,
    FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,     FBSTP_a16,
    FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,   FISTPiq_a16,
    /* 0xC0-0xFF: register forms (incl. 686 FUCOMIP/FCOMIP at E8-F7) */
    opFFREEP,      opFFREEP,      opFFREEP,      opFFREEP,      opFFREEP,      opFFREEP,      opFFREEP,      opFFREEP,
    opFXCH,        opFXCH,        opFXCH,        opFXCH,        opFXCH,        opFXCH,        opFXCH,        opFXCH,
    opFSTP,        opFSTP,        opFSTP,        opFSTP,        opFSTP,        opFSTP,        opFSTP,        opFSTP,
    opFSTP,        opFSTP,        opFSTP,        opFSTP,        opFSTP,        opFSTP,        opFSTP,        opFSTP,
    opFSTSW_AX,    ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,
    opFUCOMIP,     opFUCOMIP,     opFUCOMIP,     opFUCOMIP,     opFUCOMIP,     opFUCOMIP,     opFUCOMIP,     opFUCOMIP,
    opFCOMIP,      opFCOMIP,      opFCOMIP,      opFCOMIP,      opFCOMIP,      opFCOMIP,      opFCOMIP,      opFCOMIP,
    ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,   ILLEGAL_a16,
    // clang-format on
};
/*
 * 686-class dispatch for FPU escape opcode DF, 32-bit addressing.
 * Mirrors OP_TABLE(fpu_686_df_a16) with _a32 handlers: three identical
 * 64-entry memory-form groups at 0x00-0xBF (mod 0/1/2), register forms at
 * 0xC0-0xFF including P6-generation FUCOMIP (E8-EF) and FCOMIP (F0-F7).
 * (Index is presumably the ModRM byte -- TODO confirm against the decoder.)
 */
const OpFn OP_TABLE(fpu_686_df_a32)[256] = {
    // clang-format off
    /* 0x00-0xBF: memory-operand forms (integer/BCD load-store) */
    opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,
    ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,
    opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,
    opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32,
    FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,
    opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,
    FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,
    FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,
    opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,
    ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,
    opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,
    opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32,
    FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,
    opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,
    FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,
    FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,
    opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,  opFILDiw_a32,
    ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,
    opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,  opFISTiw_a32,
    opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32, opFISTPiw_a32,
    FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,      FBLD_a32,
    opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,  opFILDiq_a32,
    FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,     FBSTP_a32,
    FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,   FISTPiq_a32,
    /* 0xC0-0xFF: register forms (incl. 686 FUCOMIP/FCOMIP at E8-F7) */
    opFFREEP,      opFFREEP,      opFFREEP,      opFFREEP,      opFFREEP,      opFFREEP,      opFFREEP,      opFFREEP,
    opFXCH,        opFXCH,        opFXCH,        opFXCH,        opFXCH,        opFXCH,        opFXCH,        opFXCH,
    opFSTP,        opFSTP,        opFSTP,        opFSTP,        opFSTP,        opFSTP,        opFSTP,        opFSTP,
    opFSTP,        opFSTP,        opFSTP,        opFSTP,        opFSTP,        opFSTP,        opFSTP,        opFSTP,
    opFSTSW_AX,    ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,
    opFUCOMIP,     opFUCOMIP,     opFUCOMIP,     opFUCOMIP,     opFUCOMIP,     opFUCOMIP,     opFUCOMIP,     opFUCOMIP,
    opFCOMIP,      opFCOMIP,      opFCOMIP,      opFCOMIP,      opFCOMIP,      opFCOMIP,      opFCOMIP,      opFCOMIP,
    ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,   ILLEGAL_a32,
    // clang-format on
};
# endif
/*
 * No-FPU dispatch table, 16-bit addressing: every one of the 256 possible
 * bytes following an FPU escape opcode routes to op_nofpu_a16.  Used when
 * no x87 is present/enabled -- presumably raising the device-not-available
 * behavior inside the handler (TODO confirm in op_nofpu_a16).
 */
const OpFn OP_TABLE(nofpu_a16)[256] = {
    // clang-format off
    op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16,
    op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16,
    op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16,
    op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16,
    op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16,
    op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16,
    op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16,
    op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16,
    op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16,
    op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16,
    op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16,
    op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16,
    op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16,
    op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16,
    op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16,
    op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16,
    op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16,
    op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16,
    op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16,
    op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16,
    op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16,
    op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16,
    op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16,
    op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16,
    op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16,
    op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16,
    op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16,
    op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16,
    op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16,
    op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16,
    op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16,
    op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16, op_nofpu_a16,
    // clang-format on
};
/* Dispatch table for FPU escape opcodes (D8-DF), 32-bit address size, when no
   FPU is present: every one of the 256 ModR/M combinations routes to the
   same op_nofpu_a32 handler. */
const OpFn OP_TABLE(nofpu_a32)[256] = {
// clang-format off
op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32,
op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32,
op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32,
op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32,
op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32,
op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32,
op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32,
op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32,
op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32,
op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32,
op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32,
op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32,
op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32,
op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32,
op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32,
op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32,
op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32,
op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32,
op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32,
op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32,
op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32,
op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32,
op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32,
op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32,
op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32,
op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32,
op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32,
op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32,
op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32,
op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32,
op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32,
op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32, op_nofpu_a32,
// clang-format on
};
#endif
#undef ILLEGAL
``` | /content/code_sandbox/src/cpu/x87_ops.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 152,434 |
```objective-c
/* MOV reg32,CRx (0F 20) - 16-bit addressing.
   Reads a control register into a general-purpose register.
   Privileged: #GP(0) when CPL != 0 or in V86 mode while PE is set. */
static int
opMOV_r_CRx_a16(uint32_t fetchdat)
{
    if ((CPL || (cpu_state.eflags & VM_FLAG)) && (cr0 & 1)) {
        x86gpf(NULL, 0);
        return 1;
    }
    fetch_ea_16(fetchdat);
    switch (cpu_reg) {
        case 0:
            cpu_state.regs[cpu_rm].l = cr0;
            if (is486 || isibm486)
                cpu_state.regs[cpu_rm].l |= 0x10; /*ET hardwired on 486*/
            else {
                /* Pre-486 parts read back the undefined high bits as set. */
                if (is386)
                    cpu_state.regs[cpu_rm].l |= 0x7fffffe0;
                else
                    cpu_state.regs[cpu_rm].l |= 0x7ffffff0;
            }
            break;
        case 2:
            cpu_state.regs[cpu_rm].l = cr2;
            break;
        case 3:
            cpu_state.regs[cpu_rm].l = cr3;
            break;
        case 4:
            if (cpu_has_feature(CPU_FEATURE_CR4)) {
                cpu_state.regs[cpu_rm].l = cr4;
                break;
            }
            /* No CR4 on this CPU model: fall through to the #UD path. */
        default:
            /* CR1 and CR5+ do not exist; fault at the instruction start. */
            cpu_state.pc = cpu_state.oldpc;
            x86illegal();
            break;
    }
    CLOCK_CYCLES(6);
    PREFETCH_RUN(6, 2, rmdat, 0, 0, 0, 0, 0);
    return 0;
}
/* MOV reg32,CRx (0F 20) - 32-bit addressing variant of opMOV_r_CRx_a16;
   identical semantics, only the ModR/M decode and prefetch flag differ. */
static int
opMOV_r_CRx_a32(uint32_t fetchdat)
{
    if ((CPL || (cpu_state.eflags & VM_FLAG)) && (cr0 & 1)) {
        x86gpf(NULL, 0);
        return 1;
    }
    fetch_ea_32(fetchdat);
    switch (cpu_reg) {
        case 0:
            cpu_state.regs[cpu_rm].l = cr0;
            if (is486 || isibm486)
                cpu_state.regs[cpu_rm].l |= 0x10; /*ET hardwired on 486*/
            else {
                /* Pre-486 parts read back the undefined high bits as set. */
                if (is386)
                    cpu_state.regs[cpu_rm].l |= 0x7fffffe0;
                else
                    cpu_state.regs[cpu_rm].l |= 0x7ffffff0;
            }
            break;
        case 2:
            cpu_state.regs[cpu_rm].l = cr2;
            break;
        case 3:
            cpu_state.regs[cpu_rm].l = cr3;
            break;
        case 4:
            if (cpu_has_feature(CPU_FEATURE_CR4)) {
                cpu_state.regs[cpu_rm].l = cr4;
                break;
            }
            /* No CR4 on this CPU model: fall through to the #UD path. */
        default:
            /* CR1 and CR5+ do not exist; fault at the instruction start. */
            cpu_state.pc = cpu_state.oldpc;
            x86illegal();
            break;
    }
    CLOCK_CYCLES(6);
    PREFETCH_RUN(6, 2, rmdat, 0, 0, 0, 0, 1);
    return 0;
}
/* MOV reg32,DRx (0F 21) - 16-bit addressing.
   Reads a debug register. Privileged: #GP(0) if CPL > 0 in protected mode.
   DR4/DR5 alias DR6/DR7 unless CR4.DE is set, in which case they #UD. */
static int
opMOV_r_DRx_a16(uint32_t fetchdat)
{
    if ((CPL > 0) && (cr0 & 1)) {
        x86gpf(NULL, 0);
        return 1;
    }
#ifdef USE_DEBUG_REGS_486
    /* DR7.GD (bit 13): any debug-register access raises a debug trap,
       suppressed for one instruction by EFLAGS.RF. */
    if ((dr[7] & 0x2000) && !(cpu_state.eflags & RF_FLAG)) {
        trap |= 1;
        return 1;
    }
#endif
    fetch_ea_16(fetchdat);
    switch (cpu_reg) {
        case 0 ... 3:
            cpu_state.regs[cpu_rm].l = dr[cpu_reg];
            break;
        case 4:
            if (cr4 & 0x8) { /* CR4.DE set: DR4 access is undefined */
                x86illegal();
                return 1;
            }
            fallthrough;
        case 6:
            cpu_state.regs[cpu_rm].l = dr[6];
            break;
        case 5:
            if (cr4 & 0x8) { /* CR4.DE set: DR5 access is undefined */
                x86illegal();
                return 1;
            }
            fallthrough;
        case 7:
            cpu_state.regs[cpu_rm].l = dr[7];
            break;
        default:
            x86illegal();
            return 1;
    }
    CLOCK_CYCLES(6);
    PREFETCH_RUN(6, 2, rmdat, 0, 0, 0, 0, 0);
    return 0;
}
/* MOV reg32,DRx (0F 21) - 32-bit addressing variant of opMOV_r_DRx_a16;
   identical semantics, only the ModR/M decode and prefetch flag differ. */
static int
opMOV_r_DRx_a32(uint32_t fetchdat)
{
    if ((CPL > 0) && (cr0 & 1)) {
        x86gpf(NULL, 0);
        return 1;
    }
#ifdef USE_DEBUG_REGS_486
    /* DR7.GD (bit 13): any debug-register access raises a debug trap,
       suppressed for one instruction by EFLAGS.RF. */
    if ((dr[7] & 0x2000) && !(cpu_state.eflags & RF_FLAG)) {
        trap |= 1;
        return 1;
    }
#endif
    fetch_ea_32(fetchdat);
    switch (cpu_reg) {
        case 0 ... 3:
            cpu_state.regs[cpu_rm].l = dr[cpu_reg];
            break;
        case 4:
            if (cr4 & 0x8) { /* CR4.DE set: DR4 access is undefined */
                x86illegal();
                return 1;
            }
            fallthrough;
        case 6:
            cpu_state.regs[cpu_rm].l = dr[6];
            break;
        case 5:
            if (cr4 & 0x8) { /* CR4.DE set: DR5 access is undefined */
                x86illegal();
                return 1;
            }
            fallthrough;
        case 7:
            cpu_state.regs[cpu_rm].l = dr[7];
            break;
        default:
            x86illegal();
            return 1;
    }
    CLOCK_CYCLES(6);
    PREFETCH_RUN(6, 2, rmdat, 0, 0, 0, 0, 1);
    return 0;
}
/* MOV CRx,reg32 (0F 22) - 16-bit addressing.
   Writes a general register into a control register, performing the TLB
   and cache side effects of a real CPU. Privileged: #GP(0) unless ring 0. */
static int
opMOV_CRx_r_a16(uint32_t fetchdat)
{
    uint32_t old_cr0 = cr0;
    if ((CPL || (cpu_state.eflags & VM_FLAG)) && (cr0 & 1)) {
        x86gpf(NULL, 0);
        return 1;
    }
    fetch_ea_16(fetchdat);
    switch (cpu_reg) {
        case 0:
            /* Flush the TLB when PG (bit 31) or PE (bit 0) changes. */
            if ((cpu_state.regs[cpu_rm].l ^ cr0) & 0x80000001)
                flushmmucache();
            /* Make sure CPL = 0 when switching from real mode to protected mode. */
            if ((cpu_state.regs[cpu_rm].l & 0x01) && !(cr0 & 0x01))
                cpu_state.seg_cs.access &= 0x9f;
            cr0 = cpu_state.regs[cpu_rm].l;
            if (cpu_16bitbus)
                cr0 |= 0x10;
            if (!(cr0 & 0x80000000))
                mmu_perm = 4; /* paging disabled: reset MMU permission state */
            /* CR0.CD (bit 30) gates the internal cache. */
            if (hascache && !(cr0 & (1 << 30)))
                cpu_cache_int_enabled = 1;
            else
                cpu_cache_int_enabled = 0;
            if (hascache && ((cr0 ^ old_cr0) & (1 << 30)))
                cpu_update_waitstates();
            if (cr0 & 1)
                cpu_cur_status |= CPU_STATUS_PMODE;
            else
                cpu_cur_status &= ~CPU_STATUS_PMODE;
            break;
        case 2:
            cr2 = cpu_state.regs[cpu_rm].l;
            break;
        case 3:
            /* New page-directory base: the whole TLB becomes stale. */
            cr3 = cpu_state.regs[cpu_rm].l;
            flushmmucache();
            break;
        case 4:
            if (cpu_has_feature(CPU_FEATURE_CR4)) {
                /* Flush the TLB when PAE or PGE changes (within the
                   bits this CPU model actually implements). */
                if (((cpu_state.regs[cpu_rm].l ^ cr4) & cpu_CR4_mask) & (CR4_PAE | CR4_PGE))
                    flushmmucache();
                cr4 = cpu_state.regs[cpu_rm].l & cpu_CR4_mask;
                break;
            }
            /* No CR4 on this CPU model: fall through to the #UD path. */
        default:
            cpu_state.pc = cpu_state.oldpc;
            x86illegal();
            break;
    }
    CLOCK_CYCLES(10);
    PREFETCH_RUN(10, 2, rmdat, 0, 0, 0, 0, 0);
    return 0;
}
/* MOV CRx,reg32 (0F 22) - 32-bit addressing variant of opMOV_CRx_r_a16;
   identical semantics, only the ModR/M decode and prefetch flag differ. */
static int
opMOV_CRx_r_a32(uint32_t fetchdat)
{
    uint32_t old_cr0 = cr0;
    if ((CPL || (cpu_state.eflags & VM_FLAG)) && (cr0 & 1)) {
        x86gpf(NULL, 0);
        return 1;
    }
    fetch_ea_32(fetchdat);
    switch (cpu_reg) {
        case 0:
            /* Flush the TLB when PG (bit 31) or PE (bit 0) changes. */
            if ((cpu_state.regs[cpu_rm].l ^ cr0) & 0x80000001)
                flushmmucache();
            /* Make sure CPL = 0 when switching from real mode to protected mode. */
            if ((cpu_state.regs[cpu_rm].l & 0x01) && !(cr0 & 0x01))
                cpu_state.seg_cs.access &= 0x9f;
            cr0 = cpu_state.regs[cpu_rm].l;
            if (cpu_16bitbus)
                cr0 |= 0x10;
            if (!(cr0 & 0x80000000))
                mmu_perm = 4; /* paging disabled: reset MMU permission state */
            /* CR0.CD (bit 30) gates the internal cache. */
            if (hascache && !(cr0 & (1 << 30)))
                cpu_cache_int_enabled = 1;
            else
                cpu_cache_int_enabled = 0;
            if (hascache && ((cr0 ^ old_cr0) & (1 << 30)))
                cpu_update_waitstates();
            if (cr0 & 1)
                cpu_cur_status |= CPU_STATUS_PMODE;
            else
                cpu_cur_status &= ~CPU_STATUS_PMODE;
            break;
        case 2:
            cr2 = cpu_state.regs[cpu_rm].l;
            break;
        case 3:
            /* New page-directory base: the whole TLB becomes stale. */
            cr3 = cpu_state.regs[cpu_rm].l;
            flushmmucache();
            break;
        case 4:
            if (cpu_has_feature(CPU_FEATURE_CR4)) {
                /* Flush the TLB when PAE or PGE changes (within the
                   bits this CPU model actually implements). */
                if (((cpu_state.regs[cpu_rm].l ^ cr4) & cpu_CR4_mask) & (CR4_PAE | CR4_PGE))
                    flushmmucache();
                cr4 = cpu_state.regs[cpu_rm].l & cpu_CR4_mask;
                break;
            }
            /* No CR4 on this CPU model: fall through to the #UD path. */
        default:
            cpu_state.pc = cpu_state.oldpc;
            x86illegal();
            break;
    }
    CLOCK_CYCLES(10);
    PREFETCH_RUN(10, 2, rmdat, 0, 0, 0, 0, 1);
    return 0;
}
/* MOV DRx,reg32 (0F 23) - 16-bit addressing.
   Writes a general register into a debug register. Privileged: #GP(0) if
   CPL > 0 in protected mode. DR4/DR5 alias DR6/DR7 unless CR4.DE is set,
   in which case the access raises #UD. */
static int
opMOV_DRx_r_a16(uint32_t fetchdat)
{
    if ((CPL > 0) && (cr0 & 1)) {
        x86gpf(NULL, 0);
        return 1;
    }
#ifdef USE_DEBUG_REGS_486
    /* DR7.GD (bit 13): any debug-register access raises a debug trap.
       NOTE(review): unlike the a32 twin this path also calls x86gen() -
       confirm the asymmetry is intended. */
    if ((dr[7] & 0x2000) && !(cpu_state.eflags & RF_FLAG)) {
        trap |= 1;
        x86gen();
        return 1;
    }
#endif
    fetch_ea_16(fetchdat);
    switch (cpu_reg) {
        case 0 ... 3:
            dr[cpu_reg] = cpu_state.regs[cpu_rm].l;
            break;
        case 4:
            if (cr4 & 0x8) { /* CR4.DE set: DR4 access is undefined */
                x86illegal();
                return 1;
            }
            fallthrough;
        case 6:
            /* Only the architecturally defined DR6 bits are writable. */
            dr[6] = (dr[6] & 0xffff0ff0) | (cpu_state.regs[cpu_rm].l & 0x0000f00f);
            break;
        case 5:
            if (cr4 & 0x8) { /* CR4.DE set: DR5 access is undefined */
                x86illegal();
                return 1;
            }
            fallthrough;
        case 7:
            dr[7] = cpu_state.regs[cpu_rm].l | 0x00000400; /* bit 10 reads as 1 */
            break;
        default:
            x86illegal();
            return 1;
    }
    CLOCK_CYCLES(6);
    PREFETCH_RUN(6, 2, rmdat, 0, 0, 0, 0, 0);
#ifdef USE_DEBUG_REGS_486
    /* Breakpoint configuration may have changed: end the translated block. */
    CPU_BLOCK_END();
#endif
    return 0;
}
/* MOV DRx,reg32 (0F 23) - 32-bit addressing.
   Writes a general register into a debug register. Privileged: #GP(0) if
   CPL > 0 in protected mode. DR4/DR5 alias DR6/DR7 unless CR4.DE is set,
   in which case the access raises #UD. */
static int
opMOV_DRx_r_a32(uint32_t fetchdat)
{
    if ((CPL > 0) && (cr0 & 1)) {
        x86gpf(NULL, 0);
        return 1;
    }
#ifdef USE_DEBUG_REGS_486
    /* DR7.GD (bit 13): any debug-register access raises a debug trap,
       suppressed for one instruction by EFLAGS.RF. */
    if ((dr[7] & 0x2000) && !(cpu_state.eflags & RF_FLAG)) {
        trap |= 1;
        return 1;
    }
#endif
    /* Bug fix: this is the 32-bit addressing form, so the ModR/M byte must
       be decoded with fetch_ea_32() (was incorrectly fetch_ea_16(), unlike
       every other *_a32 handler in this file). */
    fetch_ea_32(fetchdat);
    switch (cpu_reg) {
        case 0 ... 3:
            dr[cpu_reg] = cpu_state.regs[cpu_rm].l;
            break;
        case 4:
            if (cr4 & 0x8) { /* CR4.DE set: DR4 access is undefined */
                x86illegal();
                return 1;
            }
            fallthrough;
        case 6:
            /* Only the architecturally defined DR6 bits are writable. */
            dr[6] = (dr[6] & 0xffff0ff0) | (cpu_state.regs[cpu_rm].l & 0x0000f00f);
            break;
        case 5:
            if (cr4 & 0x8) { /* CR4.DE set: DR5 access is undefined */
                x86illegal();
                return 1;
            }
            fallthrough;
        case 7:
            dr[7] = cpu_state.regs[cpu_rm].l | 0x00000400; /* bit 10 reads as 1 */
            break;
        default:
            x86illegal();
            return 1;
    }
    CLOCK_CYCLES(6);
    PREFETCH_RUN(6, 2, rmdat, 0, 0, 0, 0, 1);
#ifdef USE_DEBUG_REGS_486
    /* Breakpoint configuration may have changed: end the translated block. */
    CPU_BLOCK_END();
#endif
    return 0;
}
/* Common body for MOV reg32,TRx: copy the selected test register into the
   destination general register. A TR3 read first pulls the next dword out
   of the internal cache line buffer. */
static void
opMOV_r_TRx(void)
{
    if (cpu_reg == 3) {
        /* TR3: transfer the next dword of the cache fill buffer, advancing
           the rotating index within the 16-byte line. */
        _tr[3] = *(uint32_t *) &(_cache[cache_index]);
        cache_index = (cache_index + 4) & 0xf;
    }
    cpu_state.regs[cpu_rm].l = _tr[cpu_reg];
    CLOCK_CYCLES(6);
}
/* MOV reg32,TRx (0F 24) - 16-bit addressing. Test registers are absent on
   the Pentium (#GP); the access is also privileged in protected/V86 mode. */
static int
opMOV_r_TRx_a16(uint32_t fetchdat)
{
    int priv_fault = (CPL || (cpu_state.eflags & VM_FLAG)) && (cr0 & 1);

    if ((cpu_s->cpu_type == CPU_PENTIUM) || priv_fault) {
        x86gpf(NULL, 0);
        return 1;
    }
    fetch_ea_16(fetchdat);
    opMOV_r_TRx();
    PREFETCH_RUN(6, 2, rmdat, 0, 0, 0, 0, 0);
    return 0;
}
/* MOV reg32,TRx (0F 24) - 32-bit addressing. Test registers are absent on
   the Pentium (#GP); the access is also privileged in protected/V86 mode. */
static int
opMOV_r_TRx_a32(uint32_t fetchdat)
{
    int priv_fault = (CPL || (cpu_state.eflags & VM_FLAG)) && (cr0 & 1);

    if ((cpu_s->cpu_type == CPU_PENTIUM) || priv_fault) {
        x86gpf(NULL, 0);
        return 1;
    }
    fetch_ea_32(fetchdat);
    opMOV_r_TRx();
    PREFETCH_RUN(6, 2, rmdat, 0, 0, 0, 0, 1);
    return 0;
}
/* Common body for MOV TRx,reg32: write the selected test register and run
   the 486-style cache test protocol it controls. TR3 is the data window
   into the 16-byte cache line buffer; TR4 holds the tag/address; TR5
   selects the operation (bits 0-1) and set/entry (bits 4-10), with bit 19
   disabling the extension. */
static void
opMOV_TRx_r(void)
{
    uint32_t base;
    int i;
    int ctl;
    _tr[cpu_reg] = cpu_state.regs[cpu_rm].l;
    base = _tr[4] & 0xfffff800; /* line-aligned physical address from TR4 */
    ctl = _tr[5] & 3; /* TR5 control field: cache test operation */
    switch (cpu_reg) {
        case 3:
            /* TR3 write: store one dword into the line buffer and advance
               the rotating index. */
#if 0
            pclog("[W] %08X cache = %08X\n", base + cache_index, _tr[3]);
#endif
            *(uint32_t *) &(_cache[cache_index]) = _tr[3];
            cache_index = (cache_index + 4) & 0xf;
            break;
        case 4:
#if 0
            if (!(cr0 & 1) && !(_tr[5] & (1 << 19)))
                pclog("TAG = %08X, DEST = %08X\n", base, base + cache_index - 16);
#endif
            break;
        case 5:
            /* TR5 write triggers the actual cache test operation, unless
               bit 19 disables it. */
#if 0
            pclog("[16] EXT = %i (%i), SET = %04X\n", !!(_tr[5] & (1 << 19)), _tr[5] & 0x03, _tr[5] & 0x7f0);
#endif
            if (!(_tr[5] & (1 << 19))) {
                switch (ctl) {
                    case 0:
#if 0
                        pclog("  Cache fill or read...\n", base);
#endif
                        break;
                    case 1:
                        /* Write the buffered line out to memory. */
                        base += (_tr[5] & 0x7f0);
#if 0
                        pclog("  Writing 16 bytes to %08X...\n", base);
#endif
                        for (i = 0; i < 16; i += 4)
                            mem_writel_phys(base + i, *(uint32_t *) &(_cache[i]));
                        break;
                    case 2:
                        /* Fill the line buffer from memory. */
                        base += (_tr[5] & 0x7f0);
#if 0
                        pclog("  Reading 16 bytes from %08X...\n", base);
#endif
                        for (i = 0; i < 16; i += 4)
                            *(uint32_t *) &(_cache[i]) = mem_readl_phys(base + i);
                        break;
                    case 3:
#if 0
                        pclog("  Cache invalidate/flush...\n", base);
#endif
                        break;
                }
            }
            break;
    }
    CLOCK_CYCLES(6);
}
/* MOV TRx,reg32 (0F 26) - 16-bit addressing. Test registers are absent on
   the Pentium (#GP); the access is also privileged in protected/V86 mode. */
static int
opMOV_TRx_r_a16(uint32_t fetchdat)
{
    int priv_fault = (CPL || (cpu_state.eflags & VM_FLAG)) && (cr0 & 1);

    if ((cpu_s->cpu_type == CPU_PENTIUM) || priv_fault) {
        x86gpf(NULL, 0);
        return 1;
    }
    fetch_ea_16(fetchdat);
    opMOV_TRx_r();
    PREFETCH_RUN(6, 2, rmdat, 0, 0, 0, 0, 0);
    return 0;
}
/* MOV TRx,reg32 (0F 26) - 32-bit addressing. Test registers are absent on
   the Pentium (#GP); the access is also privileged in protected/V86 mode. */
static int
opMOV_TRx_r_a32(uint32_t fetchdat)
{
    int priv_fault = (CPL || (cpu_state.eflags & VM_FLAG)) && (cr0 & 1);

    if ((cpu_s->cpu_type == CPU_PENTIUM) || priv_fault) {
        x86gpf(NULL, 0);
        return 1;
    }
    fetch_ea_32(fetchdat);
    opMOV_TRx_r();
    PREFETCH_RUN(6, 2, rmdat, 0, 0, 0, 0, 1);
    return 0;
}
``` | /content/code_sandbox/src/cpu/x86_ops_mov_ctrl.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 4,258 |
```objective-c
/* CMC - complement the carry flag. */
static int
opCMC(uint32_t fetchdat)
{
    flags_rebuild();
    if (cpu_state.flags & C_FLAG)
        cpu_state.flags &= ~C_FLAG;
    else
        cpu_state.flags |= C_FLAG;
    CLOCK_CYCLES(2);
    PREFETCH_RUN(2, 1, -1, 0, 0, 0, 0, 0);
    return 0;
}
/* CLC - clear the carry flag. The lazy flags must be materialized first
   so that only CF is affected. */
static int
opCLC(uint32_t fetchdat)
{
    flags_rebuild();
    cpu_state.flags = cpu_state.flags & ~C_FLAG;
    CLOCK_CYCLES(2);
    PREFETCH_RUN(2, 1, -1, 0, 0, 0, 0, 0);
    return 0;
}
/* CLD - clear the direction flag (string operations count upward).
   DF is not a lazily computed flag, so no rebuild is needed. */
static int
opCLD(uint32_t fetchdat)
{
    cpu_state.flags = cpu_state.flags & ~D_FLAG;
    CLOCK_CYCLES(2);
    PREFETCH_RUN(2, 1, -1, 0, 0, 0, 0, 0);
    return 0;
}
/* CLI - clear the interrupt flag.
   Without sufficient I/O privilege, the PVI (protected mode) or VME (V86
   mode) extension lets the CPU clear the virtual interrupt flag instead;
   lacking either, #GP(0) is raised. */
static int
opCLI(uint32_t fetchdat)
{
    if (!IOPLp) {
        if ((!(cpu_state.eflags & VM_FLAG) && (cr4 & CR4_PVI)) || ((cpu_state.eflags & VM_FLAG) && (cr4 & CR4_VME))) {
            cpu_state.eflags &= ~VIF_FLAG;
        } else {
            x86gpf(NULL, 0);
            return 1;
        }
    } else
        cpu_state.flags &= ~I_FLAG;
    CLOCK_CYCLES(3);
    PREFETCH_RUN(3, 1, -1, 0, 0, 0, 0, 0);
    return 0;
}
/* STC - set the carry flag. The lazy flags must be materialized first
   so that only CF is affected. */
static int
opSTC(uint32_t fetchdat)
{
    flags_rebuild();
    cpu_state.flags = cpu_state.flags | C_FLAG;
    CLOCK_CYCLES(2);
    PREFETCH_RUN(2, 1, -1, 0, 0, 0, 0, 0);
    return 0;
}
/* STD - set the direction flag (string operations count downward).
   DF is not a lazily computed flag, so no rebuild is needed. */
static int
opSTD(uint32_t fetchdat)
{
    cpu_state.flags = cpu_state.flags | D_FLAG;
    CLOCK_CYCLES(2);
    PREFETCH_RUN(2, 1, -1, 0, 0, 0, 0, 0);
    return 0;
}
/* STI - set the interrupt flag.
   Without sufficient I/O privilege, PVI/VME allow setting the virtual
   interrupt flag instead - but only while no virtual interrupt is pending
   (VIP clear); otherwise, or without the extensions, #GP(0) is raised. */
static int
opSTI(uint32_t fetchdat)
{
    if (!IOPLp) {
        if ((!(cpu_state.eflags & VM_FLAG) && (cr4 & CR4_PVI)) || ((cpu_state.eflags & VM_FLAG) && (cr4 & CR4_VME))) {
            if (cpu_state.eflags & VIP_FLAG) {
                x86gpf(NULL, 0);
                return 1;
            } else
                cpu_state.eflags |= VIF_FLAG;
        } else {
            x86gpf(NULL, 0);
            return 1;
        }
    } else
        cpu_state.flags |= I_FLAG;
    /*First instruction after STI will always execute, regardless of whether
      there is a pending interrupt*/
    cpu_end_block_after_ins = 2;
    CLOCK_CYCLES(2);
    PREFETCH_RUN(2, 1, -1, 0, 0, 0, 0, 0);
    return 0;
}
/* SAHF - store AH into the low byte of FLAGS (SF/ZF/AF/PF/CF).
   Bit 1 is architecturally always 1; bits 3 and 5 always 0. */
static int
opSAHF(uint32_t fetchdat)
{
    uint16_t new_low = (AH & 0xd5) | 2;

    flags_rebuild();
    cpu_state.flags = (cpu_state.flags & 0xff00) | new_low;
    CLOCK_CYCLES(3);
    PREFETCH_RUN(3, 1, -1, 0, 0, 0, 0, 0);
#if (defined(USE_DYNAREC) && defined(USE_NEW_DYNAREC))
    codegen_flags_changed = 0;
#endif
    return 0;
}
/* LAHF - load AH from the low byte of FLAGS. The lazy flags are
   materialized first so the stored byte is current. */
static int
opLAHF(uint32_t fetchdat)
{
    flags_rebuild();
    AH = (uint8_t) (cpu_state.flags & 0xff);
    CLOCK_CYCLES(3);
    PREFETCH_RUN(3, 1, -1, 0, 0, 0, 0, 0);
    return 0;
}
/* PUSHF - push the 16-bit FLAGS word.
   In V86 mode with IOPL < 3 this normally faults with #GP(0); with CR4.VME
   set, the pushed image instead carries VIF in the IF position and reports
   IOPL as 3. */
static int
opPUSHF(uint32_t fetchdat)
{
    if ((cpu_state.eflags & VM_FLAG) && (IOPL < 3)) {
        if (cr4 & CR4_VME) {
            uint16_t temp;
            flags_rebuild();
            /* Substitute VIF for IF and force IOPL to 3 in the pushed copy. */
            temp = (cpu_state.flags & ~I_FLAG) | 0x3000;
            if (cpu_state.eflags & VIF_FLAG)
                temp |= I_FLAG;
            PUSH_W(temp);
        } else {
            x86gpf(NULL, 0);
            return 1;
        }
    } else {
        flags_rebuild();
        PUSH_W(cpu_state.flags);
    }
    CLOCK_CYCLES(4);
    PREFETCH_RUN(4, 1, -1, 0, 0, 1, 0, 0);
    return cpu_state.abrt;
}
/* PUSHFD - push the 32-bit EFLAGS image.
   The high word is masked by CPU capability: VME-capable parts expose
   AC/VIF/VIP/ID (0x3c), CPUID-capable parts AC and ID (0x24), otherwise
   only AC (4). Faults with #GP(0) in V86 mode with IOPL < 3. */
static int
opPUSHFD(uint32_t fetchdat)
{
    uint16_t tempw;
    if ((cpu_state.eflags & VM_FLAG) && (IOPL < 3)) {
        x86gpf(NULL, 0);
        return 1;
    }
    if (cpu_CR4_mask & CR4_VME)
        tempw = cpu_state.eflags & 0x3c;
    else if (CPUID)
        tempw = cpu_state.eflags & 0x24;
    else
        tempw = cpu_state.eflags & 4;
    flags_rebuild();
    PUSH_L(cpu_state.flags | (tempw << 16));
    CLOCK_CYCLES(4);
    PREFETCH_RUN(4, 1, -1, 0, 0, 0, 1, 0);
    return cpu_state.abrt;
}
/* POPF, 186-class behavior.
   How much of FLAGS is writable depends on mode and privilege:
   - real mode (PE clear): IOPL/NT preserved, the rest writable;
   - CPL 0: everything writable;
   - IOPL >= CPL: all but IOPL;
   - otherwise: neither IF nor IOPL may change.
   Faults with #GP(0) in V86 mode with IOPL < 3. */
static int
opPOPF_186(uint32_t fetchdat)
{
    uint16_t tempw;
    if ((cpu_state.eflags & VM_FLAG) && (IOPL < 3)) {
        x86gpf(NULL, 0);
        return 1;
    }
    tempw = POP_W();
    if (cpu_state.abrt)
        return 1;
    if (!(msw & 1))
        cpu_state.flags = (cpu_state.flags & 0x7000) | (tempw & 0x0fd5) | 2;
    else if (!(CPL))
        cpu_state.flags = (tempw & 0x7fd5) | 2;
    else if (IOPLp)
        cpu_state.flags = (cpu_state.flags & 0x3000) | (tempw & 0x4fd5) | 2;
    else
        cpu_state.flags = (cpu_state.flags & 0x3200) | (tempw & 0x4dd5) | 2;
    flags_extract();
    rf_flag_no_clear = 1;
    CLOCK_CYCLES(5);
    PREFETCH_RUN(5, 1, -1, 1, 0, 0, 0, 0);
#if (defined(USE_DYNAREC) && defined(USE_NEW_DYNAREC))
    codegen_flags_changed = 0;
#endif
    return 0;
}
/* POPF, 286-class behavior. The mask logic is currently identical to
   opPOPF_186 above; a separate handler exists so 286-specific quirks can
   diverge without affecting the 186 path. */
static int
opPOPF_286(uint32_t fetchdat)
{
    uint16_t tempw;
    if ((cpu_state.eflags & VM_FLAG) && (IOPL < 3)) {
        x86gpf(NULL, 0);
        return 1;
    }
    tempw = POP_W();
    if (cpu_state.abrt)
        return 1;
    if (!(msw & 1))
        cpu_state.flags = (cpu_state.flags & 0x7000) | (tempw & 0x0fd5) | 2;
    else if (!(CPL))
        cpu_state.flags = (tempw & 0x7fd5) | 2;
    else if (IOPLp)
        cpu_state.flags = (cpu_state.flags & 0x3000) | (tempw & 0x4fd5) | 2;
    else
        cpu_state.flags = (cpu_state.flags & 0x3200) | (tempw & 0x4dd5) | 2;
    flags_extract();
    rf_flag_no_clear = 1;
    CLOCK_CYCLES(5);
    PREFETCH_RUN(5, 1, -1, 1, 0, 0, 0, 0);
#if (defined(USE_DYNAREC) && defined(USE_NEW_DYNAREC))
    codegen_flags_changed = 0;
#endif
    return 0;
}
/* POPF, 386+ behavior.
   In V86 mode with IOPL < 3, CR4.VME lets the pop proceed: the popped IF
   is redirected to VIF, but the access faults if TF is set or a virtual
   interrupt is pending (IF requested while VIP set). Otherwise the usual
   privilege-based masks apply (see opPOPF_186). */
static int
opPOPF(uint32_t fetchdat)
{
    uint16_t tempw;
    if ((cpu_state.eflags & VM_FLAG) && (IOPL < 3)) {
        if (cr4 & CR4_VME) {
            /* Keep the original SP so a faulting pop can be restarted. */
            uint32_t old_esp = ESP;
            tempw = POP_W();
            if (cpu_state.abrt) {
                ESP = old_esp;
                return 1;
            }
            if ((tempw & T_FLAG) || ((tempw & I_FLAG) && (cpu_state.eflags & VIP_FLAG))) {
                ESP = old_esp;
                x86gpf(NULL, 0);
                return 1;
            }
            /* Popped IF lands in VIF; real IF/IOPL stay unchanged. */
            if (tempw & I_FLAG)
                cpu_state.eflags |= VIF_FLAG;
            else
                cpu_state.eflags &= ~VIF_FLAG;
            cpu_state.flags = (cpu_state.flags & 0x3200) | (tempw & 0x4dd5) | 2;
        } else {
            x86gpf(NULL, 0);
            return 1;
        }
    } else {
        tempw = POP_W();
        if (cpu_state.abrt)
            return 1;
        if (!(CPL) || !(msw & 1))
            cpu_state.flags = (tempw & 0x7fd5) | 2;
        else if (IOPLp)
            cpu_state.flags = (cpu_state.flags & 0x3000) | (tempw & 0x4fd5) | 2;
        else
            cpu_state.flags = (cpu_state.flags & 0x3200) | (tempw & 0x4dd5) | 2;
    }
    flags_extract();
    rf_flag_no_clear = 1;
    CLOCK_CYCLES(5);
    PREFETCH_RUN(5, 1, -1, 1, 0, 0, 0, 0);
#if (defined(USE_DYNAREC) && defined(USE_NEW_DYNAREC))
    codegen_flags_changed = 0;
#endif
    return 0;
}
/* POPFD - pop the full 32-bit EFLAGS.
   The low word follows the same privilege masks as POPF. For the high
   word, the current VM/RF bits are always preserved; the popped AC..ID
   bits are accepted only on 486-class parts, then filtered down by CPU
   capability (VME > CPUID > 486 > 386). Faults in V86 with IOPL < 3. */
static int
opPOPFD(uint32_t fetchdat)
{
    uint32_t templ;
    if ((cpu_state.eflags & VM_FLAG) && (IOPL < 3)) {
        x86gpf(NULL, 0);
        return 1;
    }
    templ = POP_L();
    if (cpu_state.abrt)
        return 1;
    if (!(CPL) || !(msw & 1))
        cpu_state.flags = (templ & 0x7fd5) | 2;
    else if (IOPLp)
        cpu_state.flags = (cpu_state.flags & 0x3000) | (templ & 0x4fd5) | 2;
    else
        cpu_state.flags = (cpu_state.flags & 0x3200) | (templ & 0x4dd5) | 2;
    /* High word: keep only the popped AC..ID bits (486+), then merge the
       unchanged RF/VM bits back in. */
    templ &= (is486 || isibm486) ? 0x3c0000 : 0;
    templ |= ((cpu_state.eflags & 3) << 16);
    if (cpu_CR4_mask & CR4_VME)
        cpu_state.eflags = (templ >> 16) & 0x3f;
    else if (CPUID)
        cpu_state.eflags = (templ >> 16) & 0x27;
    else if (is486 || isibm486)
        cpu_state.eflags = (templ >> 16) & 7;
    else
        cpu_state.eflags = (templ >> 16) & 3;
    flags_extract();
    rf_flag_no_clear = 1;
    CLOCK_CYCLES(5);
    PREFETCH_RUN(5, 1, -1, 0, 1, 0, 0, 0);
#if (defined(USE_DYNAREC) && defined(USE_NEW_DYNAREC))
    codegen_flags_changed = 0;
#endif
    return 0;
}
``` | /content/code_sandbox/src/cpu/x86_ops_flag_2386.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 2,729 |
```c
/*Since IDT/Centaur didn't document cycle timings in the WinChip datasheets, and
I don't currently own a WinChip 2 to test against, most of the timing here is
a guess. This code makes the current (probably wrong) assumptions :
- FPU uses same timings as a Pentium, except for FXCH (which doesn't pair)
- 3DNow! instructions perfectly pair
- MMX follows mostly Pentium rules - one pipeline has shift/pack, one has
multiply, and other instructions can execute in either pipeline
- Instructions with prefixes can pair if both instructions are fully decoded
when the first instruction starts execution.*/
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <wchar.h>
#include <86box/86box.h>
#include "cpu.h"
#include <86box/mem.h>
#include <86box/plat_unused.h>
#include "x86.h"
#include "x86_ops.h"
#include "x87_sf.h"
#include "x87.h"
#include "codegen.h"
#include "codegen_ops.h"
#include "codegen_timing_common.h"
/* Cycle-count encoding: the low bits of each table entry hold the cycle
   count; the high bits classify the instruction for pairing decisions. */
/*Instruction has different execution time for 16 and 32 bit data. Does not pair */
#define CYCLES_HAS_MULTI (1 << 31)
#define CYCLES_FPU (1 << 30)
#define CYCLES_IS_MMX_MUL (1 << 29)
#define CYCLES_IS_MMX_SHIFT (1 << 28)
#define CYCLES_IS_MMX_ANY (1 << 27)
#define CYCLES_IS_3DNOW (1 << 26)
#define CYCLES_MMX_MUL(c) (CYCLES_IS_MMX_MUL | c)
#define CYCLES_MMX_SHIFT(c) (CYCLES_IS_MMX_SHIFT | c)
#define CYCLES_MMX_ANY(c) (CYCLES_IS_MMX_ANY | c)
#define CYCLES_3DNOW(c) (CYCLES_IS_3DNOW | c)
#define CYCLES_IS_MMX (CYCLES_IS_MMX_MUL | CYCLES_IS_MMX_SHIFT | CYCLES_IS_MMX_ANY | CYCLES_IS_3DNOW)
/* Strip the classification bits, leaving the raw cycle count. */
#define GET_CYCLES(c) (c & ~(CYCLES_HAS_MULTI | CYCLES_FPU | CYCLES_IS_MMX))
#define CYCLES(c) c
/* 16-bit count in the low byte, 32-bit count in the next byte. */
#define CYCLES2(c16, c32) (CYCLES_HAS_MULTI | c16 | (c32 << 8))
/*comp_time = cycles until instruction complete
  i_overlap = cycles that overlap with integer
  f_overlap = cycles that overlap with subsequent FPU*/
#define FPU_CYCLES(comp_time, i_overlap, f_overlap) (comp_time) | (i_overlap << 8) | (f_overlap << 16) | CYCLES_FPU
#define FPU_COMP_TIME(timing) (timing & 0xff)
#define FPU_I_OVERLAP(timing) ((timing >> 8) & 0xff)
#define FPU_F_OVERLAP(timing) ((timing >> 16) & 0xff)
#define FPU_I_LATENCY(timing) (FPU_COMP_TIME(timing) - FPU_I_OVERLAP(timing))
#define FPU_F_LATENCY(timing) (FPU_I_OVERLAP(timing) - FPU_F_OVERLAP(timing))
#define FPU_RESULT_LATENCY(timing) ((timing >> 8) & 0xff)
/* Marker for opcodes with no defined timing (undefined/unsupported). */
#define INVALID 0
/* Timing table for one-byte opcodes 00-FF with a memory operand (mod != 3),
   indexed by opcode; entries use the CYCLES* encoding defined above. */
static uint32_t opcode_timings_winchip2[256] = {
// clang-format off
/*00*/ CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(1), CYCLES(1), CYCLES(2), CYCLES(3), CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(1), CYCLES(1), CYCLES(2), INVALID,
/*10*/ CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(1), CYCLES(1), CYCLES(2), CYCLES(3), CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(1), CYCLES(1), CYCLES(2), CYCLES(3),
/*20*/ CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(3), CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(3),
/*30*/ CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(2),
/*40*/ CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1),
/*50*/ CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1),
/*60*/ CYCLES(11), CYCLES(9), CYCLES(7), CYCLES(9), CYCLES(4), CYCLES(4), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES2(17,25), CYCLES(1), CYCLES2(17,20), CYCLES(17), CYCLES(17), CYCLES(17), CYCLES(17),
/*70*/ CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1),
/*80*/ CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(5), CYCLES(5), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(3), CYCLES(1), CYCLES(5), CYCLES(6),
/*90*/ CYCLES(1), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(0), CYCLES(4), CYCLES(4), CYCLES(5), CYCLES(2), CYCLES(3),
/*a0*/ CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(7), CYCLES(7), CYCLES(8), CYCLES(8), CYCLES(1), CYCLES(1), CYCLES(5), CYCLES(5), CYCLES(5), CYCLES(5), CYCLES(6), CYCLES(6),
/*b0*/ CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1),
/*c0*/ CYCLES(4), CYCLES(4), CYCLES(5), CYCLES(5), CYCLES(6), CYCLES(6), CYCLES(1), CYCLES(1), CYCLES(14), CYCLES(5), CYCLES(0), CYCLES(0), CYCLES(0), CYCLES(0), CYCLES(3), CYCLES(0),
/*d0*/ CYCLES(4), CYCLES(4), CYCLES(4), CYCLES(4), CYCLES(15), CYCLES(14), CYCLES(2), CYCLES(4), INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID,
/*e0*/ CYCLES(6), CYCLES(6), CYCLES(6), CYCLES(5), CYCLES(14), CYCLES(14), CYCLES(16), CYCLES(16), CYCLES(3), CYCLES(3), CYCLES(17), CYCLES(3), CYCLES(14), CYCLES(14), CYCLES(14), CYCLES(14),
/*f0*/ CYCLES(4), CYCLES(0), CYCLES(0), CYCLES(0), CYCLES(4), CYCLES(2), INVALID, INVALID, CYCLES(2), CYCLES(2), CYCLES(3), CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(3), INVALID
// clang-format on
};
/* Timing table for one-byte opcodes 00-FF with register operands (mod == 3);
   register forms of the ALU instructions are generally one cycle faster. */
static uint32_t opcode_timings_winchip2_mod3[256] = {
// clang-format off
/*00*/ CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(2), CYCLES(3), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(2), INVALID,
/*10*/ CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(2), CYCLES(3), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(2), CYCLES(3),
/*20*/ CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(3), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(3),
/*30*/ CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(2), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(2),
/*40*/ CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1),
/*50*/ CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1),
/*60*/ CYCLES(11), CYCLES(9), CYCLES(7), CYCLES(9), CYCLES(4), CYCLES(4), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES2(14,25), CYCLES(1), CYCLES2(17,20), CYCLES(17), CYCLES(17), CYCLES(17), CYCLES(17),
/*70*/ CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1),
/*80*/ CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(5), CYCLES(5), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(2), CYCLES(1), CYCLES(2), CYCLES(1),
/*90*/ CYCLES(1), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(0), CYCLES(4), CYCLES(4), CYCLES(5), CYCLES(2), CYCLES(3),
/*a0*/ CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(7), CYCLES(7), CYCLES(8), CYCLES(8), CYCLES(1), CYCLES(1), CYCLES(5), CYCLES(5), CYCLES(5), CYCLES(5), CYCLES(6), CYCLES(6),
/*b0*/ CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1),
/*c0*/ CYCLES(4), CYCLES(4), CYCLES(5), CYCLES(5), CYCLES(6), CYCLES(6), CYCLES(1), CYCLES(1), CYCLES(14), CYCLES(5), CYCLES(0), CYCLES(0), CYCLES(0), CYCLES(0), CYCLES(3), CYCLES(0),
/*d0*/ CYCLES(4), CYCLES(4), CYCLES(4), CYCLES(4), CYCLES(15), CYCLES(14), CYCLES(2), CYCLES(4), INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID,
/*e0*/ CYCLES(6), CYCLES(6), CYCLES(6), CYCLES(5), CYCLES(14), CYCLES(14), CYCLES(16), CYCLES(16), CYCLES(3), CYCLES(3), CYCLES(17), CYCLES(3), CYCLES(14), CYCLES(14), CYCLES(14), CYCLES(14),
/*f0*/ CYCLES(4), CYCLES(0), CYCLES(0), CYCLES(0), CYCLES(4), CYCLES(2), INVALID, INVALID, CYCLES(2), CYCLES(2), CYCLES(3), CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(3), INVALID,
// clang-format on
};
/* Timing table for two-byte (0F-prefixed) opcodes with a memory operand
   (mod != 3); MMX/3DNow! entries carry pairing-class bits. */
static uint32_t opcode_timings_winchip2_0f[256] = {
// clang-format off
/*00*/ CYCLES(20), CYCLES(11), CYCLES(11), CYCLES(10), INVALID, CYCLES(195), CYCLES(7), INVALID, CYCLES(1000), CYCLES(10000), INVALID, INVALID, INVALID, CYCLES_3DNOW(1), CYCLES(1), CYCLES_3DNOW(1),
/*10*/ INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID,
/*20*/ CYCLES(6), CYCLES(6), CYCLES(6), CYCLES(6), CYCLES(6), CYCLES(6), INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID,
/*30*/ CYCLES(9), CYCLES(1), CYCLES(9), INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID,
/*40*/ INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID,
/*50*/ INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID,
/*60*/ CYCLES_MMX_SHIFT(2), CYCLES_MMX_SHIFT(2), CYCLES_MMX_SHIFT(2), CYCLES_MMX_SHIFT(2), CYCLES_MMX_ANY(2), CYCLES_MMX_ANY(2), CYCLES_MMX_ANY(2), CYCLES_MMX_SHIFT(2), CYCLES_MMX_SHIFT(2), CYCLES_MMX_SHIFT(2), CYCLES_MMX_SHIFT(2), CYCLES_MMX_SHIFT(2), INVALID, INVALID, CYCLES_MMX_ANY(2), CYCLES_MMX_ANY(2),
/*70*/ INVALID, CYCLES_MMX_SHIFT(2), CYCLES_MMX_SHIFT(2), CYCLES_MMX_SHIFT(2), CYCLES_MMX_ANY(2), CYCLES_MMX_ANY(2), CYCLES_MMX_ANY(2), CYCLES(100), INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, CYCLES_MMX_ANY(2), CYCLES_MMX_ANY(2),
/*80*/ CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1),
/*90*/ CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3),
/*a0*/ CYCLES(3), CYCLES(3), CYCLES(14), CYCLES(8), CYCLES(3), CYCLES(4), INVALID, INVALID, CYCLES(3), CYCLES(3), INVALID, CYCLES(13), CYCLES(3), CYCLES(3), INVALID, CYCLES2(18,30),
/*b0*/ CYCLES(10), CYCLES(10), CYCLES(6), CYCLES(13), CYCLES(6), CYCLES(6), CYCLES(3), CYCLES(3), INVALID, INVALID, CYCLES(6), CYCLES(13), CYCLES(7), CYCLES(7), CYCLES(3), CYCLES(3),
/*c0*/ CYCLES(4), CYCLES(4), INVALID, INVALID, INVALID, INVALID, INVALID, CYCLES(3), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1),
/*d0*/ INVALID, CYCLES_MMX_SHIFT(2), CYCLES_MMX_SHIFT(2), CYCLES_MMX_SHIFT(2), INVALID, CYCLES_MMX_MUL(2), INVALID, INVALID, CYCLES_MMX_ANY(2), CYCLES_MMX_ANY(2), INVALID, CYCLES_MMX_ANY(2), CYCLES_MMX_ANY(2), CYCLES_MMX_ANY(2), INVALID, CYCLES_MMX_ANY(2),
/*e0*/ INVALID, CYCLES_MMX_SHIFT(2), CYCLES_MMX_SHIFT(2), INVALID, INVALID, CYCLES_MMX_MUL(2), INVALID, INVALID, CYCLES_MMX_ANY(2), CYCLES_MMX_ANY(2), INVALID, CYCLES_MMX_ANY(2), CYCLES_MMX_ANY(2), CYCLES_MMX_ANY(2), INVALID, CYCLES_MMX_ANY(2),
/*f0*/ INVALID, CYCLES_MMX_SHIFT(2), CYCLES_MMX_SHIFT(2), CYCLES_MMX_SHIFT(2), INVALID, CYCLES_MMX_MUL(2), INVALID, INVALID, CYCLES_MMX_ANY(2), CYCLES_MMX_ANY(2), CYCLES_MMX_ANY(2), INVALID, CYCLES_MMX_ANY(2), CYCLES_MMX_ANY(2), CYCLES_MMX_ANY(2), INVALID,
// clang-format on
};
/* Timings for 0Fh-prefixed opcodes with mod=3 (register operand), indexed by
   the second opcode byte. INVALID marks encodings this table does not time;
   CYCLES_MMX_* entries carry the pairing-class flag bits used by can_pair(). */
static uint32_t opcode_timings_winchip2_0f_mod3[256] = {
// clang-format off
/*00*/ CYCLES(20), CYCLES(11), CYCLES(11), CYCLES(10), INVALID, CYCLES(195), CYCLES(7), INVALID, CYCLES(1000), CYCLES(10000), INVALID, INVALID, INVALID, CYCLES_3DNOW(1), CYCLES(1), CYCLES_3DNOW(1),
/*10*/ INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID,
/*20*/ CYCLES(6), CYCLES(6), CYCLES(6), CYCLES(6), CYCLES(6), CYCLES(6), INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID,
/*30*/ CYCLES(9), CYCLES(1), CYCLES(9), INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID,
/*40*/ INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID,
/*50*/ INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, INVALID,
/*60*/ CYCLES_MMX_SHIFT(1), CYCLES_MMX_SHIFT(1), CYCLES_MMX_SHIFT(1), CYCLES_MMX_SHIFT(1), CYCLES_MMX_ANY(1), CYCLES_MMX_ANY(1), CYCLES_MMX_ANY(1), CYCLES_MMX_SHIFT(1), CYCLES_MMX_SHIFT(1), CYCLES_MMX_SHIFT(1), CYCLES_MMX_SHIFT(1), CYCLES_MMX_SHIFT(1), INVALID, INVALID, CYCLES_MMX_ANY(1), CYCLES_MMX_ANY(1),
/*70*/ INVALID, CYCLES_MMX_SHIFT(1), CYCLES_MMX_SHIFT(1), CYCLES_MMX_SHIFT(1), CYCLES_MMX_ANY(1), CYCLES_MMX_ANY(1), CYCLES_MMX_ANY(1), CYCLES(100), INVALID, INVALID, INVALID, INVALID, INVALID, INVALID, CYCLES_MMX_ANY(1), CYCLES_MMX_ANY(1),
/*80*/ CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1),
/*90*/ CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3),
/*a0*/ CYCLES(3), CYCLES(3), CYCLES(14), CYCLES(8), CYCLES(3), CYCLES(4), INVALID, INVALID, CYCLES(3), CYCLES(3), INVALID, CYCLES(13), CYCLES(3), CYCLES(3), INVALID, CYCLES2(18,30),
/*b0*/ CYCLES(10), CYCLES(10), CYCLES(6), CYCLES(13), CYCLES(6), CYCLES(6), CYCLES(3), CYCLES(3), INVALID, INVALID, CYCLES(6), CYCLES(13), CYCLES(7), CYCLES(7), CYCLES(3), CYCLES(3),
/*c0*/ CYCLES(4), CYCLES(4), INVALID, INVALID, INVALID, INVALID, INVALID, CYCLES(3), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1), CYCLES(1),
/*d0*/ INVALID, CYCLES_MMX_SHIFT(1), CYCLES_MMX_SHIFT(1), CYCLES_MMX_SHIFT(1), INVALID, CYCLES_MMX_MUL(1), INVALID, INVALID, CYCLES_MMX_ANY(1), CYCLES_MMX_ANY(1), INVALID, CYCLES_MMX_ANY(1), CYCLES_MMX_ANY(1), CYCLES_MMX_ANY(1), INVALID, CYCLES_MMX_ANY(1),
/*e0*/ INVALID, CYCLES_MMX_SHIFT(1), CYCLES_MMX_SHIFT(1), INVALID, INVALID, CYCLES_MMX_MUL(1), INVALID, INVALID, CYCLES_MMX_ANY(1), CYCLES_MMX_ANY(1), INVALID, CYCLES_MMX_ANY(1), CYCLES_MMX_ANY(1), CYCLES_MMX_ANY(1), INVALID, CYCLES_MMX_ANY(1),
/*f0*/ INVALID, CYCLES_MMX_SHIFT(1), CYCLES_MMX_SHIFT(1), CYCLES_MMX_SHIFT(1), INVALID, CYCLES_MMX_MUL(1), INVALID, INVALID, CYCLES_MMX_ANY(1), CYCLES_MMX_ANY(1), CYCLES_MMX_ANY(1), INVALID, CYCLES_MMX_ANY(1), CYCLES_MMX_ANY(1), CYCLES_MMX_ANY(1), INVALID,
// clang-format on
};
/* Group-opcode timing tables, indexed by the reg field (bits 5:3) of the
   ModRM byte. Each has a memory-operand form and a _mod3 (register) form. */

/* Shift group (C0h/C1h/D0h-D3h): ROL/ROR/RCL/RCR/SHL/SHR/SAL/SAR. */
static uint32_t opcode_timings_winchip2_shift[8] = {
// clang-format off
CYCLES(7), CYCLES(7), CYCLES(10), CYCLES(10), CYCLES(7), CYCLES(7), CYCLES(7), CYCLES(7)
// clang-format on
};
static uint32_t opcode_timings_winchip2_shift_mod3[8] = {
// clang-format off
CYCLES(3), CYCLES(3), CYCLES(9), CYCLES(9), CYCLES(3), CYCLES(3), CYCLES(3), CYCLES(3)
// clang-format on
};
/* F6h group (8-bit): TEST/-/NOT/NEG/MUL/IMUL/DIV/IDIV. */
static uint32_t opcode_timings_winchip2_f6[8] = {
// clang-format off
CYCLES(2), INVALID, CYCLES(2), CYCLES(2), CYCLES(13), CYCLES(14), CYCLES(16), CYCLES(19)
// clang-format on
};
static uint32_t opcode_timings_winchip2_f6_mod3[8] = {
// clang-format off
CYCLES(1), INVALID, CYCLES(1), CYCLES(1), CYCLES(13), CYCLES(14), CYCLES(16), CYCLES(19)
// clang-format on
};
/* F7h group (16/32-bit); CYCLES2 encodes separate 16- and 32-bit counts. */
static uint32_t opcode_timings_winchip2_f7[8] = {
// clang-format off
CYCLES(2), INVALID, CYCLES(2), CYCLES(2), CYCLES(21), CYCLES2(22,38), CYCLES2(24,40), CYCLES2(27,43)
// clang-format on
};
static uint32_t opcode_timings_winchip2_f7_mod3[8] = {
// clang-format off
CYCLES(1), INVALID, CYCLES(1), CYCLES(1), CYCLES(21), CYCLES2(22,38), CYCLES2(24,40), CYCLES2(27,43)
// clang-format on
};
/* FFh group: INC/DEC/CALL/CALL far/JMP/JMP far/PUSH. */
static uint32_t opcode_timings_winchip2_ff[8] = {
// clang-format off
CYCLES(2), CYCLES(2), CYCLES(5), CYCLES(0), CYCLES(5), CYCLES(0), CYCLES(5), INVALID
// clang-format on
};
static uint32_t opcode_timings_winchip2_ff_mod3[8] = {
// clang-format off
CYCLES(1), CYCLES(1), CYCLES(5), CYCLES(0), CYCLES(5), CYCLES(0), CYCLES(5), INVALID
// clang-format on
};
/* x87 escape-opcode timing tables (D8h-DFh). FPU_CYCLES(total, overlap,
   result) encodes issue latency plus the concurrency values consumed by
   codegen_fpu_latencies()/codegen_instruction(). Memory forms are indexed
   by the reg field; most _mod3 forms by the full low 6 bits of the ModRM. */
static uint32_t opcode_timings_winchip2_d8[8] = {
// clang-format off
/* FADDs FMULs FCOMs FCOMPs*/
FPU_CYCLES(3,2,2), FPU_CYCLES(3,2,2), FPU_CYCLES(1,0,0), FPU_CYCLES(1,0,0),
/* FSUBs FSUBRs FDIVs FDIVRs*/
FPU_CYCLES(3,2,2), FPU_CYCLES(3,2,2), FPU_CYCLES(39,38,2), FPU_CYCLES(39,38,2)
// clang-format on
};
static uint32_t opcode_timings_winchip2_d8_mod3[8] = {
// clang-format off
/* FADD FMUL FCOM FCOMP*/
FPU_CYCLES(3,2,2), FPU_CYCLES(3,2,2), FPU_CYCLES(1,0,0), FPU_CYCLES(1,0,0),
/* FSUB FSUBR FDIV FDIVR*/
FPU_CYCLES(3,2,2), FPU_CYCLES(3,2,2), FPU_CYCLES(39,38,2), FPU_CYCLES(39,38,2)
// clang-format on
};
static uint32_t opcode_timings_winchip2_d9[8] = {
// clang-format off
/* FLDs FSTs FSTPs*/
FPU_CYCLES(1,0,0), INVALID, FPU_CYCLES(2,0,0), FPU_CYCLES(2,0,0),
/* FLDENV FLDCW FSTENV FSTCW*/
FPU_CYCLES(32,0,0), FPU_CYCLES(8,0,0), FPU_CYCLES(48,0,0), FPU_CYCLES(2,0,0)
// clang-format on
};
static uint32_t opcode_timings_winchip2_d9_mod3[64] = {
// clang-format off
/*FLD*/
FPU_CYCLES(1,0,0), FPU_CYCLES(1,0,0), FPU_CYCLES(1,0,0), FPU_CYCLES(1,0,0),
FPU_CYCLES(1,0,0), FPU_CYCLES(1,0,0), FPU_CYCLES(1,0,0), FPU_CYCLES(1,0,0),
/*FXCH*/
FPU_CYCLES(1,0,0), FPU_CYCLES(1,0,0), FPU_CYCLES(1,0,0), FPU_CYCLES(1,0,0),
FPU_CYCLES(1,0,0), FPU_CYCLES(1,0,0), FPU_CYCLES(1,0,0), FPU_CYCLES(1,0,0),
/*FNOP*/
FPU_CYCLES(3,0,0), INVALID, INVALID, INVALID,
INVALID, INVALID, INVALID, INVALID,
/*FSTP*/
FPU_CYCLES(1,0,0), FPU_CYCLES(1,0,0), FPU_CYCLES(1,0,0), FPU_CYCLES(1,0,0),
FPU_CYCLES(1,0,0), FPU_CYCLES(1,0,0), FPU_CYCLES(1,0,0), FPU_CYCLES(1,0,0),
/* opFCHS opFABS*/
FPU_CYCLES(1,0,0), FPU_CYCLES(1,0,0), INVALID, INVALID,
/* opFTST opFXAM*/
FPU_CYCLES(1,0,0), FPU_CYCLES(21,4,0), INVALID, INVALID,
/* opFLD1 opFLDL2T opFLDL2E opFLDPI*/
FPU_CYCLES(2,0,0), FPU_CYCLES(5,2,2), FPU_CYCLES(5,2,2), FPU_CYCLES(5,2,2),
/* opFLDEG2 opFLDLN2 opFLDZ*/
FPU_CYCLES(5,2,2), FPU_CYCLES(5,2,2), FPU_CYCLES(2,0,0), INVALID,
/* opF2XM1 opFYL2X opFPTAN opFPATAN*/
FPU_CYCLES(53,2,2), FPU_CYCLES(103,2,2),FPU_CYCLES(120,36,0),FPU_CYCLES(112,2,2),
/* opFDECSTP opFINCSTP,*/
INVALID, INVALID, FPU_CYCLES(2,0,0), FPU_CYCLES(2,0,0),
/* opFPREM opFSQRT opFSINCOS*/
FPU_CYCLES(64,2,2), INVALID, FPU_CYCLES(70,69,2),FPU_CYCLES(89,2,2),
/* opFRNDINT opFSCALE opFSIN opFCOS*/
FPU_CYCLES(9,0,0), FPU_CYCLES(20,5,0), FPU_CYCLES(65,2,2), FPU_CYCLES(65,2,2)
// clang-format on
};
static uint32_t opcode_timings_winchip2_da[8] = {
// clang-format off
/* FIADDl FIMULl FICOMl FICOMPl*/
FPU_CYCLES(6,2,2), FPU_CYCLES(6,2,2), FPU_CYCLES(4,0,0), FPU_CYCLES(4,0,0),
/* FISUBl FISUBRl FIDIVl FIDIVRl*/
FPU_CYCLES(6,2,2), FPU_CYCLES(6,2,2), FPU_CYCLES(42,38,2), FPU_CYCLES(42,38,2)
// clang-format on
};
static uint32_t opcode_timings_winchip2_da_mod3[8] = {
// clang-format off
INVALID, INVALID, INVALID, INVALID,
/* FCOMPP*/
INVALID, FPU_CYCLES(1,0,0), INVALID, INVALID
// clang-format on
};
static uint32_t opcode_timings_winchip2_db[8] = {
// clang-format off
/* FLDil FSTil FSTPil*/
FPU_CYCLES(3,2,2), INVALID, FPU_CYCLES(6,0,0), FPU_CYCLES(6,0,0),
/* FLDe FSTPe*/
INVALID, FPU_CYCLES(3,0,0), INVALID, FPU_CYCLES(3,0,0)
// clang-format on
};
static uint32_t opcode_timings_winchip2_db_mod3[64] = {
// clang-format off
INVALID, INVALID, INVALID, INVALID,
INVALID, INVALID, INVALID, INVALID,
INVALID, INVALID, INVALID, INVALID,
INVALID, INVALID, INVALID, INVALID,
INVALID, INVALID, INVALID, INVALID,
INVALID, INVALID, INVALID, INVALID,
INVALID, INVALID, INVALID, INVALID,
INVALID, INVALID, INVALID, INVALID,
/* opFNOP opFCLEX opFINIT*/
INVALID, FPU_CYCLES(1,0,0), FPU_CYCLES(7,0,0), FPU_CYCLES(17,0,0),
/* opFNOP opFNOP*/
FPU_CYCLES(1,0,0), FPU_CYCLES(1,0,0), INVALID, INVALID,
INVALID, INVALID, INVALID, INVALID,
INVALID, INVALID, INVALID, INVALID,
INVALID, INVALID, INVALID, INVALID,
INVALID, INVALID, INVALID, INVALID,
INVALID, INVALID, INVALID, INVALID,
INVALID, INVALID, INVALID, INVALID,
// clang-format on
};
static uint32_t opcode_timings_winchip2_dc[8] = {
// clang-format off
/* FADDd FMULd FCOMd FCOMPd*/
FPU_CYCLES(3,2,2), FPU_CYCLES(3,2,2), FPU_CYCLES(1,0,0), FPU_CYCLES(1,0,0),
/* FSUBd FSUBRd FDIVd FDIVRd*/
FPU_CYCLES(3,2,2), FPU_CYCLES(3,2,2), FPU_CYCLES(39,38,2), FPU_CYCLES(39,38,2)
// clang-format on
};
static uint32_t opcode_timings_winchip2_dc_mod3[8] = {
// clang-format off
/* opFADDr opFMULr*/
FPU_CYCLES(3,2,2), FPU_CYCLES(3,2,2),INVALID, INVALID,
/* opFSUBRr opFSUBr opFDIVRr opFDIVr*/
FPU_CYCLES(3,2,2), FPU_CYCLES(3,2,2),FPU_CYCLES(39,38,2), FPU_CYCLES(39,38,2)
// clang-format on
};
static uint32_t opcode_timings_winchip2_dd[8] = {
// clang-format off
/* FLDd FSTd FSTPd*/
FPU_CYCLES(1,0,0), INVALID, FPU_CYCLES(2,0,0), FPU_CYCLES(2,0,0),
/* FRSTOR FSAVE FSTSW*/
FPU_CYCLES(70,0,0), INVALID, FPU_CYCLES(127,0,0), FPU_CYCLES(6,0,0)
// clang-format on
};
static uint32_t opcode_timings_winchip2_dd_mod3[8] = {
// clang-format off
/* FFFREE FST FSTP*/
FPU_CYCLES(2,0,0), INVALID, FPU_CYCLES(1,0,0), FPU_CYCLES(1,0,0),
/* FUCOM FUCOMP*/
FPU_CYCLES(1,0,0), FPU_CYCLES(1,0,0),INVALID, INVALID
// clang-format on
};
static uint32_t opcode_timings_winchip2_de[8] = {
// clang-format off
/* FIADDw FIMULw FICOMw FICOMPw*/
FPU_CYCLES(6,2,2), FPU_CYCLES(6,2,2), FPU_CYCLES(4,0,0), FPU_CYCLES(4,0,0),
/* FISUBw FISUBRw FIDIVw FIDIVRw*/
FPU_CYCLES(6,2,2), FPU_CYCLES(6,2,2), FPU_CYCLES(42,38,2), FPU_CYCLES(42,38,2)
// clang-format on
};
static uint32_t opcode_timings_winchip2_de_mod3[8] = {
// clang-format off
/* FADDP FMULP FCOMPP*/
FPU_CYCLES(3,2,2), FPU_CYCLES(3,2,2), INVALID, FPU_CYCLES(1,0,0),
/* FSUBP FSUBRP FDIVP FDIVRP*/
FPU_CYCLES(3,2,2), FPU_CYCLES(3,2,2), FPU_CYCLES(39,38,2), FPU_CYCLES(39,38,2)
// clang-format on
};
static uint32_t opcode_timings_winchip2_df[8] = {
// clang-format off
/* FILDiw FISTiw FISTPiw*/
FPU_CYCLES(3,2,2), INVALID, FPU_CYCLES(6,0,0), FPU_CYCLES(6,0,0),
/* FILDiq FBSTP FISTPiq*/
INVALID, FPU_CYCLES(3,2,2), FPU_CYCLES(148,0,0), FPU_CYCLES(6,0,0)
// clang-format on
};
static uint32_t opcode_timings_winchip2_df_mod3[8] = {
// clang-format off
INVALID, INVALID, INVALID, INVALID,
/* FSTSW AX*/
FPU_CYCLES(6,0,0), INVALID, INVALID, INVALID
// clang-format on
};
/* ALU-immediate groups 80h/82h/83h and 81h: uniform 2 cycles per reg form. */
static uint32_t opcode_timings_winchip2_8x[8] = {
// clang-format off
CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(2)
// clang-format on
};
static uint32_t opcode_timings_winchip2_8x_mod3[8] = {
// clang-format off
CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(2)
// clang-format on
};
static uint32_t opcode_timings_winchip2_81[8] = {
// clang-format off
CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(2)
// clang-format on
};
static uint32_t opcode_timings_winchip2_81_mod3[8] = {
// clang-format off
CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(2), CYCLES(2)
// clang-format on
};
/* Per-block timing model state. */
static int timing_count;              /* currently unused counter, reset each instruction */
static uint8_t last_prefix;           /* last prefix byte seen (0x0f selects the escape tables) */
static uint32_t regmask_modified;     /* registers written by the previous instruction (AGI detection) */
static int decode_delay;              /* decode-stage debt carried from the previous instruction */
static int decode_delay_offset;       /* extra decode cycles from prefixes on the current instruction */
static int fpu_latency;               /* cycles until the FPU execution unit is free */
static int fpu_st_latency[8];         /* cycles until each stack register's result is available */
/* State of the instruction parked in the U pipe awaiting a pairing partner. */
static int u_pipe_full;               /* nonzero while an MMX/3DNow op waits to pair */
static uint32_t u_pipe_opcode;
static uint32_t *u_pipe_timings;
static uint32_t u_pipe_op_32;
static uint32_t u_pipe_regmask;       /* destination registers of the parked instruction */
static uint32_t u_pipe_fetchdat;
static int u_pipe_decode_delay_offset;
static uint64_t *u_pipe_deps;
/* Decide whether the instruction described by timing_b/regmask_b may issue
   in the V pipe alongside the U-pipe instruction described by timing_a.
   Returns 1 when pairing is allowed, 0 otherwise. */
int
can_pair(uint32_t timing_a, uint32_t timing_b, uint8_t regmask_b)
{
    /* Only MMX/3DNow instructions are candidates for the V pipe. */
    int pairable = (timing_b & CYCLES_IS_MMX) != 0;

    /* The core has a single MMX multiplier and a single shift/pack unit,
       so two instructions needing the same unit cannot issue together. */
    pairable = pairable && !((timing_a & CYCLES_IS_MMX_MUL) && (timing_b & CYCLES_IS_MMX_MUL));
    pairable = pairable && !((timing_a & CYCLES_IS_MMX_SHIFT) && (timing_b & CYCLES_IS_MMX_SHIFT));

    /* The second instruction must not read any register the first writes. */
    pairable = pairable && !(u_pipe_regmask & regmask_b);

    /* All prefix bytes must already have been decoded. */
    pairable = pairable && ((decode_delay + decode_delay_offset + u_pipe_decode_delay_offset) <= 0);

    return pairable;
}
/* Extract the cycle count from a timing-table entry. FPU entries carry an
   issue latency; CYCLES_HAS_MULTI entries pack separate 16-bit (low byte)
   and 32-bit (next byte) counts, selected by bit 8 of op_32. */
static inline int
COUNT(uint32_t c, int op_32)
{
    if (c & CYCLES_FPU)
        return FPU_I_LATENCY(c);
    if (!(c & CYCLES_HAS_MULTI))
        return GET_CYCLES(c);
    return (op_32 & 0x100) ? ((c >> 8) & 0xff) : (c & 0xff);
}
/* Return nonzero if the instruction would suffer an Address Generation
   Interlock: an address-forming register was written by the previous
   instruction (tracked in regmask_modified). */
static int
check_agi(uint64_t *deps, uint8_t opcode, uint32_t fetchdat, int op_32)
{
    uint32_t addr_regmask = get_addr_regmask(deps[opcode], fetchdat, op_32);
    /*Instructions that use ESP implicitly (eg PUSH, POP, CALL etc) do not
      cause AGIs with each other, but do with instructions that use it explicitly*/
    if ((addr_regmask & REGMASK_IMPL_ESP) && (regmask_modified & (1 << REG_ESP)) && !(regmask_modified & REGMASK_IMPL_ESP))
        addr_regmask |= (1 << REG_ESP);
    /* Implicit-ESP overlap alone never stalls, hence the final mask-off. */
    return (regmask_modified & addr_regmask) & ~REGMASK_IMPL_ESP;
}
/* Compute how many cycles the instruction must wait before issuing to the
   FPU: the unit-busy latency, raised by any pending result latency on the
   stack registers the instruction reads or writes (ST(0), ST(1), ST(reg)). */
static int
codegen_fpu_latencies(uint64_t deps, int reg)
{
    int worst = fpu_latency;

    if (deps & FPU_RW_ST0) {
        if (fpu_st_latency[0] && fpu_st_latency[0] > worst)
            worst = fpu_st_latency[0];
    }
    if (deps & FPU_RW_ST1) {
        if (fpu_st_latency[1] && fpu_st_latency[1] > worst)
            worst = fpu_st_latency[1];
    }
    if (deps & FPU_RW_STREG) {
        if (fpu_st_latency[reg] && fpu_st_latency[reg] > worst)
            worst = fpu_st_latency[reg];
    }

    return worst;
}
/* Subtract count from a latency counter, clamping at zero. Wrapped in
   do { } while (0) so the macro is a single statement and safe under an
   unbraced if/else (the old two-statement form was not); arguments are
   parenthesized against operator-precedence surprises. */
#define SUB_AND_CLAMP(latency, count) \
    do {                              \
        (latency) -= (count);         \
        if ((latency) < 0)            \
            (latency) = 0;            \
    } while (0)

/* Advance the FPU latency model by count cycles: the execution unit and
   every stack register's pending-result counter all tick down together. */
static void
codegen_fpu_latency_clock(int count)
{
    SUB_AND_CLAMP(fpu_latency, count);
    for (int c = 0; c < 8; c++)
        SUB_AND_CLAMP(fpu_st_latency[c], count);
}
/* Account one instruction's cycles into codegen_block_cycles and update the
   FPU latency model. NOTE: the decode_delay_offset parameter intentionally
   shadows the file-scope variable of the same name — callers pass either the
   current global or the value saved with the parked U-pipe instruction. */
static void
codegen_instruction(uint32_t *timings, uint64_t *deps, uint8_t opcode, uint32_t fetchdat, int decode_delay_offset, int op_32, int exec_delay)
{
    int instr_cycles;
    /* NOTE(review): latency is written below but never read afterwards —
       kept for symmetry with instr_cycles. */
    int latency = 0;
    /* FPU ops (except FXCH, which is free) first wait for unit/operand
       availability. */
    if ((timings[opcode] & CYCLES_FPU) && !(deps[opcode] & FPU_FXCH))
        instr_cycles = latency = codegen_fpu_latencies(deps[opcode], fetchdat & 7);
    else
        instr_cycles = 0;
    /* FPU latency counters tick while this instruction decodes and waits. */
    if ((decode_delay + decode_delay_offset) > 0)
        codegen_fpu_latency_clock(decode_delay + decode_delay_offset + instr_cycles);
    else
        codegen_fpu_latency_clock(instr_cycles);
    instr_cycles += COUNT(timings[opcode], op_32);
    instr_cycles += exec_delay;
    if ((decode_delay + decode_delay_offset) > 0)
        codegen_block_cycles += instr_cycles + decode_delay + decode_delay_offset;
    else
        codegen_block_cycles += instr_cycles;
    /* Execution shadows the next instruction's decode: start the next decode
       debt at 1 minus this instruction's execution time. */
    decode_delay = (-instr_cycles) + 1;
    /* Shift the per-register latency window to follow stack pops/pushes. */
    if (deps[opcode] & FPU_POP) {
        for (uint8_t c = 0; c < 7; c++)
            fpu_st_latency[c] = fpu_st_latency[c + 1];
        fpu_st_latency[7] = 0;
    }
    if (deps[opcode] & FPU_POP2) {
        for (uint8_t c = 0; c < 6; c++)
            fpu_st_latency[c] = fpu_st_latency[c + 2];
        fpu_st_latency[6] = fpu_st_latency[7] = 0;
    }
    if (timings[opcode] & CYCLES_FPU) {
#if 0
        if (fpu_latency)
            fatal("Bad latency FPU\n");*/
#endif
        fpu_latency = FPU_F_LATENCY(timings[opcode]);
    }
    if (deps[opcode] & FPU_PUSH) {
        for (uint8_t c = 0; c < 7; c++)
            fpu_st_latency[c + 1] = fpu_st_latency[c];
        fpu_st_latency[0] = 0;
    }
    /* Record when results written to ST(0)/ST(1)/ST(reg) become readable. */
    if (deps[opcode] & FPU_WRITE_ST0) {
#if 0
        if (fpu_st_latency[0])
            fatal("Bad latency ST0\n");*/
#endif
        fpu_st_latency[0] = FPU_RESULT_LATENCY(timings[opcode]);
    }
    if (deps[opcode] & FPU_WRITE_ST1) {
#if 0
        if (fpu_st_latency[1])
            fatal("Bad latency ST1\n");*/
#endif
        fpu_st_latency[1] = FPU_RESULT_LATENCY(timings[opcode]);
    }
    if (deps[opcode] & FPU_WRITE_STREG) {
        int reg = fetchdat & 7;
        if (deps[opcode] & FPU_POP)
            reg--;
        if (reg >= 0 && !(reg == 0 && (deps[opcode] & FPU_WRITE_ST0)) && !(reg == 1 && (deps[opcode] & FPU_WRITE_ST1))) {
#if 0
            if (fpu_st_latency[reg])
                fatal("Bad latency STREG %i %08x %i %016llx %02x\n",fpu_st_latency[reg], fetchdat, reg, timings[opcode], opcode);*/
#endif
            fpu_st_latency[reg] = FPU_RESULT_LATENCY(timings[opcode]);
        }
    }
}
/* Reset all inter-instruction timing state at the start of a translated block. */
static void
codegen_timing_winchip2_block_start(void)
{
    u_pipe_full         = 0;
    regmask_modified    = 0;
    decode_delay        = 0;
    decode_delay_offset = 0;
}
/* Begin timing a new instruction: no prefixes have been seen yet. */
static void
codegen_timing_winchip2_start(void)
{
    last_prefix  = 0;
    timing_count = 0;
}
/* Record a prefix byte. The 0Fh escape decodes for free; every other prefix
   costs one decode cycle, which may be hidden by the previous instruction's
   execution (see codegen_instruction()). */
static void
codegen_timing_winchip2_prefix(uint8_t prefix, uint32_t fetchdat)
{
    if (prefix != 0x0f)
        decode_delay_offset++;

    last_prefix = prefix;
}
/* Account one x86 instruction. Picks the timing and dependency tables from
   the last prefix byte (0Fh escape, x87 escapes D8h-DFh) or from the group
   opcodes, then either parks an MMX/3DNow instruction in the U pipe hoping
   to pair it with the next one, or accounts the instruction immediately. */
static void
codegen_timing_winchip2_opcode(uint8_t opcode, uint32_t fetchdat, int op_32, UNUSED(uint32_t op_pc))
{
    uint32_t *timings;
    uint64_t *deps;
    int mod3 = ((fetchdat & 0xc0) == 0xc0); /* register (mod=3) form? */
    int bit8 = !(opcode & 1);
    int agi_stall = 0;
    /* Table selection; for group/x87 opcodes, opcode is re-pointed at the
       sub-opcode index within the selected 8- or 64-entry table. */
    switch (last_prefix) {
        case 0x0f:
            timings = mod3 ? opcode_timings_winchip2_0f_mod3 : opcode_timings_winchip2_0f;
            deps = mod3 ? opcode_deps_0f_mod3 : opcode_deps_0f;
            break;
        case 0xd8:
            timings = mod3 ? opcode_timings_winchip2_d8_mod3 : opcode_timings_winchip2_d8;
            deps = mod3 ? opcode_deps_d8_mod3 : opcode_deps_d8;
            opcode = (opcode >> 3) & 7;
            break;
        case 0xd9:
            timings = mod3 ? opcode_timings_winchip2_d9_mod3 : opcode_timings_winchip2_d9;
            deps = mod3 ? opcode_deps_d9_mod3 : opcode_deps_d9;
            opcode = mod3 ? opcode & 0x3f : (opcode >> 3) & 7;
            break;
        case 0xda:
            timings = mod3 ? opcode_timings_winchip2_da_mod3 : opcode_timings_winchip2_da;
            deps = mod3 ? opcode_deps_da_mod3 : opcode_deps_da;
            opcode = (opcode >> 3) & 7;
            break;
        case 0xdb:
            timings = mod3 ? opcode_timings_winchip2_db_mod3 : opcode_timings_winchip2_db;
            deps = mod3 ? opcode_deps_db_mod3 : opcode_deps_db;
            opcode = mod3 ? opcode & 0x3f : (opcode >> 3) & 7;
            break;
        case 0xdc:
            timings = mod3 ? opcode_timings_winchip2_dc_mod3 : opcode_timings_winchip2_dc;
            deps = mod3 ? opcode_deps_dc_mod3 : opcode_deps_dc;
            opcode = (opcode >> 3) & 7;
            break;
        case 0xdd:
            timings = mod3 ? opcode_timings_winchip2_dd_mod3 : opcode_timings_winchip2_dd;
            deps = mod3 ? opcode_deps_dd_mod3 : opcode_deps_dd;
            opcode = (opcode >> 3) & 7;
            break;
        case 0xde:
            timings = mod3 ? opcode_timings_winchip2_de_mod3 : opcode_timings_winchip2_de;
            deps = mod3 ? opcode_deps_de_mod3 : opcode_deps_de;
            opcode = (opcode >> 3) & 7;
            break;
        case 0xdf:
            timings = mod3 ? opcode_timings_winchip2_df_mod3 : opcode_timings_winchip2_df;
            deps = mod3 ? opcode_deps_df_mod3 : opcode_deps_df;
            opcode = (opcode >> 3) & 7;
            break;
        default:
            switch (opcode) {
                case 0x80:
                case 0x82:
                case 0x83:
                    timings = mod3 ? opcode_timings_winchip2_8x_mod3 : opcode_timings_winchip2_8x;
                    deps = mod3 ? opcode_deps_8x_mod3 : opcode_deps_8x;
                    opcode = (fetchdat >> 3) & 7;
                    break;
                case 0x81:
                    timings = mod3 ? opcode_timings_winchip2_81_mod3 : opcode_timings_winchip2_81;
                    deps = mod3 ? opcode_deps_81_mod3 : opcode_deps_81;
                    opcode = (fetchdat >> 3) & 7;
                    break;
                case 0xc0:
                case 0xc1:
                case 0xd0:
                case 0xd1:
                case 0xd2:
                case 0xd3:
                    timings = mod3 ? opcode_timings_winchip2_shift_mod3 : opcode_timings_winchip2_shift;
                    deps = mod3 ? opcode_deps_shift_mod3 : opcode_deps_shift;
                    opcode = (fetchdat >> 3) & 7;
                    break;
                case 0xf6:
                    timings = mod3 ? opcode_timings_winchip2_f6_mod3 : opcode_timings_winchip2_f6;
                    deps = mod3 ? opcode_deps_f6_mod3 : opcode_deps_f6;
                    opcode = (fetchdat >> 3) & 7;
                    break;
                case 0xf7:
                    timings = mod3 ? opcode_timings_winchip2_f7_mod3 : opcode_timings_winchip2_f7;
                    deps = mod3 ? opcode_deps_f7_mod3 : opcode_deps_f7;
                    opcode = (fetchdat >> 3) & 7;
                    break;
                case 0xff:
                    timings = mod3 ? opcode_timings_winchip2_ff_mod3 : opcode_timings_winchip2_ff;
                    deps = mod3 ? opcode_deps_ff_mod3 : opcode_deps_ff;
                    opcode = (fetchdat >> 3) & 7;
                    break;
                default:
                    timings = mod3 ? opcode_timings_winchip2_mod3 : opcode_timings_winchip2;
                    deps = mod3 ? opcode_deps_mod3 : opcode_deps;
                    break;
            }
    }
    if (u_pipe_full) {
        uint8_t regmask = get_srcdep_mask(deps[opcode], fetchdat, bit8, u_pipe_op_32);
        if (can_pair(u_pipe_timings[u_pipe_opcode], timings[opcode], regmask)) {
            /* Both instructions issue together: charge the slower one's
               cycle count once, with no FPU dependencies. */
            int cycles_a = u_pipe_timings[u_pipe_opcode] & 0xff;
            int cycles_b = timings[opcode] & 0xff;
            uint32_t timing = (cycles_a > cycles_b) ? u_pipe_timings[u_pipe_opcode] : timings[opcode];
            uint64_t temp_deps = 0;
            if (check_agi(deps, opcode, fetchdat, op_32) || check_agi(u_pipe_deps, u_pipe_opcode, u_pipe_fetchdat, u_pipe_op_32))
                agi_stall = 1;
            codegen_instruction(&timing, &temp_deps, 0, 0, 0, 0, agi_stall);
            u_pipe_full = 0;
            decode_delay_offset = 0;
            regmask_modified = get_dstdep_mask(deps[opcode], fetchdat, bit8) | u_pipe_regmask;
            return;
        } else {
            /*No pairing, run first instruction now*/
            if (check_agi(u_pipe_deps, u_pipe_opcode, u_pipe_fetchdat, u_pipe_op_32))
                agi_stall = 1;
            codegen_instruction(u_pipe_timings, u_pipe_deps, u_pipe_opcode, u_pipe_fetchdat, u_pipe_decode_delay_offset, u_pipe_op_32, agi_stall);
            u_pipe_full = 0;
            regmask_modified = u_pipe_regmask;
        }
    }
    if (timings[opcode] & CYCLES_IS_MMX) {
        /*Might pair with next instruction*/
        u_pipe_full = 1;
        u_pipe_opcode = opcode;
        u_pipe_timings = timings;
        u_pipe_op_32 = op_32;
        u_pipe_regmask = get_dstdep_mask(deps[opcode], fetchdat, bit8);
        u_pipe_fetchdat = fetchdat;
        u_pipe_decode_delay_offset = decode_delay_offset;
        u_pipe_deps = deps;
        decode_delay_offset = 0;
        return;
    }
    /* Ordinary instruction: account it immediately. */
    if (check_agi(deps, opcode, fetchdat, op_32))
        agi_stall = 1;
    codegen_instruction(timings, deps, opcode, fetchdat, decode_delay_offset, op_32, agi_stall);
    decode_delay_offset = 0;
    regmask_modified = get_dstdep_mask(deps[opcode], fetchdat, bit8);
}
/* End of translated block: flush any MMX/3DNow instruction still parked in
   the U pipe waiting for a pairing partner. */
static void
codegen_timing_winchip2_block_end(void)
{
    int agi_stall;

    if (!u_pipe_full)
        return;

    agi_stall = check_agi(u_pipe_deps, u_pipe_opcode, u_pipe_fetchdat, u_pipe_op_32) ? 1 : 0;
    codegen_instruction(u_pipe_timings, u_pipe_deps, u_pipe_opcode, u_pipe_fetchdat, u_pipe_decode_delay_offset, u_pipe_op_32, agi_stall);
    u_pipe_full = 0;
}
/* Timing-model vtable for the WinChip 2. */
codegen_timing_t codegen_timing_winchip2 = {
    codegen_timing_winchip2_start,       /* per-instruction reset */
    codegen_timing_winchip2_prefix,      /* prefix byte seen */
    codegen_timing_winchip2_opcode,      /* opcode accounting */
    codegen_timing_winchip2_block_start, /* translated-block reset */
    codegen_timing_winchip2_block_end,   /* flush pending U-pipe op */
    NULL                                 /* NOTE(review): last member unused by this model — confirm against codegen_timing_t */
};
``` | /content/code_sandbox/src/cpu/codegen_timing_winchip2.c | c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 15,543 |
```objective-c
/* Template generating the memory-operand x87 arithmetic handlers
   (FADD/FCOM/FCOMP/FDIV/FDIVR/FMUL/FSUB/FSUBR) for one operand kind:
     name          - suffix naming the operand kind (s, d, iw, il)
     optype        - C type holding the fetched operand
     a_size        - address size (16 or 32), selects fetch_ea_16/32
     load_var      - lvalue the raw fetch is stored into
     get           - geteaw/geteal/geteaq accessor for the operand size
     use_var       - expression converting the raw value for arithmetic
     cycle_postfix - suffix selecting the x87_timings/x87_concurrency field
   FIX: the FDIV handler previously charged x87_concurrency.fadd while
   charging x87_timings.fdiv — a copy-paste slip; it now uses the fdiv
   concurrency value, matching FDIVR. */
#define opFPU(name, optype, a_size, load_var, get, use_var, cycle_postfix)                                                                                     \
    static int opFADD##name##_a##a_size(uint32_t fetchdat)                                                                                                     \
    {                                                                                                                                                          \
        optype t;                                                                                                                                              \
        FP_ENTER();                                                                                                                                            \
        fetch_ea_##a_size(fetchdat);                                                                                                                           \
        SEG_CHECK_READ(cpu_state.ea_seg);                                                                                                                      \
        load_var = get();                                                                                                                                      \
        if (cpu_state.abrt)                                                                                                                                    \
            return 1;                                                                                                                                          \
        if ((cpu_state.npxc >> 10) & 3)                                                                                                                        \
            fesetround(rounding_modes[(cpu_state.npxc >> 10) & 3]);                                                                                            \
        ST(0) += use_var;                                                                                                                                      \
        if ((cpu_state.npxc >> 10) & 3)                                                                                                                        \
            fesetround(FE_TONEAREST);                                                                                                                          \
        FP_TAG_VALID;                                                                                                                                          \
        CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fadd##cycle_postfix) : ((x87_timings.fadd##cycle_postfix) * cpu_multi));                       \
        CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fadd##cycle_postfix) : ((x87_concurrency.fadd##cycle_postfix) * cpu_multi));             \
        return 0;                                                                                                                                              \
    }                                                                                                                                                          \
    static int opFCOM##name##_a##a_size(uint32_t fetchdat)                                                                                                     \
    {                                                                                                                                                          \
        optype t;                                                                                                                                              \
        FP_ENTER();                                                                                                                                            \
        fetch_ea_##a_size(fetchdat);                                                                                                                           \
        SEG_CHECK_READ(cpu_state.ea_seg);                                                                                                                      \
        load_var = get();                                                                                                                                      \
        if (cpu_state.abrt)                                                                                                                                    \
            return 1;                                                                                                                                          \
        cpu_state.npxs &= ~(FPU_SW_C0 | FPU_SW_C2 | FPU_SW_C3);                                                                                                \
        cpu_state.npxs |= x87_compare(ST(0), (double) use_var);                                                                                                \
        CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fcom##cycle_postfix) : ((x87_timings.fcom##cycle_postfix) * cpu_multi));                       \
        CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fcom##cycle_postfix) : ((x87_concurrency.fcom##cycle_postfix) * cpu_multi));             \
        return 0;                                                                                                                                              \
    }                                                                                                                                                          \
    static int opFCOMP##name##_a##a_size(uint32_t fetchdat)                                                                                                    \
    {                                                                                                                                                          \
        optype t;                                                                                                                                              \
        FP_ENTER();                                                                                                                                            \
        fetch_ea_##a_size(fetchdat);                                                                                                                           \
        SEG_CHECK_READ(cpu_state.ea_seg);                                                                                                                      \
        load_var = get();                                                                                                                                      \
        if (cpu_state.abrt)                                                                                                                                    \
            return 1;                                                                                                                                          \
        cpu_state.npxs &= ~(FPU_SW_C0 | FPU_SW_C2 | FPU_SW_C3);                                                                                                \
        cpu_state.npxs |= x87_compare(ST(0), (double) use_var);                                                                                                \
        x87_pop();                                                                                                                                             \
        CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fcom##cycle_postfix) : ((x87_timings.fcom##cycle_postfix) * cpu_multi));                       \
        CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fcom##cycle_postfix) : ((x87_concurrency.fcom##cycle_postfix) * cpu_multi));             \
        return 0;                                                                                                                                              \
    }                                                                                                                                                          \
    static int opFDIV##name##_a##a_size(uint32_t fetchdat)                                                                                                     \
    {                                                                                                                                                          \
        optype t;                                                                                                                                              \
        FP_ENTER();                                                                                                                                            \
        fetch_ea_##a_size(fetchdat);                                                                                                                           \
        SEG_CHECK_READ(cpu_state.ea_seg);                                                                                                                      \
        load_var = get();                                                                                                                                      \
        if (cpu_state.abrt)                                                                                                                                    \
            return 1;                                                                                                                                          \
        x87_div(ST(0), ST(0), use_var);                                                                                                                        \
        FP_TAG_VALID;                                                                                                                                          \
        CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fdiv##cycle_postfix) : ((x87_timings.fdiv##cycle_postfix) * cpu_multi));                       \
        CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fdiv##cycle_postfix) : ((x87_concurrency.fdiv##cycle_postfix) * cpu_multi));             \
        return 0;                                                                                                                                              \
    }                                                                                                                                                          \
    static int opFDIVR##name##_a##a_size(uint32_t fetchdat)                                                                                                    \
    {                                                                                                                                                          \
        optype t;                                                                                                                                              \
        FP_ENTER();                                                                                                                                            \
        fetch_ea_##a_size(fetchdat);                                                                                                                           \
        SEG_CHECK_READ(cpu_state.ea_seg);                                                                                                                      \
        load_var = get();                                                                                                                                      \
        if (cpu_state.abrt)                                                                                                                                    \
            return 1;                                                                                                                                          \
        x87_div(ST(0), use_var, ST(0));                                                                                                                        \
        FP_TAG_VALID;                                                                                                                                          \
        CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fdiv##cycle_postfix) : ((x87_timings.fdiv##cycle_postfix) * cpu_multi));                       \
        CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fdiv##cycle_postfix) : ((x87_concurrency.fdiv##cycle_postfix) * cpu_multi));             \
        return 0;                                                                                                                                              \
    }                                                                                                                                                          \
    static int opFMUL##name##_a##a_size(uint32_t fetchdat)                                                                                                     \
    {                                                                                                                                                          \
        optype t;                                                                                                                                              \
        FP_ENTER();                                                                                                                                            \
        fetch_ea_##a_size(fetchdat);                                                                                                                           \
        SEG_CHECK_READ(cpu_state.ea_seg);                                                                                                                      \
        load_var = get();                                                                                                                                      \
        if (cpu_state.abrt)                                                                                                                                    \
            return 1;                                                                                                                                          \
        ST(0) *= use_var;                                                                                                                                      \
        FP_TAG_VALID;                                                                                                                                          \
        CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fmul##cycle_postfix) : ((x87_timings.fmul##cycle_postfix) * cpu_multi));                       \
        CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fmul##cycle_postfix) : ((x87_concurrency.fmul##cycle_postfix) * cpu_multi));             \
        return 0;                                                                                                                                              \
    }                                                                                                                                                          \
    static int opFSUB##name##_a##a_size(uint32_t fetchdat)                                                                                                     \
    {                                                                                                                                                          \
        optype t;                                                                                                                                              \
        FP_ENTER();                                                                                                                                            \
        fetch_ea_##a_size(fetchdat);                                                                                                                           \
        SEG_CHECK_READ(cpu_state.ea_seg);                                                                                                                      \
        load_var = get();                                                                                                                                      \
        if (cpu_state.abrt)                                                                                                                                    \
            return 1;                                                                                                                                          \
        ST(0) -= use_var;                                                                                                                                      \
        FP_TAG_VALID;                                                                                                                                          \
        CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fadd##cycle_postfix) : ((x87_timings.fadd##cycle_postfix) * cpu_multi));                       \
        CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fadd##cycle_postfix) : ((x87_concurrency.fadd##cycle_postfix) * cpu_multi));             \
        return 0;                                                                                                                                              \
    }                                                                                                                                                          \
    static int opFSUBR##name##_a##a_size(uint32_t fetchdat)                                                                                                    \
    {                                                                                                                                                          \
        optype t;                                                                                                                                              \
        FP_ENTER();                                                                                                                                            \
        fetch_ea_##a_size(fetchdat);                                                                                                                           \
        SEG_CHECK_READ(cpu_state.ea_seg);                                                                                                                      \
        load_var = get();                                                                                                                                      \
        if (cpu_state.abrt)                                                                                                                                    \
            return 1;                                                                                                                                          \
        ST(0) = use_var - ST(0);                                                                                                                               \
        FP_TAG_VALID;                                                                                                                                          \
        CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fadd##cycle_postfix) : ((x87_timings.fadd##cycle_postfix) * cpu_multi));                       \
        CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fadd##cycle_postfix) : ((x87_concurrency.fadd##cycle_postfix) * cpu_multi));             \
        return 0;                                                                                                                                              \
    }
/* Instantiate the handlers for each operand kind; 32-bit address-size
   variants are excluded from the 8087 build. */
// clang-format off
opFPU(s, x87_ts, 16, t.i, geteal, t.s, _32)      /* float (single) */
#ifndef FPU_8087
opFPU(s, x87_ts, 32, t.i, geteal, t.s, _32)
#endif
opFPU(d, x87_td, 16, t.i, geteaq, t.d, _64)      /* double */
#ifndef FPU_8087
opFPU(d, x87_td, 32, t.i, geteaq, t.d, _64)
#endif
opFPU(iw, uint16_t, 16, t, geteaw, (double) (int16_t) t, _i16)  /* int16 */
#ifndef FPU_8087
opFPU(iw, uint16_t, 32, t, geteaw, (double) (int16_t) t, _i16)
#endif
opFPU(il, uint32_t, 16, t, geteal, (double) (int32_t) t, _i32)  /* int32 */
#ifndef FPU_8087
opFPU(il, uint32_t, 32, t, geteal, (double) (int32_t) t, _i32)
#endif
// clang-format on
/* FADD ST(0),ST(i): add the selected stack register into ST(0). */
static int
opFADD(uint32_t fetchdat)
{
    const int i = fetchdat & 7;

    FP_ENTER();
    cpu_state.pc++;
    ST(0) += ST(i);
    FP_TAG_VALID;
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fadd) : (x87_timings.fadd * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fadd) : (x87_concurrency.fadd * cpu_multi));
    return 0;
}
/* FADD ST(i),ST(0): add ST(0) into the selected stack register. */
static int
opFADDr(uint32_t fetchdat)
{
    const int i = fetchdat & 7;

    FP_ENTER();
    cpu_state.pc++;
    ST(i) += ST(0);
    FP_TAG_VALID_F;
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fadd) : (x87_timings.fadd * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fadd) : (x87_concurrency.fadd * cpu_multi));
    return 0;
}
/* FADDP ST(i),ST(0): add ST(0) into ST(i), then pop the stack. */
static int
opFADDP(uint32_t fetchdat)
{
    const int i = fetchdat & 7;

    FP_ENTER();
    cpu_state.pc++;
    ST(i) += ST(0);
    FP_TAG_VALID_F;
    x87_pop();
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fadd) : (x87_timings.fadd * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fadd) : (x87_concurrency.fadd * cpu_multi));
    return 0;
}
/* FCOM ST(0),ST(i): compare and set C0/C2/C3. Uses x87_compare() like
   opFCOMP/opFCOMPP below; the previous open-coded ==/< comparison left all
   condition bits clear for NaN operands (reading as "ST(0) greater") instead
   of flagging the unordered result. */
static int
opFCOM(uint32_t fetchdat)
{
    FP_ENTER();
    cpu_state.pc++;
    cpu_state.npxs &= ~(FPU_SW_C0 | FPU_SW_C2 | FPU_SW_C3);
    cpu_state.npxs |= x87_compare(ST(0), ST(fetchdat & 7));
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fcom) : (x87_timings.fcom * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fcom) : (x87_concurrency.fcom * cpu_multi));
    return 0;
}
/* FCOMP ST(0),ST(i): compare (setting C0/C2/C3 via x87_compare), then pop. */
static int
opFCOMP(uint32_t fetchdat)
{
    FP_ENTER();
    cpu_state.pc++;
    /* Clear the condition bits first; x87_compare() returns the new ones. */
    cpu_state.npxs &= ~(FPU_SW_C0 | FPU_SW_C2 | FPU_SW_C3);
    cpu_state.npxs |= x87_compare(ST(0), ST(fetchdat & 7));
    x87_pop();
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fcom) : (x87_timings.fcom * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fcom) : (x87_concurrency.fcom * cpu_multi));
    return 0;
}
/* FCOMPP: compare ST(0) with ST(1), then pop twice. Keeps the special-case
   bit-pattern check (-0.0 vs +0.0) used as a nasty hack to fix 80387
   detection, but reads the bit patterns through a union instead of
   dereferencing (uint64_t *)&double, which violated strict aliasing. */
static int
opFCOMPP(uint32_t fetchdat)
{
    union { double d; uint64_t u; } st0, st1;

    FP_ENTER();
    cpu_state.pc++;
    cpu_state.npxs &= ~(FPU_SW_C0 | FPU_SW_C2 | FPU_SW_C3);
    st0.d = ST(0);
    st1.d = ST(1);
    if ((st0.u == ((uint64_t) 1 << 63) && st1.u == 0) && (fpu_type >= FPU_287XL))
        cpu_state.npxs |= FPU_SW_C0; /*Nasty hack to fix 80387 detection*/
    else
        cpu_state.npxs |= x87_compare(ST(0), ST(1));
    x87_pop();
    x87_pop();
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fcom) : (x87_timings.fcom * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fcom) : (x87_concurrency.fcom * cpu_multi));
    return 0;
}
#ifndef FPU_8087
/* FUCOMPP: unordered compare ST(0) with ST(1) (no #IA on QNaN), pop twice. */
static int
opFUCOMPP(uint32_t fetchdat)
{
    FP_ENTER();
    cpu_state.pc++;
    cpu_state.npxs &= ~(FPU_SW_C0 | FPU_SW_C2 | FPU_SW_C3);
    cpu_state.npxs |= x87_ucompare(ST(0), ST(1));
    x87_pop();
    x87_pop();
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fucom) : (x87_timings.fucom * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fucom) : (x87_concurrency.fucom * cpu_multi));
    return 0;
}
# ifndef OPS_286_386
/* FCOMI ST(0),ST(i): compare and set ZF/PF/CF directly in EFLAGS.
   Per the instruction definition, an unordered result (either operand NaN)
   sets ZF=PF=CF=1; the previous code left all three clear, which reads as
   "ST(0) greater". NaN is detected by self-inequality, so no <math.h> is
   needed. */
static int
opFCOMI(uint32_t fetchdat)
{
    FP_ENTER();
    cpu_state.pc++;
    flags_rebuild();
    cpu_state.flags &= ~(Z_FLAG | P_FLAG | C_FLAG);
    if ((ST(0) != ST(0)) || (ST(fetchdat & 7) != ST(fetchdat & 7)))
        cpu_state.flags |= (Z_FLAG | P_FLAG | C_FLAG); /* unordered */
    else if (ST(0) == ST(fetchdat & 7))
        cpu_state.flags |= Z_FLAG;
    else if (ST(0) < ST(fetchdat & 7))
        cpu_state.flags |= C_FLAG;
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fcom) : (x87_timings.fcom * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fcom) : (x87_concurrency.fcom * cpu_multi));
    return 0;
}
/* FCOMIP ST(0),ST(i): as FCOMI, then pop. Same unordered-result fix as
   opFCOMI: NaN operands now set ZF=PF=CF=1 instead of leaving them clear. */
static int
opFCOMIP(uint32_t fetchdat)
{
    FP_ENTER();
    cpu_state.pc++;
    flags_rebuild();
    cpu_state.flags &= ~(Z_FLAG | P_FLAG | C_FLAG);
    if ((ST(0) != ST(0)) || (ST(fetchdat & 7) != ST(fetchdat & 7)))
        cpu_state.flags |= (Z_FLAG | P_FLAG | C_FLAG); /* unordered */
    else if (ST(0) == ST(fetchdat & 7))
        cpu_state.flags |= Z_FLAG;
    else if (ST(0) < ST(fetchdat & 7))
        cpu_state.flags |= C_FLAG;
    x87_pop();
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fcom) : (x87_timings.fcom * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fcom) : (x87_concurrency.fcom * cpu_multi));
    return 0;
}
# endif
#endif
/* FDIV ST(0),ST(i): ST(0) <- ST(0) / ST(i). */
static int
opFDIV(uint32_t fetchdat)
{
    const int i = fetchdat & 7;

    FP_ENTER();
    cpu_state.pc++;
    x87_div(ST(0), ST(0), ST(i));
    FP_TAG_VALID;
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fdiv) : (x87_timings.fdiv * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fdiv) : (x87_concurrency.fdiv * cpu_multi));
    return 0;
}
static int
opFDIVr(uint32_t fetchdat)
{
FP_ENTER();
cpu_state.pc++;
x87_div(ST(fetchdat & 7), ST(fetchdat & 7), ST(0));
FP_TAG_VALID_F;
CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fdiv) : (x87_timings.fdiv * cpu_multi));
CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fdiv) : (x87_concurrency.fdiv * cpu_multi));
return 0;
}
static int
opFDIVP(uint32_t fetchdat)
{
FP_ENTER();
cpu_state.pc++;
x87_div(ST(fetchdat & 7), ST(fetchdat & 7), ST(0));
FP_TAG_VALID_F;
x87_pop();
CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fdiv) : (x87_timings.fdiv * cpu_multi));
CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fdiv) : (x87_concurrency.fdiv * cpu_multi));
return 0;
}
static int
opFDIVR(uint32_t fetchdat)
{
FP_ENTER();
cpu_state.pc++;
x87_div(ST(0), ST(fetchdat & 7), ST(0));
FP_TAG_VALID;
CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fdiv) : (x87_timings.fdiv * cpu_multi));
CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fdiv) : (x87_concurrency.fdiv * cpu_multi));
return 0;
}
static int
opFDIVRr(uint32_t fetchdat)
{
FP_ENTER();
cpu_state.pc++;
x87_div(ST(fetchdat & 7), ST(0), ST(fetchdat & 7));
FP_TAG_VALID_F;
CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fdiv) : (x87_timings.fdiv * cpu_multi));
CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fdiv) : (x87_concurrency.fdiv * cpu_multi));
return 0;
}
static int
opFDIVRP(uint32_t fetchdat)
{
FP_ENTER();
cpu_state.pc++;
x87_div(ST(fetchdat & 7), ST(0), ST(fetchdat & 7));
FP_TAG_VALID_F;
x87_pop();
CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fdiv) : (x87_timings.fdiv * cpu_multi));
CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fdiv) : (x87_concurrency.fdiv * cpu_multi));
return 0;
}
static int
opFMUL(uint32_t fetchdat)
{
FP_ENTER();
cpu_state.pc++;
ST(0) = ST(0) * ST(fetchdat & 7);
FP_TAG_VALID;
CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fmul) : (x87_timings.fmul * cpu_multi));
CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fmul) : (x87_concurrency.fmul * cpu_multi));
return 0;
}
static int
opFMULr(uint32_t fetchdat)
{
FP_ENTER();
cpu_state.pc++;
ST(fetchdat & 7) = ST(0) * ST(fetchdat & 7);
FP_TAG_VALID_F;
CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fmul) : (x87_timings.fmul * cpu_multi));
CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fmul) : (x87_concurrency.fmul * cpu_multi));
return 0;
}
static int
opFMULP(uint32_t fetchdat)
{
FP_ENTER();
cpu_state.pc++;
ST(fetchdat & 7) = ST(0) * ST(fetchdat & 7);
FP_TAG_VALID_F;
x87_pop();
CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fmul) : (x87_timings.fmul * cpu_multi));
CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fmul) : (x87_concurrency.fmul * cpu_multi));
return 0;
}
static int
opFSUB(uint32_t fetchdat)
{
FP_ENTER();
cpu_state.pc++;
ST(0) = ST(0) - ST(fetchdat & 7);
FP_TAG_VALID;
CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fadd) : (x87_timings.fadd * cpu_multi));
CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fadd) : (x87_concurrency.fadd * cpu_multi));
return 0;
}
static int
opFSUBr(uint32_t fetchdat)
{
FP_ENTER();
cpu_state.pc++;
ST(fetchdat & 7) = ST(fetchdat & 7) - ST(0);
FP_TAG_VALID_F;
CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fadd) : (x87_timings.fadd * cpu_multi));
CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fadd) : (x87_concurrency.fadd * cpu_multi));
return 0;
}
static int
opFSUBP(uint32_t fetchdat)
{
FP_ENTER();
cpu_state.pc++;
ST(fetchdat & 7) = ST(fetchdat & 7) - ST(0);
FP_TAG_VALID_F;
x87_pop();
CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fadd) : (x87_timings.fadd * cpu_multi));
CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fadd) : (x87_concurrency.fadd * cpu_multi));
return 0;
}
static int
opFSUBR(uint32_t fetchdat)
{
FP_ENTER();
cpu_state.pc++;
ST(0) = ST(fetchdat & 7) - ST(0);
FP_TAG_VALID;
CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fadd) : (x87_timings.fadd * cpu_multi));
CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fadd) : (x87_concurrency.fadd * cpu_multi));
return 0;
}
static int
opFSUBRr(uint32_t fetchdat)
{
FP_ENTER();
cpu_state.pc++;
ST(fetchdat & 7) = ST(0) - ST(fetchdat & 7);
FP_TAG_VALID_F;
CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fadd) : (x87_timings.fadd * cpu_multi));
CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fadd) : (x87_concurrency.fadd * cpu_multi));
return 0;
}
static int
opFSUBRP(uint32_t fetchdat)
{
FP_ENTER();
cpu_state.pc++;
ST(fetchdat & 7) = ST(0) - ST(fetchdat & 7);
FP_TAG_VALID_F;
x87_pop();
CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fadd) : (x87_timings.fadd * cpu_multi));
CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fadd) : (x87_concurrency.fadd * cpu_multi));
return 0;
}
#ifndef FPU_8087
static int
opFUCOM(uint32_t fetchdat)
{
FP_ENTER();
cpu_state.pc++;
cpu_state.npxs &= ~(FPU_SW_C0 | FPU_SW_C2 | FPU_SW_C3);
cpu_state.npxs |= x87_ucompare(ST(0), ST(fetchdat & 7));
CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fucom) : (x87_timings.fucom * cpu_multi));
CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fucom) : (x87_concurrency.fucom * cpu_multi));
return 0;
}
static int
opFUCOMP(uint32_t fetchdat)
{
FP_ENTER();
cpu_state.pc++;
cpu_state.npxs &= ~(FPU_SW_C0 | FPU_SW_C2 | FPU_SW_C3);
cpu_state.npxs |= x87_ucompare(ST(0), ST(fetchdat & 7));
x87_pop();
CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fucom) : (x87_timings.fucom * cpu_multi));
CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fucom) : (x87_concurrency.fucom * cpu_multi));
return 0;
}
#ifndef OPS_286_386
static int
opFUCOMI(uint32_t fetchdat)
{
FP_ENTER();
cpu_state.pc++;
flags_rebuild();
cpu_state.flags &= ~(Z_FLAG | P_FLAG | C_FLAG);
if (ST(0) == ST(fetchdat & 7))
cpu_state.flags |= Z_FLAG;
else if (ST(0) < ST(fetchdat & 7))
cpu_state.flags |= C_FLAG;
CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fucom) : (x87_timings.fucom * cpu_multi));
CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fucom) : (x87_concurrency.fucom * cpu_multi));
return 0;
}
static int
opFUCOMIP(uint32_t fetchdat)
{
FP_ENTER();
cpu_state.pc++;
flags_rebuild();
cpu_state.flags &= ~(Z_FLAG | P_FLAG | C_FLAG);
if (ST(0) == ST(fetchdat & 7))
cpu_state.flags |= Z_FLAG;
else if (ST(0) < ST(fetchdat & 7))
cpu_state.flags |= C_FLAG;
x87_pop();
CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fucom) : (x87_timings.fucom * cpu_multi));
CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fucom) : (x87_concurrency.fucom * cpu_multi));
return 0;
}
# endif
#endif
``` | /content/code_sandbox/src/cpu/x87_ops_arith.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 6,559 |
```c
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <wchar.h>
#include <math.h>
#ifndef INFINITY
# define INFINITY (__builtin_inff())
#endif
#define HAVE_STDARG_H
#include <86box/86box.h>
#include "cpu.h"
#include "x86.h"
#include "x86_ops.h"
#include "x86seg_common.h"
#include "x87_sf.h"
#include "x87.h"
#include <86box/io.h>
#include <86box/nmi.h>
#include <86box/mem.h>
#include <86box/pic.h>
#include <86box/timer.h>
#include <86box/pit.h>
#include <86box/fdd.h>
#include <86box/fdc.h>
#include <86box/machine.h>
#include <86box/plat_fallthrough.h>
#include <86box/gdbstub.h>
#ifndef OPS_286_386
# define OPS_286_386
#endif
#include "x86seg.h"
#include "386_common.h"
#ifdef USE_NEW_DYNAREC
# include "codegen.h"
#endif
#undef CPU_BLOCK_END
#define CPU_BLOCK_END()
extern int codegen_flags_changed;
#ifdef ENABLE_386_LOG
int x386_do_log = ENABLE_386_LOG;
void
x386_log(const char *fmt, ...)
{
va_list ap;
if (x386_do_log) {
va_start(ap, fmt);
pclog_ex(fmt, ap);
va_end(ap);
}
}
#else
# define x386_log(fmt, ...)
#endif
#undef CPU_BLOCK_END
#define CPU_BLOCK_END()
#define getbytef() \
((uint8_t) (fetchdat)); \
cpu_state.pc++
#define getwordf() \
((uint16_t) (fetchdat)); \
cpu_state.pc += 2
#define getbyte2f() \
((uint8_t) (fetchdat >> 8)); \
cpu_state.pc++
#define getword2f() \
((uint16_t) (fetchdat >> 8)); \
cpu_state.pc += 2
static __inline void
fetch_ea_32_long(uint32_t rmdat)
{
easeg = cpu_state.ea_seg->base;
if (cpu_rm == 4) {
uint8_t sib = rmdat >> 8;
switch (cpu_mod) {
case 0:
cpu_state.eaaddr = cpu_state.regs[sib & 7].l;
cpu_state.pc++;
break;
case 1:
cpu_state.pc++;
cpu_state.eaaddr = ((uint32_t) (int8_t) getbyte()) + cpu_state.regs[sib & 7].l;
break;
case 2:
cpu_state.eaaddr = (fastreadl(cs + cpu_state.pc + 1)) + cpu_state.regs[sib & 7].l;
cpu_state.pc += 5;
break;
}
/*SIB byte present*/
if ((sib & 7) == 5 && !cpu_mod)
cpu_state.eaaddr = getlong();
else if ((sib & 6) == 4 && !cpu_state.ssegs) {
easeg = ss;
cpu_state.ea_seg = &cpu_state.seg_ss;
}
if (((sib >> 3) & 7) != 4)
cpu_state.eaaddr += cpu_state.regs[(sib >> 3) & 7].l << (sib >> 6);
} else {
cpu_state.eaaddr = cpu_state.regs[cpu_rm].l;
if (cpu_mod) {
if (cpu_rm == 5 && !cpu_state.ssegs) {
easeg = ss;
cpu_state.ea_seg = &cpu_state.seg_ss;
}
if (cpu_mod == 1) {
cpu_state.eaaddr += ((uint32_t) (int8_t) (rmdat >> 8));
cpu_state.pc++;
} else {
cpu_state.eaaddr += getlong();
}
} else if (cpu_rm == 5) {
cpu_state.eaaddr = getlong();
}
}
}
static __inline void
fetch_ea_16_long(uint32_t rmdat)
{
easeg = cpu_state.ea_seg->base;
if (!cpu_mod && cpu_rm == 6) {
cpu_state.eaaddr = getword();
} else {
switch (cpu_mod) {
case 0:
cpu_state.eaaddr = 0;
break;
case 1:
cpu_state.eaaddr = (uint16_t) (int8_t) (rmdat >> 8);
cpu_state.pc++;
break;
case 2:
cpu_state.eaaddr = getword();
break;
}
cpu_state.eaaddr += (*mod1add[0][cpu_rm]) + (*mod1add[1][cpu_rm]);
if (mod1seg[cpu_rm] == &ss && !cpu_state.ssegs) {
easeg = ss;
cpu_state.ea_seg = &cpu_state.seg_ss;
}
cpu_state.eaaddr &= 0xFFFF;
}
}
#define fetch_ea_16(rmdat) \
cpu_state.pc++; \
cpu_mod = (rmdat >> 6) & 3; \
cpu_reg = (rmdat >> 3) & 7; \
cpu_rm = rmdat & 7; \
if (cpu_mod != 3) { \
fetch_ea_16_long(rmdat); \
if (cpu_state.abrt) \
return 1; \
}
#define fetch_ea_32(rmdat) \
cpu_state.pc++; \
cpu_mod = (rmdat >> 6) & 3; \
cpu_reg = (rmdat >> 3) & 7; \
cpu_rm = rmdat & 7; \
if (cpu_mod != 3) { \
fetch_ea_32_long(rmdat); \
} \
if (cpu_state.abrt) \
return 1
#include "x86_flags.h"
#define PREFETCH_RUN(instr_cycles, bytes, modrm, reads, reads_l, writes, writes_l, ea32) \
do { \
if (cpu_prefetch_cycles) \
prefetch_run(instr_cycles, bytes, modrm, reads, reads_l, writes, writes_l, ea32); \
} while (0)
#define PREFETCH_PREFIX() \
do { \
if (cpu_prefetch_cycles) \
prefetch_prefixes++; \
} while (0)
#define PREFETCH_FLUSH() prefetch_flush()
#ifndef FPU_CYCLES
# define FPU_CYCLES
#endif
#define OP_TABLE(name) ops_2386_##name
#define CLOCK_CYCLES(c) \
{ \
if (fpu_cycles > 0) { \
fpu_cycles -= (c); \
if (fpu_cycles < 0) { \
cycles += fpu_cycles; \
} \
} else { \
cycles -= (c); \
} \
}
#define CLOCK_CYCLES_FPU(c) cycles -= (c)
#define CONCURRENCY_CYCLES(c) fpu_cycles = (c)
#define CLOCK_CYCLES_ALWAYS(c) cycles -= (c)
#define CHECK_READ_CS(size) \
if (msw & 1 && !(cpu_state.eflags & VM_FLAG) && !(cpu_state.seg_cs.access & 0x80)) \
x86np("Read from seg not present", cpu_state.seg_cs.seg & 0xfffc); \
else if ((cpu_state.pc < cpu_state.seg_cs.limit_low) || \
((cpu_state.pc + size - 1) > cpu_state.seg_cs.limit_high)) \
x86gpf("Limit check (READ CS)", 0);
#include "386_ops.h"
void
exec386_2386(int32_t cycs)
{
int ol;
int vector;
int tempi;
int32_t cycdiff;
int32_t oldcyc;
int32_t cycle_period;
int32_t ins_cycles;
uint32_t addr;
cycles += cycs;
while (cycles > 0) {
cycle_period = (timer_target - (uint32_t) tsc) + 1;
x86_was_reset = 0;
cycdiff = 0;
oldcyc = cycles;
while (cycdiff < cycle_period) {
int ins_fetch_fault = 0;
ins_cycles = cycles;
#ifndef USE_NEW_DYNAREC
oldcs = CS;
oldcpl = CPL;
#endif
cpu_state.oldpc = cpu_state.pc;
cpu_state.op32 = use32;
#ifndef USE_NEW_DYNAREC
x86_was_reset = 0;
#endif
cpu_state.ea_seg = &cpu_state.seg_ds;
cpu_state.ssegs = 0;
fetchdat = fastreadl_fetch(cs + cpu_state.pc);
ol = opcode_length[fetchdat & 0xff];
if ((ol == 3) && opcode_has_modrm[fetchdat & 0xff] && (((fetchdat >> 14) & 0x03) == 0x03))
ol = 2;
if (cpu_16bitbus) {
CHECK_READ_CS(MIN(ol, 2));
} else {
CHECK_READ_CS(MIN(ol, 4));
}
ins_fetch_fault = cpu_386_check_instruction_fault();
/* Breakpoint fault has priority over other faults. */
if (ins_fetch_fault) {
ins_fetch_fault = 0;
cpu_state.abrt = 1;
}
if (!cpu_state.abrt) {
#ifdef ENABLE_386_LOG
if (in_smm)
x386_log("[%04X:%08X] %08X\n", CS, cpu_state.pc, fetchdat);
#endif
opcode = fetchdat & 0xFF;
fetchdat >>= 8;
trap |= !!(cpu_state.flags & T_FLAG);
cpu_state.pc++;
cpu_state.eflags &= ~(RF_FLAG);
if (opcode == 0xf0)
in_lock = 1;
x86_2386_opcodes[(opcode | cpu_state.op32) & 0x3ff](fetchdat);
in_lock = 0;
if (x86_was_reset)
break;
}
#ifdef ENABLE_386_LOG
else if (in_smm)
x386_log("[%04X:%08X] ABRT\n", CS, cpu_state.pc);
#endif
#ifndef USE_NEW_DYNAREC
if (!use32)
cpu_state.pc &= 0xffff;
#endif
if (cpu_end_block_after_ins)
cpu_end_block_after_ins--;
if (cpu_state.abrt) {
flags_rebuild();
tempi = cpu_state.abrt & ABRT_MASK;
cpu_state.abrt = 0;
x86_doabrt_2386(tempi);
if (cpu_state.abrt) {
cpu_state.abrt = 0;
#ifndef USE_NEW_DYNAREC
CS = oldcs;
#endif
cpu_state.pc = cpu_state.oldpc;
x386_log("Double fault\n");
pmodeint_2386(8, 0);
if (cpu_state.abrt) {
cpu_state.abrt = 0;
softresetx86();
cpu_set_edx();
#ifdef ENABLE_386_LOG
x386_log("Triple fault - reset\n");
#endif
}
}
} else if (trap) {
flags_rebuild();
if (trap & 2) dr[6] |= 0x8000;
if (trap & 1) dr[6] |= 0x4000;
trap = 0;
#ifndef USE_NEW_DYNAREC
oldcs = CS;
#endif
cpu_state.oldpc = cpu_state.pc;
x86_int(1);
}
if (smi_line)
enter_smm_check(0);
else if (nmi && nmi_enable && nmi_mask) {
#ifndef USE_NEW_DYNAREC
oldcs = CS;
#endif
cpu_state.oldpc = cpu_state.pc;
x86_int(2);
nmi_enable = 0;
#ifdef OLD_NMI_BEHAVIOR
if (nmi_auto_clear) {
nmi_auto_clear = 0;
nmi = 0;
}
#else
nmi = 0;
#endif
} else if ((cpu_state.flags & I_FLAG) && pic.int_pending && !cpu_end_block_after_ins) {
vector = picinterrupt();
if (vector != -1) {
flags_rebuild();
if (msw & 1)
pmodeint_2386(vector, 0);
else {
writememw(ss, (SP - 2) & 0xFFFF, cpu_state.flags);
writememw(ss, (SP - 4) & 0xFFFF, CS);
writememw(ss, (SP - 6) & 0xFFFF, cpu_state.pc);
SP -= 6;
addr = (vector << 2) + idt.base;
cpu_state.flags &= ~I_FLAG;
cpu_state.flags &= ~T_FLAG;
cpu_state.pc = readmemw(0, addr);
loadcs_2386(readmemw(0, addr + 2));
}
}
}
ins_cycles -= cycles;
tsc += ins_cycles;
cycdiff = oldcyc - cycles;
if (timetolive) {
timetolive--;
if (!timetolive)
fatal("Life expired\n");
}
if (TIMER_VAL_LESS_THAN_VAL(timer_target, (uint32_t) tsc))
timer_process();
#ifdef USE_GDBSTUB
if (gdbstub_instruction())
return;
#endif
}
}
}
``` | /content/code_sandbox/src/cpu/386.c | c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 3,101 |
```objective-c
static int
opBT_w_r_a16(uint32_t fetchdat)
{
uint16_t temp;
fetch_ea_16(fetchdat);
SEG_CHECK_READ(cpu_state.ea_seg);
cpu_state.eaaddr += ((cpu_state.regs[cpu_reg].w / 16) * 2);
eal_r = 0;
temp = geteaw();
if (cpu_state.abrt)
return 1;
flags_rebuild();
if (temp & (1 << (cpu_state.regs[cpu_reg].w & 15)))
cpu_state.flags |= C_FLAG;
else
cpu_state.flags &= ~C_FLAG;
CLOCK_CYCLES(3);
PREFETCH_RUN(3, 2, rmdat, 1, 0, 0, 0, 0);
return 0;
}
static int
opBT_w_r_a32(uint32_t fetchdat)
{
uint16_t temp;
fetch_ea_32(fetchdat);
SEG_CHECK_READ(cpu_state.ea_seg);
cpu_state.eaaddr += ((cpu_state.regs[cpu_reg].w / 16) * 2);
eal_r = 0;
temp = geteaw();
if (cpu_state.abrt)
return 1;
flags_rebuild();
if (temp & (1 << (cpu_state.regs[cpu_reg].w & 15)))
cpu_state.flags |= C_FLAG;
else
cpu_state.flags &= ~C_FLAG;
CLOCK_CYCLES(3);
PREFETCH_RUN(3, 2, rmdat, 1, 0, 0, 0, 1);
return 0;
}
static int
opBT_l_r_a16(uint32_t fetchdat)
{
uint32_t temp;
fetch_ea_16(fetchdat);
SEG_CHECK_READ(cpu_state.ea_seg);
cpu_state.eaaddr += ((cpu_state.regs[cpu_reg].l / 32) * 4);
eal_r = 0;
temp = geteal();
if (cpu_state.abrt)
return 1;
flags_rebuild();
if (temp & (1 << (cpu_state.regs[cpu_reg].l & 31)))
cpu_state.flags |= C_FLAG;
else
cpu_state.flags &= ~C_FLAG;
CLOCK_CYCLES(3);
PREFETCH_RUN(3, 2, rmdat, 0, 1, 0, 0, 0);
return 0;
}
static int
opBT_l_r_a32(uint32_t fetchdat)
{
uint32_t temp;
fetch_ea_32(fetchdat);
SEG_CHECK_READ(cpu_state.ea_seg);
cpu_state.eaaddr += ((cpu_state.regs[cpu_reg].l / 32) * 4);
eal_r = 0;
temp = geteal();
if (cpu_state.abrt)
return 1;
flags_rebuild();
if (temp & (1 << (cpu_state.regs[cpu_reg].l & 31)))
cpu_state.flags |= C_FLAG;
else
cpu_state.flags &= ~C_FLAG;
CLOCK_CYCLES(3);
PREFETCH_RUN(3, 2, rmdat, 0, 1, 0, 0, 1);
return 0;
}
#define opBT(name, operation) \
static int opBT##name##_w_r_a16(uint32_t fetchdat) \
{ \
int tempc; \
uint16_t temp; \
\
fetch_ea_16(fetchdat); \
if (cpu_mod != 3) \
SEG_CHECK_WRITE(cpu_state.ea_seg); \
cpu_state.eaaddr += ((cpu_state.regs[cpu_reg].w / 16) * 2); \
eal_r = eal_w = 0; \
temp = geteaw(); \
if (cpu_state.abrt) \
return 1; \
tempc = (temp & (1 << (cpu_state.regs[cpu_reg].w & 15))) ? 1 : 0; \
temp operation(1 << (cpu_state.regs[cpu_reg].w & 15)); \
seteaw(temp); \
if (cpu_state.abrt) \
return 1; \
flags_rebuild(); \
if (tempc) \
cpu_state.flags |= C_FLAG; \
else \
cpu_state.flags &= ~C_FLAG; \
\
CLOCK_CYCLES(6); \
PREFETCH_RUN(6, 2, rmdat, 1, 0, 1, 0, 0); \
return 0; \
} \
static int opBT##name##_w_r_a32(uint32_t fetchdat) \
{ \
int tempc; \
uint16_t temp; \
\
fetch_ea_32(fetchdat); \
if (cpu_mod != 3) \
SEG_CHECK_WRITE(cpu_state.ea_seg); \
cpu_state.eaaddr += ((cpu_state.regs[cpu_reg].w / 16) * 2); \
eal_r = eal_w = 0; \
temp = geteaw(); \
if (cpu_state.abrt) \
return 1; \
tempc = (temp & (1 << (cpu_state.regs[cpu_reg].w & 15))) ? 1 : 0; \
temp operation(1 << (cpu_state.regs[cpu_reg].w & 15)); \
seteaw(temp); \
if (cpu_state.abrt) \
return 1; \
flags_rebuild(); \
if (tempc) \
cpu_state.flags |= C_FLAG; \
else \
cpu_state.flags &= ~C_FLAG; \
\
CLOCK_CYCLES(6); \
PREFETCH_RUN(6, 2, rmdat, 1, 0, 1, 0, 1); \
return 0; \
} \
static int opBT##name##_l_r_a16(uint32_t fetchdat) \
{ \
int tempc; \
uint32_t temp; \
\
fetch_ea_16(fetchdat); \
if (cpu_mod != 3) \
SEG_CHECK_WRITE(cpu_state.ea_seg); \
cpu_state.eaaddr += ((cpu_state.regs[cpu_reg].l / 32) * 4); \
eal_r = eal_w = 0; \
temp = geteal(); \
if (cpu_state.abrt) \
return 1; \
tempc = (temp & (1 << (cpu_state.regs[cpu_reg].l & 31))) ? 1 : 0; \
temp operation(1 << (cpu_state.regs[cpu_reg].l & 31)); \
seteal(temp); \
if (cpu_state.abrt) \
return 1; \
flags_rebuild(); \
if (tempc) \
cpu_state.flags |= C_FLAG; \
else \
cpu_state.flags &= ~C_FLAG; \
\
CLOCK_CYCLES(6); \
PREFETCH_RUN(6, 2, rmdat, 0, 1, 0, 1, 0); \
return 0; \
} \
static int opBT##name##_l_r_a32(uint32_t fetchdat) \
{ \
int tempc; \
uint32_t temp; \
\
fetch_ea_32(fetchdat); \
if (cpu_mod != 3) \
SEG_CHECK_WRITE(cpu_state.ea_seg); \
cpu_state.eaaddr += ((cpu_state.regs[cpu_reg].l / 32) * 4); \
eal_r = eal_w = 0; \
temp = geteal(); \
if (cpu_state.abrt) \
return 1; \
tempc = (temp & (1 << (cpu_state.regs[cpu_reg].l & 31))) ? 1 : 0; \
temp operation(1 << (cpu_state.regs[cpu_reg].l & 31)); \
seteal(temp); \
if (cpu_state.abrt) \
return 1; \
flags_rebuild(); \
if (tempc) \
cpu_state.flags |= C_FLAG; \
else \
cpu_state.flags &= ~C_FLAG; \
\
CLOCK_CYCLES(6); \
PREFETCH_RUN(6, 2, rmdat, 0, 1, 0, 1, 1); \
return 0; \
}
// clang-format off
opBT(C, ^=)
opBT(R, &= ~)
opBT(S, |=)
// clang-format on
static int
opBA_w_a16(uint32_t fetchdat)
{
int tempc;
int count;
uint16_t temp;
fetch_ea_16(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_WRITE(cpu_state.ea_seg);
temp = geteaw();
count = getbyte();
if (cpu_state.abrt)
return 1;
tempc = temp & (1 << count);
flags_rebuild();
switch (rmdat & 0x38) {
case 0x20: /*BT w,imm*/
if (tempc)
cpu_state.flags |= C_FLAG;
else
cpu_state.flags &= ~C_FLAG;
CLOCK_CYCLES(3);
PREFETCH_RUN(3, 3, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 0);
return 0;
case 0x28: /*BTS w,imm*/
temp |= (1 << count);
break;
case 0x30: /*BTR w,imm*/
temp &= ~(1 << count);
break;
case 0x38: /*BTC w,imm*/
temp ^= (1 << count);
break;
default:
cpu_state.pc = cpu_state.oldpc;
x86illegal();
break;
}
seteaw(temp);
if (cpu_state.abrt)
return 1;
if (tempc)
cpu_state.flags |= C_FLAG;
else
cpu_state.flags &= ~C_FLAG;
CLOCK_CYCLES(6);
PREFETCH_RUN(6, 3, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, 0);
return 0;
}
static int
opBA_w_a32(uint32_t fetchdat)
{
int tempc;
int count;
uint16_t temp;
fetch_ea_32(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_WRITE(cpu_state.ea_seg);
temp = geteaw();
count = getbyte();
if (cpu_state.abrt)
return 1;
tempc = temp & (1 << count);
flags_rebuild();
switch (rmdat & 0x38) {
case 0x20: /*BT w,imm*/
if (tempc)
cpu_state.flags |= C_FLAG;
else
cpu_state.flags &= ~C_FLAG;
CLOCK_CYCLES(3);
PREFETCH_RUN(3, 3, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 1);
return 0;
case 0x28: /*BTS w,imm*/
temp |= (1 << count);
break;
case 0x30: /*BTR w,imm*/
temp &= ~(1 << count);
break;
case 0x38: /*BTC w,imm*/
temp ^= (1 << count);
break;
default:
cpu_state.pc = cpu_state.oldpc;
x86illegal();
break;
}
seteaw(temp);
if (cpu_state.abrt)
return 1;
if (tempc)
cpu_state.flags |= C_FLAG;
else
cpu_state.flags &= ~C_FLAG;
CLOCK_CYCLES(6);
PREFETCH_RUN(6, 3, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, 0);
return 0;
}
static int
opBA_l_a16(uint32_t fetchdat)
{
int tempc;
int count;
uint32_t temp;
fetch_ea_16(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_WRITE(cpu_state.ea_seg);
temp = geteal();
count = getbyte();
if (cpu_state.abrt)
return 1;
tempc = temp & (1 << count);
flags_rebuild();
switch (rmdat & 0x38) {
case 0x20: /*BT w,imm*/
if (tempc)
cpu_state.flags |= C_FLAG;
else
cpu_state.flags &= ~C_FLAG;
CLOCK_CYCLES(3);
PREFETCH_RUN(3, 3, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, 0, 0);
return 0;
case 0x28: /*BTS w,imm*/
temp |= (1 << count);
break;
case 0x30: /*BTR w,imm*/
temp &= ~(1 << count);
break;
case 0x38: /*BTC w,imm*/
temp ^= (1 << count);
break;
default:
cpu_state.pc = cpu_state.oldpc;
x86illegal();
break;
}
seteal(temp);
if (cpu_state.abrt)
return 1;
if (tempc)
cpu_state.flags |= C_FLAG;
else
cpu_state.flags &= ~C_FLAG;
CLOCK_CYCLES(6);
PREFETCH_RUN(6, 3, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0);
return 0;
}
static int
opBA_l_a32(uint32_t fetchdat)
{
int tempc;
int count;
uint32_t temp;
fetch_ea_32(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_WRITE(cpu_state.ea_seg);
temp = geteal();
count = getbyte();
if (cpu_state.abrt)
return 1;
tempc = temp & (1 << count);
flags_rebuild();
switch (rmdat & 0x38) {
case 0x20: /*BT w,imm*/
if (tempc)
cpu_state.flags |= C_FLAG;
else
cpu_state.flags &= ~C_FLAG;
CLOCK_CYCLES(3);
PREFETCH_RUN(3, 3, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, 0, 1);
return 0;
case 0x28: /*BTS w,imm*/
temp |= (1 << count);
break;
case 0x30: /*BTR w,imm*/
temp &= ~(1 << count);
break;
case 0x38: /*BTC w,imm*/
temp ^= (1 << count);
break;
default:
cpu_state.pc = cpu_state.oldpc;
x86illegal();
break;
}
seteal(temp);
if (cpu_state.abrt)
return 1;
if (tempc)
cpu_state.flags |= C_FLAG;
else
cpu_state.flags &= ~C_FLAG;
CLOCK_CYCLES(6);
PREFETCH_RUN(6, 3, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 1);
return 0;
}
``` | /content/code_sandbox/src/cpu/x86_ops_bit.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 3,686 |
```objective-c
static int
opPUNPCKLDQ_a16(uint32_t fetchdat)
{
uint32_t usrc;
MMX_REG src;
MMX_REG *dst;
MMX_ENTER();
fetch_ea_16(fetchdat);
src = MMX_GETREG(cpu_rm);
dst = MMX_GETREGP(cpu_reg);
if (cpu_mod == 3) {
dst->l[1] = src.l[0];
CLOCK_CYCLES(1);
} else {
SEG_CHECK_READ(cpu_state.ea_seg);
usrc = readmeml(easeg, cpu_state.eaaddr);
if (cpu_state.abrt)
return 0;
dst->l[1] = usrc;
CLOCK_CYCLES(2);
}
MMX_SETEXP(cpu_reg);
return 0;
}
static int
opPUNPCKLDQ_a32(uint32_t fetchdat)
{
uint32_t usrc;
MMX_REG src;
MMX_REG *dst;
MMX_ENTER();
fetch_ea_32(fetchdat);
src = MMX_GETREG(cpu_rm);
dst = MMX_GETREGP(cpu_reg);
if (cpu_mod == 3) {
dst->l[1] = src.l[0];
CLOCK_CYCLES(1);
} else {
SEG_CHECK_READ(cpu_state.ea_seg);
usrc = readmeml(easeg, cpu_state.eaaddr);
if (cpu_state.abrt)
return 0;
dst->l[1] = usrc;
CLOCK_CYCLES(2);
}
MMX_SETEXP(cpu_reg);
return 0;
}
static int
opPUNPCKHDQ_a16(uint32_t fetchdat)
{
MMX_REG src;
MMX_REG *dst;
MMX_ENTER();
fetch_ea_16(fetchdat);
dst = MMX_GETREGP(cpu_reg);
MMX_GETSRC();
dst->l[0] = dst->l[1];
dst->l[1] = src.l[1];
MMX_SETEXP(cpu_reg);
return 0;
}
static int
opPUNPCKHDQ_a32(uint32_t fetchdat)
{
MMX_REG src;
MMX_REG *dst;
MMX_ENTER();
fetch_ea_32(fetchdat);
dst = MMX_GETREGP(cpu_reg);
MMX_GETSRC();
dst->l[0] = dst->l[1];
dst->l[1] = src.l[1];
MMX_SETEXP(cpu_reg);
return 0;
}
static int
opPUNPCKLBW_a16(uint32_t fetchdat)
{
MMX_REG src;
MMX_REG *dst;
MMX_ENTER();
fetch_ea_16(fetchdat);
dst = MMX_GETREGP(cpu_reg);
MMX_GETSRC();
dst->b[7] = src.b[3];
dst->b[6] = dst->b[3];
dst->b[5] = src.b[2];
dst->b[4] = dst->b[2];
dst->b[3] = src.b[1];
dst->b[2] = dst->b[1];
dst->b[1] = src.b[0];
dst->b[0] = dst->b[0];
MMX_SETEXP(cpu_reg);
return 0;
}
static int
opPUNPCKLBW_a32(uint32_t fetchdat)
{
MMX_REG src;
MMX_REG *dst;
MMX_ENTER();
fetch_ea_32(fetchdat);
dst = MMX_GETREGP(cpu_reg);
MMX_GETSRC();
dst->b[7] = src.b[3];
dst->b[6] = dst->b[3];
dst->b[5] = src.b[2];
dst->b[4] = dst->b[2];
dst->b[3] = src.b[1];
dst->b[2] = dst->b[1];
dst->b[1] = src.b[0];
dst->b[0] = dst->b[0];
MMX_SETEXP(cpu_reg);
return 0;
}
static int
opPUNPCKHBW_a16(uint32_t fetchdat)
{
MMX_REG src;
MMX_REG *dst;
MMX_ENTER();
fetch_ea_16(fetchdat);
dst = MMX_GETREGP(cpu_reg);
MMX_GETSRC();
dst->b[0] = dst->b[4];
dst->b[1] = src.b[4];
dst->b[2] = dst->b[5];
dst->b[3] = src.b[5];
dst->b[4] = dst->b[6];
dst->b[5] = src.b[6];
dst->b[6] = dst->b[7];
dst->b[7] = src.b[7];
MMX_SETEXP(cpu_reg);
return 0;
}
static int
opPUNPCKHBW_a32(uint32_t fetchdat)
{
MMX_REG src;
MMX_REG *dst;
MMX_ENTER();
fetch_ea_32(fetchdat);
dst = MMX_GETREGP(cpu_reg);
MMX_GETSRC();
dst->b[0] = dst->b[4];
dst->b[1] = src.b[4];
dst->b[2] = dst->b[5];
dst->b[3] = src.b[5];
dst->b[4] = dst->b[6];
dst->b[5] = src.b[6];
dst->b[6] = dst->b[7];
dst->b[7] = src.b[7];
MMX_SETEXP(cpu_reg);
return 0;
}
static int
opPUNPCKLWD_a16(uint32_t fetchdat)
{
MMX_REG src;
MMX_REG *dst;
MMX_ENTER();
fetch_ea_16(fetchdat);
dst = MMX_GETREGP(cpu_reg);
MMX_GETSRC();
dst->w[3] = src.w[1];
dst->w[2] = dst->w[1];
dst->w[1] = src.w[0];
dst->w[0] = dst->w[0];
MMX_SETEXP(cpu_reg);
return 0;
}
static int
opPUNPCKLWD_a32(uint32_t fetchdat)
{
MMX_REG src;
MMX_REG *dst;
MMX_ENTER();
fetch_ea_32(fetchdat);
dst = MMX_GETREGP(cpu_reg);
MMX_GETSRC();
dst->w[3] = src.w[1];
dst->w[2] = dst->w[1];
dst->w[1] = src.w[0];
dst->w[0] = dst->w[0];
MMX_SETEXP(cpu_reg);
return 0;
}
static int
opPUNPCKHWD_a16(uint32_t fetchdat)
{
MMX_REG src;
MMX_REG *dst;
MMX_ENTER();
fetch_ea_16(fetchdat);
dst = MMX_GETREGP(cpu_reg);
MMX_GETSRC();
dst->w[0] = dst->w[2];
dst->w[1] = src.w[2];
dst->w[2] = dst->w[3];
dst->w[3] = src.w[3];
MMX_SETEXP(cpu_reg);
return 0;
}
static int
opPUNPCKHWD_a32(uint32_t fetchdat)
{
MMX_REG src;
MMX_REG *dst;
MMX_ENTER();
fetch_ea_32(fetchdat);
dst = MMX_GETREGP(cpu_reg);
MMX_GETSRC();
dst->w[0] = dst->w[2];
dst->w[1] = src.w[2];
dst->w[2] = dst->w[3];
dst->w[3] = src.w[3];
MMX_SETEXP(cpu_reg);
return 0;
}
static int
opPACKSSWB_a16(uint32_t fetchdat)
{
MMX_REG src;
MMX_REG *dst;
MMX_ENTER();
fetch_ea_16(fetchdat);
dst = MMX_GETREGP(cpu_reg);
MMX_GETSRC();
dst->sb[0] = SSATB(dst->sw[0]);
dst->sb[1] = SSATB(dst->sw[1]);
dst->sb[2] = SSATB(dst->sw[2]);
dst->sb[3] = SSATB(dst->sw[3]);
dst->sb[4] = SSATB(src.sw[0]);
dst->sb[5] = SSATB(src.sw[1]);
dst->sb[6] = SSATB(src.sw[2]);
dst->sb[7] = SSATB(src.sw[3]);
MMX_SETEXP(cpu_reg);
return 0;
}
static int
opPACKSSWB_a32(uint32_t fetchdat)
{
MMX_REG src;
MMX_REG *dst;
MMX_ENTER();
fetch_ea_32(fetchdat);
dst = MMX_GETREGP(cpu_reg);
MMX_GETSRC();
dst->sb[0] = SSATB(dst->sw[0]);
dst->sb[1] = SSATB(dst->sw[1]);
dst->sb[2] = SSATB(dst->sw[2]);
dst->sb[3] = SSATB(dst->sw[3]);
dst->sb[4] = SSATB(src.sw[0]);
dst->sb[5] = SSATB(src.sw[1]);
dst->sb[6] = SSATB(src.sw[2]);
dst->sb[7] = SSATB(src.sw[3]);
MMX_SETEXP(cpu_reg);
return 0;
}
static int
opPACKUSWB_a16(uint32_t fetchdat)
{
MMX_REG src;
MMX_REG *dst;
MMX_ENTER();
fetch_ea_16(fetchdat);
dst = MMX_GETREGP(cpu_reg);
MMX_GETSRC();
dst->b[0] = USATB(dst->sw[0]);
dst->b[1] = USATB(dst->sw[1]);
dst->b[2] = USATB(dst->sw[2]);
dst->b[3] = USATB(dst->sw[3]);
dst->b[4] = USATB(src.sw[0]);
dst->b[5] = USATB(src.sw[1]);
dst->b[6] = USATB(src.sw[2]);
dst->b[7] = USATB(src.sw[3]);
MMX_SETEXP(cpu_reg);
return 0;
}
static int
opPACKUSWB_a32(uint32_t fetchdat)
{
MMX_REG src;
MMX_REG *dst;
MMX_ENTER();
fetch_ea_32(fetchdat);
dst = MMX_GETREGP(cpu_reg);
MMX_GETSRC();
dst->b[0] = USATB(dst->sw[0]);
dst->b[1] = USATB(dst->sw[1]);
dst->b[2] = USATB(dst->sw[2]);
dst->b[3] = USATB(dst->sw[3]);
dst->b[4] = USATB(src.sw[0]);
dst->b[5] = USATB(src.sw[1]);
dst->b[6] = USATB(src.sw[2]);
dst->b[7] = USATB(src.sw[3]);
MMX_SETEXP(cpu_reg);
return 0;
}
static int
opPACKSSDW_a16(uint32_t fetchdat)
{
MMX_REG src;
MMX_REG *dst;
MMX_REG dst2;
MMX_ENTER();
fetch_ea_16(fetchdat);
dst = MMX_GETREGP(cpu_reg);
dst2 = *dst;
MMX_GETSRC();
dst->sw[0] = SSATW(dst2.sl[0]);
dst->sw[1] = SSATW(dst2.sl[1]);
dst->sw[2] = SSATW(src.sl[0]);
dst->sw[3] = SSATW(src.sl[1]);
MMX_SETEXP(cpu_reg);
return 0;
}
static int
opPACKSSDW_a32(uint32_t fetchdat)
{
MMX_REG src;
MMX_REG *dst;
MMX_REG dst2;
MMX_ENTER();
fetch_ea_32(fetchdat);
dst = MMX_GETREGP(cpu_reg);
dst2 = *dst;
MMX_GETSRC();
dst->sw[0] = SSATW(dst2.sl[0]);
dst->sw[1] = SSATW(dst2.sl[1]);
dst->sw[2] = SSATW(src.sl[0]);
dst->sw[3] = SSATW(src.sl[1]);
MMX_SETEXP(cpu_reg);
return 0;
}
``` | /content/code_sandbox/src/cpu/x86_ops_mmx_pack.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 3,013 |
```objective-c
#define INC_DEC_OP(name, reg, inc, setflags) \
static int op##name(uint32_t fetchdat) \
{ \
setflags(reg, 1); \
reg += inc; \
CLOCK_CYCLES(timing_rr); \
PREFETCH_RUN(timing_rr, 1, -1, 0, 0, 0, 0, 0); \
return 0; \
}
INC_DEC_OP(INC_AX, AX, 1, setadd16nc)
INC_DEC_OP(INC_BX, BX, 1, setadd16nc)
INC_DEC_OP(INC_CX, CX, 1, setadd16nc)
INC_DEC_OP(INC_DX, DX, 1, setadd16nc)
INC_DEC_OP(INC_SI, SI, 1, setadd16nc)
INC_DEC_OP(INC_DI, DI, 1, setadd16nc)
INC_DEC_OP(INC_BP, BP, 1, setadd16nc)
INC_DEC_OP(INC_SP, SP, 1, setadd16nc)
INC_DEC_OP(INC_EAX, EAX, 1, setadd32nc)
INC_DEC_OP(INC_EBX, EBX, 1, setadd32nc)
INC_DEC_OP(INC_ECX, ECX, 1, setadd32nc)
INC_DEC_OP(INC_EDX, EDX, 1, setadd32nc)
INC_DEC_OP(INC_ESI, ESI, 1, setadd32nc)
INC_DEC_OP(INC_EDI, EDI, 1, setadd32nc)
INC_DEC_OP(INC_EBP, EBP, 1, setadd32nc)
INC_DEC_OP(INC_ESP, ESP, 1, setadd32nc)
INC_DEC_OP(DEC_AX, AX, -1, setsub16nc)
INC_DEC_OP(DEC_BX, BX, -1, setsub16nc)
INC_DEC_OP(DEC_CX, CX, -1, setsub16nc)
INC_DEC_OP(DEC_DX, DX, -1, setsub16nc)
INC_DEC_OP(DEC_SI, SI, -1, setsub16nc)
INC_DEC_OP(DEC_DI, DI, -1, setsub16nc)
INC_DEC_OP(DEC_BP, BP, -1, setsub16nc)
INC_DEC_OP(DEC_SP, SP, -1, setsub16nc)
INC_DEC_OP(DEC_EAX, EAX, -1, setsub32nc)
INC_DEC_OP(DEC_EBX, EBX, -1, setsub32nc)
INC_DEC_OP(DEC_ECX, ECX, -1, setsub32nc)
INC_DEC_OP(DEC_EDX, EDX, -1, setsub32nc)
INC_DEC_OP(DEC_ESI, ESI, -1, setsub32nc)
INC_DEC_OP(DEC_EDI, EDI, -1, setsub32nc)
INC_DEC_OP(DEC_EBP, EBP, -1, setsub32nc)
INC_DEC_OP(DEC_ESP, ESP, -1, setsub32nc)
static int
opINCDEC_b_a16(uint32_t fetchdat)
{
uint8_t temp;
fetch_ea_16(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_WRITE(cpu_state.ea_seg);
temp = geteab();
if (cpu_state.abrt)
return 1;
if (rmdat & 0x38) {
seteab(temp - 1);
if (cpu_state.abrt)
return 1;
setsub8nc(temp, 1);
} else {
seteab(temp + 1);
if (cpu_state.abrt)
return 1;
setadd8nc(temp, 1);
}
CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_mm);
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_mm, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, 0);
return 0;
}
/* FEh /r, 32-bit addressing: INC/DEC r/m8.
   reg field 0 = INC; any non-zero reg field falls into the DEC path
   (only /0 and /1 are architecturally defined for FEh). */
static int
opINCDEC_b_a32(uint32_t fetchdat)
{
    uint8_t temp;
    fetch_ea_32(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_WRITE(cpu_state.ea_seg);
    temp = geteab();
    if (cpu_state.abrt)
        return 1;
    if (rmdat & 0x38) {
        /* DEC r/m8 - flags computed from the pre-decrement value. */
        seteab(temp - 1);
        if (cpu_state.abrt)
            return 1;
        setsub8nc(temp, 1);
    } else {
        /* INC r/m8. */
        seteab(temp + 1);
        if (cpu_state.abrt)
            return 1;
        setadd8nc(temp, 1);
    }
    CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_mm);
    PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_mm, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, 1);
    return 0;
}
``` | /content/code_sandbox/src/cpu/x86_ops_inc_dec.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 1,155 |
```objective-c
/* MOVD mm, r/m32 with 16-bit addressing: load the low dword of an MMX
   register from a GPR or memory; the upper dword is zeroed. */
static int
opMOVD_l_mm_a16(uint32_t fetchdat)
{
    uint32_t dst;
    MMX_REG *op;
    MMX_ENTER();
    fetch_ea_16(fetchdat);
    op = MMX_GETREGP(cpu_reg);
    if (cpu_mod == 3) {
        /* Register source. */
        op->l[0] = cpu_state.regs[cpu_rm].l;
        op->l[1] = 0;
        CLOCK_CYCLES(1);
    } else {
        /* Memory source; bail out on a faulting read. */
        SEG_CHECK_READ(cpu_state.ea_seg);
        dst = readmeml(easeg, cpu_state.eaaddr);
        if (cpu_state.abrt)
            return 1;
        op->l[0] = dst;
        op->l[1] = 0;
        CLOCK_CYCLES(2);
    }
    MMX_SETEXP(cpu_reg);
    return 0;
}
/* MOVD mm, r/m32 with 32-bit addressing: load the low dword of an MMX
   register from a GPR or memory; the upper dword is zeroed. */
static int
opMOVD_l_mm_a32(uint32_t fetchdat)
{
    MMX_REG *mm;
    uint32_t val;
    MMX_ENTER();
    fetch_ea_32(fetchdat);
    mm = MMX_GETREGP(cpu_reg);
    if (cpu_mod != 3) {
        /* Memory source; bail out on a faulting read. */
        SEG_CHECK_READ(cpu_state.ea_seg);
        val = readmeml(easeg, cpu_state.eaaddr);
        if (cpu_state.abrt)
            return 1;
        CLOCK_CYCLES(2);
    } else {
        /* Register source. */
        val = cpu_state.regs[cpu_rm].l;
        CLOCK_CYCLES(1);
    }
    mm->l[0] = val;
    mm->l[1] = 0;
    MMX_SETEXP(cpu_reg);
    return 0;
}
/* MOVD r/m32, mm with 16-bit addressing: store the low dword of an MMX
   register into a GPR or memory. */
static int
opMOVD_mm_l_a16(uint32_t fetchdat)
{
    MMX_REG *op;
    MMX_ENTER();
    fetch_ea_16(fetchdat);
    op = MMX_GETREGP(cpu_reg);
    if (cpu_mod == 3) {
        /* Register destination. */
        cpu_state.regs[cpu_rm].l = op->l[0];
        CLOCK_CYCLES(1);
    } else {
        /* Memory destination; validate the full dword range before writing. */
        SEG_CHECK_WRITE(cpu_state.ea_seg);
        CHECK_WRITE_COMMON(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 3);
        writememl(easeg, cpu_state.eaaddr, op->l[0]);
        if (cpu_state.abrt)
            return 1;
        CLOCK_CYCLES(2);
    }
    return 0;
}
/* MOVD r/m32, mm with 32-bit addressing: store the low dword of an MMX
   register into a GPR or memory. */
static int
opMOVD_mm_l_a32(uint32_t fetchdat)
{
    MMX_REG *src;
    MMX_ENTER();
    fetch_ea_32(fetchdat);
    src = MMX_GETREGP(cpu_reg);
    if (cpu_mod != 3) {
        /* Memory destination; validate the full dword range before writing. */
        SEG_CHECK_WRITE(cpu_state.ea_seg);
        CHECK_WRITE_COMMON(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 3);
        writememl(easeg, cpu_state.eaaddr, src->l[0]);
        if (cpu_state.abrt)
            return 1;
        CLOCK_CYCLES(2);
    } else {
        /* Register destination. */
        cpu_state.regs[cpu_rm].l = src->l[0];
        CLOCK_CYCLES(1);
    }
    return 0;
}
#ifdef USE_CYRIX_6X86
/*Cyrix maps both MOVD and SMINT to the same opcode*/
/* Cyrix variant of MOVD r/m32, mm (16-bit addressing): while in SMM the
   opcode decodes as SMINT instead; otherwise identical to opMOVD_mm_l_a16. */
static int
opMOVD_mm_l_a16_cx(uint32_t fetchdat)
{
    const MMX_REG *op;
    if (in_smm)
        return opSMINT(fetchdat);
    MMX_ENTER();
    fetch_ea_16(fetchdat);
    op = MMX_GETREGP(cpu_reg);
    if (cpu_mod == 3) {
        /* Register destination. */
        cpu_state.regs[cpu_rm].l = op->l[0];
        CLOCK_CYCLES(1);
    } else {
        /* Memory destination; validate the full dword range before writing. */
        SEG_CHECK_WRITE(cpu_state.ea_seg);
        CHECK_WRITE_COMMON(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 3);
        writememl(easeg, cpu_state.eaaddr, op->l[0]);
        if (cpu_state.abrt)
            return 1;
        CLOCK_CYCLES(2);
    }
    return 0;
}
/* Cyrix variant of MOVD r/m32, mm (32-bit addressing); see above. */
static int
opMOVD_mm_l_a32_cx(uint32_t fetchdat)
{
    const MMX_REG *op;
    if (in_smm)
        return opSMINT(fetchdat);
    MMX_ENTER();
    fetch_ea_32(fetchdat);
    op = MMX_GETREGP(cpu_reg);
    if (cpu_mod == 3) {
        cpu_state.regs[cpu_rm].l = op->l[0];
        CLOCK_CYCLES(1);
    } else {
        SEG_CHECK_WRITE(cpu_state.ea_seg);
        CHECK_WRITE_COMMON(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 3);
        writememl(easeg, cpu_state.eaaddr, op->l[0]);
        if (cpu_state.abrt)
            return 1;
        CLOCK_CYCLES(2);
    }
    return 0;
}
#endif /* USE_CYRIX_6X86 */
/* MOVQ mm, mm/m64 with 16-bit addressing: load a full 64-bit MMX register
   from another MMX register or memory. */
static int
opMOVQ_q_mm_a16(uint32_t fetchdat)
{
    uint64_t dst;
    MMX_REG src;
    MMX_REG *op;
    MMX_ENTER();
    fetch_ea_16(fetchdat);
    /* src is fetched unconditionally; it is only meaningful (and only used)
       when cpu_mod == 3, where cpu_rm names an MMX register. */
    src = MMX_GETREG(cpu_rm);
    op = MMX_GETREGP(cpu_reg);
    if (cpu_mod == 3) {
        op->q = src.q;
        CLOCK_CYCLES(1);
    } else {
        /* Memory source; bail out on a faulting read. */
        SEG_CHECK_READ(cpu_state.ea_seg);
        dst = readmemq(easeg, cpu_state.eaaddr);
        if (cpu_state.abrt)
            return 1;
        op->q = dst;
        CLOCK_CYCLES(2);
    }
    MMX_SETEXP(cpu_reg);
    return 0;
}
/* MOVQ mm, mm/m64 with 32-bit addressing: load a full 64-bit MMX register
   from another MMX register or memory. */
static int
opMOVQ_q_mm_a32(uint32_t fetchdat)
{
    MMX_REG  tmp;
    MMX_REG *mm;
    uint64_t val;
    MMX_ENTER();
    fetch_ea_32(fetchdat);
    /* Fetched unconditionally; only meaningful when cpu_mod == 3. */
    tmp = MMX_GETREG(cpu_rm);
    mm  = MMX_GETREGP(cpu_reg);
    if (cpu_mod != 3) {
        /* Memory source; bail out on a faulting read. */
        SEG_CHECK_READ(cpu_state.ea_seg);
        val = readmemq(easeg, cpu_state.eaaddr);
        if (cpu_state.abrt)
            return 1;
        mm->q = val;
        CLOCK_CYCLES(2);
    } else {
        mm->q = tmp.q;
        CLOCK_CYCLES(1);
    }
    MMX_SETEXP(cpu_reg);
    return 0;
}
/* MOVQ mm/m64, mm with 16-bit addressing: store a full 64-bit MMX register
   into another MMX register or memory.  Note the tag/exponent update
   (MMX_SETEXP) applies only to the register-destination case. */
static int
opMOVQ_mm_q_a16(uint32_t fetchdat)
{
    MMX_REG src;
    MMX_REG *dst;
    MMX_ENTER();
    fetch_ea_16(fetchdat);
    src = MMX_GETREG(cpu_reg);
    /* Fetched unconditionally; only dereferenced when cpu_mod == 3. */
    dst = MMX_GETREGP(cpu_rm);
    if (cpu_mod == 3) {
        dst->q = src.q;
        CLOCK_CYCLES(1);
        MMX_SETEXP(cpu_rm);
    } else {
        /* Memory destination; validate the full qword range before writing. */
        SEG_CHECK_WRITE(cpu_state.ea_seg);
        CHECK_WRITE_COMMON(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 7);
        writememq(easeg, cpu_state.eaaddr, src.q);
        if (cpu_state.abrt)
            return 1;
        CLOCK_CYCLES(2);
    }
    return 0;
}
/* MOVQ mm/m64, mm with 32-bit addressing: store a full 64-bit MMX register
   into another MMX register or memory.  The tag/exponent update
   (MMX_SETEXP) applies only to the register-destination case. */
static int
opMOVQ_mm_q_a32(uint32_t fetchdat)
{
    MMX_REG  val;
    MMX_REG *dest;
    MMX_ENTER();
    fetch_ea_32(fetchdat);
    val  = MMX_GETREG(cpu_reg);
    /* Fetched unconditionally; only dereferenced when cpu_mod == 3. */
    dest = MMX_GETREGP(cpu_rm);
    if (cpu_mod != 3) {
        /* Memory destination; validate the full qword range before writing. */
        SEG_CHECK_WRITE(cpu_state.ea_seg);
        CHECK_WRITE_COMMON(cpu_state.ea_seg, cpu_state.eaaddr, cpu_state.eaaddr + 7);
        writememq(easeg, cpu_state.eaaddr, val.q);
        if (cpu_state.abrt)
            return 1;
        CLOCK_CYCLES(2);
    } else {
        dest->q = val.q;
        MMX_SETEXP(cpu_rm);
        CLOCK_CYCLES(1);
    }
    return 0;
}
``` | /content/code_sandbox/src/cpu/x86_ops_mmx_mov.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 1,861 |
```objective-c
#define X87_TAG_VALID 0
#define X87_TAG_ZERO 1
#define X87_TAG_INVALID 2
#define X87_TAG_EMPTY 3
extern uint32_t x87_pc_off;
extern uint32_t x87_op_off;
extern uint16_t x87_pc_seg;
extern uint16_t x87_op_seg;
/* Switch the FPU register file into MMX mode: reset top-of-stack to 0 and
   mark all eight tags as valid (softfloat uses 2-bit tags, 00 = valid;
   the legacy path uses one byte per tag, 0x01 = TAG_VALID). */
static __inline void
x87_set_mmx(void)
{
    uint64_t *p;
    if (fpu_softfloat) {
        fpu_state.tag = 0;
        fpu_state.tos = 0; /* reset FPU Top-Of-Stack */
    } else {
        cpu_state.TOP = 0;
        /* Sets all eight one-byte tags in a single 64-bit store.
           NOTE(review): type-punned write - assumes cpu_state.tag is an
           8-byte, suitably aligned array; confirm against cpu.h. */
        p = (uint64_t *) cpu_state.tag;
        *p = 0x0101010101010101ULL;
    }
    cpu_state.ismmx = 1;
}
/* EMMS: leave MMX mode and mark the whole x87 stack empty (softfloat:
   2-bit tags, 11 = empty, hence 0xffff; legacy: byte tags, 0 = TAG_EMPTY). */
static __inline void
x87_emms(void)
{
    uint64_t *p;
    if (fpu_softfloat) {
        fpu_state.tag = 0xffff;
        fpu_state.tos = 0; /* reset FPU Top-Of-Stack */
    } else {
        /* Clears all eight one-byte tags in a single 64-bit store.
           NOTE(review): same type-punned access as x87_set_mmx(). */
        p = (uint64_t *) cpu_state.tag;
        *p = 0;
    }
    cpu_state.ismmx = 0;
}
uint16_t x87_gettag(void);
void x87_settag(uint16_t new_tag);
#define TAG_EMPTY 0
#define TAG_VALID (1 << 0)
/*Hack for FPU copy. If set then MM[].q contains the 64-bit integer loaded by FILD*/
#ifdef USE_NEW_DYNAREC
# define TAG_UINT64 (1 << 7)
#else
# define TAG_UINT64 (1 << 2)
#endif
/*Old dynarec stuff.*/
#define TAG_NOT_UINT64 0xfb
#define X87_ROUNDING_NEAREST 0
#define X87_ROUNDING_DOWN 1
#define X87_ROUNDING_UP 2
#define X87_ROUNDING_CHOP 3
void codegen_set_rounding_mode(int mode);
/* Status Word */
#define FPU_SW_Backward (0x8000) /* backward compatibility */
#define FPU_SW_C3 (0x4000) /* condition bit 3 */
#define FPU_SW_Top (0x3800) /* top of stack */
#define FPU_SW_C2 (0x0400) /* condition bit 2 */
#define FPU_SW_C1 (0x0200) /* condition bit 1 */
#define FPU_SW_C0 (0x0100) /* condition bit 0 */
#define FPU_SW_Summary (0x0080) /* exception summary */
#define FPU_SW_Stack_Fault (0x0040) /* stack fault */
#define FPU_SW_Precision (0x0020) /* loss of precision */
#define FPU_SW_Underflow (0x0010) /* underflow */
#define FPU_SW_Overflow (0x0008) /* overflow */
#define FPU_SW_Zero_Div (0x0004) /* divide by zero */
#define FPU_SW_Denormal_Op (0x0002) /* denormalized operand */
#define FPU_SW_Invalid (0x0001) /* invalid operation */
#define FPU_SW_CC (FPU_SW_C0|FPU_SW_C1|FPU_SW_C2|FPU_SW_C3)
#define FPU_SW_Exceptions_Mask (0x027f) /* status word exceptions bit mask */
/* Exception flags: */
#define FPU_EX_Precision (0x0020) /* loss of precision */
#define FPU_EX_Underflow (0x0010) /* underflow */
#define FPU_EX_Overflow (0x0008) /* overflow */
#define FPU_EX_Zero_Div (0x0004) /* divide by zero */
#define FPU_EX_Denormal (0x0002) /* denormalized operand */
#define FPU_EX_Invalid (0x0001) /* invalid operation */
/* Special exceptions: */
#define FPU_EX_Stack_Overflow (0x0041|FPU_SW_C1) /* stack overflow */
#define FPU_EX_Stack_Underflow (0x0041) /* stack underflow */
/* precision control */
#define FPU_EX_Precision_Lost_Up (EX_Precision | SW_C1)
#define FPU_EX_Precision_Lost_Dn (EX_Precision)
#define setcc(cc) \
fpu_state.swd = (fpu_state.swd & ~(FPU_SW_CC)) | ((cc) & FPU_SW_CC)
#define clear_C1() { fpu_state.swd &= ~FPU_SW_C1; }
#define clear_C2() { fpu_state.swd &= ~FPU_SW_C2; }
/* ************ */
/* Control Word */
/* ************ */
#define FPU_CW_Reserved_Bits (0xe0c0) /* reserved bits */
#define FPU_CW_Inf (0x1000) /* infinity control, legacy */
#define FPU_CW_RC (0x0C00) /* rounding control */
#define FPU_CW_PC (0x0300) /* precision control */
#define FPU_RC_RND (0x0000) /* rounding control */
#define FPU_RC_DOWN (0x0400)
#define FPU_RC_UP (0x0800)
#define FPU_RC_CHOP (0x0C00)
#define FPU_CW_Precision (0x0020) /* loss of precision mask */
#define FPU_CW_Underflow (0x0010) /* underflow mask */
#define FPU_CW_Overflow (0x0008) /* overflow mask */
#define FPU_CW_Zero_Div (0x0004) /* divide by zero mask */
#define FPU_CW_Denormal (0x0002) /* denormalized operand mask */
#define FPU_CW_Invalid (0x0001) /* invalid operation mask */
#define FPU_CW_Exceptions_Mask (0x003f) /* all masks */
/* Precision control bits affect only the following:
ADD, SUB(R), MUL, DIV(R), and SQRT */
#define FPU_PR_32_BITS (0x000)
#define FPU_PR_RESERVED_BITS (0x100)
#define FPU_PR_64_BITS (0x200)
#define FPU_PR_80_BITS (0x300)
#include "softfloat3e/softfloat.h"
/* Nonzero when the invalid-operation (#IA) exception is masked in the
   FPU control word. */
static __inline int
is_IA_masked(void)
{
    return (fpu_state.cwd & FPU_CW_Invalid);
}
struct softfloat_status_t i387cw_to_softfloat_status_word(uint16_t control_word);
uint16_t FPU_exception(uint32_t fetchdat, uint16_t exceptions, int store);
int FPU_status_word_flags_fpu_compare(int float_relation);
void FPU_write_eflags_fpu_compare(int float_relation);
void FPU_stack_overflow(uint32_t fetchdat);
void FPU_stack_underflow(uint32_t fetchdat, int stnr, int pop_stack);
int FPU_handle_NaN32(extFloat80_t a, float32 b, extFloat80_t *r, struct softfloat_status_t *status);
int FPU_handle_NaN64(extFloat80_t a, float64 b, extFloat80_t *r, struct softfloat_status_t *status);
int FPU_tagof(const extFloat80_t reg);
uint8_t pack_FPU_TW(uint16_t twd);
uint16_t unpack_FPU_TW(uint16_t tag_byte);
/* Returns the raw FPU control word. */
static __inline uint16_t
i387_get_control_word(void)
{
    return (fpu_state.cwd);
}
/* Returns the FPU status word with the TOP field (bits 13:11) rebuilt
   from the separately tracked top-of-stack index. */
static __inline uint16_t
i387_get_status_word(void)
{
    return (fpu_state.swd & ~FPU_SW_Top & 0xFFFF) | ((fpu_state.tos << 11) & FPU_SW_Top);
}
/* True when the 2-bit tag of ST(i) is the "empty" encoding. */
#define IS_TAG_EMPTY(i) \
    (FPU_gettagi(i) == X87_TAG_EMPTY)
/* Returns the 2-bit tag for ST(stnr); physical slot = (stnr + TOS) mod 8. */
static __inline int
FPU_gettagi(int stnr)
{
    return (fpu_state.tag >> (((stnr + fpu_state.tos) & 7) * 2)) & 3;
}
/* Marks ST(stnr) valid by clearing its 2-bit tag field. */
static __inline void
FPU_settagi_valid(int stnr)
{
    int regnr = (stnr + fpu_state.tos) & 7;
    fpu_state.tag &= ~(3 << (regnr * 2)); // FPU_Tag_Valid == 0b00
}
/* Sets the 2-bit tag of ST(stnr) to the low two bits of `tag`. */
static __inline void
FPU_settagi(int tag, int stnr)
{
    int regnr = (stnr + fpu_state.tos) & 7;
    fpu_state.tag &= ~(3 << (regnr * 2));
    fpu_state.tag |= (tag & 3) << (regnr * 2);
}
/* Grows the stack: decrement TOS (mod 8).  The caller is expected to fill
   the new ST(0) and set its tag afterwards. */
static __inline void
FPU_push(void)
{
    fpu_state.tos = (fpu_state.tos - 1) & 7;
}
/* Shrinks the stack: mark the current ST(0) slot empty (tag 11), then
   increment TOS (mod 8). */
static __inline void
FPU_pop(void)
{
    fpu_state.tag |= (3 << (fpu_state.tos * 2));
    fpu_state.tos = (fpu_state.tos + 1) & 7;
}
/* Reads ST(stnr) by value; does not touch tags or TOS. */
static __inline extFloat80_t
FPU_read_regi(int stnr)
{
    return fpu_state.st_space[(stnr + fpu_state.tos) & 7];
}
// it is only possible to read FPU tag word through certain
// instructions like FNSAVE, and they update tag word to its
// real value anyway
/* Writes ST(stnr) and marks it valid. */
static __inline void
FPU_save_regi(extFloat80_t reg, int stnr)
{
    fpu_state.st_space[(stnr + fpu_state.tos) & 7] = reg;
    FPU_settagi_valid(stnr);
}
/* Writes ST(stnr) with an explicit 2-bit tag. */
static __inline void
FPU_save_regi_tag(extFloat80_t reg, int tag, int stnr)
{
    fpu_state.st_space[(stnr + fpu_state.tos) & 7] = reg;
    FPU_settagi(tag, stnr);
}
/* If a previous FPU instruction left an unmasked exception pending (status
   word summary bit set), deliver it before the next FPU instruction runs:
   native #MF (INT 16) when CR0.NE (bit 5) is set, otherwise the legacy
   FERR/IRQ13 path.  Expands to a `return 1` in the CALLER when an
   exception is delivered. */
#define FPU_check_pending_exceptions()       \
    do {                                     \
        if (fpu_state.swd & FPU_SW_Summary) { \
            pclog("SW Summary.\n");          \
            if (cr0 & 0x20) {                \
                x86_int(16);                 \
                return 1;                    \
            } else {                         \
                picint(1 << 13);             \
                return 1;                    \
            }                                \
        }                                    \
    } while (0)
``` | /content/code_sandbox/src/cpu/x87.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 2,350 |
```c
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <wchar.h>
#include <math.h>
#ifndef INFINITY
# define INFINITY (__builtin_inff())
#endif
#define HAVE_STDARG_H
#include <86box/86box.h>
#include "cpu.h"
#include <86box/timer.h>
#include "x86.h"
#include "x86seg_common.h"
#include "x87_sf.h"
#include "x87.h"
#include <86box/nmi.h>
#include <86box/mem.h>
#include <86box/smram.h>
#include <86box/pic.h>
#include <86box/pit.h>
#include <86box/fdd.h>
#include <86box/fdc.h>
#include <86box/keyboard.h>
#include <86box/timer.h>
#include "x86seg.h"
#include "386_common.h"
#include "x86_flags.h"
#include <86box/plat_unused.h>
#ifdef USE_DYNAREC
# include "codegen.h"
# define CPU_BLOCK_END() cpu_block_end = 1
#else
# define CPU_BLOCK_END()
#endif
x86seg gdt;
x86seg ldt;
x86seg idt;
x86seg tr;
uint32_t cr2;
uint32_t cr3;
uint32_t cr4;
uint32_t dr[8];
uint32_t use32;
int stack32;
int cpu_init = 0;
uint32_t *eal_r;
uint32_t *eal_w;
int nmi_enable = 1;
int alt_access;
int cpl_override = 0;
#ifdef USE_NEW_DYNAREC
uint16_t cpu_cur_status = 0;
#else
uint32_t cpu_cur_status = 0;
#endif
extern uint8_t *pccache2;
extern int optype;
extern uint32_t pccache;
int in_sys = 0;
int unmask_a20_in_smm = 0;
uint32_t old_rammask = 0xffffffff;
int soft_reset_mask = 0;
int smi_latched = 0;
int smm_in_hlt = 0;
int smi_block = 0;
int prefetch_prefixes = 0;
int rf_flag_no_clear = 0;
int tempc;
int oldcpl;
int optype;
int inttype;
int oddeven = 0;
int timetolive;
uint16_t oldcs;
uint32_t oldds;
uint32_t oldss;
uint32_t olddslimit;
uint32_t oldsslimit;
uint32_t olddslimitw;
uint32_t oldsslimitw;
uint32_t oxpc;
uint32_t rmdat32;
uint32_t backupregs[16];
x86seg _oldds;
int opcode_has_modrm[256] = {
1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, /*00*/
1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, /*10*/
1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, /*20*/
1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, /*30*/
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /*40*/
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /*50*/
0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, /*60*/
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /*70*/
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /*80*/
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /*90*/
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /*a0*/
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /*b0*/
1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, /*c0*/
1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, /*d0*/
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /*e0*/
0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, /*f0*/
};
int opcode_length[256] = { 3, 3, 3, 3, 3, 3, 1, 1, 3, 3, 3, 3, 3, 3, 1, 3, /* 0x0x */
3, 3, 3, 3, 3, 3, 1, 1, 3, 3, 3, 3, 3, 3, 1, 1, /* 0x1x */
3, 3, 3, 3, 3, 3, 1, 1, 3, 3, 3, 3, 3, 3, 1, 1, /* 0x2x */
3, 3, 3, 3, 3, 3, 1, 1, 3, 3, 3, 3, 3, 3, 1, 1, /* 0x3x */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 0x4x */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 0x5x */
1, 1, 3, 3, 1, 1, 1, 1, 3, 3, 2, 3, 1, 1, 1, 1, /* 0x6x */
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, /* 0x7x */
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, /* 0x8x */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 1, 1, 1, 1, /* 0x9x */
3, 3, 3, 3, 1, 1, 1, 1, 2, 3, 1, 1, 1, 1, 1, 1, /* 0xax */
2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, /* 0xbx */
3, 3, 3, 1, 3, 3, 3, 3, 3, 1, 3, 1, 1, 2, 1, 1, /* 0xcx */
3, 3, 3, 3, 2, 2, 1, 1, 3, 3, 3, 3, 3, 3, 3, 3, /* 0xdx */
2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 2, 1, 1, 1, 1, /* 0xex */
1, 1, 1, 1, 1, 1, 3, 3, 1, 1, 1, 1, 1, 1, 3, 3 }; /* 0xfx */
/* 0 = no, 1 = always, 2 = depends on second opcode, 3 = depends on mod/rm */
int lock_legal[256] = { 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 2, /* 0x0x */
1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, /* 0x1x */
1, 1, 1, 1, 1, 1, 4, 0, 1, 1, 1, 1, 1, 1, 4, 0, /* 0x2x */
1, 1, 1, 1, 1, 1, 4, 0, 0, 0, 0, 0, 0, 0, 4, 0, /* 0x3x */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x4x */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x5x */
0, 0, 0, 0, 4, 4, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x6x */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x7x */
3, 3, 3, 3, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x8x */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x9x */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0xax */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0xbx */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0xcx */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0xdx */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0xex */
0, 0, 0, 0, 0, 0, 3, 3, 0, 0, 0, 0, 0, 0, 3, 3 }; /* 0xfx */
int lock_legal_0f[256] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x0x */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x1x */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x2x */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x3x */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x4x */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x5x */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x6x */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x7x */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x8x */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x9x */
0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, /* 0xax */
0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, /* 0xbx */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0xcx */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0xdx */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0xex */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; /* 0xfx */
/* (modrm >> 3) & 0x07 */
int lock_legal_ba[8] = { 0, 0, 0, 0, 1, 1, 1, 1 };
/* Also applies to 81, 82, and 83 */
int lock_legal_80[8] = { 1, 1, 1, 1, 1, 1, 1, 0 };
/* Also applies to F7 */
int lock_legal_f6[8] = { 0, 0, 1, 1, 0, 0, 0, 0 };
/* Also applies to FF */
int lock_legal_fe[8] = { 1, 1, 0, 0, 0, 0, 0, 0 };
uint32_t addr64;
uint32_t addr64_2;
uint32_t addr64a[8];
uint32_t addr64a_2[8];
static pc_timer_t *cpu_fast_off_timer = NULL;
static double cpu_fast_off_period = 0.0;
#define AMD_SYSCALL_EIP (msr.amd_star & 0xFFFFFFFF)
#define AMD_SYSCALL_SB ((msr.amd_star >> 32) & 0xFFFF)
#define AMD_SYSRET_SB ((msr.amd_star >> 48) & 0xFFFF)
/* These #define's and enum have been borrowed from Bochs. */
/* SMM feature masks */
#define SMM_IO_INSTRUCTION_RESTART (0x00010000)
#define SMM_SMBASE_RELOCATION (0x00020000)
#define SMM_REVISION (0x20000000)
/* TODO: Which CPU added SMBASE relocation? */
#define SMM_REVISION_ID (SMM_SMBASE_RELOCATION | SMM_IO_INSTRUCTION_RESTART | SMM_REVISION)
#define SMM_SAVE_STATE_MAP_SIZE 128
enum SMMRAM_Fields_386_To_P5 {
SMRAM_FIELD_P5_CR0 = 0, /* 1FC */
SMRAM_FIELD_P5_CR3, /* 1F8 */
SMRAM_FIELD_P5_EFLAGS, /* 1F4 */
SMRAM_FIELD_P5_EIP, /* 1F0 */
SMRAM_FIELD_P5_EDI, /* 1EC */
SMRAM_FIELD_P5_ESI, /* 1E8 */
SMRAM_FIELD_P5_EBP, /* 1E4 */
SMRAM_FIELD_P5_ESP, /* 1E0 */
SMRAM_FIELD_P5_EBX, /* 1DC */
SMRAM_FIELD_P5_EDX, /* 1D8 */
SMRAM_FIELD_P5_ECX, /* 1D4 */
SMRAM_FIELD_P5_EAX, /* 1D0 */
SMRAM_FIELD_P5_DR6, /* 1CC */
SMRAM_FIELD_P5_DR7, /* 1C8 */
SMRAM_FIELD_P5_TR_SELECTOR, /* 1C4 */
SMRAM_FIELD_P5_LDTR_SELECTOR, /* 1C0 */
SMRAM_FIELD_P5_GS_SELECTOR, /* 1BC */
SMRAM_FIELD_P5_FS_SELECTOR, /* 1B8 */
SMRAM_FIELD_P5_DS_SELECTOR, /* 1B4 */
SMRAM_FIELD_P5_SS_SELECTOR, /* 1B0 */
SMRAM_FIELD_P5_CS_SELECTOR, /* 1AC */
SMRAM_FIELD_P5_ES_SELECTOR, /* 1A8 */
SMRAM_FIELD_P5_TR_ACCESS, /* 1A4 */
SMRAM_FIELD_P5_TR_BASE, /* 1A0 */
SMRAM_FIELD_P5_TR_LIMIT, /* 19C */
SMRAM_FIELD_P5_IDTR_ACCESS, /* 198 */
SMRAM_FIELD_P5_IDTR_BASE, /* 194 */
SMRAM_FIELD_P5_IDTR_LIMIT, /* 190 */
SMRAM_FIELD_P5_GDTR_ACCESS, /* 18C */
SMRAM_FIELD_P5_GDTR_BASE, /* 188 */
SMRAM_FIELD_P5_GDTR_LIMIT, /* 184 */
SMRAM_FIELD_P5_LDTR_ACCESS, /* 180 */
SMRAM_FIELD_P5_LDTR_BASE, /* 17C */
SMRAM_FIELD_P5_LDTR_LIMIT, /* 178 */
SMRAM_FIELD_P5_GS_ACCESS, /* 174 */
SMRAM_FIELD_P5_GS_BASE, /* 170 */
SMRAM_FIELD_P5_GS_LIMIT, /* 16C */
SMRAM_FIELD_P5_FS_ACCESS, /* 168 */
SMRAM_FIELD_P5_FS_BASE, /* 164 */
SMRAM_FIELD_P5_FS_LIMIT, /* 160 */
SMRAM_FIELD_P5_DS_ACCESS, /* 15C */
SMRAM_FIELD_P5_DS_BASE, /* 158 */
SMRAM_FIELD_P5_DS_LIMIT, /* 154 */
SMRAM_FIELD_P5_SS_ACCESS, /* 150 */
SMRAM_FIELD_P5_SS_BASE, /* 14C */
SMRAM_FIELD_P5_SS_LIMIT, /* 148 */
SMRAM_FIELD_P5_CS_ACCESS, /* 144 */
SMRAM_FIELD_P5_CS_BASE, /* 140 */
SMRAM_FIELD_P5_CS_LIMIT, /* 13C */
SMRAM_FIELD_P5_ES_ACCESS, /* 138 */
SMRAM_FIELD_P5_ES_BASE, /* 134 */
SMRAM_FIELD_P5_ES_LIMIT, /* 130 */
SMRAM_FIELD_P5_UNWRITTEN_1, /* 12C */
SMRAM_FIELD_P5_CR4, /* 128 */
SMRAM_FIELD_P5_ALTERNATE_DR6, /* 124 */
SMRAM_FIELD_P5_RESERVED_1, /* 120 */
SMRAM_FIELD_P5_RESERVED_2, /* 11C */
SMRAM_FIELD_P5_RESERVED_3, /* 118 */
SMRAM_FIELD_P5_RESERVED_4, /* 114 */
SMRAM_FIELD_P5_IO_RESTART_EIP, /* 110 */
SMRAM_FIELD_P5_IO_RESTART_ESI, /* 10C */
SMRAM_FIELD_P5_IO_RESTART_ECX, /* 108 */
SMRAM_FIELD_P5_IO_RESTART_EDI, /* 104 */
SMRAM_FIELD_P5_AUTOHALT_RESTART, /* 100 */
SMRAM_FIELD_P5_SMM_REVISION_ID, /* 0FC */
SMRAM_FIELD_P5_SMBASE_OFFSET, /* 0F8 */
SMRAM_FIELD_AM486_CR2, /* 0F4 */
SMRAM_FIELD_AM486_DR0, /* 0F0 */
SMRAM_FIELD_AM486_DR1, /* 0EC */
SMRAM_FIELD_AM486_DR2, /* 0E8 */
SMRAM_FIELD_AM486_DR3, /* 0E4 */
SMRAM_FIELD_P5_LAST
};
enum SMMRAM_Fields_P6 {
SMRAM_FIELD_P6_CR0 = 0, /* 1FC */
SMRAM_FIELD_P6_CR3, /* 1F8 */
SMRAM_FIELD_P6_EFLAGS, /* 1F4 */
SMRAM_FIELD_P6_EIP, /* 1F0 */
SMRAM_FIELD_P6_EDI, /* 1EC */
SMRAM_FIELD_P6_ESI, /* 1E8 */
SMRAM_FIELD_P6_EBP, /* 1E4 */
SMRAM_FIELD_P6_ESP, /* 1E0 */
SMRAM_FIELD_P6_EBX, /* 1DC */
SMRAM_FIELD_P6_EDX, /* 1D8 */
SMRAM_FIELD_P6_ECX, /* 1D4 */
SMRAM_FIELD_P6_EAX, /* 1D0 */
SMRAM_FIELD_P6_DR6, /* 1CC */
SMRAM_FIELD_P6_DR7, /* 1C8 */
SMRAM_FIELD_P6_TR_SELECTOR, /* 1C4 */
SMRAM_FIELD_P6_LDTR_SELECTOR, /* 1C0 */
SMRAM_FIELD_P6_GS_SELECTOR, /* 1BC */
SMRAM_FIELD_P6_FS_SELECTOR, /* 1B8 */
SMRAM_FIELD_P6_DS_SELECTOR, /* 1B4 */
SMRAM_FIELD_P6_SS_SELECTOR, /* 1B0 */
SMRAM_FIELD_P6_CS_SELECTOR, /* 1AC */
SMRAM_FIELD_P6_ES_SELECTOR, /* 1A8 */
SMRAM_FIELD_P6_SS_BASE, /* 1A4 */
SMRAM_FIELD_P6_SS_LIMIT, /* 1A0 */
SMRAM_FIELD_P6_SS_SELECTOR_AR, /* 19C */
SMRAM_FIELD_P6_CS_BASE, /* 198 */
SMRAM_FIELD_P6_CS_LIMIT, /* 194 */
SMRAM_FIELD_P6_CS_SELECTOR_AR, /* 190 */
SMRAM_FIELD_P6_ES_BASE, /* 18C */
SMRAM_FIELD_P6_ES_LIMIT, /* 188 */
SMRAM_FIELD_P6_ES_SELECTOR_AR, /* 184 */
SMRAM_FIELD_P6_LDTR_BASE, /* 180 */
SMRAM_FIELD_P6_LDTR_LIMIT, /* 17C */
SMRAM_FIELD_P6_LDTR_SELECTOR_AR, /* 178 */
SMRAM_FIELD_P6_GDTR_BASE, /* 174 */
SMRAM_FIELD_P6_GDTR_LIMIT, /* 170 */
SMRAM_FIELD_P6_GDTR_SELECTOR_AR, /* 16C */
SMRAM_FIELD_P6_SREG_STATUS1, /* 168 */
SMRAM_FIELD_P6_TR_BASE, /* 164 */
SMRAM_FIELD_P6_TR_LIMIT, /* 160 */
SMRAM_FIELD_P6_TR_SELECTOR_AR, /* 15C */
SMRAM_FIELD_P6_IDTR_BASE, /* 158 */
SMRAM_FIELD_P6_IDTR_LIMIT, /* 154 */
SMRAM_FIELD_P6_IDTR_SELECTOR_AR, /* 150 */
SMRAM_FIELD_P6_GS_BASE, /* 14C */
SMRAM_FIELD_P6_GS_LIMIT, /* 148 */
SMRAM_FIELD_P6_GS_SELECTOR_AR, /* 144 */
SMRAM_FIELD_P6_FS_BASE, /* 140 */
SMRAM_FIELD_P6_FS_LIMIT, /* 13C */
SMRAM_FIELD_P6_FS_SELECTOR_AR, /* 138 */
SMRAM_FIELD_P6_DS_BASE, /* 134 */
SMRAM_FIELD_P6_DS_LIMIT, /* 130 */
SMRAM_FIELD_P6_DS_SELECTOR_AR, /* 12C */
SMRAM_FIELD_P6_SREG_STATUS0, /* 128 */
SMRAM_FIELD_P6_ALTERNATIVE_DR6, /* 124 */
SMRAM_FIELD_P6_CPL, /* 120 */
SMRAM_FIELD_P6_SMM_STATUS, /* 11C */
SMRAM_FIELD_P6_A20M, /* 118 */
SMRAM_FIELD_P6_CR4, /* 114 */
SMRAM_FIELD_P6_IO_RESTART_EIP, /* 110 */
SMRAM_FIELD_P6_IO_RESTART_ESI, /* 10C */
SMRAM_FIELD_P6_IO_RESTART_ECX, /* 108 */
SMRAM_FIELD_P6_IO_RESTART_EDI, /* 104 */
SMRAM_FIELD_P6_AUTOHALT_RESTART, /* 100 */
SMRAM_FIELD_P6_SMM_REVISION_ID, /* 0FC */
SMRAM_FIELD_P6_SMBASE_OFFSET, /* 0F8 */
SMRAM_FIELD_P6_LAST
};
enum SMMRAM_Fields_AMD_K {
SMRAM_FIELD_AMD_K_CR0 = 0, /* 1FC */
SMRAM_FIELD_AMD_K_CR3, /* 1F8 */
SMRAM_FIELD_AMD_K_EFLAGS, /* 1F4 */
SMRAM_FIELD_AMD_K_EIP, /* 1F0 */
SMRAM_FIELD_AMD_K_EDI, /* 1EC */
SMRAM_FIELD_AMD_K_ESI, /* 1E8 */
SMRAM_FIELD_AMD_K_EBP, /* 1E4 */
SMRAM_FIELD_AMD_K_ESP, /* 1E0 */
SMRAM_FIELD_AMD_K_EBX, /* 1DC */
SMRAM_FIELD_AMD_K_EDX, /* 1D8 */
SMRAM_FIELD_AMD_K_ECX, /* 1D4 */
SMRAM_FIELD_AMD_K_EAX, /* 1D0 */
SMRAM_FIELD_AMD_K_DR6, /* 1CC */
SMRAM_FIELD_AMD_K_DR7, /* 1C8 */
SMRAM_FIELD_AMD_K_TR_SELECTOR, /* 1C4 */
SMRAM_FIELD_AMD_K_LDTR_SELECTOR, /* 1C0 */
SMRAM_FIELD_AMD_K_GS_SELECTOR, /* 1BC */
SMRAM_FIELD_AMD_K_FS_SELECTOR, /* 1B8 */
SMRAM_FIELD_AMD_K_DS_SELECTOR, /* 1B4 */
SMRAM_FIELD_AMD_K_SS_SELECTOR, /* 1B0 */
SMRAM_FIELD_AMD_K_CS_SELECTOR, /* 1AC */
SMRAM_FIELD_AMD_K_ES_SELECTOR, /* 1A8 */
SMRAM_FIELD_AMD_K_IO_RESTART_DWORD, /* 1A4 */
SMRAM_FIELD_AMD_K_RESERVED_1, /* 1A0 */
SMRAM_FIELD_AMD_K_IO_RESTART_EIP, /* 19C */
SMRAM_FIELD_AMD_K_RESERVED_2, /* 198 */
SMRAM_FIELD_AMD_K_RESERVED_3, /* 194 */
SMRAM_FIELD_AMD_K_IDTR_BASE, /* 190 */
SMRAM_FIELD_AMD_K_IDTR_LIMIT, /* 18C */
SMRAM_FIELD_AMD_K_GDTR_BASE, /* 188 */
SMRAM_FIELD_AMD_K_GDTR_LIMIT, /* 184 */
SMRAM_FIELD_AMD_K_TR_ACCESS, /* 180 */
SMRAM_FIELD_AMD_K_TR_BASE, /* 17C */
SMRAM_FIELD_AMD_K_TR_LIMIT, /* 178 */
SMRAM_FIELD_AMD_K_LDTR_ACCESS, /* 174 - reserved on K6 */
SMRAM_FIELD_AMD_K_LDTR_BASE, /* 170 */
SMRAM_FIELD_AMD_K_LDTR_LIMIT, /* 16C */
SMRAM_FIELD_AMD_K_GS_ACCESS, /* 168 */
SMRAM_FIELD_AMD_K_GS_BASE, /* 164 */
SMRAM_FIELD_AMD_K_GS_LIMIT, /* 160 */
SMRAM_FIELD_AMD_K_FS_ACCESS, /* 15C */
SMRAM_FIELD_AMD_K_FS_BASE, /* 158 */
SMRAM_FIELD_AMD_K_FS_LIMIT, /* 154 */
SMRAM_FIELD_AMD_K_DS_ACCESS, /* 150 */
SMRAM_FIELD_AMD_K_DS_BASE, /* 14C */
SMRAM_FIELD_AMD_K_DS_LIMIT, /* 148 */
SMRAM_FIELD_AMD_K_SS_ACCESS, /* 144 */
SMRAM_FIELD_AMD_K_SS_BASE, /* 140 */
SMRAM_FIELD_AMD_K_SS_LIMIT, /* 13C */
SMRAM_FIELD_AMD_K_CS_ACCESS, /* 138 */
SMRAM_FIELD_AMD_K_CS_BASE, /* 134 */
SMRAM_FIELD_AMD_K_CS_LIMIT, /* 130 */
SMRAM_FIELD_AMD_K_ES_ACCESS, /* 12C */
SMRAM_FIELD_AMD_K_ES_BASE, /* 128 */
SMRAM_FIELD_AMD_K_ES_LIMIT, /* 124 */
SMRAM_FIELD_AMD_K_RESERVED_4, /* 120 */
SMRAM_FIELD_AMD_K_RESERVED_5, /* 11C */
SMRAM_FIELD_AMD_K_RESERVED_6, /* 118 */
SMRAM_FIELD_AMD_K_CR2, /* 114 */
SMRAM_FIELD_AMD_K_CR4, /* 110 */
SMRAM_FIELD_AMD_K_IO_RESTART_ESI, /* 10C */
SMRAM_FIELD_AMD_K_IO_RESTART_ECX, /* 108 */
SMRAM_FIELD_AMD_K_IO_RESTART_EDI, /* 104 */
SMRAM_FIELD_AMD_K_AUTOHALT_RESTART, /* 100 */
SMRAM_FIELD_AMD_K_SMM_REVISION_ID, /* 0FC */
SMRAM_FIELD_AMD_K_SMBASE_OFFSET, /* 0F8 */
SMRAM_FIELD_AMD_K_LAST
};
/* Compile-time-gated debug logging for this translation unit; when
   ENABLE_386_COMMON_LOG is not defined the call compiles away entirely. */
#ifdef ENABLE_386_COMMON_LOG
int x386_common_do_log = ENABLE_386_COMMON_LOG;
/* printf-style logger forwarding to the common emulator log sink. */
void
x386_common_log(const char *fmt, ...)
{
    va_list ap;
    if (x386_common_do_log) {
        va_start(ap, fmt);
        pclog_ex(fmt, ap);
        va_end(ap);
    }
}
#else
#    define x386_common_log(fmt, ...)
#endif
/* Decides whether a LOCK prefix is legal for the instruction whose first
   bytes are in fetchdat (386+ only; pre-386 CPUs never fault on LOCK, so
   everything is reported legal).  Uses the lock_legal* tables:
   0 = never, 1 = always, 2 = look at the second (0F) opcode byte,
   3 = look at the mod/rm reg field, 4 = treated as legal here.
   Returns nonzero when LOCK is allowed. */
int
is_lock_legal(uint32_t fetchdat)
{
    int legal = 1;
    if (is386) {
        fetch_dat_t fetch_dat;
        fetch_dat.fd = fetchdat;
        legal = lock_legal[fetch_dat.b[0]];
        if (legal == 1)
            /* The reg,reg (mod==3) rejection below is deliberately disabled
               for one-byte opcodes - the check is kept in the comment. */
            legal = 1; // ((fetch_dat.b[1] >> 6) != 0x03); /* reg is illegal */
        else if (legal == 2) {
            /* Two-byte (0F xx) opcode: consult the 0F table. */
            legal = lock_legal_0f[fetch_dat.b[1]];
            if (legal == 1)
                legal = ((fetch_dat.b[2] >> 6) != 0x03); /* reg,reg is illegal */
            else if (legal == 3) {
                /* 0F BA group: legality depends on the reg field (BT/BTS/BTR/BTC). */
                legal = lock_legal_ba[(fetch_dat.b[2] >> 3) & 0x07];
                if (legal == 1)
                    legal = ((fetch_dat.b[2] >> 6) != 0x03); /* reg,imm is illegal */
            }
        } else if (legal == 3) switch(fetch_dat.b[0]) {
            /* Opcode groups whose legality depends on the mod/rm reg field. */
            case 0x80 ... 0x83:
                legal = lock_legal_80[(fetch_dat.b[1] >> 3) & 0x07];
                if (legal == 1)
                    legal = ((fetch_dat.b[1] >> 6) != 0x03); /* reg is illegal */
                break;
            case 0xf6 ... 0xf7:
                legal = lock_legal_f6[(fetch_dat.b[1] >> 3) & 0x07];
                if (legal == 1)
                    legal = ((fetch_dat.b[1] >> 6) != 0x03); /* reg is illegal */
                break;
            case 0xfe ... 0xff:
                legal = lock_legal_fe[(fetch_dat.b[1] >> 3) & 0x07];
                if (legal == 1)
                    legal = ((fetch_dat.b[1] >> 6) != 0x03); /* reg is illegal */
                break;
            default:
                legal = 0;
                break;
        }
    }
    return legal;
}
/*Prefetch emulation is a fairly simplistic model:
- All instruction bytes must be fetched before it starts.
- Cycles used for non-instruction memory accesses are counted and subtracted
from the total cycles taken
- Any remaining cycles are used to refill the prefetch queue.
Note that this is only used for 286 / 386 systems. It is disabled when the
internal cache on 486+ CPUs is enabled.
*/
static int prefetch_bytes = 0;
/* Accounts one executed instruction against the prefetch-queue model
   (286/386 only; see the comment block above).
     instr_cycles       - cycles the instruction took
     bytes              - opcode length, excluding prefixes and mod/rm extras
     modrm              - the mod/rm byte (low 8 bits; bits 8-10 hold the SIB
                          base in the 32-bit case), or -1 if none
     reads(_l)/writes(_l) - byte and dword memory accesses performed
     ea32               - nonzero for 32-bit addressing (SIB/disp32 forms) */
void
prefetch_run(int instr_cycles, int bytes, int modrm, int reads, int reads_l, int writes, int writes_l, int ea32)
{
    int mem_cycles = reads * cpu_cycles_read + reads_l * cpu_cycles_read_l + writes * cpu_cycles_write + writes_l * cpu_cycles_write_l;
    if (instr_cycles < mem_cycles)
        instr_cycles = mem_cycles;
    /* Consume queue bytes for prefixes, the opcode, and any addressing
       extras implied by the mod/rm encoding (SIB byte, displacements). */
    prefetch_bytes -= prefetch_prefixes;
    prefetch_bytes -= bytes;
    if (modrm != -1) {
        if (ea32) {
            if ((modrm & 7) == 4) {
                /* SIB byte present. */
                if ((modrm & 0x700) == 0x500)
                    prefetch_bytes -= 5; /* SIB + disp32 */
                else if ((modrm & 0xc0) == 0x40)
                    prefetch_bytes -= 2; /* SIB + disp8 */
                else if ((modrm & 0xc0) == 0x80)
                    prefetch_bytes -= 5; /* SIB + disp32 */
            } else {
                if ((modrm & 0xc7) == 0x05)
                    prefetch_bytes -= 4; /* disp32 only */
                else if ((modrm & 0xc0) == 0x40)
                    prefetch_bytes--;    /* disp8 */
                else if ((modrm & 0xc0) == 0x80)
                    prefetch_bytes -= 4; /* disp32 */
            }
        } else {
            if ((modrm & 0xc7) == 0x06)
                prefetch_bytes -= 2;     /* disp16 only */
            else if ((modrm & 0xc0) != 0xc0)
                prefetch_bytes -= ((modrm & 0xc0) >> 6); /* 0/1/2 disp bytes */
        }
    }
    /* Fill up prefetch queue */
    while (prefetch_bytes < 0) {
        prefetch_bytes += cpu_prefetch_width;
        cycles -= cpu_prefetch_cycles;
    }
    /* Subtract cycles used for memory access by instruction */
    instr_cycles -= mem_cycles;
    /* Remaining idle bus cycles refill the queue for free. */
    while (instr_cycles >= cpu_prefetch_cycles) {
        prefetch_bytes += cpu_prefetch_width;
        instr_cycles -= cpu_prefetch_cycles;
    }
    prefetch_prefixes = 0;
    if (prefetch_bytes > 16)
        prefetch_bytes = 16;
}
/* Empties the modelled prefetch queue (e.g. after a jump or other
   control transfer that would discard fetched bytes). */
void
prefetch_flush(void)
{
    prefetch_bytes = 0;
}
/* Sets the 32-bit-stack flag and mirrors it into cpu_cur_status.
   A 32-bit stack is only honoured in protected mode outside V86. */
static __inline void
set_stack32(int s)
{
    const int protected_mode = (cr0 & 1) && !(cpu_state.eflags & VM_FLAG);

    stack32 = protected_mode ? s : 0;

    if (stack32)
        cpu_cur_status |= CPU_STATUS_STACK32;
    else
        cpu_cur_status &= ~CPU_STATUS_STACK32;
}
/* Sets the 32-bit code-segment flag (use32 == 0x300 when active) and
   mirrors it into cpu_cur_status.  Only honoured in protected mode
   outside V86. */
static __inline void
set_use32(int u)
{
    if (u && (cr0 & 1) && !(cpu_state.eflags & VM_FLAG))
        use32 = 0x300;
    else
        use32 = 0;

    if (use32)
        cpu_cur_status |= CPU_STATUS_USE32;
    else
        cpu_cur_status &= ~CPU_STATUS_USE32;
}
/* Recomputes a segment's cached limit range and flat-segment / 32-bit
   status after its descriptor fields were reloaded from the SMM save area. */
static void
smm_seg_load(x86seg *s)
{
    if (!is386)
        s->base &= 0x00ffffff;
    /* NOTE(review): the guarded branch is the NORMAL (expand-up) case -
       valid offsets 0..limit.  The else branch handles an expand-down data
       segment (type 0x10 with the E bit, access bit 2, set): valid offsets
       limit+1 up to 0xffff/0xffffffff depending on the D/B bit.  The
       original comment labelled the first branch "Expand down", which looks
       misplaced - confirm against the descriptor type encoding. */
    if ((s->access & 0x18) != 0x10 || !(s->access & (1 << 2))) {
        s->limit_high = s->limit;
        s->limit_low = 0;
    } else {
        /* Expand-down data segment. */
        s->limit_high = (s->ar_high & 0x40) ? 0xffffffff : 0xffff;
        s->limit_low = s->limit + 1;
    }
    /* In protected mode a null selector means the segment is unusable. */
    if ((cr0 & 1) && !(cpu_state.eflags & VM_FLAG))
        s->checked = s->seg ? 1 : 0;
    else
        s->checked = 1;
    if (s == &cpu_state.seg_cs)
        set_use32(s->ar_high & 0x40);
    /* Track whether DS/SS are flat 4 GB segments for fast-path dispatch. */
    if (s == &cpu_state.seg_ds) {
        if (s->base == 0 && s->limit_low == 0 && s->limit_high == 0xffffffff)
            cpu_cur_status &= ~CPU_STATUS_NOTFLATDS;
        else
            cpu_cur_status |= CPU_STATUS_NOTFLATDS;
    }
    if (s == &cpu_state.seg_ss) {
        if (s->base == 0 && s->limit_low == 0 && s->limit_high == 0xffffffff)
            cpu_cur_status &= ~CPU_STATUS_NOTFLATSS;
        else
            cpu_cur_status |= CPU_STATUS_NOTFLATSS;
        set_stack32((s->ar_high & 0x40) ? 1 : 0);
    }
}
/* Fill an SMM save state map in the Intel P5 (Pentium) layout; the same
   layout is also used by the Am486/Am5x86, which append a few extra
   fields (CR2, DR0-DR3). in_hlt is non-zero if the SMI interrupted HLT. */
static void
smram_save_state_p5(uint32_t *saved_state, int in_hlt)
{
    saved_state[SMRAM_FIELD_P5_SMM_REVISION_ID] = SMM_REVISION_ID;
    saved_state[SMRAM_FIELD_P5_SMBASE_OFFSET] = smbase;
    /* GPR fields are laid out in descending order starting at EAX. */
    for (uint8_t n = 0; n < 8; n++)
        saved_state[SMRAM_FIELD_P5_EAX - n] = cpu_state.regs[n].l;
    if (in_hlt)
        saved_state[SMRAM_FIELD_P5_AUTOHALT_RESTART] = 1;
    else
        saved_state[SMRAM_FIELD_P5_AUTOHALT_RESTART] = 0;
    saved_state[SMRAM_FIELD_P5_EIP] = cpu_state.pc;
    saved_state[SMRAM_FIELD_P5_EFLAGS] = (cpu_state.eflags << 16) | (cpu_state.flags);
    saved_state[SMRAM_FIELD_P5_CR0] = cr0;
    saved_state[SMRAM_FIELD_P5_CR3] = cr3;
    saved_state[SMRAM_FIELD_P5_CR4] = cr4;
    saved_state[SMRAM_FIELD_P5_DR6] = dr[6];
    saved_state[SMRAM_FIELD_P5_DR7] = dr[7];
    /* TR */
    saved_state[SMRAM_FIELD_P5_TR_SELECTOR] = tr.seg;
    saved_state[SMRAM_FIELD_P5_TR_BASE] = tr.base;
    saved_state[SMRAM_FIELD_P5_TR_LIMIT] = tr.limit;
    saved_state[SMRAM_FIELD_P5_TR_ACCESS] = (tr.ar_high << 16) | (tr.access << 8);
    /* LDTR */
    saved_state[SMRAM_FIELD_P5_LDTR_SELECTOR] = ldt.seg;
    saved_state[SMRAM_FIELD_P5_LDTR_BASE] = ldt.base;
    saved_state[SMRAM_FIELD_P5_LDTR_LIMIT] = ldt.limit;
    saved_state[SMRAM_FIELD_P5_LDTR_ACCESS] = (ldt.ar_high << 16) | (ldt.access << 8);
    /* IDTR */
    saved_state[SMRAM_FIELD_P5_IDTR_BASE] = idt.base;
    saved_state[SMRAM_FIELD_P5_IDTR_LIMIT] = idt.limit;
    saved_state[SMRAM_FIELD_P5_IDTR_ACCESS] = (idt.ar_high << 16) | (idt.access << 8);
    /* GDTR */
    saved_state[SMRAM_FIELD_P5_GDTR_BASE] = gdt.base;
    saved_state[SMRAM_FIELD_P5_GDTR_LIMIT] = gdt.limit;
    saved_state[SMRAM_FIELD_P5_GDTR_ACCESS] = (gdt.ar_high << 16) | (gdt.access << 8);
    /* ES */
    saved_state[SMRAM_FIELD_P5_ES_SELECTOR] = cpu_state.seg_es.seg;
    saved_state[SMRAM_FIELD_P5_ES_BASE] = cpu_state.seg_es.base;
    saved_state[SMRAM_FIELD_P5_ES_LIMIT] = cpu_state.seg_es.limit;
    saved_state[SMRAM_FIELD_P5_ES_ACCESS] = (cpu_state.seg_es.ar_high << 16) | (cpu_state.seg_es.access << 8);
    /* CS */
    saved_state[SMRAM_FIELD_P5_CS_SELECTOR] = cpu_state.seg_cs.seg;
    saved_state[SMRAM_FIELD_P5_CS_BASE] = cpu_state.seg_cs.base;
    saved_state[SMRAM_FIELD_P5_CS_LIMIT] = cpu_state.seg_cs.limit;
    saved_state[SMRAM_FIELD_P5_CS_ACCESS] = (cpu_state.seg_cs.ar_high << 16) | (cpu_state.seg_cs.access << 8);
    /* DS */
    saved_state[SMRAM_FIELD_P5_DS_SELECTOR] = cpu_state.seg_ds.seg;
    saved_state[SMRAM_FIELD_P5_DS_BASE] = cpu_state.seg_ds.base;
    saved_state[SMRAM_FIELD_P5_DS_LIMIT] = cpu_state.seg_ds.limit;
    saved_state[SMRAM_FIELD_P5_DS_ACCESS] = (cpu_state.seg_ds.ar_high << 16) | (cpu_state.seg_ds.access << 8);
    /* SS */
    saved_state[SMRAM_FIELD_P5_SS_SELECTOR] = cpu_state.seg_ss.seg;
    saved_state[SMRAM_FIELD_P5_SS_BASE] = cpu_state.seg_ss.base;
    saved_state[SMRAM_FIELD_P5_SS_LIMIT] = cpu_state.seg_ss.limit;
    saved_state[SMRAM_FIELD_P5_SS_ACCESS] = (cpu_state.seg_ss.ar_high << 16) | (cpu_state.seg_ss.access << 8);
    /* FS */
    saved_state[SMRAM_FIELD_P5_FS_SELECTOR] = cpu_state.seg_fs.seg;
    saved_state[SMRAM_FIELD_P5_FS_BASE] = cpu_state.seg_fs.base;
    saved_state[SMRAM_FIELD_P5_FS_LIMIT] = cpu_state.seg_fs.limit;
    saved_state[SMRAM_FIELD_P5_FS_ACCESS] = (cpu_state.seg_fs.ar_high << 16) | (cpu_state.seg_fs.access << 8);
    /* GS */
    saved_state[SMRAM_FIELD_P5_GS_SELECTOR] = cpu_state.seg_gs.seg;
    saved_state[SMRAM_FIELD_P5_GS_BASE] = cpu_state.seg_gs.base;
    saved_state[SMRAM_FIELD_P5_GS_LIMIT] = cpu_state.seg_gs.limit;
    saved_state[SMRAM_FIELD_P5_GS_ACCESS] = (cpu_state.seg_gs.ar_high << 16) | (cpu_state.seg_gs.access << 8);
    /* Am486/5x86 stuff */
    if (!is_pentium) {
        saved_state[SMRAM_FIELD_AM486_CR2] = cr2;
        saved_state[SMRAM_FIELD_AM486_DR0] = dr[0];
        saved_state[SMRAM_FIELD_AM486_DR1] = dr[1];
        saved_state[SMRAM_FIELD_AM486_DR2] = dr[2];
        saved_state[SMRAM_FIELD_AM486_DR3] = dr[3];
    }
}
/* Restore the CPU state from a P5 (Pentium) layout SMM save state map on
   RSM; also handles the Am486/Am5x86 extra fields (CR2, DR0-DR3). */
static void
smram_restore_state_p5(uint32_t *saved_state)
{
    for (uint8_t n = 0; n < 8; n++)
        cpu_state.regs[n].l = saved_state[SMRAM_FIELD_P5_EAX - n];
    /* Auto halt restart: back EIP up onto the HLT opcode so the CPU
       re-enters the halt state after RSM. */
    if (saved_state[SMRAM_FIELD_P5_AUTOHALT_RESTART] & 0xffff)
        cpu_state.pc = saved_state[SMRAM_FIELD_P5_EIP] - 1;
    else
        cpu_state.pc = saved_state[SMRAM_FIELD_P5_EIP];
    cpu_state.eflags = saved_state[SMRAM_FIELD_P5_EFLAGS] >> 16;
    cpu_state.flags = saved_state[SMRAM_FIELD_P5_EFLAGS] & 0xffff;
    cr0 = saved_state[SMRAM_FIELD_P5_CR0];
    cr3 = saved_state[SMRAM_FIELD_P5_CR3];
    cr4 = saved_state[SMRAM_FIELD_P5_CR4];
    dr[6] = saved_state[SMRAM_FIELD_P5_DR6];
    dr[7] = saved_state[SMRAM_FIELD_P5_DR7];
    /* TR */
    tr.seg = saved_state[SMRAM_FIELD_P5_TR_SELECTOR];
    tr.base = saved_state[SMRAM_FIELD_P5_TR_BASE];
    tr.limit = saved_state[SMRAM_FIELD_P5_TR_LIMIT];
    tr.access = (saved_state[SMRAM_FIELD_P5_TR_ACCESS] >> 8) & 0xff;
    tr.ar_high = (saved_state[SMRAM_FIELD_P5_TR_ACCESS] >> 16) & 0xff;
    smm_seg_load(&tr);
    /* LDTR */
    ldt.seg = saved_state[SMRAM_FIELD_P5_LDTR_SELECTOR];
    ldt.base = saved_state[SMRAM_FIELD_P5_LDTR_BASE];
    ldt.limit = saved_state[SMRAM_FIELD_P5_LDTR_LIMIT];
    ldt.access = (saved_state[SMRAM_FIELD_P5_LDTR_ACCESS] >> 8) & 0xff;
    ldt.ar_high = (saved_state[SMRAM_FIELD_P5_LDTR_ACCESS] >> 16) & 0xff;
    smm_seg_load(&ldt);
    /* IDTR — descriptor-table registers need no smm_seg_load() pass. */
    idt.base = saved_state[SMRAM_FIELD_P5_IDTR_BASE];
    idt.limit = saved_state[SMRAM_FIELD_P5_IDTR_LIMIT];
    idt.access = (saved_state[SMRAM_FIELD_P5_IDTR_ACCESS] >> 8) & 0xff;
    idt.ar_high = (saved_state[SMRAM_FIELD_P5_IDTR_ACCESS] >> 16) & 0xff;
    /* GDTR */
    gdt.base = saved_state[SMRAM_FIELD_P5_GDTR_BASE];
    gdt.limit = saved_state[SMRAM_FIELD_P5_GDTR_LIMIT];
    gdt.access = (saved_state[SMRAM_FIELD_P5_GDTR_ACCESS] >> 8) & 0xff;
    gdt.ar_high = (saved_state[SMRAM_FIELD_P5_GDTR_ACCESS] >> 16) & 0xff;
    /* ES */
    cpu_state.seg_es.seg = saved_state[SMRAM_FIELD_P5_ES_SELECTOR];
    cpu_state.seg_es.base = saved_state[SMRAM_FIELD_P5_ES_BASE];
    cpu_state.seg_es.limit = saved_state[SMRAM_FIELD_P5_ES_LIMIT];
    cpu_state.seg_es.access = (saved_state[SMRAM_FIELD_P5_ES_ACCESS] >> 8) & 0xff;
    cpu_state.seg_es.ar_high = (saved_state[SMRAM_FIELD_P5_ES_ACCESS] >> 16) & 0xff;
    smm_seg_load(&cpu_state.seg_es);
    /* CS */
    cpu_state.seg_cs.seg = saved_state[SMRAM_FIELD_P5_CS_SELECTOR];
    cpu_state.seg_cs.base = saved_state[SMRAM_FIELD_P5_CS_BASE];
    cpu_state.seg_cs.limit = saved_state[SMRAM_FIELD_P5_CS_LIMIT];
    cpu_state.seg_cs.access = (saved_state[SMRAM_FIELD_P5_CS_ACCESS] >> 8) & 0xff;
    cpu_state.seg_cs.ar_high = (saved_state[SMRAM_FIELD_P5_CS_ACCESS] >> 16) & 0xff;
    smm_seg_load(&cpu_state.seg_cs);
    /* DS */
    cpu_state.seg_ds.seg = saved_state[SMRAM_FIELD_P5_DS_SELECTOR];
    cpu_state.seg_ds.base = saved_state[SMRAM_FIELD_P5_DS_BASE];
    cpu_state.seg_ds.limit = saved_state[SMRAM_FIELD_P5_DS_LIMIT];
    cpu_state.seg_ds.access = (saved_state[SMRAM_FIELD_P5_DS_ACCESS] >> 8) & 0xff;
    cpu_state.seg_ds.ar_high = (saved_state[SMRAM_FIELD_P5_DS_ACCESS] >> 16) & 0xff;
    smm_seg_load(&cpu_state.seg_ds);
    /* SS — must come after CS is restored, see the CPL fixup below. */
    cpu_state.seg_ss.seg = saved_state[SMRAM_FIELD_P5_SS_SELECTOR];
    cpu_state.seg_ss.base = saved_state[SMRAM_FIELD_P5_SS_BASE];
    cpu_state.seg_ss.limit = saved_state[SMRAM_FIELD_P5_SS_LIMIT];
    cpu_state.seg_ss.access = (saved_state[SMRAM_FIELD_P5_SS_ACCESS] >> 8) & 0xff;
    /* The actual CPL (DPL of CS) is overwritten with DPL of SS. */
    cpu_state.seg_cs.access = (cpu_state.seg_cs.access & ~0x60) | (cpu_state.seg_ss.access & 0x60);
    cpu_state.seg_ss.ar_high = (saved_state[SMRAM_FIELD_P5_SS_ACCESS] >> 16) & 0xff;
    smm_seg_load(&cpu_state.seg_ss);
    /* FS */
    cpu_state.seg_fs.seg = saved_state[SMRAM_FIELD_P5_FS_SELECTOR];
    cpu_state.seg_fs.base = saved_state[SMRAM_FIELD_P5_FS_BASE];
    cpu_state.seg_fs.limit = saved_state[SMRAM_FIELD_P5_FS_LIMIT];
    cpu_state.seg_fs.access = (saved_state[SMRAM_FIELD_P5_FS_ACCESS] >> 8) & 0xff;
    cpu_state.seg_fs.ar_high = (saved_state[SMRAM_FIELD_P5_FS_ACCESS] >> 16) & 0xff;
    smm_seg_load(&cpu_state.seg_fs);
    /* GS */
    cpu_state.seg_gs.seg = saved_state[SMRAM_FIELD_P5_GS_SELECTOR];
    cpu_state.seg_gs.base = saved_state[SMRAM_FIELD_P5_GS_BASE];
    cpu_state.seg_gs.limit = saved_state[SMRAM_FIELD_P5_GS_LIMIT];
    cpu_state.seg_gs.access = (saved_state[SMRAM_FIELD_P5_GS_ACCESS] >> 8) & 0xff;
    cpu_state.seg_gs.ar_high = (saved_state[SMRAM_FIELD_P5_GS_ACCESS] >> 16) & 0xff;
    smm_seg_load(&cpu_state.seg_gs);
    /* Only relocate SMBASE if the revision advertises the capability. */
    if (SMM_REVISION_ID & SMM_SMBASE_RELOCATION)
        smbase = saved_state[SMRAM_FIELD_P5_SMBASE_OFFSET];
    /* Am486/5x86 stuff */
    if (!is_pentium) {
        cr2 = saved_state[SMRAM_FIELD_AM486_CR2];
        dr[0] = saved_state[SMRAM_FIELD_AM486_DR0];
        dr[1] = saved_state[SMRAM_FIELD_AM486_DR1];
        dr[2] = saved_state[SMRAM_FIELD_AM486_DR2];
        dr[3] = saved_state[SMRAM_FIELD_AM486_DR3];
    }
}
/* Fill an SMM save state map in the Intel P6 (Pentium Pro/II/Celeron)
   layout. Unlike P5, this layout packs selector and access rights into a
   single "SELECTOR_AR" dword and also records CPL and the A20M# state.
   in_hlt is non-zero if the SMI interrupted a HLT instruction. */
static void
smram_save_state_p6(uint32_t *saved_state, int in_hlt)
{
    saved_state[SMRAM_FIELD_P6_SMM_REVISION_ID] = SMM_REVISION_ID;
    saved_state[SMRAM_FIELD_P6_SMBASE_OFFSET] = smbase;
    /* GPR fields are laid out in descending order starting at EAX. */
    for (uint8_t n = 0; n < 8; n++)
        saved_state[SMRAM_FIELD_P6_EAX - n] = cpu_state.regs[n].l;
    if (in_hlt)
        saved_state[SMRAM_FIELD_P6_AUTOHALT_RESTART] = 1;
    else
        saved_state[SMRAM_FIELD_P6_AUTOHALT_RESTART] = 0;
    saved_state[SMRAM_FIELD_P6_EIP] = cpu_state.pc;
    saved_state[SMRAM_FIELD_P6_EFLAGS] = (cpu_state.eflags << 16) | (cpu_state.flags);
    saved_state[SMRAM_FIELD_P6_CR0] = cr0;
    saved_state[SMRAM_FIELD_P6_CR3] = cr3;
    saved_state[SMRAM_FIELD_P6_CR4] = cr4;
    saved_state[SMRAM_FIELD_P6_DR6] = dr[6];
    saved_state[SMRAM_FIELD_P6_DR7] = dr[7];
    saved_state[SMRAM_FIELD_P6_CPL] = CPL;
    /* The field stores the A20M# masking state, i.e. the inverse of the
       current A20 gate state. */
    saved_state[SMRAM_FIELD_P6_A20M] = !mem_a20_state;
    /* TR */
    saved_state[SMRAM_FIELD_P6_TR_SELECTOR] = tr.seg;
    saved_state[SMRAM_FIELD_P6_TR_BASE] = tr.base;
    saved_state[SMRAM_FIELD_P6_TR_LIMIT] = tr.limit;
    saved_state[SMRAM_FIELD_P6_TR_SELECTOR_AR] = (tr.ar_high << 24) | (tr.access << 16) | tr.seg;
    /* LDTR */
    saved_state[SMRAM_FIELD_P6_LDTR_SELECTOR] = ldt.seg;
    saved_state[SMRAM_FIELD_P6_LDTR_BASE] = ldt.base;
    saved_state[SMRAM_FIELD_P6_LDTR_LIMIT] = ldt.limit;
    saved_state[SMRAM_FIELD_P6_LDTR_SELECTOR_AR] = (ldt.ar_high << 24) | (ldt.access << 16) | ldt.seg;
    /* IDTR */
    saved_state[SMRAM_FIELD_P6_IDTR_BASE] = idt.base;
    saved_state[SMRAM_FIELD_P6_IDTR_LIMIT] = idt.limit;
    saved_state[SMRAM_FIELD_P6_IDTR_SELECTOR_AR] = (idt.ar_high << 24) | (idt.access << 16) | idt.seg;
    /* GDTR */
    saved_state[SMRAM_FIELD_P6_GDTR_BASE] = gdt.base;
    saved_state[SMRAM_FIELD_P6_GDTR_LIMIT] = gdt.limit;
    saved_state[SMRAM_FIELD_P6_GDTR_SELECTOR_AR] = (gdt.ar_high << 24) | (gdt.access << 16) | gdt.seg;
    /* ES */
    saved_state[SMRAM_FIELD_P6_ES_SELECTOR] = cpu_state.seg_es.seg;
    saved_state[SMRAM_FIELD_P6_ES_BASE] = cpu_state.seg_es.base;
    saved_state[SMRAM_FIELD_P6_ES_LIMIT] = cpu_state.seg_es.limit;
    saved_state[SMRAM_FIELD_P6_ES_SELECTOR_AR] = (cpu_state.seg_es.ar_high << 24) | (cpu_state.seg_es.access << 16) | cpu_state.seg_es.seg;
    /* CS */
    saved_state[SMRAM_FIELD_P6_CS_SELECTOR] = cpu_state.seg_cs.seg;
    saved_state[SMRAM_FIELD_P6_CS_BASE] = cpu_state.seg_cs.base;
    saved_state[SMRAM_FIELD_P6_CS_LIMIT] = cpu_state.seg_cs.limit;
    saved_state[SMRAM_FIELD_P6_CS_SELECTOR_AR] = (cpu_state.seg_cs.ar_high << 24) | (cpu_state.seg_cs.access << 16) | cpu_state.seg_cs.seg;
    /* DS */
    saved_state[SMRAM_FIELD_P6_DS_SELECTOR] = cpu_state.seg_ds.seg;
    saved_state[SMRAM_FIELD_P6_DS_BASE] = cpu_state.seg_ds.base;
    saved_state[SMRAM_FIELD_P6_DS_LIMIT] = cpu_state.seg_ds.limit;
    saved_state[SMRAM_FIELD_P6_DS_SELECTOR_AR] = (cpu_state.seg_ds.ar_high << 24) | (cpu_state.seg_ds.access << 16) | cpu_state.seg_ds.seg;
    /* SS */
    saved_state[SMRAM_FIELD_P6_SS_SELECTOR] = cpu_state.seg_ss.seg;
    saved_state[SMRAM_FIELD_P6_SS_BASE] = cpu_state.seg_ss.base;
    saved_state[SMRAM_FIELD_P6_SS_LIMIT] = cpu_state.seg_ss.limit;
    saved_state[SMRAM_FIELD_P6_SS_SELECTOR_AR] = (cpu_state.seg_ss.ar_high << 24) | (cpu_state.seg_ss.access << 16) | cpu_state.seg_ss.seg;
    /* FS */
    saved_state[SMRAM_FIELD_P6_FS_SELECTOR] = cpu_state.seg_fs.seg;
    saved_state[SMRAM_FIELD_P6_FS_BASE] = cpu_state.seg_fs.base;
    saved_state[SMRAM_FIELD_P6_FS_LIMIT] = cpu_state.seg_fs.limit;
    saved_state[SMRAM_FIELD_P6_FS_SELECTOR_AR] = (cpu_state.seg_fs.ar_high << 24) | (cpu_state.seg_fs.access << 16) | cpu_state.seg_fs.seg;
    /* GS */
    saved_state[SMRAM_FIELD_P6_GS_SELECTOR] = cpu_state.seg_gs.seg;
    saved_state[SMRAM_FIELD_P6_GS_BASE] = cpu_state.seg_gs.base;
    saved_state[SMRAM_FIELD_P6_GS_LIMIT] = cpu_state.seg_gs.limit;
    saved_state[SMRAM_FIELD_P6_GS_SELECTOR_AR] = (cpu_state.seg_gs.ar_high << 24) | (cpu_state.seg_gs.access << 16) | cpu_state.seg_gs.seg;
}
/* Restore the CPU state from a P6 layout SMM save state map on RSM.
   Also restores CPL into the CS access rights and the A20M# gate state. */
static void
smram_restore_state_p6(uint32_t *saved_state)
{
    for (uint8_t n = 0; n < 8; n++)
        cpu_state.regs[n].l = saved_state[SMRAM_FIELD_P6_EAX - n];
    /* Auto halt restart: back EIP up onto the HLT opcode so the CPU
       re-enters the halt state after RSM. */
    if (saved_state[SMRAM_FIELD_P6_AUTOHALT_RESTART] & 0xffff)
        cpu_state.pc = saved_state[SMRAM_FIELD_P6_EIP] - 1;
    else
        cpu_state.pc = saved_state[SMRAM_FIELD_P6_EIP];
    cpu_state.eflags = saved_state[SMRAM_FIELD_P6_EFLAGS] >> 16;
    cpu_state.flags = saved_state[SMRAM_FIELD_P6_EFLAGS] & 0xffff;
    cr0 = saved_state[SMRAM_FIELD_P6_CR0];
    cr3 = saved_state[SMRAM_FIELD_P6_CR3];
    cr4 = saved_state[SMRAM_FIELD_P6_CR4];
    dr[6] = saved_state[SMRAM_FIELD_P6_DR6];
    dr[7] = saved_state[SMRAM_FIELD_P6_DR7];
    /* TR */
    tr.seg = saved_state[SMRAM_FIELD_P6_TR_SELECTOR];
    tr.base = saved_state[SMRAM_FIELD_P6_TR_BASE];
    tr.limit = saved_state[SMRAM_FIELD_P6_TR_LIMIT];
    tr.access = (saved_state[SMRAM_FIELD_P6_TR_SELECTOR_AR] >> 16) & 0xff;
    tr.ar_high = (saved_state[SMRAM_FIELD_P6_TR_SELECTOR_AR] >> 24) & 0xff;
    smm_seg_load(&tr);
    /* LDTR */
    ldt.seg = saved_state[SMRAM_FIELD_P6_LDTR_SELECTOR];
    ldt.base = saved_state[SMRAM_FIELD_P6_LDTR_BASE];
    ldt.limit = saved_state[SMRAM_FIELD_P6_LDTR_LIMIT];
    ldt.access = (saved_state[SMRAM_FIELD_P6_LDTR_SELECTOR_AR] >> 16) & 0xff;
    ldt.ar_high = (saved_state[SMRAM_FIELD_P6_LDTR_SELECTOR_AR] >> 24) & 0xff;
    smm_seg_load(&ldt);
    /* IDTR — descriptor-table registers need no smm_seg_load() pass. */
    idt.base = saved_state[SMRAM_FIELD_P6_IDTR_BASE];
    idt.limit = saved_state[SMRAM_FIELD_P6_IDTR_LIMIT];
    idt.access = (saved_state[SMRAM_FIELD_P6_IDTR_SELECTOR_AR] >> 16) & 0xff;
    idt.ar_high = (saved_state[SMRAM_FIELD_P6_IDTR_SELECTOR_AR] >> 24) & 0xff;
    /* GDTR */
    gdt.base = saved_state[SMRAM_FIELD_P6_GDTR_BASE];
    gdt.limit = saved_state[SMRAM_FIELD_P6_GDTR_LIMIT];
    gdt.access = (saved_state[SMRAM_FIELD_P6_GDTR_SELECTOR_AR] >> 16) & 0xff;
    gdt.ar_high = (saved_state[SMRAM_FIELD_P6_GDTR_SELECTOR_AR] >> 24) & 0xff;
    /* ES */
    cpu_state.seg_es.seg = saved_state[SMRAM_FIELD_P6_ES_SELECTOR];
    cpu_state.seg_es.base = saved_state[SMRAM_FIELD_P6_ES_BASE];
    cpu_state.seg_es.limit = saved_state[SMRAM_FIELD_P6_ES_LIMIT];
    cpu_state.seg_es.access = (saved_state[SMRAM_FIELD_P6_ES_SELECTOR_AR] >> 16) & 0xff;
    cpu_state.seg_es.ar_high = (saved_state[SMRAM_FIELD_P6_ES_SELECTOR_AR] >> 24) & 0xff;
    smm_seg_load(&cpu_state.seg_es);
    /* CS */
    cpu_state.seg_cs.seg = saved_state[SMRAM_FIELD_P6_CS_SELECTOR];
    cpu_state.seg_cs.base = saved_state[SMRAM_FIELD_P6_CS_BASE];
    cpu_state.seg_cs.limit = saved_state[SMRAM_FIELD_P6_CS_LIMIT];
    cpu_state.seg_cs.access = (saved_state[SMRAM_FIELD_P6_CS_SELECTOR_AR] >> 16) & 0xff;
    cpu_state.seg_cs.ar_high = (saved_state[SMRAM_FIELD_P6_CS_SELECTOR_AR] >> 24) & 0xff;
    smm_seg_load(&cpu_state.seg_cs);
    /* On P6 the CPL comes from its own save state field: patch it into
       the DPL bits of the CS access rights after the segment reload. */
    cpu_state.seg_cs.access = (cpu_state.seg_cs.access & ~0x60) | ((saved_state[SMRAM_FIELD_P6_CPL] & 0x03) << 5);
    /* DS */
    cpu_state.seg_ds.seg = saved_state[SMRAM_FIELD_P6_DS_SELECTOR];
    cpu_state.seg_ds.base = saved_state[SMRAM_FIELD_P6_DS_BASE];
    cpu_state.seg_ds.limit = saved_state[SMRAM_FIELD_P6_DS_LIMIT];
    cpu_state.seg_ds.access = (saved_state[SMRAM_FIELD_P6_DS_SELECTOR_AR] >> 16) & 0xff;
    cpu_state.seg_ds.ar_high = (saved_state[SMRAM_FIELD_P6_DS_SELECTOR_AR] >> 24) & 0xff;
    smm_seg_load(&cpu_state.seg_ds);
    /* SS */
    cpu_state.seg_ss.seg = saved_state[SMRAM_FIELD_P6_SS_SELECTOR];
    cpu_state.seg_ss.base = saved_state[SMRAM_FIELD_P6_SS_BASE];
    cpu_state.seg_ss.limit = saved_state[SMRAM_FIELD_P6_SS_LIMIT];
    cpu_state.seg_ss.access = (saved_state[SMRAM_FIELD_P6_SS_SELECTOR_AR] >> 16) & 0xff;
    cpu_state.seg_ss.ar_high = (saved_state[SMRAM_FIELD_P6_SS_SELECTOR_AR] >> 24) & 0xff;
    smm_seg_load(&cpu_state.seg_ss);
    /* FS */
    cpu_state.seg_fs.seg = saved_state[SMRAM_FIELD_P6_FS_SELECTOR];
    cpu_state.seg_fs.base = saved_state[SMRAM_FIELD_P6_FS_BASE];
    cpu_state.seg_fs.limit = saved_state[SMRAM_FIELD_P6_FS_LIMIT];
    cpu_state.seg_fs.access = (saved_state[SMRAM_FIELD_P6_FS_SELECTOR_AR] >> 16) & 0xff;
    cpu_state.seg_fs.ar_high = (saved_state[SMRAM_FIELD_P6_FS_SELECTOR_AR] >> 24) & 0xff;
    smm_seg_load(&cpu_state.seg_fs);
    /* GS */
    cpu_state.seg_gs.seg = saved_state[SMRAM_FIELD_P6_GS_SELECTOR];
    cpu_state.seg_gs.base = saved_state[SMRAM_FIELD_P6_GS_BASE];
    cpu_state.seg_gs.limit = saved_state[SMRAM_FIELD_P6_GS_LIMIT];
    cpu_state.seg_gs.access = (saved_state[SMRAM_FIELD_P6_GS_SELECTOR_AR] >> 16) & 0xff;
    cpu_state.seg_gs.ar_high = (saved_state[SMRAM_FIELD_P6_GS_SELECTOR_AR] >> 24) & 0xff;
    smm_seg_load(&cpu_state.seg_gs);
    /* Restore the A20 gate from the saved A20M# masking state. */
    mem_a20_alt = 0x00;
    mem_a20_key = saved_state[SMRAM_FIELD_P6_A20M] ? 0x00 : 0x02;
    mem_a20_recalc();
    /* Only relocate SMBASE if the revision advertises the capability. */
    if (SMM_REVISION_ID & SMM_SMBASE_RELOCATION)
        smbase = saved_state[SMRAM_FIELD_P6_SMBASE_OFFSET];
}
/* Fill an SMM save state map in the AMD K5/K6 layout, which resembles
   P5 but also stores CR2 and (on K5 only) the LDTR access rights.
   in_hlt is non-zero if the SMI interrupted a HLT instruction. */
static void
smram_save_state_amd_k(uint32_t *saved_state, int in_hlt)
{
    saved_state[SMRAM_FIELD_AMD_K_SMM_REVISION_ID] = SMM_REVISION_ID;
    saved_state[SMRAM_FIELD_AMD_K_SMBASE_OFFSET] = smbase;
    /* GPR fields are laid out in descending order starting at EAX. */
    for (uint8_t n = 0; n < 8; n++)
        saved_state[SMRAM_FIELD_AMD_K_EAX - n] = cpu_state.regs[n].l;
    if (in_hlt)
        saved_state[SMRAM_FIELD_AMD_K_AUTOHALT_RESTART] = 1;
    else
        saved_state[SMRAM_FIELD_AMD_K_AUTOHALT_RESTART] = 0;
    saved_state[SMRAM_FIELD_AMD_K_EIP] = cpu_state.pc;
    saved_state[SMRAM_FIELD_AMD_K_EFLAGS] = (cpu_state.eflags << 16) | (cpu_state.flags);
    saved_state[SMRAM_FIELD_AMD_K_CR0] = cr0;
    saved_state[SMRAM_FIELD_AMD_K_CR2] = cr2;
    saved_state[SMRAM_FIELD_AMD_K_CR3] = cr3;
    saved_state[SMRAM_FIELD_AMD_K_CR4] = cr4;
    saved_state[SMRAM_FIELD_AMD_K_DR6] = dr[6];
    saved_state[SMRAM_FIELD_AMD_K_DR7] = dr[7];
    /* TR */
    saved_state[SMRAM_FIELD_AMD_K_TR_SELECTOR] = tr.seg;
    saved_state[SMRAM_FIELD_AMD_K_TR_BASE] = tr.base;
    saved_state[SMRAM_FIELD_AMD_K_TR_LIMIT] = tr.limit;
    saved_state[SMRAM_FIELD_AMD_K_TR_ACCESS] = (tr.ar_high << 16) | (tr.access << 8);
    /* LDTR — the K6 layout has no LDTR access rights field. */
    saved_state[SMRAM_FIELD_AMD_K_LDTR_SELECTOR] = ldt.seg;
    saved_state[SMRAM_FIELD_AMD_K_LDTR_BASE] = ldt.base;
    saved_state[SMRAM_FIELD_AMD_K_LDTR_LIMIT] = ldt.limit;
    if (!is_k6)
        saved_state[SMRAM_FIELD_AMD_K_LDTR_ACCESS] = (ldt.ar_high << 16) | (ldt.access << 8);
    /* IDTR */
    saved_state[SMRAM_FIELD_AMD_K_IDTR_BASE] = idt.base;
    saved_state[SMRAM_FIELD_AMD_K_IDTR_LIMIT] = idt.limit;
    /* GDTR */
    saved_state[SMRAM_FIELD_AMD_K_GDTR_BASE] = gdt.base;
    saved_state[SMRAM_FIELD_AMD_K_GDTR_LIMIT] = gdt.limit;
    /* ES */
    saved_state[SMRAM_FIELD_AMD_K_ES_SELECTOR] = cpu_state.seg_es.seg;
    saved_state[SMRAM_FIELD_AMD_K_ES_BASE] = cpu_state.seg_es.base;
    saved_state[SMRAM_FIELD_AMD_K_ES_LIMIT] = cpu_state.seg_es.limit;
    saved_state[SMRAM_FIELD_AMD_K_ES_ACCESS] = (cpu_state.seg_es.ar_high << 16) | (cpu_state.seg_es.access << 8);
    /* CS */
    saved_state[SMRAM_FIELD_AMD_K_CS_SELECTOR] = cpu_state.seg_cs.seg;
    saved_state[SMRAM_FIELD_AMD_K_CS_BASE] = cpu_state.seg_cs.base;
    saved_state[SMRAM_FIELD_AMD_K_CS_LIMIT] = cpu_state.seg_cs.limit;
    saved_state[SMRAM_FIELD_AMD_K_CS_ACCESS] = (cpu_state.seg_cs.ar_high << 16) | (cpu_state.seg_cs.access << 8);
    /* DS */
    saved_state[SMRAM_FIELD_AMD_K_DS_SELECTOR] = cpu_state.seg_ds.seg;
    saved_state[SMRAM_FIELD_AMD_K_DS_BASE] = cpu_state.seg_ds.base;
    saved_state[SMRAM_FIELD_AMD_K_DS_LIMIT] = cpu_state.seg_ds.limit;
    saved_state[SMRAM_FIELD_AMD_K_DS_ACCESS] = (cpu_state.seg_ds.ar_high << 16) | (cpu_state.seg_ds.access << 8);
    /* SS */
    saved_state[SMRAM_FIELD_AMD_K_SS_SELECTOR] = cpu_state.seg_ss.seg;
    saved_state[SMRAM_FIELD_AMD_K_SS_BASE] = cpu_state.seg_ss.base;
    saved_state[SMRAM_FIELD_AMD_K_SS_LIMIT] = cpu_state.seg_ss.limit;
    saved_state[SMRAM_FIELD_AMD_K_SS_ACCESS] = (cpu_state.seg_ss.ar_high << 16) | (cpu_state.seg_ss.access << 8);
    /* FS */
    saved_state[SMRAM_FIELD_AMD_K_FS_SELECTOR] = cpu_state.seg_fs.seg;
    saved_state[SMRAM_FIELD_AMD_K_FS_BASE] = cpu_state.seg_fs.base;
    saved_state[SMRAM_FIELD_AMD_K_FS_LIMIT] = cpu_state.seg_fs.limit;
    saved_state[SMRAM_FIELD_AMD_K_FS_ACCESS] = (cpu_state.seg_fs.ar_high << 16) | (cpu_state.seg_fs.access << 8);
    /* GS */
    saved_state[SMRAM_FIELD_AMD_K_GS_SELECTOR] = cpu_state.seg_gs.seg;
    saved_state[SMRAM_FIELD_AMD_K_GS_BASE] = cpu_state.seg_gs.base;
    saved_state[SMRAM_FIELD_AMD_K_GS_LIMIT] = cpu_state.seg_gs.limit;
    saved_state[SMRAM_FIELD_AMD_K_GS_ACCESS] = (cpu_state.seg_gs.ar_high << 16) | (cpu_state.seg_gs.access << 8);
}
/* Restore the CPU state from an AMD K5/K6 layout SMM save state map on
   RSM. On K6 the LDTR access rights are not present in the map and are
   left untouched. */
static void
smram_restore_state_amd_k(uint32_t *saved_state)
{
    for (uint8_t n = 0; n < 8; n++)
        cpu_state.regs[n].l = saved_state[SMRAM_FIELD_AMD_K_EAX - n];
    /* Auto halt restart: back EIP up onto the HLT opcode so the CPU
       re-enters the halt state after RSM. */
    if (saved_state[SMRAM_FIELD_AMD_K_AUTOHALT_RESTART] & 0xffff)
        cpu_state.pc = saved_state[SMRAM_FIELD_AMD_K_EIP] - 1;
    else
        cpu_state.pc = saved_state[SMRAM_FIELD_AMD_K_EIP];
    cpu_state.eflags = saved_state[SMRAM_FIELD_AMD_K_EFLAGS] >> 16;
    cpu_state.flags = saved_state[SMRAM_FIELD_AMD_K_EFLAGS] & 0xffff;
    cr0 = saved_state[SMRAM_FIELD_AMD_K_CR0];
    cr2 = saved_state[SMRAM_FIELD_AMD_K_CR2];
    cr3 = saved_state[SMRAM_FIELD_AMD_K_CR3];
    cr4 = saved_state[SMRAM_FIELD_AMD_K_CR4];
    dr[6] = saved_state[SMRAM_FIELD_AMD_K_DR6];
    dr[7] = saved_state[SMRAM_FIELD_AMD_K_DR7];
    /* TR */
    tr.seg = saved_state[SMRAM_FIELD_AMD_K_TR_SELECTOR];
    tr.base = saved_state[SMRAM_FIELD_AMD_K_TR_BASE];
    tr.limit = saved_state[SMRAM_FIELD_AMD_K_TR_LIMIT];
    tr.access = (saved_state[SMRAM_FIELD_AMD_K_TR_ACCESS] >> 8) & 0xff;
    tr.ar_high = (saved_state[SMRAM_FIELD_AMD_K_TR_ACCESS] >> 16) & 0xff;
    smm_seg_load(&tr);
    /* LDTR — the K6 layout has no LDTR access rights field. */
    ldt.seg = saved_state[SMRAM_FIELD_AMD_K_LDTR_SELECTOR];
    ldt.base = saved_state[SMRAM_FIELD_AMD_K_LDTR_BASE];
    ldt.limit = saved_state[SMRAM_FIELD_AMD_K_LDTR_LIMIT];
    if (!is_k6) {
        ldt.access = (saved_state[SMRAM_FIELD_AMD_K_LDTR_ACCESS] >> 8) & 0xff;
        ldt.ar_high = (saved_state[SMRAM_FIELD_AMD_K_LDTR_ACCESS] >> 16) & 0xff;
    }
    smm_seg_load(&ldt);
    /* IDTR */
    idt.base = saved_state[SMRAM_FIELD_AMD_K_IDTR_BASE];
    idt.limit = saved_state[SMRAM_FIELD_AMD_K_IDTR_LIMIT];
    /* GDTR */
    gdt.base = saved_state[SMRAM_FIELD_AMD_K_GDTR_BASE];
    gdt.limit = saved_state[SMRAM_FIELD_AMD_K_GDTR_LIMIT];
    /* ES */
    cpu_state.seg_es.seg = saved_state[SMRAM_FIELD_AMD_K_ES_SELECTOR];
    cpu_state.seg_es.base = saved_state[SMRAM_FIELD_AMD_K_ES_BASE];
    cpu_state.seg_es.limit = saved_state[SMRAM_FIELD_AMD_K_ES_LIMIT];
    cpu_state.seg_es.access = (saved_state[SMRAM_FIELD_AMD_K_ES_ACCESS] >> 8) & 0xff;
    cpu_state.seg_es.ar_high = (saved_state[SMRAM_FIELD_AMD_K_ES_ACCESS] >> 16) & 0xff;
    smm_seg_load(&cpu_state.seg_es);
    /* CS */
    cpu_state.seg_cs.seg = saved_state[SMRAM_FIELD_AMD_K_CS_SELECTOR];
    cpu_state.seg_cs.base = saved_state[SMRAM_FIELD_AMD_K_CS_BASE];
    cpu_state.seg_cs.limit = saved_state[SMRAM_FIELD_AMD_K_CS_LIMIT];
    cpu_state.seg_cs.access = (saved_state[SMRAM_FIELD_AMD_K_CS_ACCESS] >> 8) & 0xff;
    cpu_state.seg_cs.ar_high = (saved_state[SMRAM_FIELD_AMD_K_CS_ACCESS] >> 16) & 0xff;
    smm_seg_load(&cpu_state.seg_cs);
    /* DS */
    cpu_state.seg_ds.seg = saved_state[SMRAM_FIELD_AMD_K_DS_SELECTOR];
    cpu_state.seg_ds.base = saved_state[SMRAM_FIELD_AMD_K_DS_BASE];
    cpu_state.seg_ds.limit = saved_state[SMRAM_FIELD_AMD_K_DS_LIMIT];
    cpu_state.seg_ds.access = (saved_state[SMRAM_FIELD_AMD_K_DS_ACCESS] >> 8) & 0xff;
    cpu_state.seg_ds.ar_high = (saved_state[SMRAM_FIELD_AMD_K_DS_ACCESS] >> 16) & 0xff;
    smm_seg_load(&cpu_state.seg_ds);
    /* SS — must come after CS is restored, see the CPL fixup below. */
    cpu_state.seg_ss.seg = saved_state[SMRAM_FIELD_AMD_K_SS_SELECTOR];
    cpu_state.seg_ss.base = saved_state[SMRAM_FIELD_AMD_K_SS_BASE];
    cpu_state.seg_ss.limit = saved_state[SMRAM_FIELD_AMD_K_SS_LIMIT];
    cpu_state.seg_ss.access = (saved_state[SMRAM_FIELD_AMD_K_SS_ACCESS] >> 8) & 0xff;
    /* The actual CPL (DPL of CS) is overwritten with DPL of SS. */
    cpu_state.seg_cs.access = (cpu_state.seg_cs.access & ~0x60) | (cpu_state.seg_ss.access & 0x60);
    cpu_state.seg_ss.ar_high = (saved_state[SMRAM_FIELD_AMD_K_SS_ACCESS] >> 16) & 0xff;
    smm_seg_load(&cpu_state.seg_ss);
    /* FS */
    cpu_state.seg_fs.seg = saved_state[SMRAM_FIELD_AMD_K_FS_SELECTOR];
    cpu_state.seg_fs.base = saved_state[SMRAM_FIELD_AMD_K_FS_BASE];
    cpu_state.seg_fs.limit = saved_state[SMRAM_FIELD_AMD_K_FS_LIMIT];
    cpu_state.seg_fs.access = (saved_state[SMRAM_FIELD_AMD_K_FS_ACCESS] >> 8) & 0xff;
    cpu_state.seg_fs.ar_high = (saved_state[SMRAM_FIELD_AMD_K_FS_ACCESS] >> 16) & 0xff;
    smm_seg_load(&cpu_state.seg_fs);
    /* GS */
    cpu_state.seg_gs.seg = saved_state[SMRAM_FIELD_AMD_K_GS_SELECTOR];
    cpu_state.seg_gs.base = saved_state[SMRAM_FIELD_AMD_K_GS_BASE];
    cpu_state.seg_gs.limit = saved_state[SMRAM_FIELD_AMD_K_GS_LIMIT];
    cpu_state.seg_gs.access = (saved_state[SMRAM_FIELD_AMD_K_GS_ACCESS] >> 8) & 0xff;
    cpu_state.seg_gs.ar_high = (saved_state[SMRAM_FIELD_AMD_K_GS_ACCESS] >> 16) & 0xff;
    smm_seg_load(&cpu_state.seg_gs);
    /* Only relocate SMBASE if the revision advertises the capability. */
    if (SMM_REVISION_ID & SMM_SMBASE_RELOCATION)
        smbase = saved_state[SMRAM_FIELD_AMD_K_SMBASE_OFFSET];
}
/* Fill the much smaller Cyrix SMM header (seven dwords, written by
   enter_smm() at descending offsets below SMHR). The CS descriptor
   itself is written separately by enter_smm(). */
static void
smram_save_state_cyrix(uint32_t *saved_state, UNUSED(int in_hlt))
{
    saved_state[0] = dr[7];
    saved_state[1] = cpu_state.flags | (cpu_state.eflags << 16);
    saved_state[2] = cr0;
    saved_state[3] = cpu_state.oldpc;  /* previous EIP */
    saved_state[4] = cpu_state.pc;     /* current EIP */
    saved_state[5] = CS | (CPL << 21); /* CS selector with CPL in bits 21-22 */
    saved_state[6] = 0x00000000;
}
/* Restore the CPU state from the Cyrix SMM header. Deliberately partial:
   only DR7, the flags, CR0 and EIP come from the header; the CS register
   is reloaded separately by leave_smm() via cyrix_load_seg_descriptor().
   Slots 3 (previous EIP) and 5 (CS selector/CPL) are not consumed here. */
static void
smram_restore_state_cyrix(uint32_t *saved_state)
{
    dr[7] = saved_state[0];
    cpu_state.flags = saved_state[1] & 0xffff;
    cpu_state.eflags = saved_state[1] >> 16;
    cr0 = saved_state[2];
    cpu_state.pc = saved_state[4];
}
/* Enter System Management Mode: save the CPU state to SMRAM in the
   CPU-specific layout, then reset the CPU into the SMM execution
   environment (real-mode-like, flat segments, EIP at the SMM entry
   point). in_hlt is non-zero if the SMI interrupted a HLT instruction. */
void
enter_smm(int in_hlt)
{
    uint32_t saved_state[SMM_SAVE_STATE_MAP_SIZE];
    /* Intel/AMD layouts place the save state at the top of the 64 KB
       SMRAM window; Cyrix overrides this below from the SMHR register. */
    uint32_t smram_state = smbase + 0x10000;
    /* If it's a CPU on which SMM is not supported, do nothing. */
    if (!is_am486 && !is_pentium && !is_k5 && !is_k6 && !is_p6 && !is_cxsmm)
        return;
    x386_common_log("enter_smm(): smbase = %08X\n", smbase);
    x386_common_log("CS : seg = %04X, base = %08X, limit = %08X, limit_low = %08X, limit_high = %08X, access = %02X, ar_high = %02X\n",
                    cpu_state.seg_cs.seg, cpu_state.seg_cs.base, cpu_state.seg_cs.limit, cpu_state.seg_cs.limit_low,
                    cpu_state.seg_cs.limit_high, cpu_state.seg_cs.access, cpu_state.seg_cs.ar_high);
    x386_common_log("DS : seg = %04X, base = %08X, limit = %08X, limit_low = %08X, limit_high = %08X, access = %02X, ar_high = %02X\n",
                    cpu_state.seg_ds.seg, cpu_state.seg_ds.base, cpu_state.seg_ds.limit, cpu_state.seg_ds.limit_low,
                    cpu_state.seg_ds.limit_high, cpu_state.seg_ds.access, cpu_state.seg_ds.ar_high);
    x386_common_log("ES : seg = %04X, base = %08X, limit = %08X, limit_low = %08X, limit_high = %08X, access = %02X, ar_high = %02X\n",
                    cpu_state.seg_es.seg, cpu_state.seg_es.base, cpu_state.seg_es.limit, cpu_state.seg_es.limit_low,
                    cpu_state.seg_es.limit_high, cpu_state.seg_es.access, cpu_state.seg_es.ar_high);
    x386_common_log("FS : seg = %04X, base = %08X, limit = %08X, limit_low = %08X, limit_high = %08X, access = %02X, ar_high = %02X\n",
                    cpu_state.seg_fs.seg, cpu_state.seg_fs.base, cpu_state.seg_fs.limit, cpu_state.seg_fs.limit_low,
                    cpu_state.seg_fs.limit_high, cpu_state.seg_fs.access, cpu_state.seg_fs.ar_high);
    x386_common_log("GS : seg = %04X, base = %08X, limit = %08X, limit_low = %08X, limit_high = %08X, access = %02X, ar_high = %02X\n",
                    cpu_state.seg_gs.seg, cpu_state.seg_gs.base, cpu_state.seg_gs.limit, cpu_state.seg_gs.limit_low,
                    cpu_state.seg_gs.limit_high, cpu_state.seg_gs.access, cpu_state.seg_gs.ar_high);
    x386_common_log("SS : seg = %04X, base = %08X, limit = %08X, limit_low = %08X, limit_high = %08X, access = %02X, ar_high = %02X\n",
                    cpu_state.seg_ss.seg, cpu_state.seg_ss.base, cpu_state.seg_ss.limit, cpu_state.seg_ss.limit_low,
                    cpu_state.seg_ss.limit_high, cpu_state.seg_ss.access, cpu_state.seg_ss.ar_high);
    x386_common_log("TR : seg = %04X, base = %08X, limit = %08X, limit_low = %08X, limit_high = %08X, access = %02X, ar_high = %02X\n",
                    tr.seg, tr.base, tr.limit, tr.limit_low, tr.limit_high, tr.access, tr.ar_high);
    x386_common_log("LDT: seg = %04X, base = %08X, limit = %08X, limit_low = %08X, limit_high = %08X, access = %02X, ar_high = %02X\n",
                    ldt.seg, ldt.base, ldt.limit, ldt.limit_low, ldt.limit_high, ldt.access, ldt.ar_high);
    x386_common_log("GDT: seg = %04X, base = %08X, limit = %08X, limit_low = %08X, limit_high = %08X, access = %02X, ar_high = %02X\n",
                    gdt.seg, gdt.base, gdt.limit, gdt.limit_low, gdt.limit_high, gdt.access, gdt.ar_high);
    x386_common_log("IDT: seg = %04X, base = %08X, limit = %08X, limit_low = %08X, limit_high = %08X, access = %02X, ar_high = %02X\n",
                    idt.seg, idt.base, idt.limit, idt.limit_low, idt.limit_high, idt.access, idt.ar_high);
    x386_common_log("CR0 = %08X, CR3 = %08X, CR4 = %08X, DR6 = %08X, DR7 = %08X\n", cr0, cr3, cr4, dr[6], dr[7]);
    x386_common_log("EIP = %08X, EFLAGS = %04X%04X\n", cpu_state.pc, cpu_state.eflags, cpu_state.flags);
    x386_common_log("EAX = %08X, EBX = %08X, ECX = %08X, EDX = %08X, ESI = %08X, EDI = %08X, ESP = %08X, EBP = %08X\n",
                    EAX, EBX, ECX, EDX, ESI, EDI, ESP, EBP);
    /* Materialize the lazy flags before they are written to the map. */
    flags_rebuild();
    /* Mark SMM early so SMRAM is accessible while saving state;
       refined to 1 or 2 (latched) further below. */
    in_smm = 1;
    smram_backup_all();
    smram_recalc_all(0);
    if (is_cxsmm) {
        /* Cyrix: the save state lives at the SMM header address (SMHR);
           initialize it from ARR3 if the register was never set. */
        if (!(cyrix.smhr & SMHR_VALID))
            cyrix.smhr = (cyrix.arr[3].base + cyrix.arr[3].size) | SMHR_VALID;
        smram_state = cyrix.smhr & SMHR_ADDR_MASK;
    }
    memset(saved_state, 0x00, SMM_SAVE_STATE_MAP_SIZE * sizeof(uint32_t));
    if (is_cxsmm) /* Cx6x86 */
        smram_save_state_cyrix(saved_state, in_hlt);
    else if (is_pentium || is_am486) /* Am486 / 5x86 / Intel P5 (Pentium) */
        smram_save_state_p5(saved_state, in_hlt);
    else if (is_k5 || is_k6) /* AMD K5 and K6 */
        smram_save_state_amd_k(saved_state, in_hlt);
    else if (is_p6) /* Intel P6 (Pentium Pro, Pentium II, Celeron) */
        smram_save_state_p6(saved_state, in_hlt);
    /* Drop into an SMM (real-mode-like) execution environment:
       clear PG, TS, EM and PE in CR0, reset flags, CR4 and DR7. */
    cr0 &= ~0x8000000d;
    cpu_state.flags = 2;
    cpu_state.eflags = 0;
    cr4 = 0;
    dr[7] = 0x400;
    if (is_cxsmm) {
        /* Cyrix enters at offset 0 of the ARR3 region; the previous CS
           descriptor is stashed in the SMM header at SMHR - 0x20. */
        cpu_state.pc = 0x0000;
        cpl_override = 1;
        if (is486)
            cyrix_write_seg_descriptor(smram_state - 0x20, &cpu_state.seg_cs);
        else
            cyrix_write_seg_descriptor_2386(smram_state - 0x20, &cpu_state.seg_cs);
        cpl_override = 0;
        cpu_state.seg_cs.seg = (cyrix.arr[3].base >> 4);
        cpu_state.seg_cs.base = cyrix.arr[3].base;
        cpu_state.seg_cs.limit = 0xffffffff;
        cpu_state.seg_cs.access = 0x93;
        cpu_state.seg_cs.ar_high = 0x80;
        cpu_state.seg_cs.checked = 1;
        smm_seg_load(&cpu_state.seg_cs);
    } else {
        /* Intel/AMD enter at SMBASE + 0x8000 with all data segments flat. */
        cpu_state.pc = 0x8000;
        cpu_state.seg_ds.seg = 0x00000000;
        cpu_state.seg_ds.base = 0x00000000;
        cpu_state.seg_ds.limit = 0xffffffff;
        cpu_state.seg_ds.access = 0x93;
        cpu_state.seg_ds.ar_high = 0x80;
        memcpy(&cpu_state.seg_es, &cpu_state.seg_ds, sizeof(x86seg));
        memcpy(&cpu_state.seg_ss, &cpu_state.seg_ds, sizeof(x86seg));
        memcpy(&cpu_state.seg_fs, &cpu_state.seg_ds, sizeof(x86seg));
        memcpy(&cpu_state.seg_gs, &cpu_state.seg_ds, sizeof(x86seg));
        if (is_p6)
            cpu_state.seg_cs.seg = (smbase >> 4);
        else
            cpu_state.seg_cs.seg = 0x3000;
        /* On Pentium, CS selector in SMM is always 3000, regardless of SMBASE. */
        cpu_state.seg_cs.base = smbase;
        cpu_state.seg_cs.limit = 0xffffffff;
        cpu_state.seg_cs.access = 0x93;
        cpu_state.seg_cs.ar_high = 0x80;
        cpu_state.seg_cs.checked = 1;
        smm_seg_load(&cpu_state.seg_es);
        smm_seg_load(&cpu_state.seg_cs);
        smm_seg_load(&cpu_state.seg_ds);
        smm_seg_load(&cpu_state.seg_ss);
        smm_seg_load(&cpu_state.seg_fs);
        smm_seg_load(&cpu_state.seg_gs);
    }
    cpu_state.op32 = use32;
    /* Write the save state map into SMRAM, bypassing privilege checks. */
    cpl_override = 1;
    if (is_cxsmm) {
        /* Cyrix header: seven dwords at descending offsets below SMHR;
           -0x1c..-0x20 are occupied by the CS descriptor written above. */
        writememl(0, smram_state - 0x04, saved_state[0]);
        writememl(0, smram_state - 0x08, saved_state[1]);
        writememl(0, smram_state - 0x0c, saved_state[2]);
        writememl(0, smram_state - 0x10, saved_state[3]);
        writememl(0, smram_state - 0x14, saved_state[4]);
        writememl(0, smram_state - 0x18, saved_state[5]);
        writememl(0, smram_state - 0x24, saved_state[6]);
    } else {
        /* Intel/AMD: the whole map, downwards from the top of SMRAM. */
        for (uint8_t n = 0; n < SMM_SAVE_STATE_MAP_SIZE; n++) {
            smram_state -= 4;
            writememl(0, smram_state, saved_state[n]);
        }
    }
    cpl_override = 0;
    /* NMIs are masked on SMM entry. */
    nmi_mask = 0;
    if (smi_latched) {
        in_smm = 2;
        smi_latched = 0;
    } else
        in_smm = 1;
    smm_in_hlt = in_hlt;
    /* Some chipsets expose all of RAM (A20 unmasked) while in SMM. */
    if (unmask_a20_in_smm) {
        old_rammask = rammask;
        rammask = cpu_16bitbus ? 0xFFFFFF : 0xFFFFFFFF;
        if (is6117)
            rammask |= 0x3000000;
        flushmmucache();
    }
    oldcpl = 0;
    cpu_cur_status &= ~(CPU_STATUS_PMODE | CPU_STATUS_V86);
    CPU_BLOCK_END();
}
/* Poll the SMI# line: enter SMM if not already in it, latch exactly one
   pending SMI while servicing the current one, and discard any further
   SMIs while one is already latched. The line is always cleared. */
void
enter_smm_check(int in_hlt)
{
    if (smi_line) {
        switch (in_smm) {
            case 0:
#ifdef ENABLE_386_COMMON_LOG
                x386_common_log("SMI while not in SMM\n");
#endif
                enter_smm(in_hlt);
                break;
            case 1:
                /* Mark this so that we don't latch more than one SMI. */
#ifdef ENABLE_386_COMMON_LOG
                x386_common_log("SMI while in unlatched SMM\n");
#endif
                smi_latched = 1;
                break;
            case 2:
                /* Already latched: the extra SMI is simply dropped. */
#ifdef ENABLE_386_COMMON_LOG
                x386_common_log("SMI while in latched SMM\n");
#endif
                break;
            default:
                break;
        }
        smi_line = 0;
    }
}
/* Resume from System Management Mode (RSM): read the SMM save state image
   back from SMRAM, restore the CPU context from it and return to the
   interrupted code. Counterpart of enter_smm(). */
void
leave_smm(void)
{
    uint32_t saved_state[SMM_SAVE_STATE_MAP_SIZE];
    uint32_t smram_state = smbase + 0x10000;
    /* If it's a CPU on which SMM is not supported (or not implemented in 86Box), do nothing. */
    if (!is_am486 && !is_pentium && !is_k5 && !is_k6 && !is_p6 && !is_cxsmm)
        return;
    memset(saved_state, 0x00, SMM_SAVE_STATE_MAP_SIZE * sizeof(uint32_t));
    /* Read the save state with supervisor privileges regardless of CPL. */
    cpl_override = 1;
    if (is_cxsmm) {
        /* Cyrix SMM uses a small fixed-layout header at SMHR instead of the
           Intel-style top-down save state map. */
        smram_state = cyrix.smhr & SMHR_ADDR_MASK;
        saved_state[0] = readmeml(0, smram_state - 0x04);
        saved_state[1] = readmeml(0, smram_state - 0x08);
        saved_state[2] = readmeml(0, smram_state - 0x0c);
        saved_state[3] = readmeml(0, smram_state - 0x10);
        saved_state[4] = readmeml(0, smram_state - 0x14);
        saved_state[5] = readmeml(0, smram_state - 0x18);
        if (is486)
            cyrix_load_seg_descriptor(smram_state - 0x20, &cpu_state.seg_cs);
        else
            cyrix_load_seg_descriptor_2386(smram_state - 0x20, &cpu_state.seg_cs);
        saved_state[6] = readmeml(0, smram_state - 0x24);
    } else {
        /* The save state map is stored top-down from SMBASE + 0x10000. */
        for (uint8_t n = 0; n < SMM_SAVE_STATE_MAP_SIZE; n++) {
            smram_state -= 4;
            saved_state[n] = readmeml(0, smram_state);
            x386_common_log("Reading %08X from memory at %08X to array element %i\n", saved_state[n], smram_state, n);
        }
    }
    cpl_override = 0;
    /* Undo the temporary A20 unmasking done on SMM entry, if any. */
    if (unmask_a20_in_smm) {
        rammask = old_rammask;
        flushmmucache();
    }
    x386_common_log("New SMBASE: %08X (%08X)\n", saved_state[SMRAM_FIELD_P5_SMBASE_OFFSET], saved_state[66]);
    /* Dispatch to the CPU-family-specific save state layout. */
    if (is_cxsmm) /* Cx6x86 */
        smram_restore_state_cyrix(saved_state);
    else if (is_pentium || is_am486) /* Am486 / 5x86 / Intel P5 (Pentium) */
        smram_restore_state_p5(saved_state);
    else if (is_k5 || is_k6) /* AMD K5 and K6 */
        smram_restore_state_amd_k(saved_state);
    else if (is_p6) /* Intel P6 (Pentium Pro, Pentium II, Celeron) */
        smram_restore_state_p6(saved_state);
    in_smm = 0;
    smram_recalc_all(1);
    cpu_386_flags_extract();
    /* Recompute the cached mode bits from the restored CR0/EFLAGS. */
    cpu_cur_status &= ~(CPU_STATUS_PMODE | CPU_STATUS_V86);
    if (cr0 & 1) {
        cpu_cur_status |= CPU_STATUS_PMODE;
        if (cpu_state.eflags & VM_FLAG)
            cpu_cur_status |= CPU_STATUS_V86;
    }
    /* Re-enable NMI delivery on leaving SMM. */
    nmi_mask = 1;
    oldcpl = CPL;
    CPU_BLOCK_END();
    x386_common_log("CS : seg = %04X, base = %08X, limit = %08X, limit_low = %08X, limit_high = %08X, access = %02X, ar_high = %02X\n",
                    cpu_state.seg_cs.seg, cpu_state.seg_cs.base, cpu_state.seg_cs.limit, cpu_state.seg_cs.limit_low,
                    cpu_state.seg_cs.limit_high, cpu_state.seg_cs.access, cpu_state.seg_cs.ar_high);
    x386_common_log("DS : seg = %04X, base = %08X, limit = %08X, limit_low = %08X, limit_high = %08X, access = %02X, ar_high = %02X\n",
                    cpu_state.seg_ds.seg, cpu_state.seg_ds.base, cpu_state.seg_ds.limit, cpu_state.seg_ds.limit_low,
                    cpu_state.seg_ds.limit_high, cpu_state.seg_ds.access, cpu_state.seg_ds.ar_high);
    x386_common_log("ES : seg = %04X, base = %08X, limit = %08X, limit_low = %08X, limit_high = %08X, access = %02X, ar_high = %02X\n",
                    cpu_state.seg_es.seg, cpu_state.seg_es.base, cpu_state.seg_es.limit, cpu_state.seg_es.limit_low,
                    cpu_state.seg_es.limit_high, cpu_state.seg_es.access, cpu_state.seg_es.ar_high);
    x386_common_log("FS : seg = %04X, base = %08X, limit = %08X, limit_low = %08X, limit_high = %08X, access = %02X, ar_high = %02X\n",
                    cpu_state.seg_fs.seg, cpu_state.seg_fs.base, cpu_state.seg_fs.limit, cpu_state.seg_fs.limit_low,
                    cpu_state.seg_fs.limit_high, cpu_state.seg_fs.access, cpu_state.seg_fs.ar_high);
    x386_common_log("GS : seg = %04X, base = %08X, limit = %08X, limit_low = %08X, limit_high = %08X, access = %02X, ar_high = %02X\n",
                    cpu_state.seg_gs.seg, cpu_state.seg_gs.base, cpu_state.seg_gs.limit, cpu_state.seg_gs.limit_low,
                    cpu_state.seg_gs.limit_high, cpu_state.seg_gs.access, cpu_state.seg_gs.ar_high);
    x386_common_log("SS : seg = %04X, base = %08X, limit = %08X, limit_low = %08X, limit_high = %08X, access = %02X, ar_high = %02X\n",
                    cpu_state.seg_ss.seg, cpu_state.seg_ss.base, cpu_state.seg_ss.limit, cpu_state.seg_ss.limit_low,
                    cpu_state.seg_ss.limit_high, cpu_state.seg_ss.access, cpu_state.seg_ss.ar_high);
    x386_common_log("TR : seg = %04X, base = %08X, limit = %08X, limit_low = %08X, limit_high = %08X, access = %02X, ar_high = %02X\n",
                    tr.seg, tr.base, tr.limit, tr.limit_low, tr.limit_high, tr.access, tr.ar_high);
    x386_common_log("LDT: seg = %04X, base = %08X, limit = %08X, limit_low = %08X, limit_high = %08X, access = %02X, ar_high = %02X\n",
                    ldt.seg, ldt.base, ldt.limit, ldt.limit_low, ldt.limit_high, ldt.access, ldt.ar_high);
    x386_common_log("GDT: seg = %04X, base = %08X, limit = %08X, limit_low = %08X, limit_high = %08X, access = %02X, ar_high = %02X\n",
                    gdt.seg, gdt.base, gdt.limit, gdt.limit_low, gdt.limit_high, gdt.access, gdt.ar_high);
    x386_common_log("IDT: seg = %04X, base = %08X, limit = %08X, limit_low = %08X, limit_high = %08X, access = %02X, ar_high = %02X\n",
                    idt.seg, idt.base, idt.limit, idt.limit_low, idt.limit_high, idt.access, idt.ar_high);
    x386_common_log("CR0 = %08X, CR3 = %08X, CR4 = %08X, DR6 = %08X, DR7 = %08X\n", cr0, cr3, cr4, dr[6], dr[7]);
    x386_common_log("EIP = %08X, EFLAGS = %04X%04X\n", cpu_state.pc, cpu_state.eflags, cpu_state.flags);
    x386_common_log("EAX = %08X, EBX = %08X, ECX = %08X, EDX = %08X, ESI = %08X, EDI = %08X, ESP = %08X, EBP = %08X\n",
                    EAX, EBX, ECX, EDX, ESI, EDI, ESP, EBP);
    x386_common_log("leave_smm()\n");
}
/* Dispatch fault-style interrupt/exception vector 'num'. The instruction
   pointer is rewound to oldpc so the faulting instruction is restarted on
   IRET. Protected mode goes through pmodeint(); in real mode the handler
   address is fetched from the IVT and FLAGS/CS/IP are pushed manually. */
void
x86_int(int num)
{
    uint32_t addr;
    flags_rebuild();
    /* Fault semantics: restart the faulting instruction. */
    cpu_state.pc = cpu_state.oldpc;
    if (msw & 1)
        cpu_use_exec ? pmodeint(num, 0) : pmodeint_2386(num, 0);
    else {
        addr = (num << 2) + idt.base;
        if ((num << 2UL) + 3UL > idt.limit) {
            /* Vector lies outside the IVT limit. If the limit cannot even
               hold the double fault vector, this is effectively a triple
               fault: reset. Otherwise escalate to double fault (vector 8). */
            if (idt.limit < 35) {
                cpu_state.abrt = 0;
                softresetx86();
                cpu_set_edx();
#ifdef ENABLE_386_COMMON_LOG
                x386_common_log("Triple fault in real mode - reset\n");
#endif
            } else
                x86_int(8);
        } else {
            /* Push FLAGS, CS and IP (16 bits each) on the stack. */
            if (stack32) {
                writememw(ss, ESP - 2, cpu_state.flags);
                writememw(ss, ESP - 4, CS);
                writememw(ss, ESP - 6, cpu_state.pc);
                ESP -= 6;
            } else {
                writememw(ss, ((SP - 2) & 0xFFFF), cpu_state.flags);
                writememw(ss, ((SP - 4) & 0xFFFF), CS);
                writememw(ss, ((SP - 6) & 0xFFFF), cpu_state.pc);
                SP -= 6;
            }
            /* Interrupts disable IF and TF for the handler. */
            cpu_state.flags &= ~I_FLAG;
            cpu_state.flags &= ~T_FLAG;
#ifndef USE_NEW_DYNAREC
            oxpc = cpu_state.pc;
#endif
            /* Load the handler CS:IP from the IVT entry. */
            cpu_state.pc = readmemw(0, addr);
            cpu_use_exec ? loadcs(readmemw(0, addr + 2)) : loadcs_2386(readmemw(0, addr + 2));
        }
    }
    cycles -= 70;
    CPU_BLOCK_END();
}
/* Dispatch a software interrupt (INT n). Unlike x86_int() this is
   trap-style: the saved return address points past the INT instruction
   (the PC is not rewound), and an out-of-limit IVT access raises #GP
   instead of escalating towards a reset. */
void
x86_int_sw(int num)
{
    uint32_t addr;
    flags_rebuild();
    cycles -= timing_int;
    if (msw & 1)
        cpu_use_exec ? pmodeint(num, 1) : pmodeint_2386(num, 1);
    else {
        addr = (num << 2) + idt.base;
        if ((num << 2UL) + 3UL > idt.limit)
            x86_int(0x0d);
        else {
            /* Push FLAGS, CS and IP (16 bits each) on the stack. */
            if (stack32) {
                writememw(ss, ESP - 2, cpu_state.flags);
                writememw(ss, ESP - 4, CS);
                writememw(ss, ESP - 6, cpu_state.pc);
                ESP -= 6;
            } else {
                writememw(ss, ((SP - 2) & 0xFFFF), cpu_state.flags);
                writememw(ss, ((SP - 4) & 0xFFFF), CS);
                writememw(ss, ((SP - 6) & 0xFFFF), cpu_state.pc);
                SP -= 6;
            }
            cpu_state.flags &= ~I_FLAG;
            cpu_state.flags &= ~T_FLAG;
#ifndef USE_NEW_DYNAREC
            oxpc = cpu_state.pc;
#endif
            /* Load the handler CS:IP from the IVT entry. */
            cpu_state.pc = readmemw(0, addr);
            cpu_use_exec ? loadcs(readmemw(0, addr + 2)) : loadcs_2386(readmemw(0, addr + 2));
            cycles -= timing_int_rm;
        }
    }
    /* The two interpreter cores track the trap flag state differently. */
    if (cpu_use_exec)
        trap = 0;
    else
        trap &= ~1;
    CPU_BLOCK_END();
}
/* Real-mode-only software interrupt dispatch. Returns 1 if an abort
   (fault) occurred while reading the vector or pushing the frame,
   0 on success. */
int
x86_int_sw_rm(int num)
{
    uint32_t addr;
    uint16_t new_pc;
    uint16_t new_cs;
    flags_rebuild();
    cycles -= timing_int;
    /* Fetch the handler CS:IP from the IVT before touching the stack. */
    addr = num << 2;
    new_pc = readmemw(0, addr);
    new_cs = readmemw(0, addr + 2);
    if (cpu_state.abrt)
        return 1;
    writememw(ss, ((SP - 2) & 0xFFFF), cpu_state.flags);
    if (cpu_state.abrt)
        return 1;
    /* NOTE(review): no abrt check between the CS and IP pushes - presumably
       fine because both target the same stack area and would fault alike;
       confirm against the other dispatch paths. */
    writememw(ss, ((SP - 4) & 0xFFFF), CS);
    writememw(ss, ((SP - 6) & 0xFFFF), cpu_state.pc);
    if (cpu_state.abrt)
        return 1;
    SP -= 6;
    cpu_state.eflags &= ~VIF_FLAG;
    cpu_state.flags &= ~T_FLAG;
    cpu_state.pc = new_pc;
    cpu_use_exec ? loadcs(new_cs) : loadcs_2386(new_cs);
#ifndef USE_NEW_DYNAREC
    oxpc = cpu_state.pc;
#endif
    cycles -= timing_int_rm;
    /* The two interpreter cores track the trap flag state differently. */
    if (cpu_use_exec)
        trap = 0;
    else
        trap &= ~1;
    CPU_BLOCK_END();
    return 0;
}
/* Invalid opcode: raise #UD (vector 6). */
void
x86illegal(void)
{
    x86_int(6);
}
/* Check I/O permission for an access at 'port' with byte mask 'mask'
   (1 = byte, 3 = word, 0xf = dword). Returns non-zero if the access is
   NOT permitted. */
int
checkio(uint32_t port, int mask)
{
    uint32_t t;
    /* Not a 32-bit TSS: fall back to the plain CPL vs. IOPL check. */
    if (!(tr.access & 0x08)) {
        if ((CPL) > (IOPL))
            return 1;
        return 0;
    }
    /* 32-bit TSS: consult the I/O permission bitmap. */
    cpl_override = 1;
    t = readmemw(tr.base, 0x66);
    if (UNLIKELY(cpu_state.abrt)) {
        cpl_override = 0;
        return 0;
    }
    t += (port >> 3UL);
    mask <<= (port & 7);
    if (UNLIKELY(mask & 0xff00)) {
        /* The shifted mask straddles a byte boundary, so a word is read:
           byte t+1 must also be inside the limit, hence the strict '<'. */
        if (LIKELY(t < tr.limit))
            mask &= readmemwl(tr.base + t);
    } else {
        if (LIKELY(t <= tr.limit))
            mask &= readmembl(tr.base + t);
    }
    cpl_override = 0;
    /* If the offset was outside the bitmap, mask stays set and the access
       is denied; otherwise any remaining set bit means a denied byte. */
    return mask;
}
/* Raise the divide error exception (#DE, vector 0). The old variant goes
   through the generic x86_int() path (logging the faulting CS:EIP first);
   the new one uses the dedicated x86de() handler. */
#ifdef OLD_DIVEXCP
# define divexcp() \
{ \
x386_common_log("Divide exception at %04X(%06X):%04X\n", CS, cs, cpu_state.pc); \
x86_int(0); \
}
#else
# define divexcp() \
{ \
x86de(NULL, 0); \
}
#endif
/* Unsigned 64-by-32 divide of EDX:EAX by 'val' (the DIV instruction's
   32-bit form). On success EAX receives the quotient, EDX the remainder
   and 0 is returned; on divide-by-zero or quotient overflow the divide
   exception is raised and 1 is returned. */
int
divl(uint32_t val)
{
    uint64_t dividend;
    uint64_t quotient;

    if (val == 0) {
        divexcp();
        return 1;
    }

    dividend = (((uint64_t) EDX) << 32) | (uint64_t) EAX;
    quotient = dividend / val;

    /* The quotient must fit in 32 bits, otherwise #DE is raised. */
    if (quotient > 0xffffffffULL) {
        divexcp();
        return 1;
    }

    EDX = (uint32_t) (dividend % val);
    EAX = (uint32_t) quotient;
    return 0;
}
/* Signed 64-by-32 divide of EDX:EAX by 'val' (the IDIV instruction's
   32-bit form). On success EAX receives the quotient, EDX the remainder
   and 0 is returned; on divide-by-zero or quotient overflow the divide
   exception (#DE) is raised and 1 is returned. */
int
idivl(int32_t val)
{
    int64_t num;
    int64_t quo;
    int32_t rem;
    int32_t quo32;
    if (val == 0) {
        divexcp();
        return 1;
    }
    num = (int64_t) ((((uint64_t) EDX) << 32) | EAX);
    /* INT64_MIN / -1 overflows int64_t, which is undefined behavior in C
       (and raises SIGFPE on x86 hosts). The quotient cannot fit in 32
       bits in that case anyway, so raise #DE up front - matching the
       real IDIV behavior and keeping the host division well-defined. */
    if ((val == -1) && (num == INT64_MIN)) {
        divexcp();
        return 1;
    }
    quo = num / val;
    rem = num % val;
    quo32 = (int32_t) (quo & 0xFFFFFFFF);
    /* Quotient must fit in 32 bits, otherwise #DE. */
    if (quo != (int64_t) quo32) {
        divexcp();
        return 1;
    }
    EDX = rem;
    EAX = quo32;
    return 0;
}
/* Out-of-line wrapper around the inline flags_extract() helper, for use
   from other translation units. */
void
cpu_386_flags_extract(void)
{
    flags_extract();
}
/* Out-of-line wrapper around the inline flags_rebuild() helper, for use
   from other translation units. */
void
cpu_386_flags_rebuild(void)
{
    flags_rebuild();
}
extern uint64_t mmutranslate_noabrt_2386(uint32_t addr, int rw);
/* Check the debug registers for an instruction (execute) breakpoint at
   the current CS:EIP. Sets the matching status bit(s) in DR6 and returns
   1 if any enabled breakpoint hit, 0 otherwise. */
int
cpu_386_check_instruction_fault(void)
{
    int hit = 0;

    /* Report no fault if RF is set. */
    if (cpu_state.eflags & RF_FLAG)
        return 0;

    /* Make sure breakpoints are enabled. */
    if (!(dr[7] & 0xFF))
        return 0;

    for (int bp = 0; bp < 4; bp++) {
        /* Either the local or global enable bit must be set, and the
           R/W-LEN field must be zero (i.e. an execute breakpoint). */
        int armed = (dr[7] & (0x3 << (2 * bp))) && !(dr[7] & (0x30000 << (4 * bp)));

        if (armed && ((cs + cpu_state.pc) == (uint32_t) dr[bp])) {
            dr[6] |= (1 << bp);
            hit = 1;
        }
    }

    return hit;
}
/* Intel SYSENTER: fast system call entry to CPL 0. Loads flat 4 GB ring-0
   CS/SS descriptors derived from the SYSENTER_CS MSR and jumps to the
   SYSENTER_EIP/ESP MSR values. Returns 1 on success, or cpu_state.abrt
   after raising #GP when the preconditions fail. */
int
sysenter(uint32_t fetchdat)
{
#ifdef ENABLE_386_COMMON_LOG
    x386_common_log("SYSENTER called\n");
#endif
    if (!(msw & 1)) {
#ifdef ENABLE_386_COMMON_LOG
        x386_common_log("SYSENTER: CPU not in protected mode");
#endif
        x86gpf("SYSENTER: CPU not in protected mode", 0);
        return cpu_state.abrt;
    }
    if (!(msr.sysenter_cs & 0xFFF8)) {
#ifdef ENABLE_386_COMMON_LOG
        x386_common_log("SYSENTER: CS MSR is zero");
#endif
        x86gpf("SYSENTER: CS MSR is zero", 0);
        return cpu_state.abrt;
    }
#ifdef ENABLE_386_COMMON_LOG
    x386_common_log("SYSENTER started:\n");
    x386_common_log("    CS %04X/%i: b=%08X l=%08X (%08X-%08X) a=%02X%02X; EIP=%08X\n", cpu_state.seg_cs.seg, !!cpu_state.seg_cs.checked, cpu_state.seg_cs.base, cpu_state.seg_cs.limit, cpu_state.seg_cs.limit_low, cpu_state.seg_cs.limit_high, cpu_state.seg_cs.ar_high, cpu_state.seg_cs.access, cpu_state.pc);
    x386_common_log("    SS %04X/%i: b=%08X l=%08X (%08X-%08X) a=%02X%02X; ESP=%08X\n", cpu_state.seg_ss.seg, !!cpu_state.seg_ss.checked, cpu_state.seg_ss.base, cpu_state.seg_ss.limit, cpu_state.seg_ss.limit_low, cpu_state.seg_ss.limit_high, cpu_state.seg_ss.ar_high, cpu_state.seg_ss.access, ESP);
    x386_common_log("    Misc.  : MSR (CS/ESP/EIP)=%04X/%08X/%08X pccache=%08X/%08X\n", msr.sysenter_cs, msr.sysenter_esp, msr.sysenter_eip, pccache, pccache2);
    x386_common_log("             EFLAGS=%04X%04X/%i 32=%i/%i ECX=%08X EDX=%08X abrt=%02X\n", cpu_state.eflags, cpu_state.flags, !!trap, !!use32, !!stack32, ECX, EDX, cpu_state.abrt);
#endif
    /* Set VM, RF, and IF to 0. */
    cpu_state.eflags &= ~(RF_FLAG | VM_FLAG);
    cpu_state.flags &= ~I_FLAG;
#ifndef USE_NEW_DYNAREC
    oldcs = CS;
#endif
    cpu_state.oldpc = cpu_state.pc;
    /* Target stack and entry point come from the SYSENTER MSRs. */
    ESP = msr.sysenter_esp;
    cpu_state.pc = msr.sysenter_eip;
    /* CS: flat 4 GB ring-0 code segment. */
    cpu_state.seg_cs.seg = (msr.sysenter_cs & 0xfffc);
    cpu_state.seg_cs.base = 0;
    cpu_state.seg_cs.limit_low = 0;
    cpu_state.seg_cs.limit = 0xffffffff;
    cpu_state.seg_cs.limit_high = 0xffffffff;
    cpu_state.seg_cs.access = 0x9b;
    cpu_state.seg_cs.ar_high = 0xcf;
    cpu_state.seg_cs.checked = 1;
    oldcpl = 0;
    /* SS: flat 4 GB ring-0 data segment at SYSENTER_CS + 8. */
    cpu_state.seg_ss.seg = ((msr.sysenter_cs + 8) & 0xfffc);
    cpu_state.seg_ss.base = 0;
    cpu_state.seg_ss.limit_low = 0;
    cpu_state.seg_ss.limit = 0xffffffff;
    cpu_state.seg_ss.limit_high = 0xffffffff;
    cpu_state.seg_ss.access = 0x93;
    cpu_state.seg_ss.ar_high = 0xcf;
    cpu_state.seg_ss.checked = 1;
#ifdef USE_DYNAREC
    codegen_flat_ss = 0;
#endif
    cpu_cur_status &= ~(CPU_STATUS_NOTFLATSS | CPU_STATUS_V86);
    cpu_cur_status |= (CPU_STATUS_USE32 | CPU_STATUS_STACK32 /* | CPU_STATUS_PMODE*/);
    set_use32(1);
    set_stack32(1);
    in_sys = 1;
#ifdef ENABLE_386_COMMON_LOG
    x386_common_log("SYSENTER completed:\n");
    x386_common_log("    CS %04X/%i: b=%08X l=%08X (%08X-%08X) a=%02X%02X; EIP=%08X\n", cpu_state.seg_cs.seg, !!cpu_state.seg_cs.checked, cpu_state.seg_cs.base, cpu_state.seg_cs.limit, cpu_state.seg_cs.limit_low, cpu_state.seg_cs.limit_high, cpu_state.seg_cs.ar_high, cpu_state.seg_cs.access, cpu_state.pc);
    x386_common_log("    SS %04X/%i: b=%08X l=%08X (%08X-%08X) a=%02X%02X; ESP=%08X\n", cpu_state.seg_ss.seg, !!cpu_state.seg_ss.checked, cpu_state.seg_ss.base, cpu_state.seg_ss.limit, cpu_state.seg_ss.limit_low, cpu_state.seg_ss.limit_high, cpu_state.seg_ss.ar_high, cpu_state.seg_ss.access, ESP);
    x386_common_log("    Misc.  : MSR (CS/ESP/EIP)=%04X/%08X/%08X pccache=%08X/%08X\n", msr.sysenter_cs, msr.sysenter_esp, msr.sysenter_eip, pccache, pccache2);
    x386_common_log("             EFLAGS=%04X%04X/%i 32=%i/%i ECX=%08X EDX=%08X abrt=%02X\n", cpu_state.eflags, cpu_state.flags, !!trap, !!use32, !!stack32, ECX, EDX, cpu_state.abrt);
#endif
    return 1;
}
/* Intel SYSEXIT: fast return from a SYSENTER handler to CPL 3. Loads flat
   4 GB ring-3 CS/SS descriptors at SYSENTER_CS + 16/+ 24 and resumes at
   EDX (EIP) with ECX as the stack pointer. Returns 1 on success, or
   cpu_state.abrt after raising #GP when the preconditions fail. */
int
sysexit(uint32_t fetchdat)
{
#ifdef ENABLE_386_COMMON_LOG
    x386_common_log("SYSEXIT called\n");
#endif
    if (!(msr.sysenter_cs & 0xFFF8)) {
#ifdef ENABLE_386_COMMON_LOG
        x386_common_log("SYSEXIT: CS MSR is zero");
#endif
        x86gpf("SYSEXIT: CS MSR is zero", 0);
        return cpu_state.abrt;
    }
    if (!(msw & 1)) {
#ifdef ENABLE_386_COMMON_LOG
        x386_common_log("SYSEXIT: CPU not in protected mode");
#endif
        x86gpf("SYSEXIT: CPU not in protected mode", 0);
        return cpu_state.abrt;
    }
    /* SYSEXIT is only legal from CPL 0. */
    if (CPL) {
#ifdef ENABLE_386_COMMON_LOG
        x386_common_log("SYSEXIT: CPL not 0");
#endif
        x86gpf("SYSEXIT: CPL not 0", 0);
        return cpu_state.abrt;
    }
#ifdef ENABLE_386_COMMON_LOG
    x386_common_log("SYSEXIT start:\n");
    x386_common_log("    CS %04X/%i: b=%08X l=%08X (%08X-%08X) a=%02X%02X; EIP=%08X\n", cpu_state.seg_cs.seg, !!cpu_state.seg_cs.checked, cpu_state.seg_cs.base, cpu_state.seg_cs.limit, cpu_state.seg_cs.limit_low, cpu_state.seg_cs.limit_high, cpu_state.seg_cs.ar_high, cpu_state.seg_cs.access, cpu_state.pc);
    x386_common_log("    SS %04X/%i: b=%08X l=%08X (%08X-%08X) a=%02X%02X; ESP=%08X\n", cpu_state.seg_ss.seg, !!cpu_state.seg_ss.checked, cpu_state.seg_ss.base, cpu_state.seg_ss.limit, cpu_state.seg_ss.limit_low, cpu_state.seg_ss.limit_high, cpu_state.seg_ss.ar_high, cpu_state.seg_ss.access, ESP);
    x386_common_log("    Misc.  : MSR (CS/ESP/EIP)=%04X/%08X/%08X pccache=%08X/%08X\n", msr.sysenter_cs, msr.sysenter_esp, msr.sysenter_eip, pccache, pccache2);
    x386_common_log("             EFLAGS=%04X%04X/%i 32=%i/%i ECX=%08X EDX=%08X abrt=%02X\n", cpu_state.eflags, cpu_state.flags, !!trap, !!use32, !!stack32, ECX, EDX, cpu_state.abrt);
#endif
#ifndef USE_NEW_DYNAREC
    oldcs = CS;
#endif
    cpu_state.oldpc = cpu_state.pc;
    /* Return stack and address are passed in ECX/EDX by convention. */
    ESP = ECX;
    cpu_state.pc = EDX;
    /* CS: flat 4 GB ring-3 code segment at SYSENTER_CS + 16, RPL 3. */
    cpu_state.seg_cs.seg = (((msr.sysenter_cs + 16) & 0xfffc) | 3);
    cpu_state.seg_cs.base = 0;
    cpu_state.seg_cs.limit_low = 0;
    cpu_state.seg_cs.limit = 0xffffffff;
    cpu_state.seg_cs.limit_high = 0xffffffff;
    cpu_state.seg_cs.access = 0xfb;
    cpu_state.seg_cs.ar_high = 0xcf;
    cpu_state.seg_cs.checked = 1;
    oldcpl = 3;
    /* SS: flat 4 GB ring-3 data segment at SYSENTER_CS + 24, RPL 3. */
    cpu_state.seg_ss.seg = (((msr.sysenter_cs + 24) & 0xfffc) | 3);
    cpu_state.seg_ss.base = 0;
    cpu_state.seg_ss.limit_low = 0;
    cpu_state.seg_ss.limit = 0xffffffff;
    cpu_state.seg_ss.limit_high = 0xffffffff;
    cpu_state.seg_ss.access = 0xf3;
    cpu_state.seg_ss.ar_high = 0xcf;
    cpu_state.seg_ss.checked = 1;
#ifdef USE_DYNAREC
    codegen_flat_ss = 0;
#endif
    cpu_cur_status &= ~(CPU_STATUS_NOTFLATSS /* | CPU_STATUS_V86*/);
    cpu_cur_status |= (CPU_STATUS_USE32 | CPU_STATUS_STACK32 | CPU_STATUS_PMODE);
    flushmmucache_nopc();
    set_use32(1);
    set_stack32(1);
    in_sys = 0;
#ifdef ENABLE_386_COMMON_LOG
    x386_common_log("SYSEXIT completed:\n");
    x386_common_log("    CS %04X/%i: b=%08X l=%08X (%08X-%08X) a=%02X%02X; EIP=%08X\n", cpu_state.seg_cs.seg, !!cpu_state.seg_cs.checked, cpu_state.seg_cs.base, cpu_state.seg_cs.limit, cpu_state.seg_cs.limit_low, cpu_state.seg_cs.limit_high, cpu_state.seg_cs.ar_high, cpu_state.seg_cs.access, cpu_state.pc);
    x386_common_log("    SS %04X/%i: b=%08X l=%08X (%08X-%08X) a=%02X%02X; ESP=%08X\n", cpu_state.seg_ss.seg, !!cpu_state.seg_ss.checked, cpu_state.seg_ss.base, cpu_state.seg_ss.limit, cpu_state.seg_ss.limit_low, cpu_state.seg_ss.limit_high, cpu_state.seg_ss.ar_high, cpu_state.seg_ss.access, ESP);
    x386_common_log("    Misc.  : MSR (CS/ESP/EIP)=%04X/%08X/%08X pccache=%08X/%08X\n", msr.sysenter_cs, msr.sysenter_esp, msr.sysenter_eip, pccache, pccache2);
    x386_common_log("             EFLAGS=%04X%04X/%i 32=%i/%i ECX=%08X EDX=%08X abrt=%02X\n", cpu_state.eflags, cpu_state.flags, !!trap, !!use32, !!stack32, ECX, EDX, cpu_state.abrt);
#endif
    return 1;
}
/* AMD SYSCALL: fast system call entry to CPL 0. Saves the return EIP in
   ECX and loads flat 4 GB ring-0 CS/SS descriptors derived from the
   SYSCALL target (STAR) selector base. Always returns 1. */
int
syscall_op(uint32_t fetchdat)
{
#ifdef ENABLE_386_COMMON_LOG
    x386_common_log("SYSCALL called\n");
#endif
    /* Let's do this by the AMD spec. */
    /* Set VM and IF to 0. */
    cpu_state.eflags &= ~VM_FLAG;
    cpu_state.flags &= ~I_FLAG;
#ifndef USE_NEW_DYNAREC
    oldcs = CS;
#endif
    cpu_state.oldpc = cpu_state.pc;
    /* Return address is saved in ECX for the matching SYSRET. */
    ECX = cpu_state.pc;
    /* CS */
    CS = AMD_SYSCALL_SB & 0xfffc;
    cpu_state.seg_cs.base = 0;
    cpu_state.seg_cs.limit_low = 0;
    cpu_state.seg_cs.limit = 0xffffffff;
    cpu_state.seg_cs.limit_high = 0xffffffff;
    cpu_state.seg_cs.access = 0x9b;
    cpu_state.seg_cs.ar_high = 0xcf;
    cpu_state.seg_cs.checked = 1;
    oldcpl = 0;
    /* SS */
    SS = (AMD_SYSCALL_SB + 8) & 0xfffc;
    cpu_state.seg_ss.base = 0;
    cpu_state.seg_ss.limit_low = 0;
    cpu_state.seg_ss.limit = 0xffffffff;
    cpu_state.seg_ss.limit_high = 0xffffffff;
    cpu_state.seg_ss.access = 0x93;
    cpu_state.seg_ss.ar_high = 0xcf;
    cpu_state.seg_ss.checked = 1;
#ifdef USE_DYNAREC
    codegen_flat_ss = 0;
#endif
    cpu_cur_status &= ~(CPU_STATUS_NOTFLATSS | CPU_STATUS_V86);
    cpu_cur_status |= (CPU_STATUS_USE32 | CPU_STATUS_STACK32 | CPU_STATUS_PMODE);
    set_use32(1);
    set_stack32(1);
    in_sys = 1;
    return 1;
}
/* AMD SYSRET: fast return from a SYSCALL handler to CPL 3. Loads flat
   4 GB ring-3 CS/SS descriptors derived from the SYSRET selector base and
   resumes at the return address saved in ECX. Returns 1 on success, or
   cpu_state.abrt after raising #GP when not executed at CPL 0. */
int
sysret(uint32_t fetchdat)
{
#ifdef ENABLE_386_COMMON_LOG
    x386_common_log("SYSRET called\n");
#endif
    if (CPL) {
#ifdef ENABLE_386_COMMON_LOG
        x386_common_log("SYSRET: CPL not 0");
#endif
        x86gpf("SYSRET: CPL not 0", 0);
        return cpu_state.abrt;
    }
    cpu_state.flags |= I_FLAG;
    /* First instruction after SYSRET will always execute, regardless of whether
       there is a pending interrupt, following the STI logic */
    cpu_end_block_after_ins = 2;
#ifndef USE_NEW_DYNAREC
    oldcs = CS;
#endif
    cpu_state.oldpc = cpu_state.pc;
    cpu_state.pc = ECX;
    /* CS: flat 4 GB ring-3 code segment, RPL 3. */
    CS = (AMD_SYSRET_SB & 0xfffc) | 3;
    cpu_state.seg_cs.base = 0;
    cpu_state.seg_cs.limit_low = 0;
    cpu_state.seg_cs.limit = 0xffffffff;
    cpu_state.seg_cs.limit_high = 0xffffffff;
    cpu_state.seg_cs.access = 0xfb;
    cpu_state.seg_cs.ar_high = 0xcf;
    cpu_state.seg_cs.checked = 1;
    oldcpl = 3;
    /* SS: flat 4 GB ring-3 data segment, RPL 3. */
    SS = ((AMD_SYSRET_SB + 8) & 0xfffc) | 3;
    cpu_state.seg_ss.base = 0;
    cpu_state.seg_ss.limit_low = 0;
    cpu_state.seg_ss.limit = 0xffffffff;
    cpu_state.seg_ss.limit_high = 0xffffffff;
    cpu_state.seg_ss.access = 0xf3;
    /* Fix: this previously set seg_cs.ar_high (copy-paste from the CS block
       above), leaving SS's attribute byte stale; sysexit()/syscall_op() set
       seg_ss.ar_high at the corresponding point. */
    cpu_state.seg_ss.ar_high = 0xcf;
    cpu_state.seg_ss.checked = 1;
#ifdef USE_DYNAREC
    codegen_flat_ss = 0;
#endif
    cpu_cur_status &= ~(CPU_STATUS_NOTFLATSS /* | CPU_STATUS_V86*/);
    cpu_cur_status |= (CPU_STATUS_USE32 | CPU_STATUS_STACK32 | CPU_STATUS_PMODE);
    flushmmucache_nopc();
    set_use32(1);
    set_stack32(1);
    in_sys = 0;
    return 1;
}
/* Register the timer used by the "fast off" (power-management auto-SMI)
   mechanism; may be NULL to unregister. */
void
cpu_register_fast_off_handler(void *timer)
{
    cpu_fast_off_timer = (pc_timer_t *) timer;
}
/* Restart the fast-off countdown: disable the timer and, if a period is
   configured, re-arm it. */
void
cpu_fast_off_advance(void)
{
    timer_disable(cpu_fast_off_timer);
    if (cpu_fast_off_period != 0.0)
        timer_on_auto(cpu_fast_off_timer, cpu_fast_off_period);
}
/* Set the fast-off timeout to (val + 1) ticks of 'period' and restart the
   countdown. */
void
cpu_fast_off_period_set(uint16_t val, double period)
{
    cpu_fast_off_period = ((double) (val + 1)) * period;
    cpu_fast_off_advance();
}
/* Fully reset the fast-off mechanism: drop the timer handler and period. */
void
cpu_fast_off_reset(void)
{
    cpu_register_fast_off_handler(NULL);
    cpu_fast_off_period = 0.0;
    /* NOTE(review): this ends up calling timer_disable() on the just-NULLed
       timer pointer - presumably timer_disable(NULL) is a safe no-op;
       confirm in the timer code. */
    cpu_fast_off_advance();
}
/* Assert the SMI line (picked up by enter_smm_check()); on 486-class CPUs
   an SMI also restarts the fast-off countdown if so configured. */
void
smi_raise(void)
{
    if (is486 && (cpu_fast_off_flags & 0x80000000))
        cpu_fast_off_advance();
    smi_line = 1;
}
/* Assert the NMI line; on 486-class CPUs an NMI also restarts the
   fast-off countdown if so configured. */
void
nmi_raise(void)
{
    if (is486 && (cpu_fast_off_flags & 0x20000000))
        cpu_fast_off_advance();
    nmi = 1;
}
#ifndef USE_DYNAREC
/* This is for compatibility with new x87 code. */
/* Stub: without the dynamic recompiler there is no generated FPU code
   whose rounding mode needs updating, so this intentionally does nothing. */
void
codegen_set_rounding_mode(int mode)
{
    /* cpu_state.new_npxc = (cpu_state.old_npxc & ~0xc00) | (mode << 10); */
}
#endif
``` | /content/code_sandbox/src/cpu/386_common.c | c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 29,149 |
```objective-c
extern int tempc;
/* Lazy flags evaluation: instead of recomputing EFLAGS after every ALU
   operation, the core records which operation last set the flags
   (cpu_state.flags_op) together with its operands (flags_op1/flags_op2)
   and result (flags_res); the *_SET() helpers below derive the individual
   flag bits on demand. FLAGS_UNKNOWN means cpu_state.flags is already
   up to date. */
enum {
    FLAGS_UNKNOWN,
    FLAGS_ZN8,
    FLAGS_ZN16,
    FLAGS_ZN32,
    FLAGS_ADD8,
    FLAGS_ADD16,
    FLAGS_ADD32,
    FLAGS_SUB8,
    FLAGS_SUB16,
    FLAGS_SUB32,
    FLAGS_SHL8,
    FLAGS_SHL16,
    FLAGS_SHL32,
    FLAGS_SHR8,
    FLAGS_SHR16,
    FLAGS_SHR32,
    FLAGS_SAR8,
    FLAGS_SAR16,
    FLAGS_SAR32,
#ifdef USE_NEW_DYNAREC
    FLAGS_ROL8,
    FLAGS_ROL16,
    FLAGS_ROL32,
    FLAGS_ROR8,
    FLAGS_ROR16,
    FLAGS_ROR32,
#endif
    FLAGS_INC8,
    FLAGS_INC16,
    FLAGS_INC32,
    FLAGS_DEC8,
    FLAGS_DEC16,
    FLAGS_DEC32
#ifdef USE_NEW_DYNAREC
    ,
    FLAGS_ADC8,
    FLAGS_ADC16,
    FLAGS_ADC32,
    FLAGS_SBC8,
    FLAGS_SBC16,
    FLAGS_SBC32
#endif
};
/* Lazily evaluate the Zero flag from the last recorded operation. For all
   arithmetic/logic/shift ops ZF is simply "result == 0"; rotates do not
   affect ZF, so (like FLAGS_UNKNOWN) it is read from cpu_state.flags. */
static __inline int
ZF_SET(void)
{
    switch (cpu_state.flags_op) {
        case FLAGS_ZN8:
        case FLAGS_ZN16:
        case FLAGS_ZN32:
        case FLAGS_ADD8:
        case FLAGS_ADD16:
        case FLAGS_ADD32:
        case FLAGS_SUB8:
        case FLAGS_SUB16:
        case FLAGS_SUB32:
        case FLAGS_SHL8:
        case FLAGS_SHL16:
        case FLAGS_SHL32:
        case FLAGS_SHR8:
        case FLAGS_SHR16:
        case FLAGS_SHR32:
        case FLAGS_SAR8:
        case FLAGS_SAR16:
        case FLAGS_SAR32:
        case FLAGS_INC8:
        case FLAGS_INC16:
        case FLAGS_INC32:
        case FLAGS_DEC8:
        case FLAGS_DEC16:
        case FLAGS_DEC32:
#ifdef USE_NEW_DYNAREC
        case FLAGS_ADC8:
        case FLAGS_ADC16:
        case FLAGS_ADC32:
        case FLAGS_SBC8:
        case FLAGS_SBC16:
        case FLAGS_SBC32:
#endif
            return !cpu_state.flags_res;
#ifdef USE_NEW_DYNAREC
        case FLAGS_ROL8:
        case FLAGS_ROL16:
        case FLAGS_ROL32:
        case FLAGS_ROR8:
        case FLAGS_ROR16:
        case FLAGS_ROR32:
#endif
        case FLAGS_UNKNOWN:
            return cpu_state.flags & Z_FLAG;
#ifndef USE_NEW_DYNAREC
        default:
            return 0;
#endif
    }
#ifdef USE_NEW_DYNAREC
    return 0;
#endif
}
/* Lazily evaluate the Sign (Negative) flag: the top bit of the result at
   the recorded operand width. Rotates do not affect SF, so it is read
   from cpu_state.flags. */
static __inline int
NF_SET(void)
{
    switch (cpu_state.flags_op) {
        /* 8-bit ops: bit 7 of the result. */
        case FLAGS_ZN8:
        case FLAGS_ADD8:
        case FLAGS_SUB8:
        case FLAGS_SHL8:
        case FLAGS_SHR8:
        case FLAGS_SAR8:
        case FLAGS_INC8:
        case FLAGS_DEC8:
#ifdef USE_NEW_DYNAREC
        case FLAGS_ADC8:
        case FLAGS_SBC8:
#endif
            return cpu_state.flags_res & 0x80;
        /* 16-bit ops: bit 15 of the result. */
        case FLAGS_ZN16:
        case FLAGS_ADD16:
        case FLAGS_SUB16:
        case FLAGS_SHL16:
        case FLAGS_SHR16:
        case FLAGS_SAR16:
        case FLAGS_INC16:
        case FLAGS_DEC16:
#ifdef USE_NEW_DYNAREC
        case FLAGS_ADC16:
        case FLAGS_SBC16:
#endif
            return cpu_state.flags_res & 0x8000;
        /* 32-bit ops: bit 31 of the result. */
        case FLAGS_ZN32:
        case FLAGS_ADD32:
        case FLAGS_SUB32:
        case FLAGS_SHL32:
        case FLAGS_SHR32:
        case FLAGS_SAR32:
        case FLAGS_INC32:
        case FLAGS_DEC32:
#ifdef USE_NEW_DYNAREC
        case FLAGS_ADC32:
        case FLAGS_SBC32:
#endif
            return cpu_state.flags_res & 0x80000000;
#ifdef USE_NEW_DYNAREC
        case FLAGS_ROL8:
        case FLAGS_ROL16:
        case FLAGS_ROL32:
        case FLAGS_ROR8:
        case FLAGS_ROR16:
        case FLAGS_ROR32:
#endif
        case FLAGS_UNKNOWN:
            return cpu_state.flags & N_FLAG;
#ifndef USE_NEW_DYNAREC
        default:
            return 0;
#endif
    }
#ifdef USE_NEW_DYNAREC
    return 0;
#endif
}
/* Lazily evaluate the Parity flag: even parity of the low byte of the
   result, looked up in znptable8. Rotates do not affect PF, so it is read
   from cpu_state.flags. */
static __inline int
PF_SET(void)
{
    switch (cpu_state.flags_op) {
        case FLAGS_ZN8:
        case FLAGS_ZN16:
        case FLAGS_ZN32:
        case FLAGS_ADD8:
        case FLAGS_ADD16:
        case FLAGS_ADD32:
        case FLAGS_SUB8:
        case FLAGS_SUB16:
        case FLAGS_SUB32:
        case FLAGS_SHL8:
        case FLAGS_SHL16:
        case FLAGS_SHL32:
        case FLAGS_SHR8:
        case FLAGS_SHR16:
        case FLAGS_SHR32:
        case FLAGS_SAR8:
        case FLAGS_SAR16:
        case FLAGS_SAR32:
        case FLAGS_INC8:
        case FLAGS_INC16:
        case FLAGS_INC32:
        case FLAGS_DEC8:
        case FLAGS_DEC16:
        case FLAGS_DEC32:
#ifdef USE_NEW_DYNAREC
        case FLAGS_ADC8:
        case FLAGS_ADC16:
        case FLAGS_ADC32:
        case FLAGS_SBC8:
        case FLAGS_SBC16:
        case FLAGS_SBC32:
#endif
            return znptable8[cpu_state.flags_res & 0xff] & P_FLAG;
#ifdef USE_NEW_DYNAREC
        case FLAGS_ROL8:
        case FLAGS_ROL16:
        case FLAGS_ROL32:
        case FLAGS_ROR8:
        case FLAGS_ROR16:
        case FLAGS_ROR32:
#endif
        case FLAGS_UNKNOWN:
            return cpu_state.flags & P_FLAG;
#ifndef USE_NEW_DYNAREC
        default:
            return 0;
#endif
    }
#ifdef USE_NEW_DYNAREC
    return 0;
#endif
}
/* Lazily evaluate the Overflow flag from the last recorded operation. */
static __inline int
VF_SET(void)
{
    switch (cpu_state.flags_op) {
        /* Logic ops and SAR always clear OF. */
        case FLAGS_ZN8:
        case FLAGS_ZN16:
        case FLAGS_ZN32:
        case FLAGS_SAR8:
        case FLAGS_SAR16:
        case FLAGS_SAR32:
            return 0;
        /* Addition: overflow when both operands share a sign that differs
           from the result's sign. */
#ifdef USE_NEW_DYNAREC
        case FLAGS_ADC8:
#endif
        case FLAGS_ADD8:
        case FLAGS_INC8:
            return !((cpu_state.flags_op1 ^ cpu_state.flags_op2) & 0x80) && ((cpu_state.flags_op1 ^ cpu_state.flags_res) & 0x80);
#ifdef USE_NEW_DYNAREC
        case FLAGS_ADC16:
#endif
        case FLAGS_ADD16:
        case FLAGS_INC16:
            return !((cpu_state.flags_op1 ^ cpu_state.flags_op2) & 0x8000) && ((cpu_state.flags_op1 ^ cpu_state.flags_res) & 0x8000);
#ifdef USE_NEW_DYNAREC
        case FLAGS_ADC32:
#endif
        case FLAGS_ADD32:
        case FLAGS_INC32:
            return !((cpu_state.flags_op1 ^ cpu_state.flags_op2) & 0x80000000) && ((cpu_state.flags_op1 ^ cpu_state.flags_res) & 0x80000000);
        /* Subtraction: overflow when the operands' signs differ and the
           result's sign differs from the minuend's. */
#ifdef USE_NEW_DYNAREC
        case FLAGS_SBC8:
#endif
        case FLAGS_SUB8:
        case FLAGS_DEC8:
            return ((cpu_state.flags_op1 ^ cpu_state.flags_op2) & (cpu_state.flags_op1 ^ cpu_state.flags_res) & 0x80);
#ifdef USE_NEW_DYNAREC
        case FLAGS_SBC16:
#endif
        case FLAGS_SUB16:
        case FLAGS_DEC16:
            return ((cpu_state.flags_op1 ^ cpu_state.flags_op2) & (cpu_state.flags_op1 ^ cpu_state.flags_res) & 0x8000);
#ifdef USE_NEW_DYNAREC
        case FLAGS_SBC32:
#endif
        case FLAGS_SUB32:
        case FLAGS_DEC32:
            return ((cpu_state.flags_op1 ^ cpu_state.flags_op2) & (cpu_state.flags_op1 ^ cpu_state.flags_res) & 0x80000000);
        /* Shifts: OF set when the top bit changed on the last shift step. */
        case FLAGS_SHL8:
            return (((cpu_state.flags_op1 << cpu_state.flags_op2) ^ (cpu_state.flags_op1 << (cpu_state.flags_op2 - 1))) & 0x80);
        case FLAGS_SHL16:
            return (((cpu_state.flags_op1 << cpu_state.flags_op2) ^ (cpu_state.flags_op1 << (cpu_state.flags_op2 - 1))) & 0x8000);
        case FLAGS_SHL32:
            return (((cpu_state.flags_op1 << cpu_state.flags_op2) ^ (cpu_state.flags_op1 << (cpu_state.flags_op2 - 1))) & 0x80000000);
        case FLAGS_SHR8:
            return ((cpu_state.flags_op2 == 1) && (cpu_state.flags_op1 & 0x80));
        case FLAGS_SHR16:
            return ((cpu_state.flags_op2 == 1) && (cpu_state.flags_op1 & 0x8000));
        case FLAGS_SHR32:
            return ((cpu_state.flags_op2 == 1) && (cpu_state.flags_op1 & 0x80000000));
#ifdef USE_NEW_DYNAREC
        /* Rotates: OF = MSB XOR new carry. */
        case FLAGS_ROL8:
            return (cpu_state.flags_res ^ (cpu_state.flags_res >> 7)) & 1;
        case FLAGS_ROL16:
            return (cpu_state.flags_res ^ (cpu_state.flags_res >> 15)) & 1;
        case FLAGS_ROL32:
            return (cpu_state.flags_res ^ (cpu_state.flags_res >> 31)) & 1;
        case FLAGS_ROR8:
            return (cpu_state.flags_res ^ (cpu_state.flags_res >> 1)) & 0x40;
        case FLAGS_ROR16:
            return (cpu_state.flags_res ^ (cpu_state.flags_res >> 1)) & 0x4000;
        case FLAGS_ROR32:
            return (cpu_state.flags_res ^ (cpu_state.flags_res >> 1)) & 0x40000000;
#endif
        case FLAGS_UNKNOWN:
            return cpu_state.flags & V_FLAG;
#ifndef USE_NEW_DYNAREC
        default:
            return 0;
#endif
    }
#ifdef USE_NEW_DYNAREC
    return 0;
#endif
}
/* Lazily evaluate the Auxiliary (half-carry) flag: carry/borrow out of
   bit 3. Logic ops and shifts clear it; rotates leave it untouched. */
static __inline int
AF_SET(void)
{
    switch (cpu_state.flags_op) {
        case FLAGS_ZN8:
        case FLAGS_ZN16:
        case FLAGS_ZN32:
        case FLAGS_SHL8:
        case FLAGS_SHL16:
        case FLAGS_SHL32:
        case FLAGS_SHR8:
        case FLAGS_SHR16:
        case FLAGS_SHR32:
        case FLAGS_SAR8:
        case FLAGS_SAR16:
        case FLAGS_SAR32:
            return 0;
        /* Addition: carry out of the low nibble. */
        case FLAGS_ADD8:
        case FLAGS_ADD16:
        case FLAGS_ADD32:
        case FLAGS_INC8:
        case FLAGS_INC16:
        case FLAGS_INC32:
            return ((cpu_state.flags_op1 & 0xF) + (cpu_state.flags_op2 & 0xF)) & 0x10;
#ifdef USE_NEW_DYNAREC
        /* Add-with-carry: the carry-in is not recorded, so it is inferred
           from the result nibble (equality can only hold when the
           all-ones operand plus carry-in wrapped). */
        case FLAGS_ADC8:
            return ((cpu_state.flags_res & 0xf) < (cpu_state.flags_op1 & 0xf)) || ((cpu_state.flags_res & 0xf) == (cpu_state.flags_op1 & 0xf) && cpu_state.flags_op2 == 0xff);
        case FLAGS_ADC16:
            return ((cpu_state.flags_res & 0xf) < (cpu_state.flags_op1 & 0xf)) || ((cpu_state.flags_res & 0xf) == (cpu_state.flags_op1 & 0xf) && cpu_state.flags_op2 == 0xffff);
        case FLAGS_ADC32:
            return ((cpu_state.flags_res & 0xf) < (cpu_state.flags_op1 & 0xf)) || ((cpu_state.flags_res & 0xf) == (cpu_state.flags_op1 & 0xf) && cpu_state.flags_op2 == 0xffffffff);
#endif
        /* Subtraction: borrow into the low nibble. */
        case FLAGS_SUB8:
        case FLAGS_SUB16:
        case FLAGS_SUB32:
        case FLAGS_DEC8:
        case FLAGS_DEC16:
        case FLAGS_DEC32:
            return ((cpu_state.flags_op1 & 0xF) - (cpu_state.flags_op2 & 0xF)) & 0x10;
#ifdef USE_NEW_DYNAREC
        case FLAGS_SBC8:
        case FLAGS_SBC16:
        case FLAGS_SBC32:
            return ((cpu_state.flags_op1 & 0xf) < (cpu_state.flags_op2 & 0xf)) || ((cpu_state.flags_op1 & 0xf) == (cpu_state.flags_op2 & 0xf) && (cpu_state.flags_res & 0xf) != 0);
        case FLAGS_ROL8:
        case FLAGS_ROL16:
        case FLAGS_ROL32:
        case FLAGS_ROR8:
        case FLAGS_ROR16:
        case FLAGS_ROR32:
#endif
        case FLAGS_UNKNOWN:
            return cpu_state.flags & A_FLAG;
#ifndef USE_NEW_DYNAREC
        default:
            return 0;
#endif
    }
#ifdef USE_NEW_DYNAREC
    return 0;
#endif
}
/* Lazily evaluate the Carry flag from the last recorded operation.
   INC/DEC do not affect CF, so those (and FLAGS_UNKNOWN) read it from
   cpu_state.flags. */
static __inline int
CF_SET(void)
{
    switch (cpu_state.flags_op) {
        /* Addition: carry out of the top bit. */
        case FLAGS_ADD8:
            return ((cpu_state.flags_op1 + cpu_state.flags_op2) & 0x100) ? 1 : 0;
        case FLAGS_ADD16:
            return ((cpu_state.flags_op1 + cpu_state.flags_op2) & 0x10000) ? 1 : 0;
        case FLAGS_ADD32:
            return (cpu_state.flags_res < cpu_state.flags_op1);
#ifdef USE_NEW_DYNAREC
        /* Add-with-carry: carry-in is inferred from the result (equality
           can only wrap when adding all-ones plus carry-in). */
        case FLAGS_ADC8:
            return (cpu_state.flags_res < cpu_state.flags_op1) || (cpu_state.flags_res == cpu_state.flags_op1 && cpu_state.flags_op2 == 0xff);
        case FLAGS_ADC16:
            return (cpu_state.flags_res < cpu_state.flags_op1) || (cpu_state.flags_res == cpu_state.flags_op1 && cpu_state.flags_op2 == 0xffff);
        case FLAGS_ADC32:
            return (cpu_state.flags_res < cpu_state.flags_op1) || (cpu_state.flags_res == cpu_state.flags_op1 && cpu_state.flags_op2 == 0xffffffff);
#endif
        /* Subtraction: borrow. */
        case FLAGS_SUB8:
        case FLAGS_SUB16:
        case FLAGS_SUB32:
            return (cpu_state.flags_op1 < cpu_state.flags_op2);
#ifdef USE_NEW_DYNAREC
        case FLAGS_SBC8:
        case FLAGS_SBC16:
        case FLAGS_SBC32:
            return (cpu_state.flags_op1 < cpu_state.flags_op2) || (cpu_state.flags_op1 == cpu_state.flags_op2 && cpu_state.flags_res != 0);
#endif
        /* Shifts: CF is the last bit shifted out. */
        case FLAGS_SHL8:
            return ((cpu_state.flags_op1 << (cpu_state.flags_op2 - 1)) & 0x80) ? 1 : 0;
        case FLAGS_SHL16:
            return ((cpu_state.flags_op1 << (cpu_state.flags_op2 - 1)) & 0x8000) ? 1 : 0;
        case FLAGS_SHL32:
            return ((cpu_state.flags_op1 << (cpu_state.flags_op2 - 1)) & 0x80000000) ? 1 : 0;
        case FLAGS_SHR8:
        case FLAGS_SHR16:
        case FLAGS_SHR32:
            return (cpu_state.flags_op1 >> (cpu_state.flags_op2 - 1)) & 1;
        case FLAGS_SAR8:
            return ((int8_t) cpu_state.flags_op1 >> (cpu_state.flags_op2 - 1)) & 1;
        case FLAGS_SAR16:
            return ((int16_t) cpu_state.flags_op1 >> (cpu_state.flags_op2 - 1)) & 1;
        case FLAGS_SAR32:
            return ((int32_t) cpu_state.flags_op1 >> (cpu_state.flags_op2 - 1)) & 1;
        /* Logic ops clear CF. */
        case FLAGS_ZN8:
        case FLAGS_ZN16:
        case FLAGS_ZN32:
            return 0;
#ifdef USE_NEW_DYNAREC
        /* Rotates: CF is the bit rotated around. */
        case FLAGS_ROL8:
        case FLAGS_ROL16:
        case FLAGS_ROL32:
            return cpu_state.flags_res & 1;
        case FLAGS_ROR8:
            return (cpu_state.flags_res & 0x80) ? 1 : 0;
        case FLAGS_ROR16:
            return (cpu_state.flags_res & 0x8000) ? 1 : 0;
        case FLAGS_ROR32:
            return (cpu_state.flags_res & 0x80000000) ? 1 : 0;
#endif
        case FLAGS_DEC8:
        case FLAGS_DEC16:
        case FLAGS_DEC32:
        case FLAGS_INC8:
        case FLAGS_INC16:
        case FLAGS_INC32:
        case FLAGS_UNKNOWN:
            return cpu_state.flags & C_FLAG;
#ifndef USE_NEW_DYNAREC
        default:
            return 0;
#endif
    }
#ifdef USE_NEW_DYNAREC
    return 0;
#endif
}
/* Materialize the lazily-tracked arithmetic flags into cpu_state.flags
   and mark the lazy state as consumed. No-op if the flags are already
   up to date. */
static __inline void
flags_rebuild(void)
{
    uint16_t arith;

    if (cpu_state.flags_op == FLAGS_UNKNOWN)
        return;

    arith = (CF_SET() ? C_FLAG : 0)
          | (PF_SET() ? P_FLAG : 0)
          | (AF_SET() ? A_FLAG : 0)
          | (ZF_SET() ? Z_FLAG : 0)
          | (NF_SET() ? N_FLAG : 0)
          | (VF_SET() ? V_FLAG : 0);

    /* 0x8d5 = mask of the C, P, A, Z, N and V flag bits. */
    cpu_state.flags = (cpu_state.flags & ~0x8d5) | arith;
    cpu_state.flags_op = FLAGS_UNKNOWN;
}
/* Discard the lazy flags state: cpu_state.flags is taken as authoritative
   (used after EFLAGS has been loaded directly, e.g. POPF or SMM resume). */
static __inline void
flags_extract(void)
{
    cpu_state.flags_op = FLAGS_UNKNOWN;
}
/* Materialize only the Carry flag into cpu_state.flags, leaving the lazy
   state otherwise intact (used before ops that preserve CF, e.g. INC). */
static __inline void
flags_rebuild_c(void)
{
    if (cpu_state.flags_op == FLAGS_UNKNOWN)
        return;

    if (CF_SET())
        cpu_state.flags |= C_FLAG;
    else
        cpu_state.flags &= ~C_FLAG;
}
#ifdef USE_NEW_DYNAREC
/* Return non-zero if cpu_state.flags_res currently holds a meaningful
   result value - i.e. the last recorded op was neither "unknown" nor a
   rotate (rotates only record the result for C/V derivation). */
static __inline int
flags_res_valid(void)
{
    if (cpu_state.flags_op == FLAGS_UNKNOWN)
        return 0;
    if ((cpu_state.flags_op >= FLAGS_ROL8) && (cpu_state.flags_op <= FLAGS_ROR32))
        return 0;
    return 1;
}
#endif
/* Record an 8-bit logic-op result; Z/N/P are derived from it lazily
   (C, A and V read as cleared for ZN ops). */
static __inline void
setznp8(uint8_t val)
{
    cpu_state.flags_res = val;
    cpu_state.flags_op  = FLAGS_ZN8;
}
/* Record a 16-bit logic-op result for lazy flags evaluation. */
static __inline void
setznp16(uint16_t val)
{
    cpu_state.flags_res = val;
    cpu_state.flags_op  = FLAGS_ZN16;
}
/* Record a 32-bit logic-op result for lazy flags evaluation. */
static __inline void
setznp32(uint32_t val)
{
    cpu_state.flags_res = val;
    cpu_state.flags_op  = FLAGS_ZN32;
}
/* Record lazy flag state for a shift: op tag, original value, shift count
   and result. NOTE: expands to multiple statements without a do/while
   wrapper, so it must not be used unbraced under an if/else. */
#define set_flags_shift(op, orig, shift, res) \
    cpu_state.flags_op = op; \
    cpu_state.flags_res = res; \
    cpu_state.flags_op1 = orig; \
    cpu_state.flags_op2 = shift;
#ifdef USE_NEW_DYNAREC
/* Rotates only need the op tag and the result (same multi-statement
   expansion caveat as above). */
# define set_flags_rotate(op, res) \
    cpu_state.flags_op = op; \
    cpu_state.flags_res = res;
#endif
/* Lazy ADD flag setters: stash both operands, the truncated result and an
   op tag so individual flags can be derived later on demand. */
static __inline void
setadd8(uint8_t a, uint8_t b)
{
    cpu_state.flags_op1 = a;
    cpu_state.flags_op2 = b;
    cpu_state.flags_res = (a + b) & 0xff;
    cpu_state.flags_op = FLAGS_ADD8;
}
static __inline void
setadd16(uint16_t a, uint16_t b)
{
    cpu_state.flags_op1 = a;
    cpu_state.flags_op2 = b;
    cpu_state.flags_res = (a + b) & 0xffff;
    cpu_state.flags_op = FLAGS_ADD16;
}
static __inline void
setadd32(uint32_t a, uint32_t b)
{
    cpu_state.flags_op1 = a;
    cpu_state.flags_op2 = b;
    cpu_state.flags_res = a + b;
    cpu_state.flags_op = FLAGS_ADD32;
}
/* Lazy INC-style (no-carry) ADD setters: INC must preserve CF, so the
   current carry is materialized into cpu_state.flags first, before the
   lazy state that produced it is overwritten. */
static __inline void
setadd8nc(uint8_t a, uint8_t b)
{
    flags_rebuild_c();
    cpu_state.flags_op1 = a;
    cpu_state.flags_op2 = b;
    cpu_state.flags_res = (a + b) & 0xff;
    cpu_state.flags_op = FLAGS_INC8;
}
static __inline void
setadd16nc(uint16_t a, uint16_t b)
{
    flags_rebuild_c();
    cpu_state.flags_op1 = a;
    cpu_state.flags_op2 = b;
    cpu_state.flags_res = (a + b) & 0xffff;
    cpu_state.flags_op = FLAGS_INC16;
}
static __inline void
setadd32nc(uint32_t a, uint32_t b)
{
    flags_rebuild_c();
    cpu_state.flags_op1 = a;
    cpu_state.flags_op2 = b;
    cpu_state.flags_res = a + b;
    cpu_state.flags_op = FLAGS_INC32;
}
/* Lazy SUB/CMP flag setters: stash minuend, subtrahend and the truncated
   difference plus an op tag for on-demand flag derivation. */
static __inline void
setsub8(uint8_t a, uint8_t b)
{
    cpu_state.flags_op1 = a;
    cpu_state.flags_op2 = b;
    cpu_state.flags_res = (a - b) & 0xff;
    cpu_state.flags_op = FLAGS_SUB8;
}
static __inline void
setsub16(uint16_t a, uint16_t b)
{
    cpu_state.flags_op1 = a;
    cpu_state.flags_op2 = b;
    cpu_state.flags_res = (a - b) & 0xffff;
    cpu_state.flags_op = FLAGS_SUB16;
}
static __inline void
setsub32(uint32_t a, uint32_t b)
{
    cpu_state.flags_op1 = a;
    cpu_state.flags_op2 = b;
    cpu_state.flags_res = a - b;
    cpu_state.flags_op = FLAGS_SUB32;
}
/* Lazy DEC-style (no-carry) SUB setters: DEC must preserve CF, so the
   current carry is materialized into cpu_state.flags before the lazy
   state is replaced. */
static __inline void
setsub8nc(uint8_t a, uint8_t b)
{
    flags_rebuild_c();
    cpu_state.flags_op1 = a;
    cpu_state.flags_op2 = b;
    cpu_state.flags_res = (a - b) & 0xff;
    cpu_state.flags_op = FLAGS_DEC8;
}
static __inline void
setsub16nc(uint16_t a, uint16_t b)
{
    flags_rebuild_c();
    cpu_state.flags_op1 = a;
    cpu_state.flags_op2 = b;
    cpu_state.flags_res = (a - b) & 0xffff;
    cpu_state.flags_op = FLAGS_DEC16;
}
static __inline void
setsub32nc(uint32_t a, uint32_t b)
{
    flags_rebuild_c();
    cpu_state.flags_op1 = a;
    cpu_state.flags_op2 = b;
    cpu_state.flags_res = a - b;
    cpu_state.flags_op = FLAGS_DEC32;
}
#ifdef USE_NEW_DYNAREC
/* Lazy ADC/SBB flag setters (new dynarec path): only operands, result and
   an op tag are recorded; tempc holds the carry/borrow-in latched by the
   caller before these are invoked. */
static __inline void
setadc8(uint8_t a, uint8_t b)
{
    cpu_state.flags_op1 = a;
    cpu_state.flags_op2 = b;
    cpu_state.flags_res = (a + b + tempc) & 0xff;
    cpu_state.flags_op = FLAGS_ADC8;
}
static __inline void
setadc16(uint16_t a, uint16_t b)
{
    cpu_state.flags_op1 = a;
    cpu_state.flags_op2 = b;
    cpu_state.flags_res = (a + b + tempc) & 0xffff;
    cpu_state.flags_op = FLAGS_ADC16;
}
static __inline void
setadc32(uint32_t a, uint32_t b)
{
    cpu_state.flags_op1 = a;
    cpu_state.flags_op2 = b;
    cpu_state.flags_res = a + b + tempc;
    cpu_state.flags_op = FLAGS_ADC32;
}
static __inline void
setsbc8(uint8_t a, uint8_t b)
{
    cpu_state.flags_op1 = a;
    cpu_state.flags_op2 = b;
    cpu_state.flags_res = (a - (b + tempc)) & 0xff;
    cpu_state.flags_op = FLAGS_SBC8;
}
static __inline void
setsbc16(uint16_t a, uint16_t b)
{
    cpu_state.flags_op1 = a;
    cpu_state.flags_op2 = b;
    cpu_state.flags_res = (a - (b + tempc)) & 0xffff;
    cpu_state.flags_op = FLAGS_SBC16;
}
static __inline void
setsbc32(uint32_t a, uint32_t b)
{
    cpu_state.flags_op1 = a;
    cpu_state.flags_op2 = b;
    cpu_state.flags_res = a - (b + tempc);
    cpu_state.flags_op = FLAGS_SBC32;
}
#else
static __inline void
setadc8(uint8_t a, uint8_t b)
{
    /* 8-bit ADC: compute a + b + carry-in (tempc) and materialize all
       arithmetic flags immediately (this path does not use lazy flags). */
    uint16_t c = (uint16_t) a + (uint16_t) b + tempc;
    cpu_state.flags_op = FLAGS_UNKNOWN;
    cpu_state.flags &= ~0x8D5;
    cpu_state.flags |= znptable8[c & 0xFF];
    if (c & 0x100)
        cpu_state.flags |= C_FLAG;
    if (!((a ^ b) & 0x80) && ((a ^ c) & 0x80))
        cpu_state.flags |= V_FLAG;
    /* Auxiliary carry is the carry out of bit 3 of a + b + carry-in; the
       carry-in must be included, matching setadc32 below. */
    if (((a & 0xF) + (b & 0xF) + tempc) & 0x10)
        cpu_state.flags |= A_FLAG;
}
static __inline void
setadc16(uint16_t a, uint16_t b)
{
    /* 16-bit ADC: compute a + b + carry-in (tempc) and materialize all
       arithmetic flags immediately. */
    uint32_t c = (uint32_t) a + (uint32_t) b + tempc;
    cpu_state.flags_op = FLAGS_UNKNOWN;
    cpu_state.flags &= ~0x8D5;
    cpu_state.flags |= znptable16[c & 0xFFFF];
    if (c & 0x10000)
        cpu_state.flags |= C_FLAG;
    if (!((a ^ b) & 0x8000) && ((a ^ c) & 0x8000))
        cpu_state.flags |= V_FLAG;
    /* Auxiliary carry must include the carry-in, matching setadc32. */
    if (((a & 0xF) + (b & 0xF) + tempc) & 0x10)
        cpu_state.flags |= A_FLAG;
}
/* 32-bit ADC: the sum cannot be widened, so carry-out is detected by
   wraparound: c < a, or c == a with a non-zero carry-in. Parity comes
   from the low byte only (x86 PF is defined on the low 8 bits). */
static __inline void
setadc32(uint32_t a, uint32_t b)
{
    uint32_t c = (uint32_t) a + (uint32_t) b + tempc;
    cpu_state.flags_op = FLAGS_UNKNOWN;
    cpu_state.flags &= ~0x8D5;
    cpu_state.flags |= ((c & 0x80000000) ? N_FLAG : ((!c) ? Z_FLAG : 0));
    cpu_state.flags |= (znptable8[c & 0xFF] & P_FLAG);
    if ((c < a) || (c == a && tempc))
        cpu_state.flags |= C_FLAG;
    if (!((a ^ b) & 0x80000000) && ((a ^ c) & 0x80000000))
        cpu_state.flags |= V_FLAG;
    /* Auxiliary carry out of bit 3 includes the carry-in. */
    if (((a & 0xF) + (b & 0xF) + tempc) & 0x10)
        cpu_state.flags |= A_FLAG;
}
static __inline void
setsbc8(uint8_t a, uint8_t b)
{
    /* 8-bit SBB: compute a - (b + borrow-in) and materialize all
       arithmetic flags immediately. */
    uint16_t c = (uint16_t) a - (((uint16_t) b) + tempc);
    cpu_state.flags_op = FLAGS_UNKNOWN;
    cpu_state.flags &= ~0x8D5;
    cpu_state.flags |= znptable8[c & 0xFF];
    if (c & 0x100)
        cpu_state.flags |= C_FLAG;
    if ((a ^ b) & (a ^ c) & 0x80)
        cpu_state.flags |= V_FLAG;
    /* Auxiliary borrow out of bit 3 must include the borrow-in, matching
       setsbc32 below. */
    if (((a & 0xF) - ((b & 0xF) + tempc)) & 0x10)
        cpu_state.flags |= A_FLAG;
}
static __inline void
setsbc16(uint16_t a, uint16_t b)
{
    /* 16-bit SBB: compute a - (b + borrow-in) and materialize all
       arithmetic flags immediately. Parity is corrected to come from the
       low byte only (x86 PF is defined on the low 8 bits). */
    uint32_t c = (uint32_t) a - (((uint32_t) b) + tempc);
    cpu_state.flags_op = FLAGS_UNKNOWN;
    cpu_state.flags &= ~0x8D5;
    cpu_state.flags |= (znptable16[c & 0xFFFF] & ~4);
    cpu_state.flags |= (znptable8[c & 0xFF] & 4);
    if (c & 0x10000)
        cpu_state.flags |= C_FLAG;
    if ((a ^ b) & (a ^ c) & 0x8000)
        cpu_state.flags |= V_FLAG;
    /* Auxiliary borrow must include the borrow-in, matching setsbc32. */
    if (((a & 0xF) - ((b & 0xF) + tempc)) & 0x10)
        cpu_state.flags |= A_FLAG;
}
/* 32-bit SBB: borrow-out is detected by wraparound (c > a, or c == a with
   a non-zero borrow-in) since the difference cannot be widened. Parity
   comes from the low byte only. */
static __inline void
setsbc32(uint32_t a, uint32_t b)
{
    uint32_t c = (uint32_t) a - (((uint32_t) b) + tempc);
    cpu_state.flags_op = FLAGS_UNKNOWN;
    cpu_state.flags &= ~0x8D5;
    cpu_state.flags |= ((c & 0x80000000) ? N_FLAG : ((!c) ? Z_FLAG : 0));
    cpu_state.flags |= (znptable8[c & 0xFF] & P_FLAG);
    if ((c > a) || (c == a && tempc))
        cpu_state.flags |= C_FLAG;
    if ((a ^ b) & (a ^ c) & 0x80000000)
        cpu_state.flags |= V_FLAG;
    /* Auxiliary borrow out of bit 3 includes the borrow-in. */
    if (((a & 0xF) - ((b & 0xF) + tempc)) & 0x10)
        cpu_state.flags |= A_FLAG;
}
#endif
extern void cpu_386_flags_extract(void);
extern void cpu_386_flags_rebuild(void);
``` | /content/code_sandbox/src/cpu/x86_flags.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 6,530 |
```objective-c
/* Emits the pair of handlers (16- and 32-bit addressing) for one SETcc
   instruction: writes 1 or 0 to the r/m8 destination depending on the
   cond_* predicate. Flags are not modified.
   NOTE(review): the destination segment is validated with SEG_CHECK_READ
   even though SETcc only writes; seteab() performs the write itself --
   confirm this is intended. */
#define opSET(condition) \
    static int opSET##condition##_a16(uint32_t fetchdat) \
    { \
        fetch_ea_16(fetchdat); \
        if (cpu_mod != 3) \
            SEG_CHECK_READ(cpu_state.ea_seg); \
        seteab((cond_##condition) ? 1 : 0); \
        CLOCK_CYCLES(4); \
        return cpu_state.abrt; \
    } \
    \
    static int opSET##condition##_a32(uint32_t fetchdat) \
    { \
        fetch_ea_32(fetchdat); \
        if (cpu_mod != 3) \
            SEG_CHECK_READ(cpu_state.ea_seg); \
        seteab((cond_##condition) ? 1 : 0); \
        CLOCK_CYCLES(4); \
        return cpu_state.abrt; \
    }
// clang-format off
/* Instantiate handlers for all sixteen x86 condition codes. */
opSET(O)
opSET(NO)
opSET(B)
opSET(NB)
opSET(E)
opSET(NE)
opSET(BE)
opSET(NBE)
opSET(S)
opSET(NS)
opSET(P)
opSET(NP)
opSET(L)
opSET(NL)
opSET(LE)
opSET(NLE)
// clang-format on
``` | /content/code_sandbox/src/cpu/x86_ops_set.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 283 |
```objective-c
/*
* 86Box A hypervisor and IBM PC system emulator that specializes in
* running old operating systems and software designed for IBM
* PC systems and compatibles from 1981 through fairly recent
* system designs based on the PCI bus.
*
* This file is part of the 86Box distribution.
*
* x86 CPU segment emulation common parts header.
*
*
*
* Authors: Miran Grca, <mgrca8@gmail.com>
*
*/
#ifndef EMU_X86SEG_COMMON_H
#define EMU_X86SEG_COMMON_H
#define JMP 1
#define CALL 2
#define IRET 3
#define OPTYPE_INT 4
/* Internal abort codes used to signal pending CPU exceptions; the values
   0xA-0xE match the corresponding x86 exception vector numbers. */
enum {
    ABRT_NONE = 0,
    ABRT_GEN = 1,
    ABRT_TS = 0xA,  /* invalid TSS (#TS) */
    ABRT_NP = 0xB,  /* segment not present (#NP) */
    ABRT_SS = 0xC,  /* stack-segment fault (#SS) */
    ABRT_GPF = 0xD, /* general protection fault (#GP) */
    ABRT_PF = 0xE,  /* page fault (#PF) */
    ABRT_DE = 0x40 /* INT 0, but we have to distinguish it from ABRT_NONE. */
};
extern uint8_t opcode2;
extern int cgate16;
extern int cgate32;
extern int intgatesize;
extern void x86seg_reset(void);
extern void x86gen(void);
extern void x86de(char *s, uint16_t error);
extern void x86gpf(char *s, uint16_t error);
extern void x86gpf_expected(char *s, uint16_t error);
extern void x86np(char *s, uint16_t error);
extern void x86ss(char *s, uint16_t error);
extern void x86ts(char *s, uint16_t error);
extern void do_seg_load(x86seg *s, uint16_t *segdat);
#endif /*EMU_X86SEG_COMMON_H*/
``` | /content/code_sandbox/src/cpu/x86seg_common.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 412 |
```objective-c
/* MOVSB, 16-bit addressing: copy one byte from ea_seg:SI to ES:DI, then
   step SI/DI by the direction flag. Both pages are translated up front
   (do_mmut_*) so a fault on either address aborts before any write. */
static int
opMOVSB_a16(uint32_t fetchdat)
{
    uint8_t temp;
    addr64 = addr64_2 = 0x00000000;
    SEG_CHECK_READ(cpu_state.ea_seg);
    SEG_CHECK_WRITE(&cpu_state.seg_es);
    CHECK_READ(cpu_state.ea_seg, SI, SI);
    CHECK_WRITE(&cpu_state.seg_es, DI, DI);
    high_page = 0;
    /* Pre-translate source and destination; abort on fault. */
    do_mmut_rb(cpu_state.ea_seg->base, SI, &addr64);
    if (cpu_state.abrt)
        return 1;
    do_mmut_wb(es, DI, &addr64_2);
    if (cpu_state.abrt)
        return 1;
    /* Perform the copy using the pre-translated physical addresses. */
    temp = readmemb_n(cpu_state.ea_seg->base, SI, addr64);
    if (cpu_state.abrt)
        return 1;
    writememb_n(es, DI, addr64_2, temp);
    if (cpu_state.abrt)
        return 1;
    if (cpu_state.flags & D_FLAG) {
        DI--;
        SI--;
    } else {
        DI++;
        SI++;
    }
    CLOCK_CYCLES(7);
    PREFETCH_RUN(7, 1, -1, 1, 0, 1, 0, 0);
    return 0;
}
/* MOVSB, 32-bit addressing: copy one byte from ea_seg:ESI to ES:EDI, then
   step ESI/EDI by the direction flag. Same translate-before-write scheme
   as the 16-bit variant. */
static int
opMOVSB_a32(uint32_t fetchdat)
{
    uint8_t temp;
    addr64 = addr64_2 = 0x00000000;
    SEG_CHECK_READ(cpu_state.ea_seg);
    SEG_CHECK_WRITE(&cpu_state.seg_es);
    CHECK_READ(cpu_state.ea_seg, ESI, ESI);
    CHECK_WRITE(&cpu_state.seg_es, EDI, EDI);
    high_page = 0;
    do_mmut_rb(cpu_state.ea_seg->base, ESI, &addr64);
    if (cpu_state.abrt)
        return 1;
    do_mmut_wb(es, EDI, &addr64_2);
    if (cpu_state.abrt)
        return 1;
    temp = readmemb_n(cpu_state.ea_seg->base, ESI, addr64);
    if (cpu_state.abrt)
        return 1;
    writememb_n(es, EDI, addr64_2, temp);
    if (cpu_state.abrt)
        return 1;
    if (cpu_state.flags & D_FLAG) {
        EDI--;
        ESI--;
    } else {
        EDI++;
        ESI++;
    }
    CLOCK_CYCLES(7);
    PREFETCH_RUN(7, 1, -1, 1, 0, 1, 0, 1);
    return 0;
}
static int
opMOVSW_a16(uint32_t fetchdat)
{
uint16_t temp;
addr64a[0] = addr64a[1] = 0x00000000;
addr64a_2[0] = addr64a_2[1] = 0x00000000;
SEG_CHECK_READ(cpu_state.ea_seg);
SEG_CHECK_WRITE(&cpu_state.seg_es);
CHECK_READ(cpu_state.ea_seg, SI, SI + 1UL);
CHECK_WRITE(&cpu_state.seg_es, DI, DI + 1UL);
high_page = 0;
do_mmut_rw(cpu_state.ea_seg->base, SI, addr64a);
if (cpu_state.abrt)
return 1;
do_mmut_ww(es, DI, addr64a_2);
if (cpu_state.abrt)
return 1;
temp = readmemw_n(cpu_state.ea_seg->base, SI, addr64a);
if (cpu_state.abrt)
return 1;
writememw_n(es, DI, addr64a_2, temp);
if (cpu_state.abrt)
return 1;
if (cpu_state.flags & D_FLAG) {
DI -= 2;
SI -= 2;
} else {
DI += 2;
SI += 2;
}
CLOCK_CYCLES(7);
PREFETCH_RUN(7, 1, -1, 1, 0, 1, 0, 0);
return 0;
}
static int
opMOVSW_a32(uint32_t fetchdat)
{
uint16_t temp;
addr64a[0] = addr64a[1] = 0x00000000;
addr64a_2[0] = addr64a_2[1] = 0x00000000;
SEG_CHECK_READ(cpu_state.ea_seg);
SEG_CHECK_WRITE(&cpu_state.seg_es);
CHECK_READ(cpu_state.ea_seg, ESI, ESI + 1UL);
CHECK_WRITE(&cpu_state.seg_es, EDI, EDI + 1UL);
high_page = 0;
do_mmut_rw(cpu_state.ea_seg->base, ESI, addr64a);
if (cpu_state.abrt)
return 1;
do_mmut_ww(es, EDI, addr64a_2);
if (cpu_state.abrt)
return 1;
temp = readmemw_n(cpu_state.ea_seg->base, ESI, addr64a);
if (cpu_state.abrt)
return 1;
writememw_n(es, EDI, addr64a_2, temp);
if (cpu_state.abrt)
return 1;
if (cpu_state.flags & D_FLAG) {
EDI -= 2;
ESI -= 2;
} else {
EDI += 2;
ESI += 2;
}
CLOCK_CYCLES(7);
PREFETCH_RUN(7, 1, -1, 1, 0, 1, 0, 1);
return 0;
}
static int
opMOVSL_a16(uint32_t fetchdat)
{
uint32_t temp;
addr64a[0] = addr64a[1] = addr64a[2] = addr64a[3] = 0x00000000;
addr64a_2[0] = addr64a_2[1] = addr64a_2[2] = addr64a_2[3] = 0x00000000;
SEG_CHECK_READ(cpu_state.ea_seg);
SEG_CHECK_WRITE(&cpu_state.seg_es);
CHECK_READ(cpu_state.ea_seg, SI, SI + 3UL);
CHECK_WRITE(&cpu_state.seg_es, DI, DI + 3UL);
high_page = 0;
do_mmut_rl(cpu_state.ea_seg->base, SI, addr64a);
if (cpu_state.abrt)
return 1;
do_mmut_wl(es, DI, addr64a_2);
if (cpu_state.abrt)
return 1;
temp = readmeml_n(cpu_state.ea_seg->base, SI, addr64a);
if (cpu_state.abrt)
return 1;
writememl_n(es, DI, addr64a_2, temp);
if (cpu_state.abrt)
return 1;
if (cpu_state.flags & D_FLAG) {
DI -= 4;
SI -= 4;
} else {
DI += 4;
SI += 4;
}
CLOCK_CYCLES(7);
PREFETCH_RUN(7, 1, -1, 0, 1, 0, 1, 0);
return 0;
}
static int
opMOVSL_a32(uint32_t fetchdat)
{
uint32_t temp;
addr64a[0] = addr64a[1] = addr64a[2] = addr64a[3] = 0x00000000;
addr64a_2[0] = addr64a_2[1] = addr64a_2[2] = addr64a_2[3] = 0x00000000;
SEG_CHECK_READ(cpu_state.ea_seg);
SEG_CHECK_WRITE(&cpu_state.seg_es);
CHECK_READ(cpu_state.ea_seg, ESI, ESI + 3UL);
CHECK_WRITE(&cpu_state.seg_es, EDI, EDI + 3UL);
high_page = 0;
do_mmut_rl(cpu_state.ea_seg->base, ESI, addr64a);
if (cpu_state.abrt)
return 1;
do_mmut_wl(es, EDI, addr64a_2);
if (cpu_state.abrt)
return 1;
temp = readmeml_n(cpu_state.ea_seg->base, ESI, addr64a);
if (cpu_state.abrt)
return 1;
writememl_n(es, EDI, addr64a_2, temp);
if (cpu_state.abrt)
return 1;
if (cpu_state.flags & D_FLAG) {
EDI -= 4;
ESI -= 4;
} else {
EDI += 4;
ESI += 4;
}
CLOCK_CYCLES(7);
PREFETCH_RUN(7, 1, -1, 0, 1, 0, 1, 1);
return 0;
}
/* CMPSB, 16-bit addressing: compare the byte at ea_seg:SI with the byte
   at ES:DI (flags reflect [SI] - [DI]), then step SI/DI by DF. Both
   pages are translated up front so faults abort before the compare. */
static int
opCMPSB_a16(uint32_t fetchdat)
{
    uint8_t src;
    uint8_t dst;
    addr64 = addr64_2 = 0x00000000;
    SEG_CHECK_READ(cpu_state.ea_seg);
    SEG_CHECK_READ(&cpu_state.seg_es);
    CHECK_READ(cpu_state.ea_seg, SI, SI);
    CHECK_READ(&cpu_state.seg_es, DI, DI);
    high_page = uncached = 0;
    do_mmut_rb(cpu_state.ea_seg->base, SI, &addr64);
    if (cpu_state.abrt)
        return 1;
    do_mmut_rb2(es, DI, &addr64_2);
    if (cpu_state.abrt)
        return 1;
    src = readmemb_n(cpu_state.ea_seg->base, SI, addr64);
    if (cpu_state.abrt)
        return 1;
    dst = readmemb_n(es, DI, addr64_2);
    if (cpu_state.abrt)
        return 1;
    /* Lazy flag update: source minus destination, as per CMPS. */
    setsub8(src, dst);
    if (cpu_state.flags & D_FLAG) {
        DI--;
        SI--;
    } else {
        DI++;
        SI++;
    }
    CLOCK_CYCLES((is486) ? 8 : 10);
    PREFETCH_RUN((is486) ? 8 : 10, 1, -1, 2, 0, 0, 0, 0);
    return 0;
}
static int
opCMPSB_a32(uint32_t fetchdat)
{
uint8_t src;
uint8_t dst;
addr64 = addr64_2 = 0x00000000;
SEG_CHECK_READ(cpu_state.ea_seg);
SEG_CHECK_READ(&cpu_state.seg_es);
CHECK_READ(cpu_state.ea_seg, ESI, ESI);
CHECK_READ(&cpu_state.seg_es, EDI, EDI);
high_page = uncached = 0;
do_mmut_rb(cpu_state.ea_seg->base, ESI, &addr64);
if (cpu_state.abrt)
return 1;
do_mmut_rb2(es, EDI, &addr64_2);
if (cpu_state.abrt)
return 1;
src = readmemb_n(cpu_state.ea_seg->base, ESI, addr64);
if (cpu_state.abrt)
return 1;
dst = readmemb_n(es, EDI, addr64_2);
if (cpu_state.abrt)
return 1;
setsub8(src, dst);
if (cpu_state.flags & D_FLAG) {
EDI--;
ESI--;
} else {
EDI++;
ESI++;
}
CLOCK_CYCLES((is486) ? 8 : 10);
PREFETCH_RUN((is486) ? 8 : 10, 1, -1, 2, 0, 0, 0, 1);
return 0;
}
static int
opCMPSW_a16(uint32_t fetchdat)
{
uint16_t src;
uint16_t dst;
addr64a[0] = addr64a[1] = 0x00000000;
addr64a_2[0] = addr64a_2[1] = 0x00000000;
SEG_CHECK_READ(cpu_state.ea_seg);
SEG_CHECK_READ(&cpu_state.seg_es);
CHECK_READ(cpu_state.ea_seg, SI, SI + 1UL);
CHECK_READ(&cpu_state.seg_es, DI, DI + 1UL);
high_page = uncached = 0;
do_mmut_rw(cpu_state.ea_seg->base, SI, addr64a);
if (cpu_state.abrt)
return 1;
do_mmut_rw2(es, DI, addr64a_2);
if (cpu_state.abrt)
return 1;
src = readmemw_n(cpu_state.ea_seg->base, SI, addr64a);
if (cpu_state.abrt)
return 1;
dst = readmemw_n(es, DI, addr64a_2);
if (cpu_state.abrt)
return 1;
setsub16(src, dst);
if (cpu_state.flags & D_FLAG) {
DI -= 2;
SI -= 2;
} else {
DI += 2;
SI += 2;
}
CLOCK_CYCLES((is486) ? 8 : 10);
PREFETCH_RUN((is486) ? 8 : 10, 1, -1, 2, 0, 0, 0, 0);
return 0;
}
static int
opCMPSW_a32(uint32_t fetchdat)
{
uint16_t src;
uint16_t dst;
addr64a[0] = addr64a[1] = 0x00000000;
addr64a_2[0] = addr64a_2[1] = 0x00000000;
SEG_CHECK_READ(cpu_state.ea_seg);
SEG_CHECK_READ(&cpu_state.seg_es);
CHECK_READ(cpu_state.ea_seg, ESI, ESI + 1UL);
CHECK_READ(&cpu_state.seg_es, EDI, EDI + 1UL);
high_page = uncached = 0;
do_mmut_rw(cpu_state.ea_seg->base, ESI, addr64a);
if (cpu_state.abrt)
return 1;
do_mmut_rw2(es, EDI, addr64a_2);
if (cpu_state.abrt)
return 1;
src = readmemw_n(cpu_state.ea_seg->base, ESI, addr64a);
if (cpu_state.abrt)
return 1;
dst = readmemw_n(es, EDI, addr64a_2);
if (cpu_state.abrt)
return 1;
setsub16(src, dst);
if (cpu_state.flags & D_FLAG) {
EDI -= 2;
ESI -= 2;
} else {
EDI += 2;
ESI += 2;
}
CLOCK_CYCLES((is486) ? 8 : 10);
PREFETCH_RUN((is486) ? 8 : 10, 1, -1, 2, 0, 0, 0, 1);
return 0;
}
static int
opCMPSL_a16(uint32_t fetchdat)
{
uint32_t src;
uint32_t dst;
addr64a[0] = addr64a[1] = addr64a[2] = addr64a[3] = 0x00000000;
addr64a_2[0] = addr64a_2[1] = addr64a_2[2] = addr64a_2[3] = 0x00000000;
SEG_CHECK_READ(cpu_state.ea_seg);
SEG_CHECK_READ(&cpu_state.seg_es);
CHECK_READ(cpu_state.ea_seg, SI, SI + 3UL);
CHECK_READ(&cpu_state.seg_es, DI, DI + 3UL);
high_page = uncached = 0;
do_mmut_rl(cpu_state.ea_seg->base, SI, addr64a);
if (cpu_state.abrt)
return 1;
do_mmut_rl2(es, DI, addr64a_2);
if (cpu_state.abrt)
return 1;
src = readmeml_n(cpu_state.ea_seg->base, SI, addr64a);
if (cpu_state.abrt)
return 1;
dst = readmeml_n(es, DI, addr64a_2);
if (cpu_state.abrt)
return 1;
setsub32(src, dst);
if (cpu_state.flags & D_FLAG) {
DI -= 4;
SI -= 4;
} else {
DI += 4;
SI += 4;
}
CLOCK_CYCLES((is486) ? 8 : 10);
PREFETCH_RUN((is486) ? 8 : 10, 1, -1, 0, 2, 0, 0, 0);
return 0;
}
static int
opCMPSL_a32(uint32_t fetchdat)
{
uint32_t src;
uint32_t dst;
addr64a[0] = addr64a[1] = addr64a[2] = addr64a[3] = 0x00000000;
addr64a_2[0] = addr64a_2[1] = addr64a_2[2] = addr64a_2[3] = 0x00000000;
SEG_CHECK_READ(cpu_state.ea_seg);
SEG_CHECK_READ(&cpu_state.seg_es);
CHECK_READ(cpu_state.ea_seg, ESI, ESI + 3UL);
CHECK_READ(&cpu_state.seg_es, EDI, EDI + 3UL);
high_page = uncached = 0;
do_mmut_rl(cpu_state.ea_seg->base, ESI, addr64a);
if (cpu_state.abrt)
return 1;
do_mmut_rl2(es, EDI, addr64a_2);
if (cpu_state.abrt)
return 1;
src = readmeml_n(cpu_state.ea_seg->base, ESI, addr64a);
if (cpu_state.abrt)
return 1;
dst = readmeml_n(es, EDI, addr64a_2);
if (cpu_state.abrt)
return 1;
setsub32(src, dst);
if (cpu_state.flags & D_FLAG) {
EDI -= 4;
ESI -= 4;
} else {
EDI += 4;
ESI += 4;
}
CLOCK_CYCLES((is486) ? 8 : 10);
PREFETCH_RUN((is486) ? 8 : 10, 1, -1, 0, 2, 0, 0, 1);
return 0;
}
static int
opSTOSB_a16(uint32_t fetchdat)
{
    /* STOSB, 16-bit addressing: store AL at ES:DI, step DI per DF. */
    SEG_CHECK_WRITE(&cpu_state.seg_es);
    CHECK_WRITE(&cpu_state.seg_es, DI, DI);
    writememb(es, DI, AL);
    if (cpu_state.abrt)
        return 1;
    DI += (cpu_state.flags & D_FLAG) ? -1 : 1;
    CLOCK_CYCLES(4);
    PREFETCH_RUN(4, 1, -1, 0, 0, 1, 0, 0);
    return 0;
}
static int
opSTOSB_a32(uint32_t fetchdat)
{
    /* STOSB, 32-bit addressing: store AL at ES:EDI, step EDI per DF. */
    SEG_CHECK_WRITE(&cpu_state.seg_es);
    CHECK_WRITE(&cpu_state.seg_es, EDI, EDI);
    writememb(es, EDI, AL);
    if (cpu_state.abrt)
        return 1;
    EDI += (cpu_state.flags & D_FLAG) ? -1 : 1;
    CLOCK_CYCLES(4);
    PREFETCH_RUN(4, 1, -1, 0, 0, 1, 0, 1);
    return 0;
}
static int
opSTOSW_a16(uint32_t fetchdat)
{
    /* STOSW, 16-bit addressing: store AX at ES:DI, step DI by 2 per DF. */
    SEG_CHECK_WRITE(&cpu_state.seg_es);
    CHECK_WRITE(&cpu_state.seg_es, DI, DI + 1UL);
    writememw(es, DI, AX);
    if (cpu_state.abrt)
        return 1;
    DI += (cpu_state.flags & D_FLAG) ? -2 : 2;
    CLOCK_CYCLES(4);
    PREFETCH_RUN(4, 1, -1, 0, 0, 1, 0, 0);
    return 0;
}
static int
opSTOSW_a32(uint32_t fetchdat)
{
    /* STOSW, 32-bit addressing: store AX at ES:EDI, step EDI by 2 per DF. */
    SEG_CHECK_WRITE(&cpu_state.seg_es);
    CHECK_WRITE(&cpu_state.seg_es, EDI, EDI + 1UL);
    writememw(es, EDI, AX);
    if (cpu_state.abrt)
        return 1;
    EDI += (cpu_state.flags & D_FLAG) ? -2 : 2;
    CLOCK_CYCLES(4);
    PREFETCH_RUN(4, 1, -1, 0, 0, 1, 0, 1);
    return 0;
}
static int
opSTOSL_a16(uint32_t fetchdat)
{
    /* STOSD, 16-bit addressing: store EAX at ES:DI, step DI by 4 per DF. */
    SEG_CHECK_WRITE(&cpu_state.seg_es);
    CHECK_WRITE(&cpu_state.seg_es, DI, DI + 3UL);
    writememl(es, DI, EAX);
    if (cpu_state.abrt)
        return 1;
    DI += (cpu_state.flags & D_FLAG) ? -4 : 4;
    CLOCK_CYCLES(4);
    PREFETCH_RUN(4, 1, -1, 0, 0, 0, 1, 0);
    return 0;
}
static int
opSTOSL_a32(uint32_t fetchdat)
{
    /* STOSD, 32-bit addressing: store EAX at ES:EDI, step EDI by 4 per DF. */
    SEG_CHECK_WRITE(&cpu_state.seg_es);
    CHECK_WRITE(&cpu_state.seg_es, EDI, EDI + 3UL);
    writememl(es, EDI, EAX);
    if (cpu_state.abrt)
        return 1;
    EDI += (cpu_state.flags & D_FLAG) ? -4 : 4;
    CLOCK_CYCLES(4);
    PREFETCH_RUN(4, 1, -1, 0, 0, 0, 1, 1);
    return 0;
}
static int
opLODSB_a16(uint32_t fetchdat)
{
    /* LODSB, 16-bit addressing: load AL from ea_seg:SI, step SI per DF. */
    uint8_t val;

    SEG_CHECK_READ(cpu_state.ea_seg);
    CHECK_READ(cpu_state.ea_seg, SI, SI);
    val = readmemb(cpu_state.ea_seg->base, SI);
    if (cpu_state.abrt)
        return 1;
    AL = val;
    SI += (cpu_state.flags & D_FLAG) ? -1 : 1;
    CLOCK_CYCLES(5);
    PREFETCH_RUN(5, 1, -1, 1, 0, 0, 0, 0);
    return 0;
}
static int
opLODSB_a32(uint32_t fetchdat)
{
    /* LODSB, 32-bit addressing: load AL from ea_seg:ESI, step ESI per DF. */
    uint8_t val;

    SEG_CHECK_READ(cpu_state.ea_seg);
    CHECK_READ(cpu_state.ea_seg, ESI, ESI);
    val = readmemb(cpu_state.ea_seg->base, ESI);
    if (cpu_state.abrt)
        return 1;
    AL = val;
    ESI += (cpu_state.flags & D_FLAG) ? -1 : 1;
    CLOCK_CYCLES(5);
    PREFETCH_RUN(5, 1, -1, 1, 0, 0, 0, 1);
    return 0;
}
static int
opLODSW_a16(uint32_t fetchdat)
{
    /* LODSW, 16-bit addressing: load AX from ea_seg:SI, step SI by 2. */
    uint16_t val;

    SEG_CHECK_READ(cpu_state.ea_seg);
    CHECK_READ(cpu_state.ea_seg, SI, SI + 1UL);
    val = readmemw(cpu_state.ea_seg->base, SI);
    if (cpu_state.abrt)
        return 1;
    AX = val;
    SI += (cpu_state.flags & D_FLAG) ? -2 : 2;
    CLOCK_CYCLES(5);
    PREFETCH_RUN(5, 1, -1, 1, 0, 0, 0, 0);
    return 0;
}
static int
opLODSW_a32(uint32_t fetchdat)
{
    /* LODSW, 32-bit addressing: load AX from ea_seg:ESI, step ESI by 2. */
    uint16_t val;

    SEG_CHECK_READ(cpu_state.ea_seg);
    CHECK_READ(cpu_state.ea_seg, ESI, ESI + 1UL);
    val = readmemw(cpu_state.ea_seg->base, ESI);
    if (cpu_state.abrt)
        return 1;
    AX = val;
    ESI += (cpu_state.flags & D_FLAG) ? -2 : 2;
    CLOCK_CYCLES(5);
    PREFETCH_RUN(5, 1, -1, 1, 0, 0, 0, 1);
    return 0;
}
static int
opLODSL_a16(uint32_t fetchdat)
{
    /* LODSD, 16-bit addressing: load EAX from ea_seg:SI, step SI by 4. */
    uint32_t val;

    SEG_CHECK_READ(cpu_state.ea_seg);
    CHECK_READ(cpu_state.ea_seg, SI, SI + 3UL);
    val = readmeml(cpu_state.ea_seg->base, SI);
    if (cpu_state.abrt)
        return 1;
    EAX = val;
    SI += (cpu_state.flags & D_FLAG) ? -4 : 4;
    CLOCK_CYCLES(5);
    PREFETCH_RUN(5, 1, -1, 0, 1, 0, 0, 0);
    return 0;
}
static int
opLODSL_a32(uint32_t fetchdat)
{
    /* LODSD, 32-bit addressing: load EAX from ea_seg:ESI, step ESI by 4. */
    uint32_t val;

    SEG_CHECK_READ(cpu_state.ea_seg);
    CHECK_READ(cpu_state.ea_seg, ESI, ESI + 3UL);
    val = readmeml(cpu_state.ea_seg->base, ESI);
    if (cpu_state.abrt)
        return 1;
    EAX = val;
    ESI += (cpu_state.flags & D_FLAG) ? -4 : 4;
    CLOCK_CYCLES(5);
    PREFETCH_RUN(5, 1, -1, 0, 1, 0, 0, 1);
    return 0;
}
static int
opSCASB_a16(uint32_t fetchdat)
{
    /* SCASB, 16-bit addressing: set flags for AL - [ES:DI], step DI. */
    uint8_t mem;

    SEG_CHECK_READ(&cpu_state.seg_es);
    CHECK_READ(&cpu_state.seg_es, DI, DI);
    mem = readmemb(es, DI);
    if (cpu_state.abrt)
        return 1;
    setsub8(AL, mem);
    DI += (cpu_state.flags & D_FLAG) ? -1 : 1;
    CLOCK_CYCLES(7);
    PREFETCH_RUN(7, 1, -1, 1, 0, 0, 0, 0);
    return 0;
}
static int
opSCASB_a32(uint32_t fetchdat)
{
    /* SCASB, 32-bit addressing: set flags for AL - [ES:EDI], step EDI. */
    uint8_t mem;

    SEG_CHECK_READ(&cpu_state.seg_es);
    CHECK_READ(&cpu_state.seg_es, EDI, EDI);
    mem = readmemb(es, EDI);
    if (cpu_state.abrt)
        return 1;
    setsub8(AL, mem);
    EDI += (cpu_state.flags & D_FLAG) ? -1 : 1;
    CLOCK_CYCLES(7);
    PREFETCH_RUN(7, 1, -1, 1, 0, 0, 0, 1);
    return 0;
}
static int
opSCASW_a16(uint32_t fetchdat)
{
    /* SCASW, 16-bit addressing: set flags for AX - [ES:DI], step DI by 2. */
    uint16_t mem;

    SEG_CHECK_READ(&cpu_state.seg_es);
    CHECK_READ(&cpu_state.seg_es, DI, DI + 1UL);
    mem = readmemw(es, DI);
    if (cpu_state.abrt)
        return 1;
    setsub16(AX, mem);
    DI += (cpu_state.flags & D_FLAG) ? -2 : 2;
    CLOCK_CYCLES(7);
    PREFETCH_RUN(7, 1, -1, 1, 0, 0, 0, 0);
    return 0;
}
static int
opSCASW_a32(uint32_t fetchdat)
{
    /* SCASW, 32-bit addressing: set flags for AX - [ES:EDI], step EDI by 2. */
    uint16_t mem;

    SEG_CHECK_READ(&cpu_state.seg_es);
    CHECK_READ(&cpu_state.seg_es, EDI, EDI + 1UL);
    mem = readmemw(es, EDI);
    if (cpu_state.abrt)
        return 1;
    setsub16(AX, mem);
    EDI += (cpu_state.flags & D_FLAG) ? -2 : 2;
    CLOCK_CYCLES(7);
    PREFETCH_RUN(7, 1, -1, 1, 0, 0, 0, 1);
    return 0;
}
static int
opSCASL_a16(uint32_t fetchdat)
{
    /* SCASD, 16-bit addressing: set flags for EAX - [ES:DI], step DI by 4. */
    uint32_t mem;

    SEG_CHECK_READ(&cpu_state.seg_es);
    CHECK_READ(&cpu_state.seg_es, DI, DI + 3UL);
    mem = readmeml(es, DI);
    if (cpu_state.abrt)
        return 1;
    setsub32(EAX, mem);
    DI += (cpu_state.flags & D_FLAG) ? -4 : 4;
    CLOCK_CYCLES(7);
    PREFETCH_RUN(7, 1, -1, 0, 1, 0, 0, 0);
    return 0;
}
static int
opSCASL_a32(uint32_t fetchdat)
{
    /* SCASD, 32-bit addressing: set flags for EAX - [ES:EDI], step EDI by 4. */
    uint32_t mem;

    SEG_CHECK_READ(&cpu_state.seg_es);
    CHECK_READ(&cpu_state.seg_es, EDI, EDI + 3UL);
    mem = readmeml(es, EDI);
    if (cpu_state.abrt)
        return 1;
    setsub32(EAX, mem);
    EDI += (cpu_state.flags & D_FLAG) ? -4 : 4;
    CLOCK_CYCLES(7);
    PREFETCH_RUN(7, 1, -1, 0, 1, 0, 0, 1);
    return 0;
}
static int
opINSB_a16(uint32_t fetchdat)
{
uint8_t temp;
addr64 = 0x00000000;
SEG_CHECK_WRITE(&cpu_state.seg_es);
check_io_perm(DX, 1);
CHECK_WRITE(&cpu_state.seg_es, DI, DI);
high_page = 0;
do_mmut_wb(es, DI, &addr64);
if (cpu_state.abrt)
return 1;
temp = inb(DX);
writememb_n(es, DI, addr64, temp);
if (cpu_state.abrt)
return 1;
if (cpu_state.flags & D_FLAG)
DI--;
else
DI++;
CLOCK_CYCLES(15);
PREFETCH_RUN(15, 1, -1, 1, 0, 1, 0, 0);
return 0;
}
static int
opINSB_a32(uint32_t fetchdat)
{
uint8_t temp;
addr64 = 0x00000000;
SEG_CHECK_WRITE(&cpu_state.seg_es);
check_io_perm(DX, 1);
high_page = 0;
CHECK_WRITE(&cpu_state.seg_es, EDI, EDI);
do_mmut_wb(es, EDI, &addr64);
if (cpu_state.abrt)
return 1;
temp = inb(DX);
writememb_n(es, EDI, addr64, temp);
if (cpu_state.abrt)
return 1;
if (cpu_state.flags & D_FLAG)
EDI--;
else
EDI++;
CLOCK_CYCLES(15);
PREFETCH_RUN(15, 1, -1, 1, 0, 1, 0, 1);
return 0;
}
static int
opINSW_a16(uint32_t fetchdat)
{
uint16_t temp;
addr64a[0] = addr64a[1] = 0x00000000;
SEG_CHECK_WRITE(&cpu_state.seg_es);
check_io_perm(DX, 2);
CHECK_WRITE(&cpu_state.seg_es, DI, DI + 1UL);
high_page = 0;
do_mmut_ww(es, DI, addr64a);
if (cpu_state.abrt)
return 1;
temp = inw(DX);
writememw_n(es, DI, addr64a, temp);
if (cpu_state.abrt)
return 1;
if (cpu_state.flags & D_FLAG)
DI -= 2;
else
DI += 2;
CLOCK_CYCLES(15);
PREFETCH_RUN(15, 1, -1, 1, 0, 1, 0, 0);
return 0;
}
static int
opINSW_a32(uint32_t fetchdat)
{
uint16_t temp;
addr64a[0] = addr64a[1] = 0x00000000;
SEG_CHECK_WRITE(&cpu_state.seg_es);
high_page = 0;
check_io_perm(DX, 2);
CHECK_WRITE(&cpu_state.seg_es, EDI, EDI + 1UL);
do_mmut_ww(es, EDI, addr64a);
if (cpu_state.abrt)
return 1;
temp = inw(DX);
writememw_n(es, EDI, addr64a, temp);
if (cpu_state.abrt)
return 1;
if (cpu_state.flags & D_FLAG)
EDI -= 2;
else
EDI += 2;
CLOCK_CYCLES(15);
PREFETCH_RUN(15, 1, -1, 1, 0, 1, 0, 1);
return 0;
}
/* INSD, 16-bit addressing: read a dword from port DX and store it at
   ES:DI, then step DI by 4 per DF. The destination page is translated
   before the port read so a page fault is taken before any I/O occurs. */
static int
opINSL_a16(uint32_t fetchdat)
{
    uint32_t temp;
    addr64a[0] = addr64a[1] = addr64a[2] = addr64a[3] = 0x00000000;
    SEG_CHECK_WRITE(&cpu_state.seg_es);
    check_io_perm(DX, 4);
    CHECK_WRITE(&cpu_state.seg_es, DI, DI + 3UL);
    high_page = 0;
    do_mmut_wl(es, DI, addr64a);
    if (cpu_state.abrt)
        return 1;
    temp = inl(DX);
    writememl_n(es, DI, addr64a, temp);
    if (cpu_state.abrt)
        return 1;
    if (cpu_state.flags & D_FLAG)
        DI -= 4;
    else
        DI += 4;
    CLOCK_CYCLES(15);
    PREFETCH_RUN(15, 1, -1, 0, 1, 0, 1, 0);
    return 0;
}
static int
opINSL_a32(uint32_t fetchdat)
{
    /* INSD, 32-bit addressing: read a dword from port DX and store it at
       ES:EDI, then step EDI by 4 per DF.
       The MMU translation must use the same offset that is later written:
       the original code translated ES:DI (16-bit offset) but wrote to
       ES:EDI, corrupting the store whenever EDI crossed 0xFFFF. The
       sibling opINSB_a32/opINSW_a32 handlers already use EDI here. */
    uint32_t temp;
    addr64a[0] = addr64a[1] = addr64a[2] = addr64a[3] = 0x00000000;
    SEG_CHECK_WRITE(&cpu_state.seg_es);
    check_io_perm(DX, 4);
    CHECK_WRITE(&cpu_state.seg_es, EDI, EDI + 3UL);
    high_page = 0;
    do_mmut_wl(es, EDI, addr64a); /* was: do_mmut_wl(es, DI, addr64a) */
    if (cpu_state.abrt)
        return 1;
    temp = inl(DX);
    writememl_n(es, EDI, addr64a, temp);
    if (cpu_state.abrt)
        return 1;
    if (cpu_state.flags & D_FLAG)
        EDI -= 4;
    else
        EDI += 4;
    CLOCK_CYCLES(15);
    PREFETCH_RUN(15, 1, -1, 0, 1, 0, 1, 1);
    return 0;
}
static int
opOUTSB_a16(uint32_t fetchdat)
{
uint8_t temp;
SEG_CHECK_READ(cpu_state.ea_seg);
CHECK_READ(cpu_state.ea_seg, SI, SI);
temp = readmemb(cpu_state.ea_seg->base, SI);
if (cpu_state.abrt)
return 1;
check_io_perm(DX, 1);
if (cpu_state.flags & D_FLAG)
SI--;
else
SI++;
outb(DX, temp);
CLOCK_CYCLES(14);
PREFETCH_RUN(14, 1, -1, 1, 0, 1, 0, 0);
return 0;
}
static int
opOUTSB_a32(uint32_t fetchdat)
{
uint8_t temp;
SEG_CHECK_READ(cpu_state.ea_seg);
CHECK_READ(cpu_state.ea_seg, ESI, ESI);
temp = readmemb(cpu_state.ea_seg->base, ESI);
if (cpu_state.abrt)
return 1;
check_io_perm(DX, 1);
if (cpu_state.flags & D_FLAG)
ESI--;
else
ESI++;
outb(DX, temp);
CLOCK_CYCLES(14);
PREFETCH_RUN(14, 1, -1, 1, 0, 1, 0, 1);
return 0;
}
static int
opOUTSW_a16(uint32_t fetchdat)
{
uint16_t temp;
SEG_CHECK_READ(cpu_state.ea_seg);
CHECK_READ(cpu_state.ea_seg, SI, SI + 1UL);
temp = readmemw(cpu_state.ea_seg->base, SI);
if (cpu_state.abrt)
return 1;
check_io_perm(DX, 2);
if (cpu_state.flags & D_FLAG)
SI -= 2;
else
SI += 2;
outw(DX, temp);
CLOCK_CYCLES(14);
PREFETCH_RUN(14, 1, -1, 1, 0, 1, 0, 0);
return 0;
}
static int
opOUTSW_a32(uint32_t fetchdat)
{
uint16_t temp;
SEG_CHECK_READ(cpu_state.ea_seg);
CHECK_READ(cpu_state.ea_seg, ESI, ESI + 1UL);
temp = readmemw(cpu_state.ea_seg->base, ESI);
if (cpu_state.abrt)
return 1;
check_io_perm(DX, 2);
if (cpu_state.flags & D_FLAG)
ESI -= 2;
else
ESI += 2;
outw(DX, temp);
CLOCK_CYCLES(14);
PREFETCH_RUN(14, 1, -1, 1, 0, 1, 0, 1);
return 0;
}
/* OUTSD, 16-bit addressing: read a dword from ea_seg:SI and write it to
   port DX, then step SI by 4 per DF. */
static int
opOUTSL_a16(uint32_t fetchdat)
{
    uint32_t temp;
    SEG_CHECK_READ(cpu_state.ea_seg);
    CHECK_READ(cpu_state.ea_seg, SI, SI + 3UL);
    temp = readmeml(cpu_state.ea_seg->base, SI);
    if (cpu_state.abrt)
        return 1;
    check_io_perm(DX, 4);
    if (cpu_state.flags & D_FLAG)
        SI -= 4;
    else
        SI += 4;
    /* NOTE(review): the port is passed as EDX here while check_io_perm and
       the byte/word variants use DX; presumably outl() truncates the port
       to 16 bits -- confirm. */
    outl(EDX, temp);
    CLOCK_CYCLES(14);
    PREFETCH_RUN(14, 1, -1, 0, 1, 0, 1, 0);
    return 0;
}
static int
opOUTSL_a32(uint32_t fetchdat)
{
uint32_t temp;
SEG_CHECK_READ(cpu_state.ea_seg);
CHECK_READ(cpu_state.ea_seg, ESI, ESI + 3UL);
temp = readmeml(cpu_state.ea_seg->base, ESI);
if (cpu_state.abrt)
return 1;
check_io_perm(DX, 4);
if (cpu_state.flags & D_FLAG)
ESI -= 4;
else
ESI += 4;
outl(EDX, temp);
CLOCK_CYCLES(14);
PREFETCH_RUN(14, 1, -1, 0, 1, 0, 1, 1);
return 0;
}
``` | /content/code_sandbox/src/cpu/x86_ops_string_2386.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 9,148 |
```c
/*
* 86Box A hypervisor and IBM PC system emulator that specializes in
* running old operating systems and software designed for IBM
* PC systems and compatibles from 1981 through fairly recent
* system designs based on the PCI bus.
*
* This file is part of the 86Box distribution.
*
* x86 CPU segment emulation.
*
*
*
* Authors: Sarah Walker, <path_to_url
* Miran Grca, <mgrca8@gmail.com>
*
*/
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <stdarg.h>
#include <wchar.h>
#define HAVE_STDARG_H
#include <86box/86box.h>
#include "cpu.h"
#include <86box/device.h>
#include <86box/timer.h>
#include <86box/machine.h>
#include <86box/mem.h>
#include <86box/nvr.h>
#include <86box/plat_fallthrough.h>
#include <86box/plat_unused.h>
#include "x86.h"
#include "x86_flags.h"
#include "x86seg.h"
#include "x86seg_common.h"
#include "386_common.h"
#ifdef OPS_286_386
#define seg_readmembl readmembl_2386
#define seg_readmemwl readmemwl_2386
#define seg_readmemll readmemll_2386
#define seg_writemembl writemembl_2386
#define seg_writememwl writememwl_2386
#define seg_writememll writememll_2386
#else
#define seg_readmembl readmembl_2386
#define seg_readmemwl readmemwl_2386
#define seg_readmemll readmemll_2386
#define seg_writemembl writemembl_2386
#define seg_writememwl writememwl_2386
#define seg_writememll writememll_2386
#endif
#define DPL ((segdat[2] >> 13) & 3)
#define DPL2 ((segdat2[2] >> 13) & 3)
#define DPL3 ((segdat3[2] >> 13) & 3)
#ifdef ENABLE_X86SEG_LOG
int x86seg_do_log = ENABLE_X86SEG_LOG;
/* printf-style logging gated on the x86seg_do_log runtime flag; forwards
   to the common pclog_ex() sink. */
static void
x86seg_log(const char *fmt, ...)
{
    va_list ap;
    if (x86seg_do_log) {
        va_start(ap, fmt);
        pclog_ex(fmt, ap);
        va_end(ap);
    }
}
#else
# define x86seg_log(fmt, ...)
#endif
#ifdef USE_DYNAREC
extern int cpu_block_end;
#endif
/* Deliver exception vector x86_abrt to the CPU.
   CS:PC are rolled back to the start of the faulting instruction so the
   handler sees a consistent restart point. Protected mode (PE set in MSW)
   dispatches through op_pmodeint(); real mode pushes FLAGS/CS/IP, vectors
   through the IVT and returns before the error-code logic below. */
void
#ifdef OPS_286_386
x86_doabrt_2386(int x86_abrt)
#else
x86_doabrt(int x86_abrt)
#endif
{
#ifndef USE_NEW_DYNAREC
    CS = oldcs;
#endif
    cpu_state.pc = cpu_state.oldpc;
    /* Restore the pre-fault privilege level in the cached CS access byte. */
    cpu_state.seg_cs.access = (oldcpl << 5) | 0x80;
    cpu_state.seg_cs.ar_high = 0x10;
    if (msw & 1)
        op_pmodeint(x86_abrt, 0);
    else {
        /* Real mode: IVT entry is 4 bytes per vector at idt.base. */
        uint32_t addr = (x86_abrt << 2) + idt.base;
        if (stack32) {
            writememw(ss, ESP - 2, cpu_state.flags);
            writememw(ss, ESP - 4, CS);
            writememw(ss, ESP - 6, cpu_state.pc);
            ESP -= 6;
        } else {
            writememw(ss, ((SP - 2) & 0xffff), cpu_state.flags);
            writememw(ss, ((SP - 4) & 0xffff), CS);
            writememw(ss, ((SP - 6) & 0xffff), cpu_state.pc);
            SP -= 6;
        }
        cpu_state.flags &= ~(I_FLAG | T_FLAG);
#ifndef USE_NEW_DYNAREC
        oxpc = cpu_state.pc;
#endif
        cpu_state.pc = readmemw(0, addr);
        op_loadcs(readmemw(0, addr + 2));
        return;
    }
    /* Protected-mode path only from here: bail out if the interrupt
       dispatch itself faulted or the CPU was reset. */
    if (cpu_state.abrt || x86_was_reset)
        return;
    /* Push the error code, sized per the interrupt gate that was used. */
    if (intgatesize == 16) {
        if (stack32) {
            writememw(ss, ESP - 2, abrt_error);
            ESP -= 2;
        } else {
            writememw(ss, ((SP - 2) & 0xffff), abrt_error);
            SP -= 2;
        }
    } else {
        if (stack32) {
            writememl(ss, ESP - 4, abrt_error);
            ESP -= 4;
        } else {
            writememl(ss, ((SP - 4) & 0xffff), abrt_error);
            SP -= 4;
        }
    }
}
/* Record the current stack size attribute (SS descriptor B bit) and mirror
   it in the cached CPU status word used by the interpreter/dynarec. */
static void
set_stack32(int s)
{
    stack32 = s;

    if (s)
        cpu_cur_status |= CPU_STATUS_STACK32;
    else
        cpu_cur_status &= ~CPU_STATUS_STACK32;
}
/* Set the default operand/address size for the current code segment:
   use32 is 0x300 for a 32-bit segment, 0 for a 16-bit one, and the
   cached CPU status flag is kept in sync. */
static void
set_use32(int u)
{
    if (u) {
        use32 = 0x300;
        cpu_cur_status |= CPU_STATUS_USE32;
    } else {
        use32 = 0;
        cpu_cur_status &= ~CPU_STATUS_USE32;
    }
}
#ifndef OPS_286_386
/* Populate segment register 's' from an 8-byte descriptor held in
   segdat[0..3] (four little-endian words): limit (with 4K granularity
   scaling), base (bits 24-31 only on 386+), access byte and high
   attribute byte, plus the cached limit_low/limit_high window used for
   limit checks. Also refreshes the flat-DS/flat-SS status bits. */
void
do_seg_load(x86seg *s, uint16_t *segdat)
{
    s->limit = segdat[0] | ((segdat[3] & 0x000f) << 16);
    /* G bit set: limit is in 4K pages. */
    if (segdat[3] & 0x0080)
        s->limit = (s->limit << 12) | 0xfff;
    s->base = segdat[1] | ((segdat[2] & 0x00ff) << 16);
    if (is386)
        s->base |= ((segdat[3] >> 8) << 24);
    s->access  = segdat[2] >> 8;
    s->ar_high = segdat[3] & 0xff;

    if (((segdat[2] & 0x1800) != 0x1000) || !(segdat[2] & (1 << 10))) {
        /* Not an expand-down data segment (code, system, or expand-up
           data): valid offsets are [0, limit]. */
        s->limit_high = s->limit;
        s->limit_low  = 0;
    } else {
        /* Expand-down data segment: valid offsets are (limit, top],
           where top depends on the B bit (0xffffffff or 0xffff). */
        s->limit_high = (segdat[3] & 0x40) ? 0xffffffff : 0xffff;
        s->limit_low  = s->limit + 1;
    }

    if (s == &cpu_state.seg_ds) {
        if ((s->base == 0) && (s->limit_low == 0) && (s->limit_high == 0xffffffff))
            cpu_cur_status &= ~CPU_STATUS_NOTFLATDS;
        else
            cpu_cur_status |= CPU_STATUS_NOTFLATDS;
    }
    if (s == &cpu_state.seg_ss) {
        if ((s->base == 0) && (s->limit_low == 0) && (s->limit_high == 0xffffffff))
            cpu_cur_status &= ~CPU_STATUS_NOTFLATSS;
        else
            cpu_cur_status |= CPU_STATUS_NOTFLATSS;
    }
}
#endif
/* Give a segment register the fixed attributes used in virtual-8086 mode:
   present, DPL 3, writable data (access 0xe2), 64KB byte-granular limit. */
static void
do_seg_v86_init(x86seg *s)
{
    s->limit      = 0xffff;
    s->limit_low  = 0;
    s->limit_high = 0xffff;
    s->access     = 0xe2;
    s->ar_high    = 0x10;
}
/* Re-validate a data segment register after a privilege-level change
   (used on outer-level RETF): if the cached descriptor is out of range
   for its table or no longer accessible at the current CPL, the register
   is silently reloaded with the NULL selector. */
static void
check_seg_valid(x86seg *s)
{
    const x86seg *table = (s->seg & 0x0004) ? &ldt : &gdt;
    int           dpl   = (s->access >> 5) & 3;
    /* Descriptor must lie entirely within its descriptor table. */
    int           ok    = (((s->seg & 0xfff8UL) + 7UL) <= table->limit);

    switch (s->access & 0x1f) {
        case 0x10:
        case 0x11:
        case 0x12:
        case 0x13: /* Data segments */
        case 0x14:
        case 0x15:
        case 0x16:
        case 0x17:
        case 0x1a:
        case 0x1b: /* Readable non-conforming code */
            /* Both the selector's RPL and the current CPL must be
               numerically <= the descriptor's DPL. */
            if (((s->seg & 3) > dpl) || ((CPL) > dpl))
                ok = 0;
            break;

        case 0x1e:
        case 0x1f: /* Readable conforming code - always accessible */
            break;

        default: /* Not usable as a data segment at all */
            ok = 0;
            break;
    }

    if (!ok)
        op_loadseg(0, s);
}
/* Fetch an 8-byte descriptor at linear address 'addr' into segdat (viewed
   either as four words or two dwords, depending on bus width). When
   'override' is set, cpl_override is raised around the reads so the table
   access bypasses the current privilege level. */
static void
read_descriptor(uint32_t addr, uint16_t *segdat, uint32_t *segdat32, int override)
{
    if (override)
        cpl_override = 1;

    if (cpu_16bitbus) {
        /* 16-bit bus: four word-sized fetches. */
        for (int i = 0; i < 4; i++)
            segdat[i] = readmemw(0, addr + (i << 1));
    } else {
        /* 32-bit bus: two dword-sized fetches. */
        segdat32[0] = readmeml(0, addr);
        segdat32[1] = readmeml(0, addr + 4);
    }

    if (override)
        cpl_override = 0;
}
/* Load a data/stack segment register (DS/ES/FS/GS/SS) from selector 'seg'.
   Protected mode performs the full descriptor fetch and privilege checks
   (with stricter rules for SS); real/V86 mode just sets base = seg << 4.
   With USE_NEW_DYNAREC the function returns non-zero on fault/abort. */
#ifdef USE_NEW_DYNAREC
int
#else
void
#endif
#ifdef OPS_286_386
loadseg_2386(uint16_t seg, x86seg *s)
#else
loadseg(uint16_t seg, x86seg *s)
#endif
{
    uint16_t      segdat[4];
    uint32_t      addr;
    uint32_t     *segdat32 = (uint32_t *) segdat;
    int           dpl;
    const x86seg *dt;

    if ((msw & 1) && !(cpu_state.eflags & VM_FLAG)) {
        /* Protected mode (not V86). */
        if (!(seg & 0xfffc)) {
            /* NULL selector: a #SS fault for SS, otherwise loads an
               unusable segment (present bit clear, base -1). */
            if (s == &cpu_state.seg_ss) {
                x86ss(NULL, 0);
#ifdef USE_NEW_DYNAREC
                return 1;
#else
                return;
#endif
            }
            s->seg     = 0;
            s->access  = 0x80;
            s->ar_high = 0x10;
            s->base    = -1;
            if (s == &cpu_state.seg_ds)
                cpu_cur_status |= CPU_STATUS_NOTFLATDS;
#ifdef USE_NEW_DYNAREC
            return 0;
#else
            return;
#endif
        }
        addr = seg & 0xfff8;
        dt   = (seg & 0x0004) ? &ldt : &gdt;
        if ((addr + 7) > dt->limit) {
            x86gpf("loadseg(): Bigger than LDT limit", seg & 0xfffc);
#ifdef USE_NEW_DYNAREC
            return 1;
#else
            return;
#endif
        }
        addr += dt->base;
        read_descriptor(addr, segdat, segdat32, 1);
        if (cpu_state.abrt)
#ifdef USE_NEW_DYNAREC
            return 1;
#else
            return;
#endif
        dpl = (segdat[2] >> 13) & 3;
        if (s == &cpu_state.seg_ss) {
            /* SS: RPL and DPL must both equal CPL, type must be a
               writable data segment, and it must be present. */
            if (!(seg & 0xfffc)) {
                x86gpf("loadseg(): Zero stack segment", seg & 0xfffc);
#ifdef USE_NEW_DYNAREC
                return 1;
#else
                return;
#endif
            }
            if ((seg & 0x0003) != CPL) {
                x86gpf("loadseg(): Stack segment RPL != CPL", seg & 0xfffc);
#ifdef USE_NEW_DYNAREC
                return 1;
#else
                return;
#endif
            }
            if (dpl != CPL) {
                x86gpf("loadseg(): Stack segment DPL != CPL", seg & 0xfffc);
#ifdef USE_NEW_DYNAREC
                return 1;
#else
                return;
#endif
            }
            switch ((segdat[2] >> 8) & 0x1f) {
                case 0x12:
                case 0x13:
                case 0x16:
                case 0x17:
                    /* R/W */
                    break;
                default:
                    x86gpf("loadseg(): Unknown stack segment type", seg & ~3);
#ifdef USE_NEW_DYNAREC
                    return 1;
#else
                    return;
#endif
            }
            if (!(segdat[2] & 0x8000)) {
                x86ss(NULL, seg & 0xfffc);
#ifdef USE_NEW_DYNAREC
                return 1;
#else
                return;
#endif
            }
            /* B bit of the new SS selects 16/32-bit stack operations. */
            set_stack32((segdat[3] & 0x40) ? 1 : 0);
        } else if (s != &cpu_state.seg_cs) {
            /* Plain data segment loads (DS/ES/FS/GS). */
            x86seg_log("Seg data %04X %04X %04X %04X\n", segdat[0], segdat[1], segdat[2], segdat[3]);
            x86seg_log("Seg type %03X\n", segdat[2] & 0x1f00);

            switch ((segdat[2] >> 8) & 0x1f) {
                case 0x10:
                case 0x11:
                case 0x12:
                case 0x13: /* Data segments */
                case 0x14:
                case 0x15:
                case 0x16:
                case 0x17:
                case 0x1a:
                case 0x1b: /* Readable non-conforming code */
                    if ((seg & 0x0003) > dpl) {
                        x86gpf("loadseg(): Normal segment RPL > DPL", seg & 0xfffc);
#ifdef USE_NEW_DYNAREC
                        return 1;
#else
                        return;
#endif
                    }
                    if ((CPL) > dpl) {
                        x86gpf("loadseg(): Normal segment DPL < CPL", seg & 0xfffc);
#ifdef USE_NEW_DYNAREC
                        return 1;
#else
                        return;
#endif
                    }
                    break;
                case 0x1e:
                case 0x1f: /* Readable conforming code */
                    break;
                default:
                    x86gpf("loadseg(): Unknown normal segment type", seg & 0xfffc);
#ifdef USE_NEW_DYNAREC
                    return 1;
#else
                    return;
#endif
            }
        }

        if (!(segdat[2] & 0x8000)) {
            x86np("Load data seg not present", seg & 0xfffc);
#ifdef USE_NEW_DYNAREC
            return 1;
#else
            return;
#endif
        }
        s->seg = seg;
        do_seg_load(s, segdat);
        cpl_override = 1;
        writememw(0, addr + 4, segdat[2] | 0x100); /* Set accessed bit */
        cpl_override = 0;
        s->checked = 0;
#ifdef USE_DYNAREC
        if (s == &cpu_state.seg_ds)
            codegen_flat_ds = 0;
        if (s == &cpu_state.seg_ss)
            codegen_flat_ss = 0;
#endif
    } else {
        /* Real or V86 mode: base is simply selector << 4, attributes are
           fixed (present, DPL 3, writable). */
        s->access  = 0xe2;
        s->ar_high = 0x10;
        s->base    = seg << 4;
        s->seg     = seg;
        s->checked = 1;
#ifdef USE_DYNAREC
        if (s == &cpu_state.seg_ds)
            codegen_flat_ds = 0;
        if (s == &cpu_state.seg_ss)
            codegen_flat_ss = 0;
#endif
        if (s == &cpu_state.seg_ss && (cpu_state.eflags & VM_FLAG))
            set_stack32(0);
    }

    /* Keep the flat-DS/flat-SS fast-path status bits coherent. */
    if (s == &cpu_state.seg_ds) {
        if (s->base == 0 && s->limit_low == 0 && s->limit_high == 0xffffffff)
            cpu_cur_status &= ~CPU_STATUS_NOTFLATDS;
        else
            cpu_cur_status |= CPU_STATUS_NOTFLATDS;
    }
    if (s == &cpu_state.seg_ss) {
        if (s->base == 0 && s->limit_low == 0 && s->limit_high == 0xffffffff)
            cpu_cur_status &= ~CPU_STATUS_NOTFLATSS;
        else
            cpu_cur_status |= CPU_STATUS_NOTFLATSS;
    }

#ifdef USE_NEW_DYNAREC
    return cpu_state.abrt;
#endif
}
/* Load CS from selector 'seg' for a plain (same-task) transfer.
   Protected mode accepts only code segments here (the system-segment
   branch always faults); real/V86 mode sets base = seg << 4. */
void
#ifdef OPS_286_386
loadcs_2386(uint16_t seg)
#else
loadcs(uint16_t seg)
#endif
{
    uint16_t      segdat[4];
    uint32_t      addr;
    uint32_t     *segdat32 = (uint32_t *) segdat;
    const x86seg *dt;

    x86seg_log("Load CS %04X\n", seg);
    if ((msw & 1) && !(cpu_state.eflags & VM_FLAG)) {
        if (!(seg & 0xfffc)) {
            x86gpf("loadcs(): Protected mode selector is zero", 0);
            return;
        }
        addr = seg & 0xfff8;
        dt   = (seg & 0x0004) ? &ldt : &gdt;
        if ((addr + 7) > dt->limit) {
            x86gpf("loadcs(): Protected mode selector > DT limit", seg & 0xfffc);
            return;
        }
        addr += dt->base;
        read_descriptor(addr, segdat, segdat32, 1);
        if (cpu_state.abrt)
            return;
        if (segdat[2] & 0x1000) {
            /* Normal code segment */
            if (!(segdat[2] & 0x0400)) {
                /* Not conforming */
                if ((seg & 3) > CPL) {
                    x86gpf("loadcs(): Non-conforming RPL > CPL", seg & 0xfffc);
                    return;
                }
                if (CPL != DPL) {
                    x86gpf("loadcs(): Non-conforming CPL != DPL", seg & 0xfffc);
                    return;
                }
            }
            if (CPL < DPL) {
                x86gpf("loadcs(): CPL < DPL", seg & ~3);
                return;
            }
            if (!(segdat[2] & 0x8000)) {
                x86np("Load CS not present", seg & 0xfffc);
                return;
            }
            set_use32(segdat[3] & 0x40);
            /* RPL of the loaded CS is forced to the current CPL. */
            CS = (seg & 0xfffc) | CPL;
            do_seg_load(&cpu_state.seg_cs, segdat);
            use32 = (segdat[3] & 0x40) ? 0x300 : 0;
            if ((CPL == 3) && (oldcpl != 3))
                flushmmucache_nopc();
#ifdef USE_NEW_DYNAREC
            oldcpl = CPL;
#endif
            cpl_override = 1;
            writememw(0, addr + 4, segdat[2] | 0x0100); /* Set accessed bit */
            cpl_override = 0;
        } else {
            /* System segment: no system type is legal for a plain CS load,
               so the switch below always takes the default (#GP) path. */
            if (!(segdat[2] & 0x8000)) {
                x86np("Load CS system seg not present", seg & 0xfffc);
                return;
            }
            switch (segdat[2] & 0x0f00) {
                default:
                    x86gpf("Load CS system segment has bits 0-3 of access rights set", seg & 0xfffc);
                    return;
            }
        }
    } else {
        /* Real or V86 mode. */
        cpu_state.seg_cs.base       = (seg << 4);
        cpu_state.seg_cs.limit      = 0xffff;
        cpu_state.seg_cs.limit_low  = 0;
        cpu_state.seg_cs.limit_high = 0xffff;
        cpu_state.seg_cs.seg        = seg & 0xffff;
        cpu_state.seg_cs.access     = (cpu_state.eflags & VM_FLAG) ? 0xe2 : 0x82;
        cpu_state.seg_cs.ar_high    = 0x10;
        if ((CPL == 3) && (oldcpl != 3))
            flushmmucache_nopc();
#ifdef USE_NEW_DYNAREC
        oldcpl = CPL;
#endif
    }
}
/* Load CS for a far JMP. Handles plain code segments, call gates
   (16- and 32-bit, which only redirect the jump - no stack switch on JMP)
   and task gates (which trigger a full 286/386 task switch). */
void
#ifdef OPS_286_386
loadcsjmp_2386(uint16_t seg, uint32_t old_pc)
#else
loadcsjmp(uint16_t seg, uint32_t old_pc)
#endif
{
    uint16_t      type;
    uint16_t      seg2;
    uint16_t      segdat[4];
    uint32_t      addr;
    uint32_t      newpc;
    uint32_t     *segdat32 = (uint32_t *) segdat;
    const x86seg *dt;

    if ((msw & 1) && !(cpu_state.eflags & VM_FLAG)) {
        if (!(seg & 0xfffc)) {
            x86gpf("loadcsjmp(): Selector is zero", 0);
            return;
        }
        addr = seg & 0xfff8;
        dt   = (seg & 0x0004) ? &ldt : &gdt;
        if ((addr + 7) > dt->limit) {
            x86gpf("loacsjmp(): Selector > DT limit", seg & 0xfffc);
            return;
        }
        addr += dt->base;
        read_descriptor(addr, segdat, segdat32, 1);
        if (cpu_state.abrt)
            return;
        x86seg_log("%04X %04X %04X %04X\n", segdat[0], segdat[1], segdat[2], segdat[3]);
        if (segdat[2] & 0x1000) {
            /* Normal code segment */
            if (!(segdat[2] & 0x0400)) {
                /* Not conforming */
                if ((seg & 0x0003) > CPL) {
                    x86gpf("loadcsjmp(): segment PL > CPL", seg & 0xfffc);
                    return;
                }
                if (CPL != DPL) {
                    x86gpf("loadcsjmp(): CPL != DPL", seg & 0xfffc);
                    return;
                }
            }
            if (CPL < DPL) {
                x86gpf("loadcsjmp(): CPL < DPL", seg & 0xfffc);
                return;
            }
            if (!(segdat[2] & 0x8000)) {
                x86np("Load CS JMP not present", seg & 0xfffc);
                return;
            }
            set_use32(segdat[3] & 0x0040);

            cpl_override = 1;
            writememw(0, addr + 4, segdat[2] | 0x0100); /* Set accessed bit */
            cpl_override = 0;

            /* JMP never changes CPL: force RPL = CPL and patch the cached
               descriptor DPL to match. */
            CS        = (seg & 0xfffc) | CPL;
            segdat[2] = (segdat[2] & ~(3 << 13)) | (CPL << 13);

            do_seg_load(&cpu_state.seg_cs, segdat);
            if ((CPL == 3) && (oldcpl != 3))
                flushmmucache_nopc();
#ifdef USE_NEW_DYNAREC
            oldcpl = CPL;
#endif
            cycles -= timing_jmp_pm;
        } else { /* System segment */
            if (!(segdat[2] & 0x8000)) {
                x86np("Load CS JMP system selector not present", seg & 0xfffc);
                return;
            }
            type  = segdat[2] & 0x0f00;
            newpc = segdat[0];
            /* 386 (32-bit) gates carry the high word of the target EIP. */
            if (type & 0x0800)
                newpc |= (segdat[3] << 16);
            switch (type) {
                case 0x0400: /* Call gate */
                case 0x0c00:
                    cgate32 = (type & 0x0800);
                    cgate16 = !cgate32;
#ifndef USE_NEW_DYNAREC
                    oldcs = CS;
#endif
                    cpu_state.oldpc = cpu_state.pc;
                    if (DPL < CPL) {
                        x86gpf("loadcsjmp(): Call gate DPL < CPL", seg & 0xfffc);
                        return;
                    }
                    if (DPL < (seg & 0x0003)) {
                        x86gpf("loadcsjmp(): Call gate DPL< RPL", seg & ~3);
                        return;
                    }
                    if (!(segdat[2] & 0x8000)) {
                        x86np("Load CS JMP call gate not present", seg & 0xfffc);
                        return;
                    }
                    /* Fetch and validate the gate's target CS selector. */
                    seg2 = segdat[1];

                    if (!(seg2 & 0xfffc)) {
                        x86gpf("Load CS JMP call gate selector is NULL", 0);
                        return;
                    }
                    addr = seg2 & 0xfff8;
                    dt   = (seg2 & 0x0004) ? &ldt : &gdt;
                    if ((addr + 7) > dt->limit) {
                        x86gpf("loadcsjmp(): Call gate selector > DT limit", seg2 & 0xfffc);
                        return;
                    }
                    addr += dt->base;
                    read_descriptor(addr, segdat, segdat32, 1);
                    if (cpu_state.abrt)
                        return;

                    if (DPL > CPL) {
                        x86gpf("loadcsjmp(): ex DPL > CPL", seg2 & 0xfffc);
                        return;
                    }
                    if (!(segdat[2] & 0x8000)) {
                        x86np("Load CS JMP from call gate not present", seg2 & 0xfffc);
                        return;
                    }

                    switch (segdat[2] & 0x1f00) {
                        case 0x1800:
                        case 0x1900:
                        case 0x1a00:
                        case 0x1b00: /* Non-conforming code */
                            if (DPL > CPL) {
                                x86gpf("loadcsjmp(): Non-conforming DPL > CPL", seg2 & 0xfffc);
                                return;
                            }
                            fallthrough;
                        case 0x1c00:
                        case 0x1d00:
                        case 0x1e00:
                        case 0x1f00: /* Conforming */
                            CS = seg2;
                            do_seg_load(&cpu_state.seg_cs, segdat);
                            if ((CPL == 3) && (oldcpl != 3))
                                flushmmucache_nopc();
#ifdef USE_NEW_DYNAREC
                            oldcpl = CPL;
#endif
                            set_use32(segdat[3] & 0x40);
                            cpu_state.pc = newpc;

                            cpl_override = 1;
                            writememw(0, addr + 4, segdat[2] | 0x100); /* Set accessed bit */
                            cpl_override = 0;
                            break;

                        default:
                            x86gpf("loadcsjmp(): Unknown type", seg2 & 0xfffc);
                            return;
                    }
                    cycles -= timing_jmp_pm_gate;
                    break;

                case 0x100: /* 286 Task gate */
                case 0x900: /* 386 Task gate */
                    /* Task switch: restore PC first so the outgoing TSS
                       records the address of the JMP instruction. */
                    cpu_state.pc = old_pc;
                    optype       = JMP;
                    cpl_override = 1;
                    op_taskswitch286(seg, segdat, segdat[2] & 0x800);
                    cpu_state.flags &= ~NT_FLAG;
                    cpl_override = 0;
                    return;

                default:
                    x86gpf("Load CS JMP call gate selector unknown type", 0);
                    return;
            }
        }
    } else {
        /* Real or V86 mode. */
        cpu_state.seg_cs.base       = seg << 4;
        cpu_state.seg_cs.limit      = 0xffff;
        cpu_state.seg_cs.limit_low  = 0;
        cpu_state.seg_cs.limit_high = 0xffff;
        cpu_state.seg_cs.seg        = seg;
        cpu_state.seg_cs.access     = (cpu_state.eflags & VM_FLAG) ? 0xe2 : 0x82;
        cpu_state.seg_cs.ar_high    = 0x10;
        if ((CPL == 3) && (oldcpl != 3))
            flushmmucache_nopc();
#ifdef USE_NEW_DYNAREC
        oldcpl = CPL;
#endif
        cycles -= timing_jmp_rm;
    }
}
/* Push one word, honouring the current stack size attribute; the stack
   pointer only moves if the memory write did not abort. */
static void
PUSHW(uint16_t v)
{
    if (stack32) {
        writememw(ss, ESP - 2, v);
        if (!cpu_state.abrt)
            ESP -= 2;
    } else {
        writememw(ss, (SP - 2) & 0xffff, v);
        if (!cpu_state.abrt)
            SP -= 2;
    }
}
/* Push one dword. On a 16-bit bus it is split into two word pushes
   (high word first); otherwise a single dword write, advancing the
   stack pointer only when the write did not abort. */
static void
PUSHL(uint32_t v)
{
    if (cpu_16bitbus) {
        PUSHW(v >> 16);
        PUSHW(v & 0xffff);
        return;
    }

    if (stack32) {
        writememl(ss, ESP - 4, v);
        if (!cpu_state.abrt)
            ESP -= 4;
    } else {
        writememl(ss, (SP - 4) & 0xffff, v);
        if (!cpu_state.abrt)
            SP -= 4;
    }
}
/* Push a selector as a dword slot: on a 32-bit bus only the low word is
   actually written (the upper word of the slot is left untouched), but
   the stack pointer still moves by 4. */
static void
PUSHL_SEL(uint32_t v)
{
    if (cpu_16bitbus) {
        PUSHW(v >> 16);
        PUSHW(v & 0xffff);
        return;
    }

    if (stack32) {
        writememw(ss, ESP - 4, v);
        if (!cpu_state.abrt)
            ESP -= 4;
    } else {
        writememw(ss, (SP - 4) & 0xffff, v);
        if (!cpu_state.abrt)
            SP -= 4;
    }
}
/* Pop one word. Returns 0 (without moving the stack pointer) when the
   read aborted. */
static uint16_t
POPW(void)
{
    uint16_t val = stack32 ? readmemw(ss, ESP) : readmemw(ss, SP);

    if (cpu_state.abrt)
        return 0;

    if (stack32)
        ESP += 2;
    else
        SP += 2;

    return val;
}
/* Pop one dword. On a 16-bit bus this is two word pops (low word first);
   otherwise a single dword read that only advances the stack pointer if
   it did not abort. */
static uint32_t
POPL(void)
{
    uint32_t val;

    if (cpu_16bitbus) {
        val = POPW();
        val |= (POPW() << 16);
        return val;
    }

    val = stack32 ? readmeml(ss, ESP) : readmeml(ss, SP);
    if (cpu_state.abrt)
        return 0;

    if (stack32)
        ESP += 4;
    else
        SP += 4;

    return val;
}
/* Load CS for a far CALL. Handles direct code-segment calls, call gates
   (including the inner-ring path with its stack switch and parameter copy)
   and task gates. The parameter-copy loops clamp reads that would cross
   the old SS limit, reading word/byte fragments near the boundary. */
#ifdef OPS_286_386
#ifdef USE_NEW_DYNAREC
void
loadcscall_2386(uint16_t seg, uint32_t old_pc)
#else
void
loadcscall_2386(uint16_t seg)
#endif
#else
#ifdef USE_NEW_DYNAREC
void
loadcscall(uint16_t seg, uint32_t old_pc)
#else
void
loadcscall(uint16_t seg)
#endif
#endif
{
    uint16_t      seg2;
    uint16_t      newss;
    uint16_t      segdat[4];
    uint16_t      segdat2[4];
    uint32_t      addr;
    uint32_t      oldssbase = ss;
    uint32_t      oaddr;
    uint32_t      newpc;
    uint32_t     *segdat32  = (uint32_t *) segdat;
    uint32_t     *segdat232 = (uint32_t *) segdat2;
    int           count;
    int           type;
    uint32_t      oldss;
    uint32_t      oldsp;
    uint32_t      newsp;
    uint32_t      oldsp2;
    uint32_t      oldss_limit_high = cpu_state.seg_ss.limit_high;
    const x86seg *dt;

    if ((msw & 1) && !(cpu_state.eflags & VM_FLAG)) {
        x86seg_log("Protected mode CS load! %04X\n", seg);
        if (!(seg & 0xfffc)) {
            x86gpf("loadcscall(): Protected mode selector is zero", 0);
            return;
        }
        addr = seg & 0xfff8;
        dt   = (seg & 0x0004) ? &ldt : &gdt;
        if ((addr + 7) > dt->limit) {
            x86gpf("loadcscall(): Selector > DT limit", seg & 0xfffc);
            return;
        }
        addr += dt->base;
        read_descriptor(addr, segdat, segdat32, 1);
        if (cpu_state.abrt)
            return;
        type  = segdat[2] & 0x0f00;
        newpc = segdat[0];
        if (type & 0x0800)
            newpc |= segdat[3] << 16;

        x86seg_log("Code seg call - %04X - %04X %04X %04X\n", seg, segdat[0], segdat[1], segdat[2]);
        if (segdat[2] & 0x1000) {
            /* Direct call to a code segment (no gate). */
            if (!(segdat[2] & 0x0400)) { /* Not conforming */
                if ((seg & 0x0003) > CPL) {
                    x86gpf("loadcscall(): Non-conforming RPL > CPL", seg & 0xfffc);
                    return;
                }
                if (CPL != DPL) {
                    x86gpf("loadcscall(): Non-conforming CPL != DPL", seg & 0xfffc);
                    return;
                }
            }
            if (CPL < DPL) {
                x86gpf("loadcscall(): CPL < DPL", seg & 0xfffc);
                return;
            }
            if (!(segdat[2] & 0x8000)) {
                x86np("Load CS call not present", seg & 0xfffc);
                return;
            }
            set_use32(segdat[3] & 0x0040);

            cpl_override = 1;
            writememw(0, addr + 4, segdat[2] | 0x100); /* Set accessed bit */
            cpl_override = 0;

            /* Conforming segments don't change CPL, so preserve existing CPL */
            if (segdat[2] & 0x0400) {
                seg       = (seg & 0xfffc) | CPL;
                segdat[2] = (segdat[2] & ~(3 << (5 + 8))) | (CPL << (5 + 8));
            } else /* On non-conforming segments, set RPL = CPL */
                seg = (seg & 0xfffc) | CPL;
            CS = seg;
            do_seg_load(&cpu_state.seg_cs, segdat);
            if ((CPL == 3) && (oldcpl != 3))
                flushmmucache_nopc();
#ifdef USE_NEW_DYNAREC
            oldcpl = CPL;
#endif
#ifdef ENABLE_X86SEG_LOG
            x86seg_log("Complete\n");
#endif
            cycles -= timing_call_pm;
        } else {
            type = segdat[2] & 0x0f00;
            x86seg_log("Type %03X\n", type);
            switch (type) {
                case 0x0400: /* Call gate */
                case 0x0c00: /* 386 Call gate */
                    x86seg_log("Callgate %08X\n", cpu_state.pc);
                    cgate32 = (type & 0x0800);
                    cgate16 = !cgate32;
#ifndef USE_NEW_DYNAREC
                    oldcs = CS;
#endif
                    /* Number of parameters to copy to the new stack. */
                    count = segdat[2] & 0x001f;
                    if (DPL < CPL) {
                        x86gpf("loadcscall(): ex DPL < CPL", seg & 0xfffc);
                        return;
                    }
                    if (DPL < (seg & 0x0003)) {
                        x86gpf("loadcscall(): ex DPL < RPL", seg & 0xfffc);
                        return;
                    }
                    if (!(segdat[2] & 0x8000)) {
                        x86np("Call gate not present", seg & 0xfffc);
                        return;
                    }
                    seg2 = segdat[1];

                    x86seg_log("New address : %04X:%08X\n", seg2, newpc);
                    if (!(seg2 & 0xfffc)) {
                        x86gpf("loadcscall(): ex selector is NULL", 0);
                        return;
                    }
                    addr = seg2 & 0xfff8;
                    dt   = (seg2 & 0x0004) ? &ldt : &gdt;
                    if ((addr + 7) > dt->limit) {
                        x86gpf("loadcscall(): ex Selector > DT limit", seg2 & 0xfff8);
                        return;
                    }
                    addr += dt->base;
                    read_descriptor(addr, segdat, segdat32, 1);
                    if (cpu_state.abrt)
                        return;

                    x86seg_log("Code seg2 call - %04X - %04X %04X %04X\n", seg2, segdat[0], segdat[1], segdat[2]);
                    if (DPL > CPL) {
                        x86gpf("loadcscall(): ex DPL > CPL", seg2 & 0xfffc);
                        return;
                    }
                    if (!(segdat[2] & 0x8000)) {
                        x86seg_log("Call gate CS not present %04X\n", seg2);
                        x86np("Call gate CS not present", seg2 & 0xfffc);
                        return;
                    }

                    switch (segdat[2] & 0x1f00) {
                        case 0x1800:
                        case 0x1900:
                        case 0x1a00:
                        case 0x1b00: /* Non-conforming code */
                            if (DPL < CPL) {
                                /* Inner-ring transfer: switch to the stack
                                   from the TSS for the target DPL. */
#ifdef USE_NEW_DYNAREC
                                uint16_t oldcs = CS;
#endif
                                oaddr = addr;
                                /* Load new stack */
                                oldss = SS;
                                oldsp = oldsp2 = ESP;
                                cpl_override = 1;
                                if (tr.access & 8) {
                                    /* 32-bit TSS. */
                                    addr  = 4 + tr.base + (DPL << 3);
                                    newss = readmemw(0, addr + 4);
                                    if (cpu_16bitbus) {
                                        newsp = readmemw(0, addr);
                                        newsp |= (readmemw(0, addr + 2) << 16);
                                    } else
                                        newsp = readmeml(0, addr);
                                } else {
                                    /* 16-bit TSS. */
                                    addr  = 2 + tr.base + (DPL * 4);
                                    newss = readmemw(0, addr + 2);
                                    newsp = readmemw(0, addr);
                                }
                                cpl_override = 0;
                                if (cpu_state.abrt)
                                    return;
                                x86seg_log("New stack %04X:%08X\n", newss, newsp);
                                if (!(newss & 0xfffc)) {
                                    x86ts(NULL, newss & 0xfffc);
                                    return;
                                }
                                addr = newss & 0xfff8;
                                dt   = (newss & 0x0004) ? &ldt : &gdt;
                                if ((addr + 7) > dt->limit) {
                                    fatal("Bigger than DT limit %04X %08X %04X CSC SS\n", newss, addr, dt->limit);
                                    x86ts(NULL, newss & ~3);
                                    return;
                                }
                                addr += dt->base;
                                x86seg_log("Read stack seg\n");
                                read_descriptor(addr, segdat2, segdat232, 1);
                                if (cpu_state.abrt)
                                    return;
                                x86seg_log("Read stack seg done!\n");
                                /* The new SS must be a present, writable
                                   data segment at the target DPL. */
                                if (((newss & 0x0003) != DPL) || (DPL2 != DPL)) {
                                    x86ts(NULL, newss & 0xfffc);
                                    return;
                                }
                                if ((segdat2[2] & 0x1a00) != 0x1200) {
                                    x86ts("Call gate loading SS unknown type", newss & 0xfffc);
                                    return;
                                }
                                if (!(segdat2[2] & 0x8000)) {
                                    x86ss("Call gate loading SS not present", newss & 0xfffc);
                                    return;
                                }
                                if (!stack32)
                                    oldsp &= 0xffff;
                                SS = newss;
                                set_stack32((segdat2[3] & 0x0040) ? 1 : 0);
                                if (stack32)
                                    ESP = newsp;
                                else
                                    SP = newsp;

                                do_seg_load(&cpu_state.seg_ss, segdat2);

                                x86seg_log("Set access 1\n");

                                cpl_override = 1;
                                writememw(0, addr + 4, segdat2[2] | 0x100); /* Set accessed bit */
                                cpl_override = 0;

                                CS = seg2;
                                do_seg_load(&cpu_state.seg_cs, segdat);
                                if ((CPL == 3) && (oldcpl != 3))
                                    flushmmucache_nopc();
#ifdef USE_NEW_DYNAREC
                                oldcpl = CPL;
#endif
                                set_use32(segdat[3] & 0x0040);
                                cpu_state.pc = newpc;

                                x86seg_log("Set access 2\n");

                                cpl_override = 1;
                                writememw(0, oaddr + 4, segdat[2] | 0x100); /* Set accessed bit */
                                cpl_override = 0;

                                x86seg_log("Type %04X\n", type);
                                if (type == 0x0c00) {
                                    /* 32-bit gate: push old SS:ESP and copy
                                       'count' dwords of parameters. */
                                    PUSHL_SEL(oldss);
                                    PUSHL(oldsp2);
                                    if (cpu_state.abrt) {
                                        SS  = oldss;
                                        ESP = oldsp2;
#ifdef USE_NEW_DYNAREC
                                        CS = oldcs;
#endif
                                        return;
                                    }
                                    if (count) {
                                        while (count--) {
                                            uint32_t temp_val;

                                            switch (oldss_limit_high - oldsp - (count << 2)) {
                                                default:
                                                case 3:
                                                    /* We are at least an entire DWORD away from the limit,
                                                       read long. */
                                                    PUSHL(readmeml(oldssbase, oldsp + (count << 2)));
                                                    break;
                                                case 2:
                                                    /* We are 3 bytes away from the limit,
                                                       read word + byte. */
                                                    temp_val = readmemw(oldssbase, oldsp + (count << 2));
                                                    temp_val |= (readmemb(oldssbase, oldsp +
                                                                 (count << 2) + 2) << 16);
                                                    PUSHL(temp_val);
                                                    break;
                                                case 1:
                                                    /* We are a WORD away from the limit, read word. */
                                                    PUSHL(readmemw(oldssbase, oldsp + (count << 2)));
                                                    break;
                                                case 0:
                                                    /* We are a BYTE away from the limit, read byte. */
                                                    PUSHL(readmemb(oldssbase, oldsp + (count << 2)));
                                                    break;
                                            }
                                            if (cpu_state.abrt) {
                                                SS  = oldss;
                                                ESP = oldsp2;
#ifdef USE_NEW_DYNAREC
                                                CS = oldcs;
#endif
                                                return;
                                            }
                                        }
                                    }
                                } else {
                                    /* 16-bit gate: push old SS:SP and copy
                                       'count' words of parameters. */
                                    x86seg_log("Stack %04X\n", SP);
                                    PUSHW(oldss);
                                    x86seg_log("Write SS to %04X:%04X\n", SS, SP);
                                    PUSHW(oldsp2);
                                    if (cpu_state.abrt) {
                                        SS  = oldss;
                                        ESP = oldsp2;
#ifdef USE_NEW_DYNAREC
                                        CS = oldcs;
#endif
                                        return;
                                    }
                                    x86seg_log("Write SP to %04X:%04X\n", SS, SP);
                                    if (count) {
                                        while (count--) {
                                            switch (oldss_limit_high - (oldsp & 0xffff) - (count << 1)) {
                                                default:
                                                case 1:
                                                    /* We are at least an entire WORD away from the limit,
                                                       read word. */
                                                    PUSHW(readmemw(oldssbase, (oldsp & 0xffff) +
                                                          (count << 1)));
                                                    break;
                                                case 0:
                                                    /* We are a BYTE away from the limit, read byte. */
                                                    PUSHW(readmemb(oldssbase, (oldsp & 0xffff) +
                                                          (count << 1)));
                                                    break;
                                            }
                                            if (cpu_state.abrt) {
                                                SS  = oldss;
                                                ESP = oldsp2;
#ifdef USE_NEW_DYNAREC
                                                CS = oldcs;
#endif
                                                return;
                                            }
                                        }
                                    }
                                }
                                cycles -= timing_call_pm_gate_inner;
                                break;
                            } else if (DPL > CPL) {
                                x86gpf("loadcscall(): Call PM Gate Inner DPL > CPL", seg2 & 0xfffc);
                                return;
                            }
                            fallthrough;
                        case 0x1c00:
                        case 0x1d00:
                        case 0x1e00:
                        case 0x1f00: /* Conforming */
                            CS = seg2;
                            do_seg_load(&cpu_state.seg_cs, segdat);
                            if ((CPL == 3) && (oldcpl != 3))
                                flushmmucache_nopc();
#ifdef USE_NEW_DYNAREC
                            oldcpl = CPL;
#endif
                            set_use32(segdat[3] & 0x0040);
                            cpu_state.pc = newpc;

                            cpl_override = 1;
                            writememw(0, addr + 4, segdat[2] | 0x100); /* Set accessed bit */
                            cpl_override = 0;
                            cycles -= timing_call_pm_gate;
                            break;

                        default:
                            x86gpf("loadcscall(): Unknown subtype", seg2 & 0xfffc);
                            return;
                    }
                    break;

                case 0x0100: /* 286 Task gate */
                case 0x0900: /* 386 Task gate */
#ifdef USE_NEW_DYNAREC
                    cpu_state.pc = old_pc;
#else
                    cpu_state.pc = oxpc;
#endif
                    cpl_override = 1;
                    op_taskswitch286(seg, segdat, segdat[2] & 0x0800);
                    cpl_override = 0;
                    break;

                default:
                    x86gpf("loadcscall(): Unknown type", seg & 0xfffc);
                    return;
            }
        }
    } else {
        /* Real or V86 mode. */
        cpu_state.seg_cs.base       = seg << 4;
        cpu_state.seg_cs.limit      = 0xffff;
        cpu_state.seg_cs.limit_low  = 0;
        cpu_state.seg_cs.limit_high = 0xffff;
        cpu_state.seg_cs.seg        = seg;
        cpu_state.seg_cs.access     = (cpu_state.eflags & VM_FLAG) ? 0xe2 : 0x82;
        cpu_state.seg_cs.ar_high    = 0x10;
        if ((CPL == 3) && (oldcpl != 3))
            flushmmucache_nopc();
#ifdef USE_NEW_DYNAREC
        oldcpl = CPL;
#endif
    }
}
/* Protected-mode far RET with an optional immediate stack adjustment
   ('off'). Pops CS:(E)IP, validates the return code segment, and - when
   returning to an outer ring - also pops SS:(E)SP, switches stacks and
   re-validates all data segment registers against the new CPL. */
void
#ifdef OPS_286_386
pmoderetf_2386(int is32, uint16_t off)
#else
pmoderetf(int is32, uint16_t off)
#endif
{
    uint16_t      segdat[4];
    uint16_t      segdat2[4];
    uint16_t      seg;
    uint16_t      newss;
    uint32_t      newpc;
    uint32_t      newsp;
    uint32_t      addr;
    uint32_t      oaddr;
    uint32_t      oldsp     = ESP;      /* saved so ESP can be rolled back on fault */
    uint32_t     *segdat32  = (uint32_t *) segdat;
    uint32_t     *segdat232 = (uint32_t *) segdat2;
    const x86seg *dt;

    x86seg_log("RETF %i %04X:%04X %08X %04X\n", is32, CS, cpu_state.pc, cr0, cpu_state.eflags);
    if (is32) {
        newpc = POPL();
        seg   = POPL();
    } else {
        x86seg_log("PC read from %04X:%04X\n", SS, SP);
        newpc = POPW();
        x86seg_log("CS read from %04X:%04X\n", SS, SP);
        seg = POPW();
    }
    if (cpu_state.abrt)
        return;

    x86seg_log("Return to %04X:%08X\n", seg, newpc);
    /* RETF can never go to an inner (more privileged) ring. */
    if ((seg & 0x0003) < CPL) {
        ESP = oldsp;
        x86gpf("pmoderetf(): seg < CPL", seg & 0xfffc);
        return;
    }
    if (!(seg & 0xfffc)) {
        x86gpf("pmoderetf(): seg is NULL", 0);
        return;
    }
    addr = seg & 0xfff8;
    dt   = (seg & 0x0004) ? &ldt : &gdt;
    if ((addr + 7) > dt->limit) {
        x86gpf("pmoderetf(): Selector > DT limit", seg & 0xfffc);
        return;
    }
    addr += dt->base;
    read_descriptor(addr, segdat, segdat32, 1);
    if (cpu_state.abrt) {
        ESP = oldsp;
        return;
    }
    oaddr = addr;

    x86seg_log("CPL %i RPL %i %i\n", CPL, seg & 0x0003, is32);

    /* Discard 'off' bytes of caller parameters. */
    if (stack32)
        ESP += off;
    else
        SP += off;

    if (CPL == (seg & 0x0003)) {
        /* Same-ring return. */
        x86seg_log("RETF CPL = RPL %04X\n", segdat[2]);
        switch (segdat[2] & 0x1f00) {
            case 0x1800:
            case 0x1900:
            case 0x1a00:
            case 0x1b00: /* Non-conforming */
                if (CPL != DPL) {
                    ESP = oldsp;
                    x86gpf("pmoderetf(): Non-conforming CPL != DPL", seg & 0xfffc);
                    return;
                }
                break;
            case 0x1c00:
            case 0x1d00:
            case 0x1e00:
            case 0x1f00: /* Conforming */
                if (CPL < DPL) {
                    ESP = oldsp;
                    x86gpf("pmoderetf(): Conforming CPL < DPL", seg & 0xfffc);
                    return;
                }
                break;
            default:
                x86gpf("pmoderetf(): Unknown type", seg & 0xfffc);
                return;
        }
        if (!(segdat[2] & 0x8000)) {
            ESP = oldsp;
            x86np("RETF CS not present", seg & 0xfffc);
            return;
        }

        cpl_override = 1;
        writememw(0, addr + 4, segdat[2] | 0x100); /* Set accessed bit */
        cpl_override = 0;

        cpu_state.pc = newpc;
        if (segdat[2] & 0x0400)
            segdat[2] = (segdat[2] & ~(3 << 13)) | ((seg & 3) << 13);
        CS = seg;
        do_seg_load(&cpu_state.seg_cs, segdat);
        cpu_state.seg_cs.access = (cpu_state.seg_cs.access & ~(3 << 5)) | ((CS & 3) << 5);
        if ((CPL == 3) && (oldcpl != 3))
            flushmmucache_nopc();
#ifdef USE_NEW_DYNAREC
        oldcpl = CPL;
#endif
        set_use32(segdat[3] & 0x0040);
        cycles -= timing_retf_pm;
    } else {
        /* Outer-ring return: also pops the caller's SS:(E)SP. */
        switch (segdat[2] & 0x1f00) {
            case 0x1800:
            case 0x1900:
            case 0x1a00:
            case 0x1b00: /* Non-conforming */
                if ((seg & 0x0003) != DPL) {
                    ESP = oldsp;
                    x86gpf("pmoderetf(): Non-conforming RPL != DPL", seg & 0xfffc);
                    return;
                }
                x86seg_log("RETF non-conforming, %i %i\n", seg & 0x0003, DPL);
                break;
            case 0x1c00:
            case 0x1d00:
            case 0x1e00:
            case 0x1f00: /* Conforming */
                if ((seg & 0x0003) < DPL) {
                    ESP = oldsp;
                    x86gpf("pmoderetf(): Conforming RPL < DPL", seg & 0xfffc);
                    return;
                }
                x86seg_log("RETF conforming, %i %i\n", seg & 0x0003, DPL);
                break;
            default:
                ESP = oldsp;
                x86gpf("pmoderetf(): Unknown type", seg & 0xfffc);
                return;
        }
        if (!(segdat[2] & 0x8000)) {
            ESP = oldsp;
            x86np("RETF CS not present", seg & 0xfffc);
            return;
        }
        if (is32) {
            newsp = POPL();
            newss = POPL();
            if (cpu_state.abrt)
                return;
        } else {
            x86seg_log("SP read from %04X:%04X\n", SS, SP);
            newsp = POPW();
            x86seg_log("SS read from %04X:%04X\n", SS, SP);
            newss = POPW();
            if (cpu_state.abrt)
                return;
        }
        x86seg_log("Read new stack : %04X:%04X (%08X)\n", newss, newsp, ldt.base);
        if (!(newss & 0xfffc)) {
            ESP = oldsp;
            x86gpf("pmoderetf(): New SS selector is zero", newss & ~3);
            return;
        }
        addr = newss & 0xfff8;
        dt   = (newss & 0x0004) ? &ldt : &gdt;
        if ((addr + 7) > dt->limit) {
            ESP = oldsp;
            x86gpf("pmoderetf(): New SS selector > DT limit", newss & 0xfffc);
            return;
        }
        addr += dt->base;
        read_descriptor(addr, segdat2, segdat232, 1);
        if (cpu_state.abrt) {
            ESP = oldsp;
            return;
        }
        x86seg_log("Segment data %04X %04X %04X %04X\n", segdat2[0], segdat2[1], segdat2[2], segdat2[3]);
        /* The new SS must match the return CS RPL and be a present,
           writable data segment at that DPL. */
        if ((newss & 0x0003) != (seg & 0x0003)) {
            ESP = oldsp;
            x86gpf("pmoderetf(): New SS RPL > CS RPL", newss & 0xfffc);
            return;
        }
        if ((segdat2[2] & 0x1a00) != 0x1200) {
            ESP = oldsp;
            x86gpf("pmoderetf(): New SS unknown type", newss & 0xfffc);
            return;
        }
        if (!(segdat2[2] & 0x8000)) {
            ESP = oldsp;
            x86np("RETF loading SS not present", newss & 0xfffc);
            return;
        }
        if (DPL2 != (seg & 3)) {
            ESP = oldsp;
            x86gpf("pmoderetf(): New SS DPL != CS RPL", newss & 0xfffc);
            return;
        }
        SS = newss;
        set_stack32((segdat2[3] & 0x0040) ? 1 : 0);
        if (stack32)
            ESP = newsp;
        else
            SP = newsp;
        do_seg_load(&cpu_state.seg_ss, segdat2);

        cpl_override = 1;
        writememw(0, addr + 4, segdat2[2] | 0x100);  /* Set accessed bit */
        writememw(0, oaddr + 4, segdat[2] | 0x100);  /* Set accessed bit */
        cpl_override = 0;

        /* Conforming segments don't change CPL, so CPL = RPL */
        if (segdat[2] & 0x0400)
            segdat[2] = (segdat[2] & ~(3 << 13)) | ((seg & 3) << 13);
        cpu_state.pc = newpc;
        CS           = seg;
        do_seg_load(&cpu_state.seg_cs, segdat);
        if ((CPL == 3) && (oldcpl != 3))
            flushmmucache_nopc();
#ifdef USE_NEW_DYNAREC
        oldcpl = CPL;
#endif
        set_use32(segdat[3] & 0x0040);

        /* Apply the immediate adjustment on the NEW stack as well. */
        if (stack32)
            ESP += off;
        else
            SP += off;

        /* Data segments no longer accessible at the new CPL get nulled. */
        check_seg_valid(&cpu_state.seg_ds);
        check_seg_valid(&cpu_state.seg_es);
        check_seg_valid(&cpu_state.seg_fs);
        check_seg_valid(&cpu_state.seg_gs);

        cycles -= timing_retf_pm_outer;
    }
}
/* Deliver interrupt/exception 'num' in protected mode. 'soft' is non-zero
   for software INT instructions (which are subject to the gate DPL check
   and, in V86 mode, the IOPL check). Handles interrupt/trap gates (with an
   inner-ring stack switch when the target CS is more privileged, and the
   full V86 segment save/clear) and task gates. Vector 8 past the IDT limit
   is treated as a triple fault and resets the machine. */
void
#ifdef OPS_286_386
pmodeint_2386(int num, int soft)
#else
pmodeint(int num, int soft)
#endif
{
    uint16_t      segdat[4];
    uint16_t      segdat2[4];
    uint16_t      segdat3[4];
    uint16_t      newss;
    uint16_t      seg = 0;
    int           type;
    int           new_cpl;
    uint32_t      addr;
    uint32_t      oaddr;
    uint32_t      oldss;
    uint32_t      oldsp;
    uint32_t      newsp;
    uint32_t     *segdat32  = (uint32_t *) segdat;
    uint32_t     *segdat232 = (uint32_t *) segdat2;
    uint32_t     *segdat332 = (uint32_t *) segdat3;
    const x86seg *dt;

    if ((cpu_state.eflags & VM_FLAG) && (IOPL != 3) && soft) {
        x86seg_log("V86 banned int\n");
        x86gpf("pmodeint(): V86 banned int", 0);
        return;
    }
    addr = (num << 3);
    if ((addr + 7) > idt.limit) {
        if (num == 0x08) {
            /* Triple fault - reset! */
            softresetx86();
            cpu_set_edx();
        } else if (num == 0x0d)
            op_pmodeint(8, 0);
        else
            x86gpf("pmodeint(): Vector > IDT limit", (num << 3) + 2 + !soft);
        x86seg_log("addr >= IDT.limit\n");
        return;
    }
    addr += idt.base;
    read_descriptor(addr, segdat, segdat32, 1);
    if (cpu_state.abrt) {
        x86seg_log("Abrt reading from %08X\n", addr);
        return;
    }
    oaddr = addr;

    x86seg_log("Addr %08X seg %04X %04X %04X %04X\n", addr, segdat[0], segdat[1], segdat[2], segdat[3]);
    if (!(segdat[2] & 0x1f00)) {
        /* This fires on all V86 interrupts in EMM386. Mark as expected to prevent code churn */
        if (cpu_state.eflags & VM_FLAG)
            x86gpf_expected("pmodeint(): Expected vector descriptor with bad type", (num << 3) + 2);
        else
            x86gpf("pmodeint(): Vector descriptor with bad type", (num << 3) + 2);
        return;
    }
    /* Gate DPL check applies to software interrupts only. */
    if ((DPL < CPL) && soft) {
        x86gpf("pmodeint(): Vector DPL < CPL", (num << 3) + 2);
        return;
    }
    type = segdat[2] & 0x1f00;
    /* 386-style (32-bit) gates don't exist on the 286. */
    if (((type == 0x0e00) || (type == 0x0f00)) && !is386) {
        x86gpf("pmodeint(): Gate type illegal on 286", seg & 0xfffc);
        return;
    }
    switch (type) {
        case 0x0600:
        case 0x0700:
        case 0x0e00:
        case 0x0f00: /* Interrupt and trap gates */
            intgatesize = (type >= 0x0800) ? 32 : 16;
            if (!(segdat[2] & 0x8000)) {
                x86np("Int gate not present", (num << 3) | 2);
                return;
            }
            seg     = segdat[1];
            new_cpl = seg & 0x0003;

            addr = seg & 0xfff8;
            dt   = (seg & 0x0004) ? &ldt : &gdt;
            if ((addr + 7) > dt->limit) {
                x86gpf("pmodeint(): Interrupt or trap gate selector > DT limit", seg & 0xfffc);
                return;
            }
            addr += dt->base;
            read_descriptor(addr, segdat2, segdat232, 1);
            if (cpu_state.abrt)
                return;
            oaddr = addr;

            if (DPL2 > CPL) {
                x86gpf("pmodeint(): Interrupt or trap gate DPL > CPL", seg & 0xfffc);
                return;
            }
            switch (segdat2[2] & 0x1f00) {
                case 0x1800:
                case 0x1900:
                case 0x1a00:
                case 0x1b00: /* Non-conforming */
                    if (DPL2 < CPL) {
                        /* Inner-ring interrupt: switch to the DPL2 stack
                           from the TSS. */
                        if (!(segdat2[2] & 0x8000)) {
                            x86np("Int gate CS not present", segdat[1] & 0xfffc);
                            return;
                        }
                        if ((cpu_state.eflags & VM_FLAG) && DPL2) {
                            x86gpf("pmodeint(): Interrupt or trap gate non-zero DPL in V86 mode", segdat[1] & 0xfffc);
                            return;
                        }
                        /* Load new stack */
                        oldss        = SS;
                        oldsp        = ESP;
                        cpl_override = 1;
                        if (tr.access & 8) {
                            /* 32-bit TSS. */
                            addr  = 4 + tr.base + (DPL2 << 3);
                            newss = readmemw(0, addr + 4);
                            newsp = readmeml(0, addr);
                        } else {
                            /* 16-bit TSS. */
                            addr  = 2 + tr.base + (DPL2 << 2);
                            newss = readmemw(0, addr + 2);
                            newsp = readmemw(0, addr);
                        }
                        cpl_override = 0;
                        if (!(newss & 0xfffc)) {
                            x86ss("pmodeint(): Interrupt or trap gate stack segment is NULL", newss & 0xfffc);
                            return;
                        }
                        addr = newss & 0xfff8;
                        dt   = (newss & 0x0004) ? &ldt : &gdt;
                        if ((addr + 7) > dt->limit) {
                            x86ss("pmodeint(): Interrupt or trap gate stack segment > DT", newss & 0xfffc);
                            return;
                        }
                        addr += dt->base;
                        read_descriptor(addr, segdat3, segdat332, 1);
                        if (cpu_state.abrt)
                            return;
                        /* New SS must be a present, writable data segment
                           at the gate target's DPL. */
                        if ((newss & 3) != DPL2) {
                            x86ss("pmodeint(): Interrupt or trap gate tack segment RPL > DPL", newss & 0xfffc);
                            return;
                        }
                        if (DPL3 != DPL2) {
                            x86ss("pmodeint(): Interrupt or trap gate tack segment DPL > DPL", newss & 0xfffc);
                            return;
                        }
                        if ((segdat3[2] & 0x1a00) != 0x1200) {
                            x86ss("pmodeint(): Interrupt or trap gate stack segment bad type", newss & 0xfffc);
                            return;
                        }
                        if (!(segdat3[2] & 0x8000)) {
                            x86np("Int gate loading SS not present", newss & 0xfffc);
                            return;
                        }
                        SS = newss;
                        set_stack32((segdat3[3] & 0x0040) ? 1 : 0);
                        if (stack32)
                            ESP = newsp;
                        else
                            SP = newsp;
                        do_seg_load(&cpu_state.seg_ss, segdat3);

                        cpl_override = 1;
                        writememw(0, addr + 4, segdat3[2] | 0x100); /* Set accessed bit */
                        cpl_override = 0;

                        x86seg_log("New stack %04X:%08X\n", SS, ESP);
                        cpl_override = 1;
                        if (type >= 0x0800) {
                            /* 32-bit gate frame. Leaving V86 also saves
                               (and then clears) GS/FS/DS/ES. */
                            if (cpu_state.eflags & VM_FLAG) {
                                PUSHL_SEL(GS);
                                PUSHL_SEL(FS);
                                PUSHL_SEL(DS);
                                PUSHL_SEL(ES);
                                if (cpu_state.abrt)
                                    return;
                                op_loadseg(0, &cpu_state.seg_ds);
                                op_loadseg(0, &cpu_state.seg_es);
                                op_loadseg(0, &cpu_state.seg_fs);
                                op_loadseg(0, &cpu_state.seg_gs);
                            }
                            PUSHL_SEL(oldss);
                            PUSHL(oldsp);
                            PUSHL(cpu_state.flags | (cpu_state.eflags << 16));
                            PUSHL_SEL(CS);
                            PUSHL(cpu_state.pc);
                            if (cpu_state.abrt)
                                return;
                        } else {
                            /* 16-bit gate frame. */
                            PUSHW(oldss);
                            PUSHW(oldsp);
                            PUSHW(cpu_state.flags);
                            PUSHW(CS);
                            PUSHW(cpu_state.pc);
                            if (cpu_state.abrt)
                                return;
                        }
                        cpl_override = 0;
                        cpu_state.seg_cs.access = 0x80;
                        cycles -= timing_int_pm_outer - timing_int_pm;
                        break;
                    } else if (DPL2 != CPL) {
                        x86gpf("pmodeint(): DPL != CPL", seg & 0xfffc);
                        return;
                    }
                    fallthrough;
                case 0x1c00:
                case 0x1d00:
                case 0x1e00:
                case 0x1f00: /* Conforming */
                    if (!(segdat2[2] & 0x8000)) {
                        x86np("Int gate CS not present", segdat[1] & 0xfffc);
                        return;
                    }
                    if ((cpu_state.eflags & VM_FLAG) && (DPL2 < CPL)) {
                        x86gpf("pmodeint(): DPL < CPL in V86 mode", seg & ~0xfffc);
                        return;
                    }
                    /* NOTE(review): uses '> 0x0800' where the intgatesize
                       test above uses '>= 0x0800' - equivalent here since
                       the only types reaching this switch are 0x600/0x700/
                       0xe00/0xf00, but the inconsistency is worth noting. */
                    if (type > 0x0800) {
                        PUSHL(cpu_state.flags | (cpu_state.eflags << 16));
                        PUSHL_SEL(CS);
                        PUSHL(cpu_state.pc);
                        if (cpu_state.abrt)
                            return;
                    } else {
                        PUSHW(cpu_state.flags);
                        PUSHW(CS);
                        PUSHW(cpu_state.pc);
                        if (cpu_state.abrt)
                            return;
                    }
                    new_cpl = CS & 3;
                    break;
                default:
                    x86gpf("pmodeint(): Unknown type", seg & 0xfffc);
                    return;
            }
            do_seg_load(&cpu_state.seg_cs, segdat2);
            CS = (seg & 0xfffc) | new_cpl;
            cpu_state.seg_cs.access = (cpu_state.seg_cs.access & ~0x60) | (new_cpl << 5);
            if ((CPL == 3) && (oldcpl != 3))
                flushmmucache_nopc();
#ifdef USE_NEW_DYNAREC
            oldcpl = CPL;
#endif
            if (type > 0x0800)
                cpu_state.pc = segdat[0] | (segdat[3] << 16);
            else
                cpu_state.pc = segdat[0];
            set_use32(segdat2[3] & 0x40);

            cpl_override = 1;
            writememw(0, oaddr + 4, segdat2[2] | 0x100); /* Set accessed bit */
            cpl_override = 0;

            /* Interrupt delivery always leaves V86 mode. */
            cpu_state.eflags &= ~VM_FLAG;
            cpu_cur_status &= ~CPU_STATUS_V86;
            /* Interrupt gates (bit 8 clear) also mask IF; trap gates don't. */
            if (!(type & 0x100))
                cpu_state.flags &= ~I_FLAG;
            cpu_state.flags &= ~(T_FLAG | NT_FLAG);
            cycles -= timing_int_pm;
            break;

        case 0x500: /* Task gate */
            seg  = segdat[1];
            addr = seg & 0xfff8;
            dt   = (seg & 0x0004) ? &ldt : &gdt;
            if ((addr + 7) > dt->limit) {
                x86gpf("pmodeint(): Task gate selector > DT limit", seg & 0xfffc);
                return;
            }
            addr += dt->base;
            read_descriptor(addr, segdat2, segdat232, 1);
            if (cpu_state.abrt)
                return;
            if (!(segdat2[2] & 0x8000)) {
                x86np("Int task gate not present", segdat[1] & 0xfffc);
                return;
            }
            optype       = OPTYPE_INT;
            cpl_override = 1;
            op_taskswitch286(seg, segdat2, segdat2[2] & 0x0800);
            cpl_override = 0;
            break;

        default:
            x86gpf("Protected mode interrupt unknown type", seg & 0xfffc);
            return;
    }
}
/* Protected-mode IRET: pops CS:(E)IP and (E)FLAGS from the stack and
   dispatches on the situation — V86-mode IRET (IOPL 3 only), nested-task
   return via the TSS back-link (NT flag), IRETD returning to V86 mode,
   same-privilege return, or return to an outer privilege level (which also
   reloads SS:(E)SP and revalidates the data segment registers).
   On any fault the saved ESP (oldsp) is restored so the exception handler
   sees an unmodified stack pointer. */
void
#ifdef OPS_286_386
pmodeiret_2386(int is32)
#else
pmodeiret(int is32)
#endif
{
    uint16_t newss;
    uint16_t seg = 0;
    uint16_t segdat[4];
    uint16_t segdat2[4];
    uint16_t segs[4];
    uint32_t tempflags;
    uint32_t flagmask;
    uint32_t newpc;
    uint32_t newsp;
    uint32_t addr;
    uint32_t oaddr;
    uint32_t oldsp = ESP;
    uint32_t *segdat32 = (uint32_t *) segdat;
    uint32_t *segdat232 = (uint32_t *) segdat2;
    const x86seg *dt;

    /* IRET executed while already in V86 mode: only legal with IOPL 3,
       then behaves like a real-mode IRET (no descriptor checks). */
    if (is386 && (cpu_state.eflags & VM_FLAG)) {
        if (IOPL != 3) {
            x86gpf("Protected mode IRET: IOPL != 3", 0);
            return;
        }
#ifndef USE_NEW_DYNAREC
        oxpc = cpu_state.pc;
#endif
        if (is32) {
            newpc = POPL();
            seg = POPL();
            tempflags = POPL();
        } else {
            newpc = POPW();
            seg = POPW();
            tempflags = POPW();
        }
        if (cpu_state.abrt)
            return;
        cpu_state.pc = newpc;
        /* Real-mode style CS reload: base = selector << 4, 64 KB limit. */
        cpu_state.seg_cs.base = seg << 4;
        cpu_state.seg_cs.limit = 0xffff;
        cpu_state.seg_cs.limit_low = 0;
        cpu_state.seg_cs.limit_high = 0xffff;
        cpu_state.seg_cs.access |= 0x80;
        cpu_state.seg_cs.ar_high = 0x10;
        CS = seg;
        /* IOPL (0x3000) is preserved; only the arithmetic/control bits
           from the popped image are taken. Bit 1 is always set. */
        cpu_state.flags = (cpu_state.flags & 0x3000) | (tempflags & 0xcfd5) | 2;
        cycles -= timing_iret_rm;
        return;
    }

    /* NT set: return from a nested task through the back-link selector
       stored at offset 0 of the current TSS. */
    if (cpu_state.flags & NT_FLAG) {
        cpl_override = 1;
        seg = readmemw(tr.base, 0);
        cpl_override = 0;
        addr = seg & 0xfff8;
        if (seg & 0x0004) {
            /* A TSS selector must reference the GDT. */
            x86seg_log("TS LDT %04X %04X IRET\n", seg, gdt.limit);
            x86ts("pmodeiret(): Selector points to LDT", seg & 0xfffc);
            return;
        } else {
            if ((addr + 7) > gdt.limit) {
                x86ts(NULL, seg & 0xfffc);
                return;
            }
            addr += gdt.base;
        }
        read_descriptor(addr, segdat, segdat32, 1);
        cpl_override = 1;
        /* segdat[2] & 0x0800 distinguishes a 386 (32-bit) TSS. */
        op_taskswitch286(seg, segdat, segdat[2] & 0x0800);
        cpl_override = 0;
        return;
    }

#ifndef USE_NEW_DYNAREC
    oxpc = cpu_state.pc;
#endif
    /* Build the mask of flag bits this IRET may modify: IOPL only at
       CPL 0, IF only when IOPL >= CPL. */
    flagmask = 0xffff;
    if (CPL != 0)
        flagmask &= ~0x3000;
    if (IOPL < CPL)
        flagmask &= ~0x200;

    if (is32) {
        newpc = POPL();
        seg = POPL();
        tempflags = POPL();
        if (cpu_state.abrt) {
            ESP = oldsp;
            return;
        }
        /* IRETD with VM set in the popped EFLAGS image: return to V86
           mode, popping ESP, SS and all four data segment selectors. */
        if (is386 && ((tempflags >> 16) & VM_FLAG)) {
            newsp = POPL();
            newss = POPL();
            segs[0] = POPL();
            segs[1] = POPL();
            segs[2] = POPL();
            segs[3] = POPL();
            if (cpu_state.abrt) {
                ESP = oldsp;
                return;
            }
            cpu_state.eflags = tempflags >> 16;
            cpu_cur_status |= CPU_STATUS_V86;
            op_loadseg(segs[0], &cpu_state.seg_es);
            do_seg_v86_init(&cpu_state.seg_es);
            op_loadseg(segs[1], &cpu_state.seg_ds);
            do_seg_v86_init(&cpu_state.seg_ds);
            cpu_cur_status |= CPU_STATUS_NOTFLATDS;
            op_loadseg(segs[2], &cpu_state.seg_fs);
            do_seg_v86_init(&cpu_state.seg_fs);
            op_loadseg(segs[3], &cpu_state.seg_gs);
            do_seg_v86_init(&cpu_state.seg_gs);
            cpu_state.pc = newpc & 0xffff;
            cpu_state.seg_cs.base = seg << 4;
            cpu_state.seg_cs.limit = 0xffff;
            cpu_state.seg_cs.limit_low = 0;
            cpu_state.seg_cs.limit_high = 0xffff;
            CS = seg;
            /* V86 code segment: DPL 3, present, writable data-style access. */
            cpu_state.seg_cs.access = 0xe2;
            cpu_state.seg_cs.ar_high = 0x10;
            if ((CPL == 3) && (oldcpl != 3))
                flushmmucache_nopc();
#ifdef USE_NEW_DYNAREC
            oldcpl = CPL;
#endif
            ESP = newsp;
            op_loadseg(newss, &cpu_state.seg_ss);
            do_seg_v86_init(&cpu_state.seg_ss);
            cpu_cur_status |= CPU_STATUS_NOTFLATSS;
            use32 = 0;
            cpu_cur_status &= ~CPU_STATUS_USE32;
            cpu_state.flags = (tempflags & 0xffd5) | 2;
            cycles -= timing_iret_v86;
            return;
        }
    } else {
        newpc = POPW();
        seg = POPW();
        tempflags = POPW();
        if (cpu_state.abrt) {
            ESP = oldsp;
            return;
        }
    }

    /* Validate the return CS selector. */
    if (!(seg & 0xfffc)) {
        ESP = oldsp;
        x86gpf("pmodeiret(): Selector is NULL", 0);
        return;
    }

    addr = seg & 0xfff8;
    dt = (seg & 0x0004) ? &ldt : &gdt;
    if ((addr + 7) > dt->limit) {
        ESP = oldsp;
        x86gpf("pmodeiret(): Selector > DT limit", seg & 0xfffc);
        return;
    }
    addr += dt->base;

    /* IRET may only return outward: RPL must be >= CPL. */
    if ((seg & 0x0003) < CPL) {
        ESP = oldsp;
        x86gpf("pmodeiret(): RPL < CPL", seg & 0xfffc);
        return;
    }

    read_descriptor(addr, segdat, segdat32, 1);
    if (cpu_state.abrt) {
        ESP = oldsp;
        return;
    }

    switch (segdat[2] & 0x1f00) {
        case 0x1800:
        case 0x1900:
        case 0x1a00:
        case 0x1b00: /* Non-conforming code */
            if ((seg & 0x0003) != DPL) {
                ESP = oldsp;
                x86gpf("pmodeiret(): Non-conforming RPL != DPL", seg & 0xfffc);
                return;
            }
            break;
        case 0x1C00:
        case 0x1D00:
        case 0x1E00:
        case 0x1F00: /* Conforming code */
            if ((seg & 0x0003) < DPL) {
                ESP = oldsp;
                x86gpf("pmodeiret(): Conforming RPL < DPL", seg & ~3);
                return;
            }
            break;
        default:
            ESP = oldsp;
            x86gpf("pmodeiret(): Unknown type", seg & 0xfffc);
            return;
    }

    if (!(segdat[2] & 0x8000)) {
        ESP = oldsp;
        x86np("IRET CS not present", seg & 0xfffc);
        return;
    }

    if ((seg & 0x0003) == CPL) {
        /* Return to the same privilege level: just reload CS. */
        CS = seg;
        do_seg_load(&cpu_state.seg_cs, segdat);
        cpu_state.seg_cs.access = (cpu_state.seg_cs.access & ~0x60) | ((CS & 0x0003) << 5);
        if ((CPL == 3) && (oldcpl != 3))
            flushmmucache_nopc();
#ifdef USE_NEW_DYNAREC
        oldcpl = CPL;
#endif
        set_use32(segdat[3] & 0x0040);
        cpl_override = 1;
        writememw(0, addr + 4, segdat[2] | 0x100); /* Set accessed bit */
        cpl_override = 0;
        cycles -= timing_iret_pm;
    } else { /* Return to outer level */
        oaddr = addr;
        x86seg_log("Outer level\n");
        /* Pop the outer level's SS:(E)SP. */
        if (is32) {
            newsp = POPL();
            newss = POPL();
            if (cpu_state.abrt) {
                ESP = oldsp;
                return;
            }
        } else {
            newsp = POPW();
            newss = POPW();
            if (cpu_state.abrt) {
                ESP = oldsp;
                return;
            }
        }
        x86seg_log("IRET load stack %04X:%04X\n", newss, newsp);
        if (!(newss & 0xfffc)) {
            ESP = oldsp;
            x86gpf("pmodeiret(): New SS selector is zero", newss & 0xfffc);
            return;
        }
        addr = newss & 0xfff8;
        dt = (newss & 0x0004) ? &ldt : &gdt;
        if ((addr + 7) > dt->limit) {
            ESP = oldsp;
            x86gpf("pmodeiret(): New SS selector > DT limit", newss & 0xfffc);
            return;
        }
        addr += dt->base;
        read_descriptor(addr, segdat2, segdat232, 1);
        if (cpu_state.abrt) {
            ESP = oldsp;
            return;
        }
        if ((newss & 3) != (seg & 3)) {
            /* BUGFIX: restore the full 32-bit ESP like every other fault
               path here — 'SP = oldsp' only restored the low 16 bits,
               leaving ESP corrupted after 32-bit pops. */
            ESP = oldsp;
            x86gpf("pmodeiret(): New SS RPL > CS RPL", newss & 0xfffc);
            return;
        }
        if ((segdat2[2] & 0x1a00) != 0x1200) {
            ESP = oldsp;
            x86gpf("pmodeiret(): New SS bad type", newss & 0xfffc);
            return;
        }
        if (DPL2 != (seg & 0x0003)) {
            ESP = oldsp;
            x86gpf("pmodeiret(): New SS DPL != CS RPL", newss & 0xfffc);
            return;
        }
        if (!(segdat2[2] & 0x8000)) {
            ESP = oldsp;
            x86np("IRET loading SS not present", newss & 0xfffc);
            return;
        }
        SS = newss;
        set_stack32((segdat2[3] & 0x40) ? 1 : 0);
        if (stack32)
            ESP = newsp;
        else
            SP = newsp;
        do_seg_load(&cpu_state.seg_ss, segdat2);
        cpl_override = 1;
        writememw(0, addr + 4, segdat2[2] | 0x100); /* Set accessed bit */
        writememw(0, oaddr + 4, segdat[2] | 0x100); /* Set accessed bit */
        cpl_override = 0;
        /* Conforming segments don't change CPL, so CPL = RPL */
        if (segdat[2] & 0x0400)
            segdat[2] = (segdat[2] & ~(3 << 13)) | ((seg & 3) << 13);
        CS = seg;
        do_seg_load(&cpu_state.seg_cs, segdat);
        cpu_state.seg_cs.access = (cpu_state.seg_cs.access & ~0x60) | ((CS & 3) << 5);
        if ((CPL == 3) && (oldcpl != 3))
            flushmmucache_nopc();
#ifdef USE_NEW_DYNAREC
        oldcpl = CPL;
#endif
        set_use32(segdat[3] & 0x40);
        /* Data segments whose DPL is below the new (lower) privilege
           must be invalidated. */
        check_seg_valid(&cpu_state.seg_ds);
        check_seg_valid(&cpu_state.seg_es);
        check_seg_valid(&cpu_state.seg_fs);
        check_seg_valid(&cpu_state.seg_gs);
        cycles -= timing_iret_pm_outer;
    }
    cpu_state.pc = newpc;
    cpu_state.flags = (cpu_state.flags & ~flagmask) | (tempflags & flagmask & 0xffd5) | 2;
    if (is32)
        cpu_state.eflags = tempflags >> 16;
}
/* Hardware task switch via a 286 (16-bit) or 386 (32-bit) TSS.
   seg/segdat describe the incoming TSS; is32 selects the TSS layout.
   Saves the outgoing machine state into the current TSS (tr), manages
   the busy bit and back-link according to optype (JMP/CALL/INT/IRET),
   then loads the full register/segment state from the new TSS. */
void
#ifdef OPS_286_386
taskswitch286_2386(uint16_t seg, uint16_t *segdat, int is32)
#else
taskswitch286(uint16_t seg, uint16_t *segdat, int is32)
#endif
{
    uint16_t tempw;
    uint16_t new_ldt;
    uint16_t new_es;
    uint16_t new_cs;
    uint16_t new_ss;
    uint16_t new_ds;
    uint16_t new_fs;
    uint16_t new_gs;
    uint16_t segdat2[4];
    uint32_t base;
    uint32_t limit;
    uint32_t templ;
    uint32_t new_cr3 = 0;
    uint32_t new_eax;
    uint32_t new_ebx;
    uint32_t new_ecx;
    uint32_t new_edx;
    uint32_t new_esp;
    uint32_t new_ebp;
    uint32_t new_esi;
    uint32_t new_edi;
    uint32_t new_pc;
    uint32_t new_flags;
    uint32_t t_bit;
    uint32_t addr;
    uint32_t *segdat232 = (uint32_t *) segdat2;
    const x86seg *dt;

    /* Decode the new TSS base/limit from its descriptor; on a 386 the
       descriptor carries extra base (bits 24-31) and limit (bits 16-19). */
    base = segdat[1] | ((segdat[2] & 0x00ff) << 16);
    limit = segdat[0];
    if (is386) {
        base |= (segdat[3] >> 8) << 24;
        limit |= (segdat[3] & 0x000f) << 16;
    }

    if (is32) {
        /* --- 386 (32-bit) TSS --- */
        /* A 32-bit TSS must hold at least 104 bytes. */
        if (limit < 103) {
            x86ts("taskswitch286(): limit < 103", seg);
            return;
        }
        /* Mark the incoming TSS busy for JMP/CALL/INT. */
        if ((optype == JMP) || (optype == CALL) || (optype == OPTYPE_INT)) {
            if (tr.seg & 0x0004)
                tempw = readmemw(ldt.base, (seg & 0xfff8) + 4);
            else
                tempw = readmemw(gdt.base, (seg & 0xfff8) + 4);
            if (cpu_state.abrt)
                return;
            tempw |= 0x0200;
            if (tr.seg & 0x0004)
                writememw(ldt.base, (seg & 0xfff8) + 4, tempw);
            else
                writememw(gdt.base, (seg & 0xfff8) + 4, tempw);
        }
        if (cpu_state.abrt)
            return;
        /* IRET leaves the nested-task chain. */
        if (optype == IRET)
            cpu_state.flags &= ~NT_FLAG;

        cpu_386_flags_rebuild();
        /* Save the outgoing state into the current (old) TSS. */
        writememl(tr.base, 0x1C, cr3);
        writememl(tr.base, 0x20, cpu_state.pc);
        writememl(tr.base, 0x24, cpu_state.flags | (cpu_state.eflags << 16));
        writememl(tr.base, 0x28, EAX);
        writememl(tr.base, 0x2C, ECX);
        writememl(tr.base, 0x30, EDX);
        writememl(tr.base, 0x34, EBX);
        writememl(tr.base, 0x38, ESP);
        writememl(tr.base, 0x3C, EBP);
        writememl(tr.base, 0x40, ESI);
        writememl(tr.base, 0x44, EDI);
        writememl(tr.base, 0x48, ES);
        writememl(tr.base, 0x4C, CS);
        writememl(tr.base, 0x50, SS);
        writememl(tr.base, 0x54, DS);
        writememl(tr.base, 0x58, FS);
        writememl(tr.base, 0x5C, GS);

        /* JMP/IRET leave the old task: clear its busy bit. */
        if ((optype == JMP) || (optype == IRET)) {
            if (tr.seg & 0x0004)
                tempw = readmemw(ldt.base, (tr.seg & 0xfff8) + 4);
            else
                tempw = readmemw(gdt.base, (tr.seg & 0xfff8) + 4);
            if (cpu_state.abrt)
                return;
            tempw &= ~0x0200;
            if (tr.seg & 0x0004)
                writememw(ldt.base, (tr.seg & 0xfff8) + 4, tempw);
            else
                writememw(gdt.base, (tr.seg & 0xfff8) + 4, tempw);
        }
        if (cpu_state.abrt)
            return;

        /* CALL/INT nest: store the back-link in the new TSS. */
        if ((optype == OPTYPE_INT) || (optype == CALL)) {
            writememl(base, 0, tr.seg);
            if (cpu_state.abrt)
                return;
        }

        /* Load the incoming state from the new TSS. */
        new_cr3 = readmeml(base, 0x1C);
        new_pc = readmeml(base, 0x20);
        new_flags = readmeml(base, 0x24);
        if ((optype == OPTYPE_INT) || (optype == CALL))
            new_flags |= NT_FLAG;
        new_eax = readmeml(base, 0x28);
        new_ecx = readmeml(base, 0x2C);
        new_edx = readmeml(base, 0x30);
        new_ebx = readmeml(base, 0x34);
        new_esp = readmeml(base, 0x38);
        new_ebp = readmeml(base, 0x3C);
        new_esi = readmeml(base, 0x40);
        new_edi = readmeml(base, 0x44);
        new_es = readmemw(base, 0x48);
        new_cs = readmemw(base, 0x4C);
        new_ss = readmemw(base, 0x50);
        new_ds = readmemw(base, 0x54);
        new_fs = readmemw(base, 0x58);
        new_gs = readmemw(base, 0x5C);
        new_ldt = readmemw(base, 0x60);
        t_bit = readmemb(base, 0x64) & 1;

        cr0 |= 8; /* Set CR0.TS */
        cr3 = new_cr3;
        flushmmucache();

        cpu_state.pc = new_pc;
        cpu_state.flags = new_flags;
        cpu_state.eflags = new_flags >> 16;
        cpu_386_flags_extract();

        /* Reload LDTR from its GDT descriptor (honoring granularity). */
        ldt.seg = new_ldt;
        templ = (ldt.seg & ~7) + gdt.base;
        ldt.limit = readmemw(0, templ);
        if (readmemb(0, templ + 6) & 0x80) {
            ldt.limit <<= 12;
            ldt.limit |= 0xfff;
        }
        ldt.base = (readmemw(0, templ + 2)) | (readmemb(0, templ + 4) << 16) | (readmemb(0, templ + 7) << 24);

        if (cpu_state.eflags & VM_FLAG) {
            /* New task runs in V86 mode: real-mode style CS load. */
            op_loadcs(new_cs);
            set_use32(0);
            cpu_cur_status |= CPU_STATUS_V86;
        } else {
            /* Validate and load the new CS descriptor. */
            if (!(new_cs & 0xfffc)) {
                x86ts("taskswitch286(): New CS selector is null", 0);
                return;
            }
            addr = new_cs & 0xfff8;
            dt = (new_cs & 0x0004) ? &ldt : &gdt;
            if ((addr + 7) > dt->limit) {
                x86ts("taskswitch286(): New CS selector > DT limit", new_cs & 0xfffc);
                return;
            }
            addr += dt->base;
            read_descriptor(addr, segdat2, segdat232, 0);
            if (!(segdat2[2] & 0x8000)) {
                x86np("TS loading CS not present", new_cs & 0xfffc);
                return;
            }
            switch (segdat2[2] & 0x1f00) {
                case 0x1800:
                case 0x1900:
                case 0x1a00:
                case 0x1b00: /* Non-conforming */
                    if ((new_cs & 0x0003) != DPL2) {
                        x86ts("TS loading CS RPL != DPL2", new_cs & 0xfffc);
                        return;
                    }
                    break;
                case 0x1c00:
                case 0x1d00:
                case 0x1e00:
                case 0x1f00: /* Conforming */
                    if ((new_cs & 0x0003) < DPL2) {
                        x86ts("TS loading CS RPL < DPL2", new_cs & 0xfffc);
                        return;
                    }
                    break;
                default:
                    x86ts("TS loading CS unknown type", new_cs & 0xfffc);
                    return;
            }
            CS = new_cs;
            do_seg_load(&cpu_state.seg_cs, segdat2);
            if ((CPL == 3) && (oldcpl != 3))
                flushmmucache_nopc();
#ifdef USE_NEW_DYNAREC
            oldcpl = CPL;
#endif
            set_use32(segdat2[3] & 0x0040);
            cpu_cur_status &= ~CPU_STATUS_V86;
        }

        /* Load general registers and the remaining segment registers. */
        EAX = new_eax;
        ECX = new_ecx;
        EDX = new_edx;
        EBX = new_ebx;
        ESP = new_esp;
        EBP = new_ebp;
        ESI = new_esi;
        EDI = new_edi;
        op_loadseg(new_es, &cpu_state.seg_es);
        op_loadseg(new_ss, &cpu_state.seg_ss);
        op_loadseg(new_ds, &cpu_state.seg_ds);
        op_loadseg(new_fs, &cpu_state.seg_fs);
        op_loadseg(new_gs, &cpu_state.seg_gs);

        if (!cpu_use_exec)
            rf_flag_no_clear = 1;
        /* T bit in the new TSS requests a debug trap on first instruction. */
        if (t_bit) {
            if (cpu_use_exec)
                trap = 2;
            else
                trap |= 2;
#ifdef USE_DYNAREC
            cpu_block_end = 1;
#endif
        }
    } else {
        /* --- 286 (16-bit) TSS --- */
        /* A 16-bit TSS must hold at least 44 bytes. */
        if (limit < 43) {
            x86ts(NULL, seg);
            return;
        }
        /* Mark the incoming TSS busy for JMP/CALL/INT. */
        if ((optype == JMP) || (optype == CALL) || (optype == OPTYPE_INT)) {
            if (tr.seg & 0x0004)
                tempw = readmemw(ldt.base, (seg & 0xfff8) + 4);
            else
                tempw = readmemw(gdt.base, (seg & 0xfff8) + 4);
            if (cpu_state.abrt)
                return;
            tempw |= 0x200;
            if (tr.seg & 0x0004)
                writememw(ldt.base, (seg & 0xfff8) + 4, tempw);
            else
                writememw(gdt.base, (seg & 0xfff8) + 4, tempw);
        }
        if (cpu_state.abrt)
            return;
        if (optype == IRET)
            cpu_state.flags &= ~NT_FLAG;

        cpu_386_flags_rebuild();
        /* Save the outgoing 16-bit state into the current TSS. */
        writememw(tr.base, 0x0e, cpu_state.pc);
        writememw(tr.base, 0x10, cpu_state.flags);
        writememw(tr.base, 0x12, AX);
        writememw(tr.base, 0x14, CX);
        writememw(tr.base, 0x16, DX);
        writememw(tr.base, 0x18, BX);
        writememw(tr.base, 0x1a, SP);
        writememw(tr.base, 0x1c, BP);
        writememw(tr.base, 0x1e, SI);
        writememw(tr.base, 0x20, DI);
        writememw(tr.base, 0x22, ES);
        writememw(tr.base, 0x24, CS);
        writememw(tr.base, 0x26, SS);
        writememw(tr.base, 0x28, DS);

        /* JMP/IRET leave the old task: clear its busy bit. */
        if ((optype == JMP) || (optype == IRET)) {
            if (tr.seg & 0x0004)
                tempw = readmemw(ldt.base, (tr.seg & 0xfff8) + 4);
            else
                tempw = readmemw(gdt.base, (tr.seg & 0xfff8) + 4);
            if (cpu_state.abrt)
                return;
            tempw &= ~0x200;
            if (tr.seg & 0x0004)
                writememw(ldt.base, (tr.seg & 0xfff8) + 4, tempw);
            else
                writememw(gdt.base, (tr.seg & 0xfff8) + 4, tempw);
        }
        if (cpu_state.abrt)
            return;

        /* CALL/INT nest: store the back-link in the new TSS. */
        if ((optype == OPTYPE_INT) || (optype == CALL)) {
            writememw(base, 0, tr.seg);
            if (cpu_state.abrt)
                return;
        }

        /* Load the incoming 16-bit state. */
        new_pc = readmemw(base, 0x0e);
        new_flags = readmemw(base, 0x10);
        if ((optype == OPTYPE_INT) || (optype == CALL))
            new_flags |= NT_FLAG;
        new_eax = readmemw(base, 0x12);
        new_ecx = readmemw(base, 0x14);
        new_edx = readmemw(base, 0x16);
        new_ebx = readmemw(base, 0x18);
        new_esp = readmemw(base, 0x1a);
        new_ebp = readmemw(base, 0x1c);
        new_esi = readmemw(base, 0x1e);
        new_edi = readmemw(base, 0x20);
        new_es = readmemw(base, 0x22);
        new_cs = readmemw(base, 0x24);
        new_ss = readmemw(base, 0x26);
        new_ds = readmemw(base, 0x28);
        new_ldt = readmemw(base, 0x2a);

        msw |= 8; /* Set MSW.TS */

        cpu_state.pc = new_pc;
        cpu_state.flags = new_flags;
        cpu_386_flags_extract();

        /* Reload LDTR; 386 descriptors add granularity and high base. */
        ldt.seg = new_ldt;
        templ = (ldt.seg & 0xfff8) + gdt.base;
        ldt.limit = readmemw(0, templ);
        ldt.base = (readmemw(0, templ + 2)) | (readmemb(0, templ + 4) << 16);
        if (is386) {
            if (readmemb(0, templ + 6) & 0x80) {
                ldt.limit <<= 12;
                ldt.limit |= 0xfff;
            }
            ldt.base |= (readmemb(0, templ + 7) << 24);
        }

        /* Validate and load the new CS descriptor. */
        if (!(new_cs & 0xfff8) && !(new_cs & 0x0004)) {
            x86ts(NULL, 0);
            return;
        }
        addr = new_cs & 0xfff8;
        dt = (new_cs & 0x0004) ? &ldt : &gdt;
        if ((addr + 7) > dt->limit) {
            x86ts(NULL, new_cs & 0xfffc);
            return;
        }
        addr += dt->base;
        read_descriptor(addr, segdat2, segdat232, 0);
        if (!(segdat2[2] & 0x8000)) {
            x86np("TS loading CS not present", new_cs & 0xfffc);
            return;
        }
        switch (segdat2[2] & 0x1f00) {
            case 0x1800:
            case 0x1900:
            case 0x1a00:
            case 0x1b00: /* Non-conforming */
                if ((new_cs & 0x0003) != DPL2) {
                    x86ts(NULL, new_cs & 0xfffc);
                    return;
                }
                break;
            case 0x1c00:
            case 0x1d00:
            case 0x1e00:
            case 0x1f00: /* Conforming */
                if ((new_cs & 0x0003) < DPL2) {
                    x86ts(NULL, new_cs & 0xfffc);
                    return;
                }
                break;
            default:
                x86ts(NULL, new_cs & 0xfffc);
                return;
        }
        CS = new_cs;
        do_seg_load(&cpu_state.seg_cs, segdat2);
        if ((CPL == 3) && (oldcpl != 3))
            flushmmucache_nopc();
#ifdef USE_NEW_DYNAREC
        oldcpl = CPL;
#endif
        set_use32(0);

        /* Only the low 16 bits come from a 286 TSS; the upper halves of
           the 32-bit registers are forced to all-ones here. */
        EAX = new_eax | 0xffff0000;
        ECX = new_ecx | 0xffff0000;
        EDX = new_edx | 0xffff0000;
        EBX = new_ebx | 0xffff0000;
        ESP = new_esp | 0xffff0000;
        EBP = new_ebp | 0xffff0000;
        ESI = new_esi | 0xffff0000;
        EDI = new_edi | 0xffff0000;
        op_loadseg(new_es, &cpu_state.seg_es);
        op_loadseg(new_ss, &cpu_state.seg_ss);
        op_loadseg(new_ds, &cpu_state.seg_ds);
        if (is386) {
            /* A 286 TSS has no FS/GS slots: null them on a 386. */
            op_loadseg(0, &cpu_state.seg_fs);
            op_loadseg(0, &cpu_state.seg_gs);
        }
    }

    /* Commit the new task register. */
    tr.seg = seg;
    tr.base = base;
    tr.limit = limit;
    tr.access = segdat[2] >> 8;
    tr.ar_high = segdat[3] & 0xff;

    if (!cpu_use_exec)
        dr[7] &= 0xFFFFFFAA; /* A task switch clears the local DR7 enables. */
}
/* Re-encode a cached segment register back into its raw 8-byte descriptor
   image at addr (used by the Cyrix SMM save/restore path). */
void
#ifdef OPS_286_386
cyrix_write_seg_descriptor_2386(uint32_t addr, x86seg *seg)
#else
cyrix_write_seg_descriptor(uint32_t addr, x86seg *seg)
#endif
{
    uint32_t lim = seg->limit;
    uint32_t dword_lo;
    uint32_t dword_hi;

    /* Granularity bit set: cached limit is in bytes, descriptor wants pages. */
    if (seg->ar_high & 0x80)
        lim >>= 12;

    dword_lo = (lim & 0xffff) | (seg->base << 16);

    dword_hi = (seg->base >> 16) & 0xff;
    dword_hi |= seg->access << 8;
    dword_hi |= lim & 0xf0000;
    dword_hi |= seg->ar_high << 16;
    dword_hi |= seg->base & 0xff000000;

    writememl(0, addr, dword_lo);
    writememl(0, addr + 4, dword_hi);
}
/* Load a segment register from an 8-byte descriptor image followed by a
   16-bit selector at addr (used by the Cyrix SMM restore path). Also
   refreshes the flat-DS/flat-SS status bits the dynarec relies on. */
void
#ifdef OPS_286_386
cyrix_load_seg_descriptor_2386(uint32_t addr, x86seg *seg)
#else
cyrix_load_seg_descriptor(uint32_t addr, x86seg *seg)
#endif
{
    uint16_t desc[4];
    uint16_t sel;
    int flat;

    /* Fetch the four descriptor words, then the selector. */
    for (int i = 0; i < 4; i++)
        desc[i] = readmemw(0, addr + (i << 1));
    sel = readmemw(0, addr + 8);

    if (cpu_state.abrt)
        return;

    do_seg_load(seg, desc);
    seg->seg = sel;
    seg->checked = 0;

    /* A "flat" segment spans the whole 4 GB address space from base 0. */
    flat = (seg->base == 0) && (seg->limit_low == 0) && (seg->limit_high == 0xffffffff);

    if (seg == &cpu_state.seg_ds) {
        if (flat)
            cpu_cur_status &= ~CPU_STATUS_NOTFLATDS;
        else
            cpu_cur_status |= CPU_STATUS_NOTFLATDS;
#ifdef USE_DYNAREC
        codegen_flat_ds = 0;
#endif
    }

    if (seg == &cpu_state.seg_ss) {
        if (flat)
            cpu_cur_status &= ~CPU_STATUS_NOTFLATSS;
        else
            cpu_cur_status |= CPU_STATUS_NOTFLATSS;
        /* The D/B bit of the reloaded SS selects the stack size. */
        set_stack32((desc[3] & 0x40) ? 1 : 0);
#ifdef USE_DYNAREC
        codegen_flat_ss = 0;
#endif
    }
}
``` | /content/code_sandbox/src/cpu/x86seg.c | c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 23,760 |
```c
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#if defined(__APPLE__) && defined(__aarch64__)
# include <pthread.h>
#endif
#include <wchar.h>
#include <math.h>
#ifndef INFINITY
# define INFINITY (__builtin_inff())
#endif
#define HAVE_STDARG_H
#include <86box/86box.h>
#include "cpu.h"
#include "x86.h"
#include "x86_ops.h"
#include "x86seg_common.h"
#include "x86seg.h"
#include "x87_sf.h"
#include "x87.h"
#include <86box/io.h>
#include <86box/mem.h>
#include <86box/nmi.h>
#include <86box/pic.h>
#include <86box/timer.h>
#include <86box/fdd.h>
#include <86box/fdc.h>
#include <86box/machine.h>
#include <86box/plat_fallthrough.h>
#include <86box/gdbstub.h>
#ifdef USE_DYNAREC
# include "codegen.h"
# ifdef USE_NEW_DYNAREC
# include "codegen_backend.h"
# endif
#endif
#ifdef IS_DYNAREC
# undef IS_DYNAREC
#endif
#include "386_common.h"
#if defined(__APPLE__) && defined(__aarch64__)
# include <pthread.h>
#endif
/* Raising cpu_block_end terminates the current interpreted/recompiled block. */
#define CPU_BLOCK_END() cpu_block_end = 1
int cpu_override_dynarec = 0;  /* Forces the interpreter path when set. */
int inrecomp = 0;              /* Nonzero while executing recompiled code. */
int cpu_block_end = 0;         /* Set to end the current execution block. */
int cpu_end_block_after_ins = 0; /* Countdown of instructions before forcing a block end. */
#ifdef ENABLE_386_DYNAREC_LOG
int x386_dynarec_do_log = ENABLE_386_DYNAREC_LOG;
/* printf-style logging for the 386 dynarec, gated on x386_dynarec_do_log. */
void
x386_dynarec_log(const char *fmt, ...)
{
    va_list args;

    if (!x386_dynarec_do_log)
        return;

    va_start(args, fmt);
    pclog_ex(fmt, args);
    va_end(args);
}
#else
# define x386_dynarec_log(fmt, ...)
#endif
/* Decode a 32-bit ModR/M (and optional SIB) effective address into
   cpu_state.eaaddr / cpu_state.ea_seg, advancing cpu_state.pc past the
   consumed bytes. Also primes eal_r/eal_w fast-path pointers when the
   address is page-translatable and doesn't straddle a dword boundary.
   rmdat holds the ModR/M byte in bits 0-7 and following bytes above it. */
static __inline void
fetch_ea_32_long(uint32_t rmdat)
{
    eal_r = eal_w = NULL;
    easeg = cpu_state.ea_seg->base;
    if (cpu_rm == 4) {
        /* rm == 4 selects a SIB byte. */
        uint8_t sib = rmdat >> 8;

        switch (cpu_mod) {
            case 0:
                cpu_state.eaaddr = cpu_state.regs[sib & 7].l;
                cpu_state.pc++;
                break;
            case 1:
                /* disp8, sign-extended. */
                cpu_state.pc++;
                cpu_state.eaaddr = ((uint32_t) (int8_t) getbyte()) + cpu_state.regs[sib & 7].l;
                break;
            case 2:
                /* disp32. */
                cpu_state.eaaddr = (fastreadl(cs + cpu_state.pc + 1)) + cpu_state.regs[sib & 7].l;
                cpu_state.pc += 5;
                break;
        }
        /*SIB byte present*/
        /* base 5 with mod 0 means disp32 with no base register. */
        if ((sib & 7) == 5 && !cpu_mod)
            cpu_state.eaaddr = getlong();
        else if ((sib & 6) == 4 && !cpu_state.ssegs) {
            /* Base ESP (4) or EBP (5) defaults to the SS segment. */
            easeg = ss;
            cpu_state.ea_seg = &cpu_state.seg_ss;
        }
        /* Index 4 means "no index"; otherwise add index << scale. */
        if (((sib >> 3) & 7) != 4)
            cpu_state.eaaddr += cpu_state.regs[(sib >> 3) & 7].l << (sib >> 6);
    } else {
        cpu_state.eaaddr = cpu_state.regs[cpu_rm].l;
        if (cpu_mod) {
            /* EBP-based addressing defaults to the SS segment. */
            if (cpu_rm == 5 && !cpu_state.ssegs) {
                easeg = ss;
                cpu_state.ea_seg = &cpu_state.seg_ss;
            }
            if (cpu_mod == 1) {
                cpu_state.eaaddr += ((uint32_t) (int8_t) (rmdat >> 8));
                cpu_state.pc++;
            } else {
                cpu_state.eaaddr += getlong();
            }
        } else if (cpu_rm == 5) {
            /* mod 0, rm 5: pure disp32. */
            cpu_state.eaaddr = getlong();
        }
    }
    /* Cache direct read/write pointers when the access stays within one
       page (offset <= 0xFFC keeps a dword from crossing the boundary). */
    if (easeg != 0xFFFFFFFF && ((easeg + cpu_state.eaaddr) & 0xFFF) <= 0xFFC) {
        uint32_t addr = easeg + cpu_state.eaaddr;
        if (readlookup2[addr >> 12] != (uintptr_t) -1)
            eal_r = (uint32_t *) (readlookup2[addr >> 12] + addr);
        if (writelookup2[addr >> 12] != (uintptr_t) -1)
            eal_w = (uint32_t *) (writelookup2[addr >> 12] + addr);
    }
}
/* Decode a 16-bit ModR/M effective address into cpu_state.eaaddr /
   cpu_state.ea_seg, advancing cpu_state.pc past the consumed bytes, and
   prime the eal_r/eal_w fast-path pointers when possible. */
static __inline void
fetch_ea_16_long(uint32_t rmdat)
{
    eal_r = eal_w = NULL;
    easeg = cpu_state.ea_seg->base;
    if (!cpu_mod && cpu_rm == 6) {
        /* mod 0, rm 6: pure disp16. */
        cpu_state.eaaddr = getword();
    } else {
        switch (cpu_mod) {
            case 0:
                cpu_state.eaaddr = 0;
                break;
            case 1:
                /* disp8, sign-extended. */
                cpu_state.eaaddr = (uint16_t) (int8_t) (rmdat >> 8);
                cpu_state.pc++;
                break;
            case 2:
                /* disp16. */
                cpu_state.eaaddr = getword();
                break;
        }
        /* Add the register pair for this rm encoding (BX+SI, BX+DI, ...). */
        cpu_state.eaaddr += (*mod1add[0][cpu_rm]) + (*mod1add[1][cpu_rm]);
        /* BP-based forms default to the SS segment. */
        if (mod1seg[cpu_rm] == &ss && !cpu_state.ssegs) {
            easeg = ss;
            cpu_state.ea_seg = &cpu_state.seg_ss;
        }
        cpu_state.eaaddr &= 0xFFFF;
    }
    /* Cache direct read/write pointers when the access stays within one
       page (offset <= 0xFFC keeps a dword from crossing the boundary). */
    if (easeg != 0xFFFFFFFF && ((easeg + cpu_state.eaaddr) & 0xFFF) <= 0xFFC) {
        uint32_t addr = easeg + cpu_state.eaaddr;
        if (readlookup2[addr >> 12] != (uintptr_t) -1)
            eal_r = (uint32_t *) (readlookup2[addr >> 12] + addr);
        if (writelookup2[addr >> 12] != (uintptr_t) -1)
            eal_w = (uint32_t *) (writelookup2[addr >> 12] + addr);
    }
}
/* Decode a 16-bit ModR/M byte into cpu_mod/cpu_reg/cpu_rm and, for memory
   operands, resolve the effective address. Returns 1 from the enclosing
   opcode handler on an abort (page fault, etc.). */
#define fetch_ea_16(rmdat)         \
    cpu_state.pc++;                \
    cpu_mod = (rmdat >> 6) & 3;    \
    cpu_reg = (rmdat >> 3) & 7;    \
    cpu_rm = rmdat & 7;            \
    if (cpu_mod != 3) {            \
        fetch_ea_16_long(rmdat);   \
        if (cpu_state.abrt)        \
            return 1;              \
    }
/* 32-bit variant; note the abort check runs even for register operands. */
#define fetch_ea_32(rmdat)         \
    cpu_state.pc++;                \
    cpu_mod = (rmdat >> 6) & 3;    \
    cpu_reg = (rmdat >> 3) & 7;    \
    cpu_rm = rmdat & 7;            \
    if (cpu_mod != 3) {            \
        fetch_ea_32_long(rmdat);   \
    }                              \
    if (cpu_state.abrt)            \
        return 1

#include "x86_flags.h"

/* Prefetch-queue cost accounting, active only when the emulated CPU
   models a prefetch queue (cpu_prefetch_cycles != 0). */
#define PREFETCH_RUN(instr_cycles, bytes, modrm, reads, reads_l, writes, writes_l, ea32) \
    do {                                                                                 \
        if (cpu_prefetch_cycles)                                                         \
            prefetch_run(instr_cycles, bytes, modrm, reads, reads_l, writes, writes_l, ea32); \
    } while (0)

#define PREFETCH_PREFIX()        \
    do {                         \
        if (cpu_prefetch_cycles) \
            prefetch_prefixes++; \
    } while (0)

#define PREFETCH_FLUSH() prefetch_flush()

/* Names an opcode dispatch table (ops_<name>). */
#define OP_TABLE(name) ops_##name

#if 0
/* Disabled experiment: overlap FPU cycles with integer execution. */
# define CLOCK_CYCLES(c) \
    { \
        if (fpu_cycles > 0) { \
            fpu_cycles -= (c); \
            if (fpu_cycles < 0) { \
                cycles += fpu_cycles; \
            } \
        } else { \
            cycles -= (c); \
        } \
    }

# define CLOCK_CYCLES_FPU(c) cycles -= (c)
# define CONCURRENCY_CYCLES(c) fpu_cycles = (c)
#else
/* Simple model: every instruction just burns its cycle count. */
# define CLOCK_CYCLES(c) cycles -= (c)
# define CLOCK_CYCLES_FPU(c) cycles -= (c)
# define CONCURRENCY_CYCLES(c)
#endif

#define CLOCK_CYCLES_ALWAYS(c) cycles -= (c)
#include "386_ops.h"
/* True when the internal cache may be used: CR0.CD (bit 30) clear and no
   single-step trap pending; with 486 debug registers, also no DR7 enables. */
#ifdef USE_DEBUG_REGS_486
# define CACHE_ON() (!(cr0 & (1 << 30)) && !(cpu_state.flags & T_FLAG) && !(dr[7] & 0xFF))
#else
# define CACHE_ON() (!(cr0 & (1 << 30)) && !(cpu_state.flags & T_FLAG))
#endif

#ifdef USE_DYNAREC
int32_t cycles_main = 0;          /* Cycle budget for the current main-loop slice. */
static int32_t cycles_old = 0;    /* Snapshot of 'cycles' at the last TSC sync. */
static uint64_t tsc_old = 0;      /* Snapshot of 'tsc' at the last TSC sync. */
# ifdef USE_ACYCS
int32_t acycs = 0;                /* Extra cycles accumulated by recompiled code. */
# endif
/* Fold the cycles consumed since the last sync point (cycles_old/tsc_old)
   into the TSC, and run timer processing if the target has been reached. */
void
update_tsc(void)
{
    int cycdiff;
    uint64_t delta;

    /* Cycles burned since cycles_old was captured. */
    cycdiff = cycles_old - cycles;
# ifdef USE_ACYCS
    if (inrecomp)
        cycdiff += acycs;
# endif
    delta = tsc - tsc_old;
    if (delta > 0) {
        /* TSC has changed, this means interim timer processing has happened,
           see how much we still need to add. */
        cycdiff -= delta;
    }
    /* NOTE(review): a negative remaining cycdiff is silently dropped here;
       confirm callers can never reach this point with cycdiff < 0. */
    if (cycdiff > 0)
        tsc += cycdiff;
    if (cycdiff > 0) {
        if (TIMER_VAL_LESS_THAN_VAL(timer_target, (uint32_t) tsc))
            timer_process();
    }
}
/* Interpreter fallback for the dynarec: executes instructions one at a
   time until a block-ending condition (page change, abort, SMI, trap,
   NMI, pending interrupt, INIT) is hit, then delivers any pending debug
   trap via INT 1. */
static __inline void
exec386_dynarec_int(void)
{
    cpu_block_end = 0;
    x86_was_reset = 0;

# ifdef USE_DEBUG_REGS_486
    if (trap & 2) {
# else
    if (trap == 2) {
# endif
        /* Handle the T bit in the new TSS first. */
        CPU_BLOCK_END();
        goto block_ended;
    }

    while (!cpu_block_end) {
# ifndef USE_NEW_DYNAREC
        oldcs = CS;
        oldcpl = CPL;
# endif
        /* Per-instruction setup: default segment DS, no override. */
        cpu_state.oldpc = cpu_state.pc;
        cpu_state.op32 = use32;
        cpu_state.ea_seg = &cpu_state.seg_ds;
        cpu_state.ssegs = 0;

# ifdef USE_DEBUG_REGS_486
        /* Instruction breakpoint hit: raise the debug fault and stop. */
        if (UNLIKELY(cpu_386_check_instruction_fault())) {
            x86gen();
            goto block_ended;
        }
# endif

        fetchdat = fastreadl_fetch(cs + cpu_state.pc);
# ifdef ENABLE_386_DYNAREC_LOG
        if (in_smm)
            x386_dynarec_log("[%04X:%08X] fetchdat = %08X\n", CS, cpu_state.pc, fetchdat);
# endif

        if (!cpu_state.abrt) {
            /* First fetched byte is the opcode; the rest feeds the handler. */
            opcode = fetchdat & 0xFF;
            fetchdat >>= 8;
# ifdef USE_DEBUG_REGS_486
            trap |= !!(cpu_state.flags & T_FLAG);
# else
            trap = cpu_state.flags & T_FLAG;
# endif
            cpu_state.pc++;
# ifdef USE_DEBUG_REGS_486
            cpu_state.eflags &= ~(RF_FLAG);
# endif
            /* Dispatch; op32 selects the 32-bit half of the table. */
            x86_opcodes[(opcode | cpu_state.op32) & 0x3ff](fetchdat);
        }

# ifndef USE_NEW_DYNAREC
        if (!use32)
            cpu_state.pc &= 0xffff;
# endif

# ifdef USE_DEBUG_REGS_486
        if (!cpu_state.abrt) {
            if (!rf_flag_no_clear) {
                cpu_state.eflags &= ~RF_FLAG;
            }
            rf_flag_no_clear = 0;
        }
# endif

        /* End the block when execution leaves the cached code page. */
        if (((cs + cpu_state.pc) >> 12) != pccache)
            CPU_BLOCK_END();

        if (cpu_end_block_after_ins) {
            cpu_end_block_after_ins--;
            if (!cpu_end_block_after_ins)
                CPU_BLOCK_END();
        }

        if (cpu_init)
            CPU_BLOCK_END();

        /* Abort, SMI, single-step trap, NMI or pending maskable interrupt
           all terminate the block so they can be serviced. */
        if (cpu_state.abrt)
            CPU_BLOCK_END();
        if (smi_line)
            CPU_BLOCK_END();
        else if (trap)
            CPU_BLOCK_END();
        else if (nmi && nmi_enable && nmi_mask)
            CPU_BLOCK_END();
        else if ((cpu_state.flags & I_FLAG) && pic.int_pending && !cpu_end_block_after_ins)
            CPU_BLOCK_END();
    }

block_ended:
    /* Deliver a pending debug trap: set the DR6 status bits (BS for
       single-step, BT for task-switch) and raise INT 1. */
    if (!cpu_state.abrt && trap) {
# ifdef USE_DEBUG_REGS_486
        //pclog("Debug trap 0x%X\n", trap);
        if (trap & 2) dr[6] |= 0x8000;
        if (trap & 1) dr[6] |= 0x4000;
# else
        dr[6] |= (trap == 2) ? 0x8000 : 0x4000;
# endif
        trap = 0;
# ifndef USE_NEW_DYNAREC
        oldcs = CS;
# endif
        cpu_state.oldpc = cpu_state.pc;
        x86_int(1);
    }
    cpu_end_block_after_ins = 0;
}
static __inline void
exec386_dynarec_dyn(void)
{
uint32_t start_pc = 0;
uint32_t phys_addr = get_phys(cs + cpu_state.pc);
int hash = HASH(phys_addr);
# ifdef USE_NEW_DYNAREC
codeblock_t *block = &codeblock[codeblock_hash[hash]];
# else
codeblock_t *block = codeblock_hash[hash];
# endif
int valid_block = 0;
# ifdef USE_NEW_DYNAREC
if (!cpu_state.abrt)
# else
if (block && !cpu_state.abrt)
# endif
{
page_t *page = &pages[phys_addr >> 12];
/* Block must match current CS, PC, code segment size,
and physical address. The physical address check will
also catch any page faults at this stage */
valid_block = (block->pc == cs + cpu_state.pc) && (block->_cs == cs) && (block->phys == phys_addr) && !((block->status ^ cpu_cur_status) & CPU_STATUS_FLAGS) && ((block->status & cpu_cur_status & CPU_STATUS_MASK) == (cpu_cur_status & CPU_STATUS_MASK));
if (!valid_block) {
uint64_t mask = (uint64_t) 1 << ((phys_addr >> PAGE_MASK_SHIFT) & PAGE_MASK_MASK);
# ifdef USE_NEW_DYNAREC
int byte_offset = (phys_addr >> PAGE_BYTE_MASK_SHIFT) & PAGE_BYTE_MASK_OFFSET_MASK;
uint64_t byte_mask = 1ULL << (PAGE_BYTE_MASK_MASK & 0x3f);
if ((page->code_present_mask & mask) ||
((page->mem != page_ff) && (page->byte_code_present_mask[byte_offset] & byte_mask)))
# else
if (page->code_present_mask[(phys_addr >> PAGE_MASK_INDEX_SHIFT) & PAGE_MASK_INDEX_MASK] & mask)
# endif
{
/* Walk page tree to see if we find the correct block */
codeblock_t *new_block = codeblock_tree_find(phys_addr, cs);
if (new_block) {
valid_block = (new_block->pc == cs + cpu_state.pc) && (new_block->_cs == cs) && (new_block->phys == phys_addr) && !((new_block->status ^ cpu_cur_status) & CPU_STATUS_FLAGS) && ((new_block->status & cpu_cur_status & CPU_STATUS_MASK) == (cpu_cur_status & CPU_STATUS_MASK));
if (valid_block) {
block = new_block;
# ifdef USE_NEW_DYNAREC
codeblock_hash[hash] = get_block_nr(block);
# endif
}
}
}
}
if (valid_block && (block->page_mask & *block->dirty_mask)) {
# ifdef USE_NEW_DYNAREC
codegen_check_flush(page, page->dirty_mask, phys_addr);
if (block->pc == BLOCK_PC_INVALID)
valid_block = 0;
else if (block->flags & CODEBLOCK_IN_DIRTY_LIST)
block->flags &= ~CODEBLOCK_WAS_RECOMPILED;
# else
codegen_check_flush(page, page->dirty_mask[(phys_addr >> 10) & 3], phys_addr);
page->dirty_mask[(phys_addr >> 10) & 3] = 0;
if (!block->valid)
valid_block = 0;
# endif
}
if (valid_block && block->page_mask2) {
/* We don't want the second page to cause a page
fault at this stage - that would break any
code crossing a page boundary where the first
page is present but the second isn't. Instead
allow the first page to be interpreted and for
the page fault to occur when the page boundary
is actually crossed.*/
# ifdef USE_NEW_DYNAREC
uint32_t phys_addr_2 = get_phys_noabrt(block->pc + ((block->flags & CODEBLOCK_BYTE_MASK) ? 0x40 : 0x400));
# else
uint32_t phys_addr_2 = get_phys_noabrt(block->endpc);
# endif
page_t *page_2 = &pages[phys_addr_2 >> 12];
if ((block->phys_2 ^ phys_addr_2) & ~0xfff)
valid_block = 0;
else if (block->page_mask2 & *block->dirty_mask2) {
# ifdef USE_NEW_DYNAREC
codegen_check_flush(page_2, page_2->dirty_mask, phys_addr_2);
if (block->pc == BLOCK_PC_INVALID)
valid_block = 0;
else if (block->flags & CODEBLOCK_IN_DIRTY_LIST)
block->flags &= ~CODEBLOCK_WAS_RECOMPILED;
# else
codegen_check_flush(page_2, page_2->dirty_mask[(phys_addr_2 >> 10) & 3], phys_addr_2);
page_2->dirty_mask[(phys_addr_2 >> 10) & 3] = 0;
if (!block->valid)
valid_block = 0;
# endif
}
}
# ifdef USE_NEW_DYNAREC
if (valid_block && (block->flags & CODEBLOCK_IN_DIRTY_LIST)) {
block->flags &= ~CODEBLOCK_WAS_RECOMPILED;
if (block->flags & CODEBLOCK_BYTE_MASK)
block->flags |= CODEBLOCK_NO_IMMEDIATES;
else
block->flags |= CODEBLOCK_BYTE_MASK;
}
if (valid_block && (block->flags & CODEBLOCK_WAS_RECOMPILED) && (block->flags & CODEBLOCK_STATIC_TOP) && block->TOP != (cpu_state.TOP & 7))
# else
if (valid_block && block->was_recompiled && (block->flags & CODEBLOCK_STATIC_TOP) && block->TOP != cpu_state.TOP)
# endif
{
/* FPU top-of-stack does not match the value this block was compiled
with, re-compile using dynamic top-of-stack*/
# ifdef USE_NEW_DYNAREC
block->flags &= ~(CODEBLOCK_STATIC_TOP | CODEBLOCK_WAS_RECOMPILED);
# else
block->flags &= ~CODEBLOCK_STATIC_TOP;
block->was_recompiled = 0;
# endif
}
}
# ifdef USE_NEW_DYNAREC
if (valid_block && (block->flags & CODEBLOCK_WAS_RECOMPILED))
# else
if (valid_block && block->was_recompiled)
# endif
{
void (*code)(void) = (void *) &block->data[BLOCK_START];
# ifndef USE_NEW_DYNAREC
codeblock_hash[hash] = block;
# endif
inrecomp = 1;
code();
# ifdef USE_ACYCS
acycs = 0;
# endif
inrecomp = 0;
# ifndef USE_NEW_DYNAREC
if (!use32)
cpu_state.pc &= 0xffff;
# endif
} else if (valid_block && !cpu_state.abrt) {
# ifdef USE_NEW_DYNAREC
start_pc = cs + cpu_state.pc;
const int max_block_size = (block->flags & CODEBLOCK_BYTE_MASK) ? ((128 - 25) - (start_pc & 0x3f)) : 1000;
# else
start_pc = cpu_state.pc;
# endif
cpu_block_end = 0;
x86_was_reset = 0;
# if defined(__APPLE__) && defined(__aarch64__)
if (__builtin_available(macOS 11.0, *)) {
pthread_jit_write_protect_np(0);
}
# endif
codegen_block_start_recompile(block);
codegen_in_recompile = 1;
while (!cpu_block_end) {
# ifndef USE_NEW_DYNAREC
oldcs = CS;
oldcpl = CPL;
# endif
cpu_state.oldpc = cpu_state.pc;
cpu_state.op32 = use32;
cpu_state.ea_seg = &cpu_state.seg_ds;
cpu_state.ssegs = 0;
fetchdat = fastreadl_fetch(cs + cpu_state.pc);
# ifdef ENABLE_386_DYNAREC_LOG
if (in_smm)
x386_dynarec_log("[%04X:%08X] fetchdat = %08X\n", CS, cpu_state.pc, fetchdat);
# endif
if (!cpu_state.abrt) {
opcode = fetchdat & 0xFF;
fetchdat >>= 8;
cpu_state.pc++;
codegen_generate_call(opcode, x86_opcodes[(opcode | cpu_state.op32) & 0x3ff], fetchdat, cpu_state.pc, cpu_state.pc - 1);
x86_opcodes[(opcode | cpu_state.op32) & 0x3ff](fetchdat);
if (x86_was_reset)
break;
}
# ifndef USE_NEW_DYNAREC
if (!use32)
cpu_state.pc &= 0xffff;
# endif
/* Cap source code at 4000 bytes per block; this
will prevent any block from spanning more than
2 pages. In practice this limit will never be
hit, as host block size is only 2kB*/
# ifdef USE_NEW_DYNAREC
if (((cs + cpu_state.pc) - start_pc) >= max_block_size)
# else
if ((cpu_state.pc - start_pc) > 1000)
# endif
CPU_BLOCK_END();
if (cpu_init)
CPU_BLOCK_END();
if ((cpu_state.flags & T_FLAG) || (trap == 2))
CPU_BLOCK_END();
if (smi_line)
CPU_BLOCK_END();
if (nmi && nmi_enable && nmi_mask)
CPU_BLOCK_END();
if ((cpu_state.flags & I_FLAG) && pic.int_pending && !cpu_end_block_after_ins)
CPU_BLOCK_END();
if (cpu_end_block_after_ins) {
cpu_end_block_after_ins--;
if (!cpu_end_block_after_ins)
CPU_BLOCK_END();
}
if (cpu_state.abrt) {
if (!(cpu_state.abrt & ABRT_EXPECTED))
codegen_block_remove();
CPU_BLOCK_END();
}
}
cpu_end_block_after_ins = 0;
if ((!cpu_state.abrt || (cpu_state.abrt & ABRT_EXPECTED)) && !x86_was_reset)
codegen_block_end_recompile(block);
if (x86_was_reset)
codegen_reset();
codegen_in_recompile = 0;
# if defined(__APPLE__) && defined(__aarch64__)
if (__builtin_available(macOS 11.0, *)) {
pthread_jit_write_protect_np(1);
}
# endif
} else if (!cpu_state.abrt) {
/* Mark block but do not recompile */
# ifdef USE_NEW_DYNAREC
start_pc = cs + cpu_state.pc;
const int max_block_size = (block->flags & CODEBLOCK_BYTE_MASK) ? ((128 - 25) - (start_pc & 0x3f)) : 1000;
# else
start_pc = cpu_state.pc;
# endif
cpu_block_end = 0;
x86_was_reset = 0;
codegen_block_init(phys_addr);
while (!cpu_block_end) {
# ifndef USE_NEW_DYNAREC
oldcs = CS;
oldcpl = CPL;
# endif
cpu_state.oldpc = cpu_state.pc;
cpu_state.op32 = use32;
cpu_state.ea_seg = &cpu_state.seg_ds;
cpu_state.ssegs = 0;
codegen_endpc = (cs + cpu_state.pc) + 8;
fetchdat = fastreadl_fetch(cs + cpu_state.pc);
# ifdef ENABLE_386_DYNAREC_LOG
if (in_smm)
x386_dynarec_log("[%04X:%08X] fetchdat = %08X\n", CS, cpu_state.pc, fetchdat);
# endif
if (!cpu_state.abrt) {
opcode = fetchdat & 0xFF;
fetchdat >>= 8;
cpu_state.pc++;
x86_opcodes[(opcode | cpu_state.op32) & 0x3ff](fetchdat);
if (x86_was_reset)
break;
}
# ifndef USE_NEW_DYNAREC
if (!use32)
cpu_state.pc &= 0xffff;
# endif
/* Cap source code at 4000 bytes per block; this
will prevent any block from spanning more than
2 pages. In practice this limit will never be
hit, as host block size is only 2kB */
# ifdef USE_NEW_DYNAREC
if (((cs + cpu_state.pc) - start_pc) >= max_block_size)
# else
if ((cpu_state.pc - start_pc) > 1000)
# endif
CPU_BLOCK_END();
if (cpu_init)
CPU_BLOCK_END();
if (cpu_state.flags & T_FLAG)
CPU_BLOCK_END();
if (smi_line)
CPU_BLOCK_END();
if (nmi && nmi_enable && nmi_mask)
CPU_BLOCK_END();
if ((cpu_state.flags & I_FLAG) && pic.int_pending && !cpu_end_block_after_ins)
CPU_BLOCK_END();
if (cpu_end_block_after_ins) {
cpu_end_block_after_ins--;
if (!cpu_end_block_after_ins)
CPU_BLOCK_END();
}
if (cpu_state.abrt) {
if (!(cpu_state.abrt & ABRT_EXPECTED))
codegen_block_remove();
CPU_BLOCK_END();
}
}
cpu_end_block_after_ins = 0;
if ((!cpu_state.abrt || (cpu_state.abrt & ABRT_EXPECTED)) && !x86_was_reset)
codegen_block_end();
if (x86_was_reset)
codegen_reset();
}
# ifdef USE_NEW_DYNAREC
else
cpu_state.oldpc = cpu_state.pc;
# endif
}
/* Top-level execution loop of the dynamic-recompiler CPU core.
   Runs roughly `cycs` cycles, split into 2000 timeslices (the "5us" period
   below). Each inner iteration executes one batch of guest code, either
   interpreted (exec386_dynarec_int) or via recompiled blocks
   (exec386_dynarec_dyn), then services any pending fault, SMI, NMI or
   maskable PIC interrupt, and finally reconciles the TSC with the cycles
   actually consumed. */
void
exec386_dynarec(int32_t cycs)
{
    int      vector;  /* PIC interrupt vector (-1 = none/spurious) */
    int      tempi;   /* saved abort (exception) code */
    int32_t  cycdiff; /* cycles consumed by the current iteration */
    int32_t  oldcyc;
    int32_t  oldcyc2;
    uint64_t oldtsc;  /* TSC snapshot taken before executing the batch */
    uint64_t delta;
    int32_t  cyc_period = cycs / 2000; /*5us*/

#    ifdef USE_ACYCS
    acycs = 0;
#    endif
    cycles_main += cycs;
    while (cycles_main > 0) {
        int32_t cycles_start;

        cycles += cyc_period;
        cycles_start = cycles;

        while (cycles > 0) {
#    ifndef USE_NEW_DYNAREC
            oldcs           = CS;
            cpu_state.oldpc = cpu_state.pc;
            oldcpl          = CPL;
            cpu_state.op32  = use32;
            cycdiff         = 0;
#    endif
            /* Snapshot the cycle counter and TSC so consumption can be
               measured after the batch has run. */
            oldcyc = oldcyc2 = cycles;
            cycles_old       = cycles;
            oldtsc           = tsc;
            tsc_old          = tsc;
            if ((!CACHE_ON()) || cpu_override_dynarec) /*Interpret block*/
            {
                exec386_dynarec_int();
            } else {
                exec386_dynarec_dyn();
            }

            /* A pending CPU (re-)initialization request takes effect
               between batches, never mid-block. */
            if (cpu_init) {
                cpu_init = 0;
                resetx86();
            }

            /* Exception delivery: deliver the original fault first; a fault
               during delivery raises #DF (vector 8); a fault during #DF
               delivery is a triple fault and soft-resets the CPU. */
            if (cpu_state.abrt) {
                flags_rebuild();
                tempi          = cpu_state.abrt & ABRT_MASK;
                cpu_state.abrt = 0;
                x86_doabrt(tempi);

                if (cpu_state.abrt) {
                    cpu_state.abrt = 0;
                    cpu_state.pc   = cpu_state.oldpc;
#    ifndef USE_NEW_DYNAREC
                    CS = oldcs;
#    endif
                    pmodeint(8, 0);
                    if (cpu_state.abrt) {
                        cpu_state.abrt = 0;
                        softresetx86();
                        cpu_set_edx();
#    ifdef ENABLE_386_DYNAREC_LOG
                        x386_dynarec_log("Triple fault - reset\n");
#    endif
                    }
                }
            }

            /* External event priority: SMI first, then NMI, then maskable
               PIC interrupts (only when IF is set). */
            if (smi_line)
                enter_smm_check(0);
            else if (nmi && nmi_enable && nmi_mask) {
#    ifndef USE_NEW_DYNAREC
                oldcs = CS;
#    endif
                cpu_state.oldpc = cpu_state.pc;
                x86_int(2);
                nmi_enable = 0;
#    ifdef OLD_NMI_BEHAVIOR
                if (nmi_auto_clear) {
                    nmi_auto_clear = 0;
                    nmi            = 0;
                }
#    else
                nmi = 0;
#    endif
            } else if ((cpu_state.flags & I_FLAG) && pic.int_pending) {
                vector = picinterrupt();
                if (vector != -1) {
#    ifndef USE_NEW_DYNAREC
                    oldcs = CS;
#    endif
                    cpu_state.oldpc = cpu_state.pc;
                    x86_int(vector);
                }
            }

            /* Reconcile the TSC with the cycles consumed this iteration. */
            cycdiff = oldcyc - cycles;
            delta   = tsc - oldtsc;
            if (delta > 0) {
                /* TSC has changed, this means interim timer processing has happened,
                   see how much we still need to add. */
                cycdiff -= delta;
                if (cycdiff > 0)
                    tsc += cycdiff;
            } else {
                /* TSC has not changed. */
                tsc += cycdiff;
            }

            if (cycdiff > 0) {
                if (TIMER_VAL_LESS_THAN_VAL(timer_target, (uint32_t) tsc))
                    timer_process();
            }

#    ifdef USE_GDBSTUB
            if (gdbstub_instruction())
                return;
#    endif
        }

        cycles_main -= (cycles_start - cycles);
    }
}
#endif
/* Pure-interpreter main loop (no recompilation): executes one instruction
   at a time until the current timer period elapses, handling instruction
   faults, the trap flag (single-step #DB), and SMI/NMI/PIC interrupt
   delivery after each instruction. Real-mode PIC interrupts are delivered
   inline (manual FLAGS/CS/IP push); protected mode goes through
   pmodeint(). */
void
exec386(int32_t cycs)
{
    int      vector;       /* PIC interrupt vector (-1 = none/spurious) */
    int      tempi;        /* saved abort (exception) code */
    int32_t  cycdiff;      /* cycles consumed so far in this timer period */
    int32_t  oldcyc;
    int32_t  cycle_period; /* cycles until the next timer event */
    int32_t  ins_cycles;   /* cycles consumed by one instruction */
    uint32_t addr;         /* real-mode IVT entry address */

    cycles += cycs;

    while (cycles > 0) {
        /* Run until the timer target is reached, then process timers. */
        cycle_period = (timer_target - (uint32_t) tsc) + 1;

        x86_was_reset = 0;
        cycdiff       = 0;
        oldcyc        = cycles;
        while (cycdiff < cycle_period) {
            ins_cycles = cycles;

#ifndef USE_NEW_DYNAREC
            oldcs  = CS;
            oldcpl = CPL;
#endif
            cpu_state.oldpc = cpu_state.pc;
            cpu_state.op32  = use32;

#ifndef USE_NEW_DYNAREC
            x86_was_reset = 0;
#endif

            /* Per-instruction decode state: default segment and no
               segment-override prefixes seen yet. */
            cpu_state.ea_seg = &cpu_state.seg_ds;
            cpu_state.ssegs  = 0;

#ifdef USE_DEBUG_REGS_486
            /* Instruction breakpoint (DR0-DR3) hit: raise #DB instead of
               executing the instruction. */
            if (UNLIKELY(cpu_386_check_instruction_fault())) {
                x86gen();
                goto block_ended;
            }
#endif

            fetchdat = fastreadl_fetch(cs + cpu_state.pc);

            if (!cpu_state.abrt) {
#ifdef ENABLE_386_LOG
                if (in_smm)
                    x386_dynarec_log("[%04X:%08X] %08X\n", CS, cpu_state.pc, fetchdat);
#endif
                opcode = fetchdat & 0xFF;
                fetchdat >>= 8;
                /* Latch the trap flag before execution so single-step #DB
                   fires after this instruction, not the next one. */
#ifdef USE_DEBUG_REGS_486
                trap |= !!(cpu_state.flags & T_FLAG);
#else
                trap = cpu_state.flags & T_FLAG;
#endif

                cpu_state.pc++;
#ifdef USE_DEBUG_REGS_486
                cpu_state.eflags &= ~(RF_FLAG);
#endif
                x86_opcodes[(opcode | cpu_state.op32) & 0x3ff](fetchdat);
                if (x86_was_reset)
                    break;
            }
#ifdef ENABLE_386_LOG
            else if (in_smm)
                x386_dynarec_log("[%04X:%08X] ABRT\n", CS, cpu_state.pc);
#endif

#ifndef USE_NEW_DYNAREC
            if (!use32)
                cpu_state.pc &= 0xffff;
#endif

            if (cpu_end_block_after_ins)
                cpu_end_block_after_ins--;

#ifdef USE_DEBUG_REGS_486
block_ended:
#endif
            /* Fault delivery; nested fault -> #DF; fault during #DF ->
               triple fault -> soft reset. */
            if (cpu_state.abrt) {
                flags_rebuild();
                tempi          = cpu_state.abrt & ABRT_MASK;
                cpu_state.abrt = 0;
                x86_doabrt(tempi);

                if (cpu_state.abrt) {
                    cpu_state.abrt = 0;
#ifndef USE_NEW_DYNAREC
                    CS = oldcs;
#endif
                    cpu_state.pc = cpu_state.oldpc;
                    x386_dynarec_log("Double fault\n");
                    pmodeint(8, 0);
                    if (cpu_state.abrt) {
                        cpu_state.abrt = 0;
                        softresetx86();
                        cpu_set_edx();
#ifdef ENABLE_386_LOG
                        x386_dynarec_log("Triple fault - reset\n");
#endif
                    }
                }
            } else if (trap) {
                /* Single-step (and, with USE_DEBUG_REGS_486, instruction
                   breakpoint) trap: raise #DB with DR6 status bits set. */
                flags_rebuild();
#ifdef USE_DEBUG_REGS_486
                if (trap & 1)
                    dr[6] |= 0x4000;
                if (trap & 2)
                    dr[6] |= 0x8000;
#endif
                trap = 0;
#ifndef USE_NEW_DYNAREC
                oldcs = CS;
#endif
                cpu_state.oldpc = cpu_state.pc;
#ifndef USE_DEBUG_REGS_486
                dr[6] |= 0x4000;
#endif
                x86_int(1);
            }

            /* External event priority: SMI, then NMI, then maskable IRQs
               (IF set and no end-of-block pending). */
            if (smi_line)
                enter_smm_check(0);
            else if (nmi && nmi_enable && nmi_mask) {
#ifndef USE_NEW_DYNAREC
                oldcs = CS;
#endif
                cpu_state.oldpc = cpu_state.pc;
                x86_int(2);
                nmi_enable = 0;
#ifdef OLD_NMI_BEHAVIOR
                if (nmi_auto_clear) {
                    nmi_auto_clear = 0;
                    nmi            = 0;
                }
#else
                nmi = 0;
#endif
            } else if ((cpu_state.flags & I_FLAG) && pic.int_pending && !cpu_end_block_after_ins) {
                vector = picinterrupt();
                if (vector != -1) {
                    flags_rebuild();
                    if (msw & 1)
                        pmodeint(vector, 0);
                    else {
                        /* Real mode: build the interrupt frame by hand and
                           vector through the IVT. */
                        writememw(ss, (SP - 2) & 0xFFFF, cpu_state.flags);
                        writememw(ss, (SP - 4) & 0xFFFF, CS);
                        writememw(ss, (SP - 6) & 0xFFFF, cpu_state.pc);
                        SP -= 6;

                        addr = (vector << 2) + idt.base;
                        cpu_state.flags &= ~I_FLAG;
                        cpu_state.flags &= ~T_FLAG;
                        cpu_state.pc = readmemw(0, addr);
                        loadcs(readmemw(0, addr + 2));
                    }
                }
            }

            ins_cycles -= cycles;
            tsc += ins_cycles;

            cycdiff = oldcyc - cycles;

            if (timetolive) {
                timetolive--;
                if (!timetolive)
                    fatal("Life expired\n");
            }

            if (TIMER_VAL_LESS_THAN_VAL(timer_target, (uint32_t) tsc))
                timer_process();

#ifdef USE_GDBSTUB
            if (gdbstub_instruction())
                return;
#endif
        }
    }
}
``` | /content/code_sandbox/src/cpu/386_dynarec.c | c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 8,311 |
```c
/*Elements taken into account :
- X/Y pairing
- FPU/FXCH pairing
- Prefix decode delay
- AGI stalls
Elements not taken into account :
- Branch prediction (beyond most simplistic approximation)
- FPU queue
- Out of order execution (beyond most simplistic approximation)
*/
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <wchar.h>
#include <86box/86box.h>
#include "cpu.h"
#include <86box/mem.h>
#include <86box/plat_unused.h>
#include "x86.h"
#include "x86_ops.h"
#include "x87_sf.h"
#include "x87.h"
#include "codegen.h"
#include "codegen_timing_common.h"
/* Encoding of the "686" (Cyrix 6x86-class) timing-table entries: the low
   bits hold a cycle count or timing class, bits 29-30 hold the pairing
   class, and bit 31 flags separate 16/32-bit timings. */

/*Instruction has different execution time for 16 and 32 bit data. Does not pair */
/* 1u: a plain (1 << 31) shifts into the sign bit of int, which is undefined
   behavior in C; the unsigned form yields the same uint32_t value. */
#define CYCLES_HAS_MULTI (1u << 31)
/* Arguments parenthesized so expression arguments expand correctly. */
#define CYCLES_MULTI(c16, c32) (CYCLES_HAS_MULTI | (c16) | ((c32) << 8))

/*Instruction lasts given number of cycles. Does not pair*/
#define CYCLES(c) (c)

/*Instruction follows either register timing, read-modify, or read-modify-write.
  May be pairable*/
#define CYCLES_REG    (1 << 0)
#define CYCLES_RM     (1 << 0)
#define CYCLES_RMW    (1 << 0)
#define CYCLES_BRANCH (1 << 0)

#define CYCLES_MASK ((1 << 7) - 1)

/*Instruction does not pair*/
#define PAIR_NP (0 << 29)
/*Instruction pairs in X pipe only*/
#define PAIR_X (1 << 29)
/*Instruction pairs in X pipe only, and can not pair with a following instruction*/
#define PAIR_X_BRANCH (2 << 29)
/*Instruction pairs in both X and Y pipes*/
#define PAIR_XY (3 << 29)

#define PAIR_MASK (3 << 29)

#define INVALID 0

/* Decode state carried from one instruction to the next so that pairing and
   AGI stalls can be modelled across consecutive instructions (consumers are
   later in this file). */
static int       prev_full;             /* NOTE(review): appears to flag a previous instruction awaiting a pairing partner -- confirm against users below */
static uint32_t  prev_opcode;           /* opcode of the pending (previous) instruction */
static uint32_t *prev_timings;          /* timing table the pending opcode indexes */
static uint32_t  prev_op_32;            /* operand/address-size state of the pending opcode */
static uint32_t  prev_regmask;          /* register mask of the pending opcode */
static uint64_t *prev_deps;             /* dependency table of the pending opcode */
static uint32_t  prev_fetchdat;         /* fetched bytes of the pending opcode */
static uint32_t  last_regmask_modified; /* register-write mask from the cycle before last */
static uint32_t  regmask_modified;      /* register-write mask from the last completed cycle */
/* Timing/pairing classes for single-byte opcodes 00-FF, memory-operand
   (mod != 3) forms. Indexed by the opcode byte; entries use the CYCLES*/
/* and PAIR_* encodings defined above. INVALID marks opcodes handled
   elsewhere (prefixes, groups, FPU escapes). */
static uint32_t opcode_timings_686[256] = {
    // clang-format off
/* ADD ADD ADD ADD*/
/*00*/ PAIR_XY | CYCLES_RMW, PAIR_XY | CYCLES_RMW, PAIR_XY | CYCLES_RM, PAIR_XY | CYCLES_RM,
/* ADD ADD PUSH ES POP ES*/
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_NP | CYCLES(1), PAIR_NP | CYCLES(3),
/* OR OR OR OR*/
 PAIR_XY | CYCLES_RMW, PAIR_XY | CYCLES_RMW, PAIR_XY | CYCLES_RM, PAIR_XY | CYCLES_RM,
/* OR OR PUSH CS */
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_NP | CYCLES(1), INVALID,

/* ADC ADC ADC ADC*/
/*10*/ PAIR_XY | CYCLES_RMW, PAIR_XY | CYCLES_RMW, PAIR_XY | CYCLES_RM, PAIR_XY | CYCLES_RM,
/* ADC ADC PUSH SS POP SS*/
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_NP | CYCLES(1), PAIR_NP | CYCLES(3),
/* SBB SBB SBB SBB*/
 PAIR_XY | CYCLES_RMW, PAIR_XY | CYCLES_RMW, PAIR_XY | CYCLES_RM, PAIR_XY | CYCLES_RM,
/* SBB SBB PUSH DS POP DS*/
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_NP | CYCLES(1), PAIR_NP | CYCLES(3),

/* AND AND AND AND*/
/*20*/ PAIR_XY | CYCLES_RMW, PAIR_XY | CYCLES_RMW, PAIR_XY | CYCLES_RM, PAIR_XY | CYCLES_RM,
/* AND AND DAA*/
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, INVALID, PAIR_NP | CYCLES(7),
/* SUB SUB SUB SUB*/
 PAIR_XY | CYCLES_RMW, PAIR_XY | CYCLES_RMW, PAIR_XY | CYCLES_RM, PAIR_XY | CYCLES_RM,
/* SUB SUB DAS*/
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, INVALID, PAIR_NP | CYCLES(7),

/* XOR XOR XOR XOR*/
/*30*/ PAIR_XY | CYCLES_RMW, PAIR_XY | CYCLES_RMW, PAIR_XY | CYCLES_RM, PAIR_XY | CYCLES_RM,
/* XOR XOR AAA*/
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, INVALID, PAIR_NP | CYCLES(7),
/* CMP CMP CMP CMP*/
 PAIR_XY | CYCLES_RM, PAIR_XY | CYCLES_RM, PAIR_XY | CYCLES_RM, PAIR_XY | CYCLES_RM,
/* CMP CMP AAS*/
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, INVALID, PAIR_NP | CYCLES(7),

/* INC EAX INC ECX INC EDX INC EBX*/
/*40*/ PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,
/* INC ESP INC EBP INC ESI INC EDI*/
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,
/* DEC EAX DEC ECX DEC EDX DEC EBX*/
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,
/* DEC ESP DEC EBP DEC ESI DEC EDI*/
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,

/* PUSH EAX PUSH ECX PUSH EDX PUSH EBX*/
/*50*/ PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,
/* PUSH ESP PUSH EBP PUSH ESI PUSH EDI*/
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,
/* POP EAX POP ECX POP EDX POP EBX*/
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,
/* POP ESP POP EBP POP ESI POP EDI*/
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,

/* PUSHA POPA BOUND ARPL*/
/*60*/ PAIR_NP | CYCLES(6), PAIR_NP | CYCLES(6), PAIR_NP | CYCLES(11), PAIR_NP | CYCLES(9),
 INVALID, INVALID, INVALID, INVALID,
/* PUSH imm IMUL PUSH imm IMUL*/
 PAIR_XY | CYCLES_REG, PAIR_NP | CYCLES(10), PAIR_XY | CYCLES_REG, PAIR_NP | CYCLES(10),
/* INSB INSW OUTSB OUTSW*/
 PAIR_NP | CYCLES(14), PAIR_NP | CYCLES(14), PAIR_NP | CYCLES(14), PAIR_NP | CYCLES(14),

/* Jxx*/
/*70*/ PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH,
 PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH,
 PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH,
 PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH,

/*80*/ INVALID, INVALID, INVALID, INVALID,
/* TEST TEST XCHG XCHG*/
 PAIR_XY | CYCLES_RM, PAIR_XY | CYCLES_RM, PAIR_XY | CYCLES(2), PAIR_XY | CYCLES(2),
/* MOV MOV MOV MOV*/
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,
/* MOV from seg LEA MOV to seg POP*/
 /* PAIR_NP made explicit for consistency with the mod3 table (PAIR_NP is 0,
    so the value is unchanged). */
 PAIR_XY | CYCLES(1), PAIR_XY | CYCLES_REG, PAIR_NP | CYCLES(3), PAIR_XY | CYCLES(1),

/* NOP XCHG XCHG XCHG*/
/*90*/ PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES(2), PAIR_XY | CYCLES(2), PAIR_XY | CYCLES(2),
/* XCHG XCHG XCHG XCHG*/
 PAIR_XY | CYCLES(2), PAIR_XY | CYCLES(2), PAIR_XY | CYCLES(2), PAIR_XY | CYCLES(2),
/* CBW CWD CALL far WAIT*/
 PAIR_XY | CYCLES(3), PAIR_XY | CYCLES(2), PAIR_NP | CYCLES(4), PAIR_NP | CYCLES(5),
/* PUSHF POPF SAHF LAHF*/
 PAIR_XY | CYCLES(2), PAIR_XY | CYCLES(9), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(2),

/* MOV MOV MOV MOV*/
/*a0*/ PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,
/* MOVSB MOVSW CMPSB CMPSW*/
 PAIR_NP | CYCLES(4), PAIR_NP | CYCLES(4), PAIR_NP | CYCLES(5), PAIR_NP | CYCLES(5),
/* TEST TEST STOSB STOSW*/
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2),
/* LODSB LODSW SCASB SCASW*/
 PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2),

/* MOV*/
/*b0*/ PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,

/* RET imm RET*/
/*c0*/ INVALID, INVALID, PAIR_X_BRANCH | CYCLES(3), PAIR_X_BRANCH | CYCLES(2),
/* LES LDS MOV MOV*/
 PAIR_XY | CYCLES(4), PAIR_XY | CYCLES(4), PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,
/* ENTER LEAVE RETF RETF*/
 PAIR_XY | CYCLES(10), PAIR_XY | CYCLES(4), PAIR_NP | CYCLES(4), PAIR_NP | CYCLES(4),
/* INT3 INT INTO IRET*/
 PAIR_NP | CYCLES(13), PAIR_NP | CYCLES(16), PAIR_NP | CYCLES(4), PAIR_NP | CYCLES(10),

/*d0*/ INVALID, INVALID, INVALID, INVALID,
/* AAM AAD SETALC XLAT*/
 PAIR_XY | CYCLES(18), PAIR_XY | CYCLES(7), PAIR_XY | CYCLES(2), PAIR_XY | CYCLES(4),
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,

/* LOOPNE LOOPE LOOP JCXZ*/
/*e0*/ PAIR_X_BRANCH| CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH,
/* IN AL IN AX OUT_AL OUT_AX*/
 PAIR_NP | CYCLES(14), PAIR_NP | CYCLES(14), PAIR_NP | CYCLES(14), PAIR_NP | CYCLES(14),
/* CALL JMP JMP JMP*/
 PAIR_X_BRANCH | CYCLES_REG, PAIR_X_BRANCH | CYCLES_REG, PAIR_NP | CYCLES(1), PAIR_X_BRANCH | CYCLES_REG,
/* IN AL IN AX OUT_AL OUT_AX*/
 PAIR_NP | CYCLES(14), PAIR_NP | CYCLES(14), PAIR_NP | CYCLES(14), PAIR_NP | CYCLES(14),

/* REPNE REPE*/
/*f0*/ INVALID, INVALID, PAIR_NP | CYCLES(0), PAIR_NP | CYCLES(0),
/* HLT CMC*/
 PAIR_NP | CYCLES(5), PAIR_XY | CYCLES(2), INVALID, INVALID,
/* CLC STC CLI STI*/
 PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(7), PAIR_XY | CYCLES(7),
/* CLD STD INCDEC*/
 PAIR_XY | CYCLES(7), PAIR_XY | CYCLES(7), PAIR_XY | CYCLES_RMW, INVALID
    // clang-format on
};
/* Timing/pairing classes for single-byte opcodes 00-FF, register-operand
   (mod == 3) forms; companion of opcode_timings_686 above. Indexed by the
   opcode byte. */
static uint32_t opcode_timings_686_mod3[256] = {
    // clang-format off
/* ADD ADD ADD ADD*/
/*00*/ PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,
/* ADD ADD PUSH ES POP ES*/
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_NP | CYCLES(1), PAIR_NP | CYCLES(3),
/* OR OR OR OR*/
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,
/* OR OR PUSH CS */
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_NP | CYCLES(1), INVALID,

/* ADC ADC ADC ADC*/
/*10*/ PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,
/* ADC ADC PUSH SS POP SS*/
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_NP | CYCLES(1), PAIR_NP | CYCLES(3),
/* SBB SBB SBB SBB*/
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,
/* SBB SBB PUSH DS POP DS*/
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_NP | CYCLES(1), PAIR_NP | CYCLES(3),

/* AND AND AND AND*/
/*20*/ PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,
/* AND AND DAA*/
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, INVALID, PAIR_NP | CYCLES(7),
/* SUB SUB SUB SUB*/
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,
/* SUB SUB DAS*/
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, INVALID, PAIR_NP | CYCLES(7),

/* XOR XOR XOR XOR*/
/*30*/ PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,
/* XOR XOR AAA*/
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, INVALID, PAIR_NP | CYCLES(7),
/* CMP CMP CMP CMP*/
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,
/* CMP CMP AAS*/
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, INVALID, PAIR_NP | CYCLES(7),

/* INC EAX INC ECX INC EDX INC EBX*/
/*40*/ PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,
/* INC ESP INC EBP INC ESI INC EDI*/
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,
/* DEC EAX DEC ECX DEC EDX DEC EBX*/
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,
/* DEC ESP DEC EBP DEC ESI DEC EDI*/
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,

/* PUSH EAX PUSH ECX PUSH EDX PUSH EBX*/
/*50*/ PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,
/* PUSH ESP PUSH EBP PUSH ESI PUSH EDI*/
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,
/* POP EAX POP ECX POP EDX POP EBX*/
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,
/* POP ESP POP EBP POP ESI POP EDI*/
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,

/* PUSHA POPA BOUND ARPL*/
/*60*/ PAIR_NP | CYCLES(6), PAIR_NP | CYCLES(6), PAIR_NP | CYCLES(11), PAIR_NP | CYCLES(9),
 INVALID, INVALID, INVALID, INVALID,
/* PUSH imm IMUL PUSH imm IMUL*/
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES(10), PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES(10),
/* INSB INSW OUTSB OUTSW*/
 PAIR_NP | CYCLES(14), PAIR_NP | CYCLES(14), PAIR_NP | CYCLES(14), PAIR_NP | CYCLES(14),

/* Jxx*/
/*70*/ PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH,
 PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH,
 PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH,
 PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH,

/*80*/ PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,
/* TEST TEST XCHG XCHG*/
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES(2), PAIR_XY | CYCLES(2),
/* MOV MOV MOV MOV*/
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,
/* MOV from seg LEA MOV to seg POP*/
 PAIR_XY | CYCLES(1), PAIR_XY | CYCLES_REG, PAIR_NP | CYCLES(3), PAIR_XY | CYCLES(1),

/* NOP XCHG XCHG XCHG*/
/*90*/ PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES(2), PAIR_XY | CYCLES(2), PAIR_XY | CYCLES(2),
/* XCHG XCHG XCHG XCHG*/
 PAIR_XY | CYCLES(2), PAIR_XY | CYCLES(2), PAIR_XY | CYCLES(2), PAIR_XY | CYCLES(2),
/* CBW CWD CALL far WAIT*/
 PAIR_XY | CYCLES(3), PAIR_XY | CYCLES(2), PAIR_NP | CYCLES(4), PAIR_NP | CYCLES(5),
/* PUSHF POPF SAHF LAHF*/
 PAIR_XY | CYCLES(2), PAIR_XY | CYCLES(9), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(2),

/* MOV MOV MOV MOV*/
/*a0*/ PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,
/* MOVSB MOVSW CMPSB CMPSW*/
 PAIR_NP | CYCLES(4), PAIR_NP | CYCLES(4), PAIR_NP | CYCLES(5), PAIR_NP | CYCLES(5),
/* TEST TEST STOSB STOSW*/
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2),
/* LODSB LODSW SCASB SCASW*/
 PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(3), PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2),

/* MOV*/
/*b0*/ PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,
 PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,

/* RET imm RET*/
/*c0*/ INVALID, INVALID, PAIR_X_BRANCH | CYCLES(3), PAIR_X_BRANCH | CYCLES(2),
/* LES LDS MOV MOV*/
 PAIR_NP | CYCLES(4), PAIR_NP | CYCLES(4), PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,
/* ENTER LEAVE RETF RETF*/
 PAIR_XY | CYCLES(13), PAIR_XY | CYCLES(4), PAIR_NP | CYCLES(4), PAIR_NP | CYCLES(4),
/* INT3 INT INTO IRET*/
 PAIR_NP | CYCLES(13), PAIR_NP | CYCLES(16), PAIR_NP | CYCLES(4), PAIR_NP | CYCLES(10),

/*d0*/ INVALID, INVALID, INVALID, INVALID,
/* AAM AAD SETALC XLAT*/
 PAIR_XY | CYCLES(18), PAIR_XY | CYCLES(7), PAIR_XY | CYCLES(2), PAIR_XY | CYCLES(4),
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,

/* LOOPNE LOOPE LOOP JCXZ*/
/*e0*/ PAIR_X_BRANCH| CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH,
/* IN AL IN AX OUT_AL OUT_AX*/
 PAIR_NP | CYCLES(14), PAIR_NP | CYCLES(14), PAIR_NP | CYCLES(14), PAIR_NP | CYCLES(14),
/* CALL JMP JMP JMP*/
 PAIR_X_BRANCH | CYCLES_REG, PAIR_X_BRANCH | CYCLES_REG, PAIR_NP | CYCLES(1), PAIR_X_BRANCH | CYCLES_REG,
/* IN AL IN AX OUT_AL OUT_AX*/
 PAIR_NP | CYCLES(14), PAIR_NP | CYCLES(14), PAIR_NP | CYCLES(14), PAIR_NP | CYCLES(14),

/* REPNE REPE*/
/*f0*/ INVALID, INVALID, PAIR_NP | CYCLES(0), PAIR_NP | CYCLES(0),
/* HLT CMC*/
 PAIR_NP | CYCLES(4), PAIR_XY | CYCLES(2), INVALID, INVALID,
/* CLC STC CLI STI*/
 PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(7), PAIR_XY | CYCLES(7),
/* CLD STD INCDEC*/
 PAIR_XY | CYCLES(7), PAIR_XY | CYCLES(7), PAIR_XY | CYCLES_REG, INVALID
    // clang-format on
};
/* Timing/pairing classes for two-byte (0F-prefixed) opcodes, memory-operand
   (mod != 3) forms. Indexed by the second opcode byte. */
static uint32_t opcode_timings_686_0f[256] = {
    // clang-format off
/*00*/ PAIR_NP | CYCLES(20), PAIR_NP | CYCLES(11), PAIR_NP | CYCLES(11), PAIR_NP | CYCLES(10),
 INVALID, PAIR_NP | CYCLES(195), PAIR_NP | CYCLES(7), INVALID,
 PAIR_NP | CYCLES(1000), PAIR_NP | CYCLES(10000), INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,

/*10*/ INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,

/*20*/ PAIR_NP | CYCLES(6), PAIR_NP | CYCLES(6), PAIR_NP | CYCLES(6), PAIR_NP | CYCLES(6),
 PAIR_NP | CYCLES(6), PAIR_NP | CYCLES(6), INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,

/* PAIR_NP made explicit on the RDTSC (0F 31) entry for consistency with the
   rest of the table; PAIR_NP is 0, so the value is unchanged. */
/*30*/ PAIR_NP | CYCLES(9), PAIR_NP | CYCLES(1), PAIR_NP | CYCLES(9), INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,

/*40*/ PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1),
 PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1),
 PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1),
 PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1),

/*50*/ INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,

/*60*/ PAIR_X | CYCLES_RM, PAIR_X | CYCLES_RM, PAIR_X | CYCLES_RM, PAIR_X | CYCLES_RM,
 PAIR_X | CYCLES_RM, PAIR_X | CYCLES_RM, PAIR_X | CYCLES_RM, PAIR_X | CYCLES_RM,
 PAIR_X | CYCLES_RM, PAIR_X | CYCLES_RM, PAIR_X | CYCLES_RM, PAIR_X | CYCLES_RM,
 INVALID, INVALID, PAIR_X | CYCLES_RM, PAIR_X | CYCLES_RM,

/*70*/ INVALID, PAIR_X | CYCLES_RM, PAIR_X | CYCLES_RM, PAIR_X | CYCLES_RM,
 PAIR_X | CYCLES_RM, PAIR_X | CYCLES_RM, PAIR_X | CYCLES_RM, PAIR_X | CYCLES(1),
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, PAIR_X | CYCLES_RM, PAIR_X | CYCLES_RM,

/*80*/ PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH,
 PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH,
 PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH,
 PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH,

/*90*/ PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1),
 PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1),
 PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1),
 PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1),

/*a0*/ PAIR_XY | CYCLES(3), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(12), PAIR_XY | CYCLES(5),
 PAIR_XY | CYCLES(4), PAIR_XY | CYCLES(5), INVALID, INVALID,
 PAIR_XY | CYCLES(3), PAIR_XY | CYCLES(1), INVALID, PAIR_XY | CYCLES(5),
 PAIR_NP | CYCLES(4), PAIR_NP | CYCLES(5), INVALID, PAIR_NP | CYCLES(10),

/*b0*/ PAIR_NP | CYCLES(11), PAIR_NP | CYCLES(11), PAIR_NP | CYCLES(4), PAIR_XY | CYCLES(5),
 PAIR_NP | CYCLES(4), PAIR_NP | CYCLES(4), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1),
 INVALID, INVALID, PAIR_XY | CYCLES(3), PAIR_XY | CYCLES(5),
 PAIR_XY | CYCLES(3), PAIR_XY | CYCLES(3), PAIR_XY | CYCLES(1), INVALID,

/*c0*/ PAIR_NP | CYCLES(2), PAIR_NP | CYCLES(2), INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 PAIR_XY | CYCLES(4), PAIR_XY | CYCLES(4), PAIR_XY | CYCLES(4), PAIR_XY | CYCLES(4),
 PAIR_XY | CYCLES(4), PAIR_XY | CYCLES(4), PAIR_XY | CYCLES(4), PAIR_XY | CYCLES(4),

/*d0*/ INVALID, PAIR_X | CYCLES_RM, PAIR_X | CYCLES_RM, PAIR_X | CYCLES_RM,
 INVALID, PAIR_X | CYCLES_RM, INVALID, INVALID,
 PAIR_X | CYCLES_RM, PAIR_X | CYCLES_RM, INVALID, PAIR_X | CYCLES_RM,
 PAIR_X | CYCLES_RM, PAIR_X | CYCLES_RM, INVALID, PAIR_X | CYCLES_RM,

/*e0*/ INVALID, PAIR_X | CYCLES_RM, PAIR_X | CYCLES_RM, INVALID,
 INVALID, PAIR_X | CYCLES_RM, INVALID, INVALID,
 PAIR_X | CYCLES_RM, PAIR_X | CYCLES_RM, INVALID, PAIR_X | CYCLES_RM,
 PAIR_X | CYCLES_RM, PAIR_X | CYCLES_RM, INVALID, PAIR_X | CYCLES_RM,

/*f0*/ INVALID, PAIR_X | CYCLES_RM, PAIR_X | CYCLES_RM, PAIR_X | CYCLES_RM,
 INVALID, PAIR_X | CYCLES_RM, INVALID, INVALID,
 PAIR_X | CYCLES_RM, PAIR_X | CYCLES_RM, PAIR_X | CYCLES_RM, INVALID,
 PAIR_X | CYCLES_RM, PAIR_X | CYCLES_RM, PAIR_X | CYCLES_RM, INVALID,
    // clang-format on
};
/* Timing/pairing classes for two-byte (0F-prefixed) opcodes, register-operand
   (mod == 3) forms; companion of opcode_timings_686_0f above. */
static uint32_t opcode_timings_686_0f_mod3[256] = {
    // clang-format off
/*00*/ PAIR_NP | CYCLES(20), PAIR_NP | CYCLES(11), PAIR_NP | CYCLES(11), PAIR_NP | CYCLES(10),
 INVALID, PAIR_NP | CYCLES(195), PAIR_NP | CYCLES(7), INVALID,
 PAIR_NP | CYCLES(1000), PAIR_NP | CYCLES(10000), INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,

/*10*/ INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,

/*20*/ PAIR_NP | CYCLES(6), PAIR_NP | CYCLES(6), PAIR_NP | CYCLES(6), PAIR_NP | CYCLES(6),
 PAIR_NP | CYCLES(6), PAIR_NP | CYCLES(6), INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,

/* PAIR_NP made explicit on the RDTSC (0F 31) entry for consistency with the
   rest of the table; PAIR_NP is 0, so the value is unchanged. */
/*30*/ PAIR_NP | CYCLES(9), PAIR_NP | CYCLES(1), PAIR_NP | CYCLES(9), INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,

/*40*/ PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1),
 PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1),
 PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1),
 PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1),

/*50*/ INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,

/*60*/ PAIR_X | CYCLES_REG, PAIR_X | CYCLES_REG, PAIR_X | CYCLES_REG, PAIR_X | CYCLES_REG,
 PAIR_X | CYCLES_REG, PAIR_X | CYCLES_REG, PAIR_X | CYCLES_REG, PAIR_X | CYCLES_REG,
 PAIR_X | CYCLES_REG, PAIR_X | CYCLES_REG, PAIR_X | CYCLES_REG, PAIR_X | CYCLES_REG,
 INVALID, INVALID, PAIR_X | CYCLES_REG, PAIR_X | CYCLES_REG,

/*70*/ INVALID, PAIR_X | CYCLES_REG, PAIR_X | CYCLES_REG, PAIR_X | CYCLES_REG,
 PAIR_X | CYCLES_REG, PAIR_X | CYCLES_REG, PAIR_X | CYCLES_REG, PAIR_X | CYCLES(1),
 INVALID, INVALID, INVALID, INVALID,
 INVALID, INVALID, PAIR_X | CYCLES_REG, PAIR_X | CYCLES_REG,

/*80*/ PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH,
 PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH,
 PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH,
 PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH, PAIR_X_BRANCH | CYCLES_BRANCH,

/*90*/ PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1),
 PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1),
 PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1),
 PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1),

/*a0*/ PAIR_XY | CYCLES(3), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(12), PAIR_XY | CYCLES(5),
 PAIR_XY | CYCLES(4), PAIR_XY | CYCLES(5), INVALID, INVALID,
 PAIR_XY | CYCLES(3), PAIR_XY | CYCLES(1), INVALID, PAIR_XY | CYCLES(5),
 PAIR_NP | CYCLES(4), PAIR_NP | CYCLES(5), INVALID, PAIR_NP | CYCLES(10),

/*b0*/ PAIR_NP | CYCLES(11), PAIR_NP | CYCLES(11), PAIR_NP | CYCLES(4), PAIR_XY | CYCLES(5),
 PAIR_NP | CYCLES(4), PAIR_NP | CYCLES(4), PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1),
 INVALID, INVALID, PAIR_XY | CYCLES(3), PAIR_XY | CYCLES(5),
 PAIR_XY | CYCLES(3), PAIR_XY | CYCLES(3), PAIR_XY | CYCLES(1), INVALID,

/*c0*/ PAIR_NP | CYCLES(4), PAIR_NP | CYCLES(4), INVALID, INVALID,
 INVALID, INVALID, INVALID, INVALID,
 PAIR_NP | CYCLES(1), PAIR_NP | CYCLES(1), PAIR_NP | CYCLES(1), PAIR_NP | CYCLES(1),
 PAIR_NP | CYCLES(1), PAIR_NP | CYCLES(1), PAIR_NP | CYCLES(1), PAIR_NP | CYCLES(1),

/*d0*/ INVALID, PAIR_X | CYCLES_REG, PAIR_X | CYCLES_REG, PAIR_X | CYCLES_REG,
 INVALID, PAIR_X | CYCLES_REG, INVALID, INVALID,
 PAIR_X | CYCLES_REG, PAIR_X | CYCLES_REG, INVALID, PAIR_X | CYCLES_REG,
 PAIR_X | CYCLES_REG, PAIR_X | CYCLES_REG, INVALID, PAIR_X | CYCLES_REG,

/*e0*/ INVALID, PAIR_X | CYCLES_REG, PAIR_X | CYCLES_REG, INVALID,
 INVALID, PAIR_X | CYCLES_REG, INVALID, INVALID,
 PAIR_X | CYCLES_REG, PAIR_X | CYCLES_REG, INVALID, PAIR_X | CYCLES_REG,
 PAIR_X | CYCLES_REG, PAIR_X | CYCLES_REG, INVALID, PAIR_X | CYCLES_REG,

/*f0*/ INVALID, PAIR_X | CYCLES_REG, PAIR_X | CYCLES_REG, PAIR_X | CYCLES_REG,
 INVALID, PAIR_X | CYCLES_REG, INVALID, INVALID,
 PAIR_X | CYCLES_REG, PAIR_X | CYCLES_REG, PAIR_X | CYCLES_REG, INVALID,
 PAIR_X | CYCLES_REG, PAIR_X | CYCLES_REG, PAIR_X | CYCLES_REG, INVALID,
    // clang-format on
};
/* Shift/rotate group timing tables, indexed by the ModRM reg field:
   ROL, ROR, RCL, RCR, SHL, SHR, SAL, SAR.
   Selected by the dispatcher below: C0/C1 -> _imm, D0/D1 -> plain,
   D2/D3 -> _cl; the _mod3 variants are used for register operands. */

/* D0/D1 (shift by 1), memory operand. */
static uint32_t opcode_timings_686_shift[8] = {
    // clang-format off
    PAIR_XY | CYCLES_RMW, PAIR_XY | CYCLES_RMW, PAIR_XY | CYCLES(3), PAIR_XY | CYCLES(4),
    PAIR_XY | CYCLES_RMW, PAIR_XY | CYCLES_RMW, PAIR_XY | CYCLES_RMW, PAIR_XY | CYCLES_RMW,
    // clang-format on
};
/* D0/D1 (shift by 1), register operand. */
static uint32_t opcode_timings_686_shift_mod3[8] = {
    // clang-format off
    PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES(3), PAIR_XY | CYCLES(4),
    PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,
    // clang-format on
};
/* C0/C1 (shift by imm8), memory operand. */
static uint32_t opcode_timings_686_shift_imm[8] = {
    // clang-format off
    PAIR_XY | CYCLES_RMW, PAIR_XY | CYCLES_RMW, PAIR_XY | CYCLES(8), PAIR_XY | CYCLES(9),
    PAIR_XY | CYCLES_RMW, PAIR_XY | CYCLES_RMW, PAIR_XY | CYCLES_RMW, PAIR_XY | CYCLES_RMW,
    // clang-format on
};
/* C0/C1 (shift by imm8), register operand. */
static uint32_t opcode_timings_686_shift_imm_mod3[8] = {
    // clang-format off
    PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES(3), PAIR_XY | CYCLES(4),
    PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,
    // clang-format on
};
/* D2/D3 (shift by CL), memory operand. */
static uint32_t opcode_timings_686_shift_cl[8] = {
    // clang-format off
    PAIR_XY | CYCLES(2), PAIR_XY | CYCLES(2), PAIR_XY | CYCLES(8), PAIR_XY | CYCLES(9),
    PAIR_XY | CYCLES(2), PAIR_XY | CYCLES(2), PAIR_XY | CYCLES(2), PAIR_XY | CYCLES(2),
    // clang-format on
};
/* D2/D3 (shift by CL), register operand. */
static uint32_t opcode_timings_686_shift_cl_mod3[8] = {
    // clang-format off
    PAIR_XY | CYCLES(2), PAIR_XY | CYCLES(2), PAIR_XY | CYCLES(8), PAIR_XY | CYCLES(9),
    PAIR_XY | CYCLES(2), PAIR_XY | CYCLES(2), PAIR_XY | CYCLES(2), PAIR_XY | CYCLES(2),
    // clang-format on
};
/* Group tables for opcodes F6 (byte), F7 (word/dword) and FF,
   indexed by the ModRM reg field. */

/* F6 group, memory operand. */
static uint32_t opcode_timings_686_f6[8] = {
    // clang-format off
    /*      TST                    NOT        NEG*/
    PAIR_XY | CYCLES_RM, INVALID, PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1),
    /*      MUL                 IMUL                 DIV                  IDIV*/
    PAIR_NP | CYCLES(4), PAIR_NP | CYCLES(4), PAIR_NP | CYCLES(18), PAIR_NP | CYCLES(18)
    // clang-format on
};
/* F6 group, register operand. */
static uint32_t opcode_timings_686_f6_mod3[8] = {
    // clang-format off
    /*      TST                     NOT        NEG*/
    PAIR_XY | CYCLES_REG, INVALID, PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1),
    /*      MUL                 IMUL                 DIV                  IDIV*/
    PAIR_NP | CYCLES(4), PAIR_NP | CYCLES(4), PAIR_NP | CYCLES(18), PAIR_NP | CYCLES(18)
    // clang-format on
};
/* F7 group, memory operand. CYCLES_MULTI carries separate 16-/32-bit counts. */
static uint32_t opcode_timings_686_f7[8] = {
    // clang-format off
    /*      TST                     NOT        NEG*/
    PAIR_XY | CYCLES_REG, INVALID, PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1),
    /*      MUL                          IMUL                          DIV                            IDIV*/
    PAIR_NP | CYCLES_MULTI(4,10), PAIR_NP | CYCLES_MULTI(4,10), PAIR_NP | CYCLES_MULTI(19,27), PAIR_NP | CYCLES_MULTI(22,30)
    // clang-format on
};
/* F7 group, register operand. */
static uint32_t opcode_timings_686_f7_mod3[8] = {
    // clang-format off
    /*      TST                     NOT        NEG*/
    PAIR_XY | CYCLES_REG, INVALID, PAIR_XY | CYCLES(1), PAIR_XY | CYCLES(1),
    /*      MUL                          IMUL                          DIV                            IDIV*/
    PAIR_NP | CYCLES_MULTI(4,10), PAIR_NP | CYCLES_MULTI(4,10), PAIR_NP | CYCLES_MULTI(19,27), PAIR_NP | CYCLES_MULTI(22,30)
    // clang-format on
};
/* FF group, memory operand. */
static uint32_t opcode_timings_686_ff[8] = {
    // clang-format off
    /*      INC                    DEC                    CALL                         CALL far*/
    PAIR_XY | CYCLES_RMW, PAIR_XY | CYCLES_RMW, PAIR_X_BRANCH | CYCLES(3), PAIR_NP | CYCLES(5),
    /*      JMP                          JMP far              PUSH*/
    PAIR_X_BRANCH | CYCLES(3), PAIR_NP | CYCLES(5), PAIR_XY | CYCLES(1), INVALID
    // clang-format on
};
/* FF group, register operand. */
static uint32_t opcode_timings_686_ff_mod3[8] = {
    // clang-format off
    /*      INC                    DEC                    CALL                         CALL far*/
    PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_X_BRANCH | CYCLES(1), PAIR_XY | CYCLES(5),
    /*      JMP                          JMP far              PUSH*/
    PAIR_X_BRANCH | CYCLES(1), PAIR_XY | CYCLES(5), PAIR_XY | CYCLES(2), INVALID
    // clang-format on
};
/* FPU escape D8 (single-precision arithmetic), memory operand;
   indexed by the ModRM reg field. All x87 ops go down the X pipe only. */
static uint32_t opcode_timings_686_d8[8] = {
    // clang-format off
    /*      FADDs               FMULs                FCOMs                FCOMPs*/
    PAIR_X | CYCLES(7), PAIR_X | CYCLES(6), PAIR_X | CYCLES(4), PAIR_X | CYCLES(4),
    /*      FSUBs               FSUBRs               FDIVs                FDIVRs*/
    PAIR_X | CYCLES(7), PAIR_X | CYCLES(7), PAIR_X | CYCLES(34), PAIR_X | CYCLES(34)
    // clang-format on
};
/* FPU escape D8, register operand (ST(i) forms). */
static uint32_t opcode_timings_686_d8_mod3[8] = {
    // clang-format off
    /*      FADD                FMUL                 FCOM                 FCOMP*/
    PAIR_X | CYCLES(7), PAIR_X | CYCLES(6), PAIR_X | CYCLES(4), PAIR_X | CYCLES(4),
    /*      FSUB                FSUBR                FDIV                 FDIVR*/
    PAIR_X | CYCLES(7), PAIR_X | CYCLES(7), PAIR_X | CYCLES(34), PAIR_X | CYCLES(34)
    // clang-format on
};
/* FPU escape D9 (load/store/control), memory operand. */
static uint32_t opcode_timings_686_d9[8] = {
    // clang-format off
    /*      FLDs                           FSTs                 FSTPs*/
    PAIR_X | CYCLES(2), INVALID, PAIR_X | CYCLES(2), PAIR_X | CYCLES(2),
    /*      FLDENV               FLDCW                FSTENV               FSTCW*/
    PAIR_X | CYCLES(30), PAIR_X | CYCLES(4), PAIR_X | CYCLES(24), PAIR_X | CYCLES(5)
    // clang-format on
};
/* FPU escape D9, register forms: indexed by the full low 6 bits of the
   ModRM byte (8 rows of 8), covering stack ops, constants and
   transcendentals. */
static uint32_t opcode_timings_686_d9_mod3[64] = {
    // clang-format off
    /*FLD*/
    PAIR_X | CYCLES(2), PAIR_X | CYCLES(2), PAIR_X | CYCLES(2), PAIR_X | CYCLES(2),
    PAIR_X | CYCLES(2), PAIR_X | CYCLES(2), PAIR_X | CYCLES(2), PAIR_X | CYCLES(2),
    /*FXCH*/
    PAIR_X | CYCLES(3), PAIR_X | CYCLES(3), PAIR_X | CYCLES(3), PAIR_X | CYCLES(3),
    PAIR_X | CYCLES(3), PAIR_X | CYCLES(3), PAIR_X | CYCLES(3), PAIR_X | CYCLES(3),
    /*FNOP*/
    PAIR_X | CYCLES(2), INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
    /*FSTP*/
    PAIR_X | CYCLES(2), PAIR_X | CYCLES(2), PAIR_X | CYCLES(2), PAIR_X | CYCLES(2),
    PAIR_X | CYCLES(2), PAIR_X | CYCLES(2), PAIR_X | CYCLES(2), PAIR_X | CYCLES(2),
    /*      opFCHS              opFABS*/
    PAIR_X | CYCLES(2), PAIR_X | CYCLES(2), INVALID, INVALID,
    /*      opFTST              opFXAM (oddly low) */
    PAIR_X | CYCLES(4), PAIR_X | CYCLES(4), INVALID, INVALID,
    /*      opFLD1              opFLDL2T             opFLDL2E            opFLDPI*/
    PAIR_X | CYCLES(4), PAIR_X | CYCLES(4), PAIR_X | CYCLES(4), PAIR_X | CYCLES(4),
    /*      opFLDEG2            opFLDLN2             opFLDZ*/
    PAIR_X | CYCLES(4), PAIR_X | CYCLES(4), PAIR_X | CYCLES(4), INVALID,
    /*      opF2XM1              opFYL2X               opFPTAN               opFPATAN*/
    PAIR_X | CYCLES(92), PAIR_X | CYCLES(170), PAIR_X | CYCLES(129), PAIR_X | CYCLES(161),
    /*                          opFDECSTP           opFINCSTP,*/
    INVALID, INVALID, PAIR_X | CYCLES(4), PAIR_X | CYCLES(2),
    /*      opFPREM                        opFSQRT              opFSINCOS*/
    PAIR_X | CYCLES(91), INVALID, PAIR_X | CYCLES(60), PAIR_X | CYCLES(161),
    /*      opFRNDINT            opFSCALE             opFSIN                opFCOS*/
    PAIR_X | CYCLES(20), PAIR_X | CYCLES(14), PAIR_X | CYCLES(140), PAIR_X | CYCLES(141)
    // clang-format on
};
/* FPU escape DA (dword-integer arithmetic), memory operand. */
static uint32_t opcode_timings_686_da[8] = {
    // clang-format off
    /*      FIADDl               FIMULl                FICOMl                FICOMPl*/
    PAIR_X | CYCLES(12), PAIR_X | CYCLES(11), PAIR_X | CYCLES(10), PAIR_X | CYCLES(10),
    /*      FISUBl               FISUBRl               FIDIVl                FIDIVRl*/
    PAIR_X | CYCLES(29), PAIR_X | CYCLES(27), PAIR_X | CYCLES(38), PAIR_X | CYCLES(48)
    // clang-format on
};
/* FPU escape DA, register forms (FCMOVcc / FUCOMPP group). */
static uint32_t opcode_timings_686_da_mod3[8] = {
    // clang-format off
    PAIR_X | CYCLES(4), PAIR_X | CYCLES(4), PAIR_X | CYCLES(4), PAIR_X | CYCLES(4),
    INVALID, PAIR_X | CYCLES(5), INVALID, INVALID
    // clang-format on
};
/* FPU escape DB (int load/store, extended real), memory operand. */
static uint32_t opcode_timings_686_db[8] = {
    // clang-format off
    /*      FLDil                          FSTil                FSTPil*/
    PAIR_X | CYCLES(2), INVALID, PAIR_X | CYCLES(2), PAIR_X | CYCLES(2),
    /*               FLDe                          FSTPe*/
    INVALID, PAIR_X | CYCLES(2), INVALID, PAIR_X | CYCLES(2)
    // clang-format on
};
/* FPU escape DB, register forms: indexed by the low 6 bits of ModRM. */
static uint32_t opcode_timings_686_db_mod3[64] = {
    // clang-format off
    PAIR_X | CYCLES(4), PAIR_X | CYCLES(4), PAIR_X | CYCLES(4), PAIR_X | CYCLES(4),
    PAIR_X | CYCLES(4), PAIR_X | CYCLES(4), PAIR_X | CYCLES(4), PAIR_X | CYCLES(4),
    PAIR_X | CYCLES(4), PAIR_X | CYCLES(4), PAIR_X | CYCLES(4), PAIR_X | CYCLES(4),
    PAIR_X | CYCLES(4), PAIR_X | CYCLES(4), PAIR_X | CYCLES(4), PAIR_X | CYCLES(4),
    PAIR_X | CYCLES(4), PAIR_X | CYCLES(4), PAIR_X | CYCLES(4), PAIR_X | CYCLES(4),
    PAIR_X | CYCLES(4), PAIR_X | CYCLES(4), PAIR_X | CYCLES(4), PAIR_X | CYCLES(4),
    PAIR_X | CYCLES(4), PAIR_X | CYCLES(4), PAIR_X | CYCLES(4), PAIR_X | CYCLES(4),
    PAIR_X | CYCLES(4), PAIR_X | CYCLES(4), PAIR_X | CYCLES(4), PAIR_X | CYCLES(4),
    /*               opFNOP              opFCLEX             opFINIT*/
    INVALID, PAIR_X | CYCLES(2), PAIR_X | CYCLES(5), PAIR_X | CYCLES(8),
    /*      opFNOP              opFNOP*/
    PAIR_X | CYCLES(2), PAIR_X | CYCLES(2), INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
    INVALID, INVALID, INVALID, INVALID,
    // clang-format on
};
/* FPU escape DC (double-precision arithmetic), memory operand. */
static uint32_t opcode_timings_686_dc[8] = {
    // clang-format off
    /*      FADDd               FMULd                FCOMd                FCOMPd*/
    PAIR_X | CYCLES(7), PAIR_X | CYCLES(7), PAIR_X | CYCLES(7), PAIR_X | CYCLES(7),
    /*      FSUBd               FSUBRd               FDIVd                FDIVRd*/
    PAIR_X | CYCLES(7), PAIR_X | CYCLES(7), PAIR_X | CYCLES(34), PAIR_X | CYCLES(34)
    // clang-format on
};
/* FPU escape DC, register forms (ST(i), ST(0) operands). */
static uint32_t opcode_timings_686_dc_mod3[8] = {
    // clang-format off
    /*      opFADDr             opFMULr*/
    PAIR_X | CYCLES(7), PAIR_X | CYCLES(7), INVALID, INVALID,
    /*      opFSUBRr            opFSUBr              opFDIVRr             opFDIVr*/
    PAIR_X | CYCLES(7), PAIR_X | CYCLES(7), PAIR_X | CYCLES(34), PAIR_X | CYCLES(34)
    // clang-format on
};
/* FPU escape DD (double load/store, state save/restore), memory operand. */
static uint32_t opcode_timings_686_dd[8] = {
    // clang-format off
    /*      FLDd                           FSTd                 FSTPd*/
    PAIR_X | CYCLES(2), INVALID, PAIR_X | CYCLES(2), PAIR_X | CYCLES(2),
    /*      FRSTOR                         FSAVE                FSTSW*/
    PAIR_X | CYCLES(72), INVALID, PAIR_X | CYCLES(67), PAIR_X | CYCLES(2)
    // clang-format on
};
/* FPU escape DD, register forms. */
static uint32_t opcode_timings_686_dd_mod3[8] = {
    // clang-format off
    /*      FFFREE                         FST                  FSTP*/
    PAIR_X | CYCLES(3), INVALID, PAIR_X | CYCLES(2), PAIR_X | CYCLES(2),
    /*      FUCOM               FUCOMP*/
    PAIR_X | CYCLES(4), PAIR_X | CYCLES(4), INVALID, INVALID
    // clang-format on
};
/* FPU escape DE (word-integer arithmetic), memory operand. */
static uint32_t opcode_timings_686_de[8] = {
    // clang-format off
    /*      FIADDw               FIMULw                FICOMw                FICOMPw*/
    PAIR_X | CYCLES(12), PAIR_X | CYCLES(11), PAIR_X | CYCLES(10), PAIR_X | CYCLES(10),
    /*      FISUBw               FISUBRw               FIDIVw                FIDIVRw*/
    PAIR_X | CYCLES(27), PAIR_X | CYCLES(27), PAIR_X | CYCLES(38), PAIR_X | CYCLES(38)
    // clang-format on
};
/* FPU escape DE, register forms (arithmetic with pop). */
static uint32_t opcode_timings_686_de_mod3[8] = {
    // clang-format off
    /*      FADD                FMUL                          FCOMPP*/
    PAIR_X | CYCLES(7), PAIR_X | CYCLES(7), INVALID, PAIR_X | CYCLES(7),
    /*      FSUB                FSUBR                FDIV                  FDIVR*/
    PAIR_X | CYCLES(7), PAIR_X | CYCLES(7), PAIR_X | CYCLES(34), PAIR_X | CYCLES(34)
    // clang-format on
};
/* FPU escape DF (word/qword integer, BCD), memory operand. */
static uint32_t opcode_timings_686_df[8] = {
    // clang-format off
    /*      FILDiw                         FISTiw                FISTPiw*/
    PAIR_X | CYCLES(8), INVALID, PAIR_X | CYCLES(10), PAIR_X | CYCLES(13),
    /*               FILDiq               FBSTP                 FISTPiq*/
    INVALID, PAIR_X | CYCLES(8), PAIR_X | CYCLES(63), PAIR_X | CYCLES(13)
    // clang-format on
};
/* FPU escape DF, register forms (only FSTSW AX is valid here). */
static uint32_t opcode_timings_686_df_mod3[8] = {
    // clang-format off
    INVALID, INVALID, INVALID, INVALID,
    /*      FSTSW AX*/
    PAIR_X | CYCLES(6), INVALID, INVALID, INVALID
    // clang-format on
};
/* ALU immediate group tables (80/82/83 use _8x, 81 uses _81), indexed by
   the ModRM reg field: ADD, OR, ADC, SBB, AND, SUB, XOR, CMP.
   CMP (index 7) is read-only, hence CYCLES_RM instead of CYCLES_RMW. */
static uint32_t opcode_timings_686_8x[8] = {
    // clang-format off
    PAIR_XY | CYCLES_RMW, PAIR_XY | CYCLES_RMW, PAIR_XY | CYCLES_RMW, PAIR_XY | CYCLES_RMW,
    PAIR_XY | CYCLES_RMW, PAIR_XY | CYCLES_RMW, PAIR_XY | CYCLES_RMW, PAIR_XY | CYCLES_RM
    // clang-format on
};
static uint32_t opcode_timings_686_8x_mod3[8] = {
    // clang-format off
    PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,
    PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG
    // clang-format on
};
static uint32_t opcode_timings_686_81[8] = {
    // clang-format off
    PAIR_XY | CYCLES_RMW, PAIR_XY | CYCLES_RMW, PAIR_XY | CYCLES_RMW, PAIR_XY | CYCLES_RMW,
    PAIR_XY | CYCLES_RMW, PAIR_XY | CYCLES_RMW, PAIR_XY | CYCLES_RMW, PAIR_XY | CYCLES_RM
    // clang-format on
};
static uint32_t opcode_timings_686_81_mod3[8] = {
    // clang-format off
    PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG,
    PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG, PAIR_XY | CYCLES_REG
    // clang-format on
};
/* Extra decoder cycles owed by pending prefixes / long-running pairs. */
static int     decode_delay;
/* Last prefix byte seen (0x0f or an FPU escape D8-DF), 0 if none. */
static uint8_t last_prefix;
/* Extract the cycle count from a timing-table entry.
   CYCLES_HAS_MULTI entries pack two counts: 16-bit in the low byte,
   32-bit in bits 8-15; op_32 bit 8 selects which one applies. */
static inline int
COUNT(uint32_t c, int op_32)
{
    if (c & CYCLES_HAS_MULTI)
        return (op_32 & 0x100) ? (int) (((uintptr_t) c >> 8) & 0xff)
                               : (int) ((uintptr_t) c & 0xff);

    /* Non-pairing entries carry a plain 16-bit count; pairing entries
       keep the count in the CYCLES_MASK field. */
    return (c & PAIR_MASK) ? (int) (c & CYCLES_MASK) : (int) (c & 0xffff);
}
/* Reset the pairing state at the start of a translated block. */
void
codegen_timing_686_block_start(void)
{
    decode_delay = 0;
    prev_full    = 0;

    last_regmask_modified = 0;
    regmask_modified      = 0;
}
/* Reset per-instruction decode state (called before each instruction). */
void
codegen_timing_686_start(void)
{
    last_prefix  = 0;
    decode_delay = 0;
}
/* Account one prefix byte. FPU escapes (D8-DF) and a 0F in front of a
   conditional jump (0F 8x) decode for free; any other prefix adds a
   decode-delay cycle beyond the one free prefix per instruction (the
   6x86 decodes one prefix per instruction per clock with no penalty). */
void
codegen_timing_686_prefix(uint8_t prefix, uint32_t fetchdat)
{
    int prefix_is_free = ((prefix & 0xf8) == 0xd8)
        || ((prefix == 0x0f) && ((fetchdat & 0xf0) == 0x80));

    if (!prefix_is_free)
        decode_delay++;

    last_prefix = prefix;
}
/* Address-generation-interlock check: returns the stall (in cycles)
   incurred when this instruction's addressing registers were written by
   the previous pair (2) or the one before it (1), else 0.
   Side effect: a 2-cycle stall clears regmask_modified. */
static int
check_agi(uint64_t *deps, uint8_t opcode, uint32_t fetchdat, int op_32)
{
    uint32_t mask = get_addr_regmask(deps[opcode], fetchdat, op_32);

    if (mask & IMPL_ESP)
        mask |= (1 << REG_ESP);

    if (regmask_modified & mask) {
        regmask_modified = 0;
        return 2;
    }

    return (last_regmask_modified & mask) ? 1 : 0;
}
/* Charge one decoded instruction against the 6x86 dual-pipe timing model.
   First selects the timing and dependency tables implied by the last
   prefix (0F, or an FPU escape D8-DF) or by a group opcode (80-83,
   C0/C1, D0-D3, F6/F7, FF) - for group opcodes the table index becomes
   the ModRM reg field. Then either pairs this instruction with a pending
   one (prev_full) or charges its cycles directly, tracking register
   write masks for AGI stall detection. */
void
codegen_timing_686_opcode(uint8_t opcode, uint32_t fetchdat, int op_32, UNUSED(uint32_t op_pc))
{
    uint32_t *timings;
    uint64_t *deps;
    int       mod3 = ((fetchdat & 0xc0) == 0xc0); /* register-direct ModRM form */
    int       bit8 = !(opcode & 1);               /* even opcodes are the 8-bit forms */

    switch (last_prefix) {
        case 0x0f:
            timings = mod3 ? opcode_timings_686_0f_mod3 : opcode_timings_686_0f;
            deps    = mod3 ? opcode_deps_0f_mod3 : opcode_deps_0f;
            break;

        /* FPU escapes: re-index by the ModRM reg field (or the low 6 bits
           of ModRM for the 64-entry D9/DB register tables). */
        case 0xd8:
            timings = mod3 ? opcode_timings_686_d8_mod3 : opcode_timings_686_d8;
            deps    = mod3 ? opcode_deps_d8_mod3 : opcode_deps_d8;
            opcode  = (opcode >> 3) & 7;
            break;
        case 0xd9:
            timings = mod3 ? opcode_timings_686_d9_mod3 : opcode_timings_686_d9;
            deps    = mod3 ? opcode_deps_d9_mod3 : opcode_deps_d9;
            opcode  = mod3 ? opcode & 0x3f : (opcode >> 3) & 7;
            break;
        case 0xda:
            timings = mod3 ? opcode_timings_686_da_mod3 : opcode_timings_686_da;
            deps    = mod3 ? opcode_deps_da_mod3 : opcode_deps_da;
            opcode  = (opcode >> 3) & 7;
            break;
        case 0xdb:
            timings = mod3 ? opcode_timings_686_db_mod3 : opcode_timings_686_db;
            deps    = mod3 ? opcode_deps_db_mod3 : opcode_deps_db;
            opcode  = mod3 ? opcode & 0x3f : (opcode >> 3) & 7;
            break;
        case 0xdc:
            timings = mod3 ? opcode_timings_686_dc_mod3 : opcode_timings_686_dc;
            deps    = mod3 ? opcode_deps_dc_mod3 : opcode_deps_dc;
            opcode  = (opcode >> 3) & 7;
            break;
        case 0xdd:
            timings = mod3 ? opcode_timings_686_dd_mod3 : opcode_timings_686_dd;
            deps    = mod3 ? opcode_deps_dd_mod3 : opcode_deps_dd;
            opcode  = (opcode >> 3) & 7;
            break;
        case 0xde:
            timings = mod3 ? opcode_timings_686_de_mod3 : opcode_timings_686_de;
            deps    = mod3 ? opcode_deps_de_mod3 : opcode_deps_de;
            opcode  = (opcode >> 3) & 7;
            break;
        case 0xdf:
            timings = mod3 ? opcode_timings_686_df_mod3 : opcode_timings_686_df;
            deps    = mod3 ? opcode_deps_df_mod3 : opcode_deps_df;
            opcode  = (opcode >> 3) & 7;
            break;

        default:
            /* Group opcodes: index the group table by the ModRM reg field. */
            switch (opcode) {
                case 0x80:
                case 0x82:
                case 0x83:
                    timings = mod3 ? opcode_timings_686_8x_mod3 : opcode_timings_686_8x;
                    deps    = mod3 ? opcode_deps_8x_mod3 : opcode_deps_8x;
                    opcode  = (fetchdat >> 3) & 7;
                    break;
                case 0x81:
                    timings = mod3 ? opcode_timings_686_81_mod3 : opcode_timings_686_81;
                    deps    = mod3 ? opcode_deps_81_mod3 : opcode_deps_81;
                    opcode  = (fetchdat >> 3) & 7;
                    break;

                case 0xc0:
                case 0xc1:
                    timings = mod3 ? opcode_timings_686_shift_imm_mod3 : opcode_timings_686_shift_imm;
                    deps    = mod3 ? opcode_deps_shift_mod3 : opcode_deps_shift;
                    opcode  = (fetchdat >> 3) & 7;
                    break;

                case 0xd0:
                case 0xd1:
                    timings = mod3 ? opcode_timings_686_shift_mod3 : opcode_timings_686_shift;
                    deps    = mod3 ? opcode_deps_shift_mod3 : opcode_deps_shift;
                    opcode  = (fetchdat >> 3) & 7;
                    break;

                case 0xd2:
                case 0xd3:
                    timings = mod3 ? opcode_timings_686_shift_cl_mod3 : opcode_timings_686_shift_cl;
                    deps    = mod3 ? opcode_deps_shift_cl_mod3 : opcode_deps_shift_cl;
                    opcode  = (fetchdat >> 3) & 7;
                    break;

                case 0xf6:
                    timings = mod3 ? opcode_timings_686_f6_mod3 : opcode_timings_686_f6;
                    deps    = mod3 ? opcode_deps_f6_mod3 : opcode_deps_f6;
                    opcode  = (fetchdat >> 3) & 7;
                    break;
                case 0xf7:
                    timings = mod3 ? opcode_timings_686_f7_mod3 : opcode_timings_686_f7;
                    deps    = mod3 ? opcode_deps_f7_mod3 : opcode_deps_f7;
                    opcode  = (fetchdat >> 3) & 7;
                    break;
                case 0xff:
                    timings = mod3 ? opcode_timings_686_ff_mod3 : opcode_timings_686_ff;
                    deps    = mod3 ? opcode_deps_ff_mod3 : opcode_deps_ff;
                    opcode  = (fetchdat >> 3) & 7;
                    break;

                default:
                    timings = mod3 ? opcode_timings_686_mod3 : opcode_timings_686;
                    deps    = mod3 ? opcode_deps_mod3 : opcode_deps;
                    break;
            }
    }

    /*One prefix per instruction is free*/
    decode_delay--;
    if (decode_delay < 0)
        decode_delay = 0;

    if (prev_full) {
        /* Try to issue this instruction as the second of a pair. */
        uint32_t regmask   = get_srcdep_mask(deps[opcode], fetchdat, bit8, op_32);
        int      agi_stall = 0;

        if (regmask & IMPL_ESP)
            regmask |= SRCDEP_ESP | DSTDEP_ESP;

        agi_stall = check_agi(prev_deps, prev_opcode, prev_fetchdat, prev_op_32);

        /*Second instruction in the pair*/
        if ((timings[opcode] & PAIR_MASK) == PAIR_NP) {
            /*Instruction can not pair with previous*/
            /*Run previous now*/
            codegen_block_cycles += COUNT(prev_timings[prev_opcode], prev_op_32) + decode_delay + agi_stall;
            decode_delay          = (-COUNT(prev_timings[prev_opcode], prev_op_32)) + 1 + agi_stall;
            prev_full             = 0;
            last_regmask_modified = regmask_modified;
            regmask_modified      = prev_regmask;
        } else if (((timings[opcode] & PAIR_MASK) == PAIR_X || (timings[opcode] & PAIR_MASK) == PAIR_X_BRANCH)
                   /* NOTE(review): indexes prev_timings with the *current*
                      opcode; prev_opcode looks intended here - confirm
                      against upstream before changing. */
                   && (prev_timings[opcode] & PAIR_MASK) == PAIR_X) {
            /*Instruction can not pair with previous*/
            /*Run previous now*/
            codegen_block_cycles += COUNT(prev_timings[prev_opcode], prev_op_32) + decode_delay + agi_stall;
            decode_delay          = (-COUNT(prev_timings[prev_opcode], prev_op_32)) + 1 + agi_stall;
            prev_full             = 0;
            last_regmask_modified = regmask_modified;
            regmask_modified      = prev_regmask;
        } else if (prev_regmask & regmask) {
            /*Instruction can not pair with previous*/
            /*Run previous now*/
            codegen_block_cycles += COUNT(prev_timings[prev_opcode], prev_op_32) + decode_delay + agi_stall;
            decode_delay          = (-COUNT(prev_timings[prev_opcode], prev_op_32)) + 1 + agi_stall;
            prev_full             = 0;
            last_regmask_modified = regmask_modified;
            regmask_modified      = prev_regmask;
        } else {
            /* Pair succeeds: both instructions issue together and cost
               the longer of the two counts. */
            int t1     = COUNT(prev_timings[prev_opcode], prev_op_32);
            int t2     = COUNT(timings[opcode], op_32);
            int t_pair = (t1 > t2) ? t1 : t2;

            if (!t_pair)
                fatal("Pairable 0 cycles! %02x %02x\n", opcode, prev_opcode);

            agi_stall = check_agi(deps, opcode, fetchdat, op_32);

            codegen_block_cycles += t_pair + agi_stall;
            decode_delay = (-t_pair) + 1 + agi_stall;

            last_regmask_modified = regmask_modified;
            regmask_modified      = get_dstdep_mask(deps[opcode], fetchdat, bit8) | prev_regmask;
            prev_full             = 0;
            return;
        }
    }

    if (!prev_full) {
        /*First instruction in the pair*/
        if ((timings[opcode] & PAIR_MASK) == PAIR_NP || (timings[opcode] & PAIR_MASK) == PAIR_X_BRANCH) {
            /*Instruction not pairable*/
            int agi_stall = 0;

            agi_stall = check_agi(deps, opcode, fetchdat, op_32);

            codegen_block_cycles += COUNT(timings[opcode], op_32) + decode_delay + agi_stall;
            decode_delay          = (-COUNT(timings[opcode], op_32)) + 1 + agi_stall;
            last_regmask_modified = regmask_modified;
            regmask_modified      = get_dstdep_mask(deps[opcode], fetchdat, bit8);
        } else {
            /*Instruction might pair with next*/
            prev_full    = 1;
            prev_opcode  = opcode;
            prev_timings = timings;
            prev_op_32   = op_32;
            prev_regmask = get_dstdep_mask(deps[opcode], fetchdat, bit8);
            if (prev_regmask & IMPL_ESP)
                prev_regmask |= SRCDEP_ESP | DSTDEP_ESP;
            prev_deps     = deps;
            prev_fetchdat = fetchdat;
            return;
        }
    }
}
/* Flush any instruction still waiting for a pairing partner at the end
   of a translated block. */
void
codegen_timing_686_block_end(void)
{
    if (!prev_full)
        return;

    codegen_block_cycles += COUNT(prev_timings[prev_opcode], prev_op_32) + decode_delay;
    prev_full = 0;
}
/* Dispatch table exposing the Cyrix 6x86 timing model to the recompiler. */
codegen_timing_t codegen_timing_686 = {
    codegen_timing_686_start,       /* per-instruction reset   */
    codegen_timing_686_prefix,      /* prefix-byte accounting  */
    codegen_timing_686_opcode,      /* main opcode accounting  */
    codegen_timing_686_block_start, /* block-entry reset       */
    codegen_timing_686_block_end,   /* flush pending pair      */
    NULL
};
``` | /content/code_sandbox/src/cpu/codegen_timing_686.c | c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 19,606 |
```objective-c
/* With the new dynarec the old-PC bookkeeping is not needed; otherwise
   save the current PC in oxpc before the far control transfer. */
#ifdef USE_NEW_DYNAREC
#    define CPU_SET_OXPC
#else
#    define CPU_SET_OXPC oxpc = cpu_state.pc;
#endif

/* Far return, 16-bit operand size. In protected non-V86 mode the whole
   operation is delegated to op_pmoderetf(); in real/V86 mode it pops
   IP then CS (word each) from the 32- or 16-bit stack pointer, only
   adjusting (E)SP once the loads and the CS reload have not aborted.
   stack_offset is the extra imm16 of RETF imm. */
#define RETF_a16(stack_offset)                             \
    if ((msw & 1) && !(cpu_state.eflags & VM_FLAG)) {      \
        op_pmoderetf(0, stack_offset);                     \
        return 1;                                          \
    }                                                      \
    CPU_SET_OXPC                                           \
    if (stack32) {                                         \
        cpu_state.pc = readmemw(ss, ESP);                  \
        op_loadcs(readmemw(ss, ESP + 2));                  \
    } else {                                               \
        cpu_state.pc = readmemw(ss, SP);                   \
        op_loadcs(readmemw(ss, SP + 2));                   \
    }                                                      \
    if (cpu_state.abrt)                                    \
        return 1;                                          \
    if (stack32)                                           \
        ESP += 4 + stack_offset;                           \
    else                                                   \
        SP += 4 + stack_offset;                            \
    cycles -= timing_retf_rm;

/* Far return, 32-bit operand size: pops EIP (dword) then CS (dword,
   upper 16 bits discarded); (E)SP advances by 8 plus the immediate. */
#define RETF_a32(stack_offset)                             \
    if ((msw & 1) && !(cpu_state.eflags & VM_FLAG)) {      \
        op_pmoderetf(1, stack_offset);                     \
        return 1;                                          \
    }                                                      \
    CPU_SET_OXPC                                           \
    if (stack32) {                                         \
        cpu_state.pc = readmeml(ss, ESP);                  \
        op_loadcs(readmeml(ss, ESP + 4) & 0xffff);         \
    } else {                                               \
        cpu_state.pc = readmeml(ss, SP);                   \
        op_loadcs(readmeml(ss, SP + 4) & 0xffff);          \
    }                                                      \
    if (cpu_state.abrt)                                    \
        return 1;                                          \
    if (stack32)                                           \
        ESP += 8 + stack_offset;                           \
    else                                                   \
        SP += 8 + stack_offset;                            \
    cycles -= timing_retf_rm;
/* RETF (CB), 16-bit operand size. cycles_old is captured for the
   PREFETCH_RUN accounting; UN_USED presumably silences the unused
   warning when prefetch accounting compiles out - TODO confirm. */
static int
opRETF_a16(uint32_t fetchdat)
{
    int cycles_old = cycles;
    UN_USED(cycles_old);

    CPU_BLOCK_END();
    RETF_a16(0);
    PREFETCH_RUN(cycles_old - cycles, 1, -1, 2, 0, 0, 0, 0);
    PREFETCH_FLUSH();

    return 0;
}

/* RETF (CB), 32-bit operand size. */
static int
opRETF_a32(uint32_t fetchdat)
{
    int cycles_old = cycles;
    UN_USED(cycles_old);

    CPU_BLOCK_END();
    RETF_a32(0);
    PREFETCH_RUN(cycles_old - cycles, 1, -1, 0, 2, 0, 0, 1);
    PREFETCH_FLUSH();

    return 0;
}

/* RETF imm16 (CA), 16-bit operand size: releases imm16 extra stack bytes. */
static int
opRETF_a16_imm(uint32_t fetchdat)
{
    uint16_t offset     = getwordf();
    int      cycles_old = cycles;
    UN_USED(cycles_old);

    CPU_BLOCK_END();
    RETF_a16(offset);
    PREFETCH_RUN(cycles_old - cycles, 3, -1, 2, 0, 0, 0, 0);
    PREFETCH_FLUSH();

    return 0;
}

/* RETF imm16 (CA), 32-bit operand size. */
static int
opRETF_a32_imm(uint32_t fetchdat)
{
    uint16_t offset     = getwordf();
    int      cycles_old = cycles;
    UN_USED(cycles_old);

    CPU_BLOCK_END();
    RETF_a32(offset);
    PREFETCH_RUN(cycles_old - cycles, 3, -1, 0, 2, 0, 0, 1);
    PREFETCH_FLUSH();

    return 0;
}
/* IRET, 80186 behavior: GP fault in V86 mode with IOPL<3; protected mode
   is delegated to op_pmodeiret(); real mode pops IP, CS, FLAGS (word
   each). The popped flags are masked 0xffd5 on the 32-bit stack path but
   0x0fd5 on the 16-bit path while bits 12-14 of the old flags are kept
   (& 0x7000) - NOTE(review): the mask asymmetry looks intentional for
   pre-286 flag semantics but is worth confirming. */
static int
opIRET_186(uint32_t fetchdat)
{
    int cycles_old = cycles;
    UN_USED(cycles_old);

    if ((cr0 & 1) && (cpu_state.eflags & VM_FLAG) && (IOPL != 3)) {
        x86gpf(NULL, 0);
        return 1;
    }
    if (msw & 1) {
        optype = IRET;
        op_pmodeiret(0);
        optype = 0;
    } else {
        uint16_t new_cs;
        CPU_SET_OXPC
        if (stack32) {
            cpu_state.pc    = readmemw(ss, ESP);
            new_cs          = readmemw(ss, ESP + 2);
            cpu_state.flags = (cpu_state.flags & 0x7000) | (readmemw(ss, ESP + 4) & 0xffd5) | 2;
            ESP += 6;
        } else {
            cpu_state.pc    = readmemw(ss, SP);
            new_cs          = readmemw(ss, ((SP + 2) & 0xffff));
            cpu_state.flags = (cpu_state.flags & 0x7000) | (readmemw(ss, ((SP + 4) & 0xffff)) & 0x0fd5) | 2;
            SP += 6;
        }
        op_loadcs(new_cs);
        cycles -= timing_iret_rm;
    }
    flags_extract();
    nmi_enable = 1;
#ifdef USE_DEBUG_REGS_486
    rf_flag_no_clear = 1;
#endif
    CPU_BLOCK_END();
    PREFETCH_RUN(cycles_old - cycles, 1, -1, 2, 0, 0, 0, 0);
    PREFETCH_FLUSH();

    return cpu_state.abrt;
}

/* IRET, 80286 behavior. NOTE(review): currently byte-identical to
   opIRET_186 above; kept separate presumably to allow the two models to
   diverge. */
static int
opIRET_286(uint32_t fetchdat)
{
    int cycles_old = cycles;
    UN_USED(cycles_old);

    if ((cr0 & 1) && (cpu_state.eflags & VM_FLAG) && (IOPL != 3)) {
        x86gpf(NULL, 0);
        return 1;
    }
    if (msw & 1) {
        optype = IRET;
        op_pmodeiret(0);
        optype = 0;
    } else {
        uint16_t new_cs;
        CPU_SET_OXPC
        if (stack32) {
            cpu_state.pc    = readmemw(ss, ESP);
            new_cs          = readmemw(ss, ESP + 2);
            cpu_state.flags = (cpu_state.flags & 0x7000) | (readmemw(ss, ESP + 4) & 0xffd5) | 2;
            ESP += 6;
        } else {
            cpu_state.pc    = readmemw(ss, SP);
            new_cs          = readmemw(ss, ((SP + 2) & 0xffff));
            cpu_state.flags = (cpu_state.flags & 0x7000) | (readmemw(ss, ((SP + 4) & 0xffff)) & 0x0fd5) | 2;
            SP += 6;
        }
        op_loadcs(new_cs);
        cycles -= timing_iret_rm;
    }
    flags_extract();
    nmi_enable = 1;
#ifdef USE_DEBUG_REGS_486
    rf_flag_no_clear = 1;
#endif
    CPU_BLOCK_END();
    PREFETCH_RUN(cycles_old - cycles, 1, -1, 2, 0, 0, 0, 0);
    PREFETCH_FLUSH();

    return cpu_state.abrt;
}
/* IRET, 16-bit operand size, 386+ behavior. In V86 mode with IOPL<3:
   if CR4.VME is set, performs the virtual-interrupt-flag emulation
   (faults on a popped TF, or on popped IF while VIP is pending;
   otherwise maps popped IF onto VIF), else raises #GP. Protected mode
   is delegated to op_pmodeiret(); real mode pops IP, CS, FLAGS. */
static int
opIRET(uint32_t fetchdat)
{
    int cycles_old = cycles;
    UN_USED(cycles_old);

    if ((cr0 & 1) && (cpu_state.eflags & VM_FLAG) && (IOPL != 3)) {
        if (cr4 & CR4_VME) {
            uint16_t new_pc;
            uint16_t new_cs;
            uint16_t new_flags;

            /* Read the whole frame before committing any state. */
            new_pc    = readmemw(ss, SP);
            new_cs    = readmemw(ss, ((SP + 2) & 0xffff));
            new_flags = readmemw(ss, ((SP + 4) & 0xffff));
            if (cpu_state.abrt)
                return 1;

            if ((new_flags & T_FLAG) || ((new_flags & I_FLAG) && (cpu_state.eflags & VIP_FLAG))) {
                x86gpf(NULL, 0);
                return 1;
            }
            SP += 6;
            /* Popped IF lands in VIF, not the real IF. */
            if (new_flags & I_FLAG)
                cpu_state.eflags |= VIF_FLAG;
            else
                cpu_state.eflags &= ~VIF_FLAG;
            cpu_state.flags = (cpu_state.flags & 0x3300) | (new_flags & 0x4cd5) | 2;
            op_loadcs(new_cs);
            cpu_state.pc = new_pc;
            cycles -= timing_iret_rm;
        } else {
            x86gpf_expected(NULL, 0);
            return 1;
        }
    } else {
        if (msw & 1) {
            optype = IRET;
            op_pmodeiret(0);
            optype = 0;
        } else {
            uint16_t new_cs;
            CPU_SET_OXPC
            if (stack32) {
                cpu_state.pc    = readmemw(ss, ESP);
                new_cs          = readmemw(ss, ESP + 2);
                cpu_state.flags = (readmemw(ss, ESP + 4) & 0xffd5) | 2;
                ESP += 6;
            } else {
                cpu_state.pc    = readmemw(ss, SP);
                new_cs          = readmemw(ss, ((SP + 2) & 0xffff));
                cpu_state.flags = (readmemw(ss, ((SP + 4) & 0xffff)) & 0xffd5) | 2;
                SP += 6;
            }
            op_loadcs(new_cs);
            cycles -= timing_iret_rm;
        }
    }
    flags_extract();
    nmi_enable = 1;
#ifdef USE_DEBUG_REGS_486
    rf_flag_no_clear = 1;
#endif
    CPU_BLOCK_END();
    PREFETCH_RUN(cycles_old - cycles, 1, -1, 2, 0, 0, 0, 0);
    PREFETCH_FLUSH();

    return cpu_state.abrt;
}
/* IRETD, 32-bit operand size: GP fault in V86 mode with IOPL<3 (no VME
   path for the 32-bit form); protected mode delegated to
   op_pmodeiret(1); real mode pops EIP (dword), CS and the two halves of
   EFLAGS, advancing (E)SP by 12. */
static int
opIRETD(uint32_t fetchdat)
{
    int cycles_old = cycles;
    UN_USED(cycles_old);

    if ((cr0 & 1) && (cpu_state.eflags & VM_FLAG) && (IOPL != 3)) {
        x86gpf_expected(NULL, 0);
        return 1;
    }
    if (msw & 1) {
        optype = IRET;
        op_pmodeiret(1);
        optype = 0;
    } else {
        uint16_t new_cs;
        CPU_SET_OXPC
        if (stack32) {
            cpu_state.pc     = readmeml(ss, ESP);
            new_cs           = readmemw(ss, ESP + 4);
            cpu_state.flags  = (readmemw(ss, ESP + 8) & 0xffd5) | 2;
            cpu_state.eflags = readmemw(ss, ESP + 10);
            ESP += 12;
        } else {
            cpu_state.pc     = readmeml(ss, SP);
            new_cs           = readmemw(ss, ((SP + 4) & 0xffff));
            cpu_state.flags  = (readmemw(ss, (SP + 8) & 0xffff) & 0xffd5) | 2;
            cpu_state.eflags = readmemw(ss, (SP + 10) & 0xffff);
            SP += 12;
        }
        op_loadcs(new_cs);
        cycles -= timing_iret_rm;
    }
    flags_extract();
    nmi_enable = 1;
#ifdef USE_DEBUG_REGS_486
    rf_flag_no_clear = 1;
#endif
    CPU_BLOCK_END();
    PREFETCH_RUN(cycles_old - cycles, 1, -1, 0, 2, 0, 0, 1);
    PREFETCH_FLUSH();

    return cpu_state.abrt;
}
``` | /content/code_sandbox/src/cpu/x86_ops_ret.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 2,626 |
```objective-c
/* Load the MMX shift count into the caller's `shift` variable: from the
   low byte of the MMX source register when ModRM is register-direct,
   otherwise from the byte at the decoded effective address. Expands in
   the caller and may `return 0` if the memory read aborts. */
#define MMX_GETSHIFT()                                  \
    if (cpu_mod == 3) {                                 \
        shift = (MMX_GETREG(cpu_rm)).b[0];              \
        CLOCK_CYCLES(1);                                \
    } else {                                            \
        SEG_CHECK_READ(cpu_state.ea_seg);               \
        shift = readmemb(easeg, cpu_state.eaaddr);      \
        if (cpu_state.abrt)                             \
            return 0;                                   \
        CLOCK_CYCLES(2);                                \
    }
/* Word shift group with immediate count (0F 71 /r ib): ModRM bits 3-5
   select PSRLW (/2), PSRAW (/4) or PSLLW (/6); other encodings raise
   #UD. Logical shifts zero the register for counts > 15; the arithmetic
   shift saturates the count at 15 (preserving sign bits). */
static int
opPSxxW_imm(uint32_t fetchdat)
{
    int      reg   = fetchdat & 7;          /* MMX register in ModRM rm */
    int      op    = fetchdat & 0x38;       /* ModRM reg field selects the op */
    int      shift = (fetchdat >> 8) & 0xff; /* imm8 count */
    MMX_REG *dst;

    cpu_state.pc += 2;
    MMX_ENTER();

    dst = MMX_GETREGP(reg);

    switch (op) {
        case 0x10: /*PSRLW*/
            if (shift > 15)
                dst->q = 0;
            else {
                dst->w[0] >>= shift;
                dst->w[1] >>= shift;
                dst->w[2] >>= shift;
                dst->w[3] >>= shift;
            }
            break;
        case 0x20: /*PSRAW*/
            if (shift > 15)
                shift = 15;
            dst->sw[0] >>= shift;
            dst->sw[1] >>= shift;
            dst->sw[2] >>= shift;
            dst->sw[3] >>= shift;
            break;
        case 0x30: /*PSLLW*/
            if (shift > 15)
                dst->q = 0;
            else {
                dst->w[0] <<= shift;
                dst->w[1] <<= shift;
                dst->w[2] <<= shift;
                dst->w[3] <<= shift;
            }
            break;

        default:
            cpu_state.pc = cpu_state.oldpc;
            x86illegal();
            return 0;
    }
    MMX_SETEXP(reg);
    CLOCK_CYCLES(1);

    return 0;
}
/* PSLLW mm, mm/m64 (16-bit addressing): shift each packed word left;
   counts above 15 clear the register. */
static int
opPSLLW_a16(uint32_t fetchdat)
{
    MMX_REG *reg;
    int      shift;

    MMX_ENTER();

    fetch_ea_16(fetchdat);
    reg = MMX_GETREGP(cpu_reg);

    MMX_GETSHIFT();

    if (shift <= 15) {
        for (int i = 0; i < 4; i++)
            reg->w[i] <<= shift;
    } else
        reg->q = 0;

    MMX_SETEXP(cpu_reg);

    return 0;
}
/* PSLLW mm, mm/m64 (32-bit addressing): shift each packed word left;
   counts above 15 clear the register. */
static int
opPSLLW_a32(uint32_t fetchdat)
{
    MMX_REG *reg;
    int      shift;

    MMX_ENTER();

    fetch_ea_32(fetchdat);
    reg = MMX_GETREGP(cpu_reg);

    MMX_GETSHIFT();

    if (shift <= 15) {
        for (int i = 0; i < 4; i++)
            reg->w[i] <<= shift;
    } else
        reg->q = 0;

    MMX_SETEXP(cpu_reg);

    return 0;
}
/* PSRLW mm, mm/m64 (16-bit addressing): logical right shift of each
   packed word; counts above 15 clear the register. */
static int
opPSRLW_a16(uint32_t fetchdat)
{
    MMX_REG *reg;
    int      shift;

    MMX_ENTER();

    fetch_ea_16(fetchdat);
    reg = MMX_GETREGP(cpu_reg);

    MMX_GETSHIFT();

    if (shift <= 15) {
        for (int i = 0; i < 4; i++)
            reg->w[i] >>= shift;
    } else
        reg->q = 0;

    MMX_SETEXP(cpu_reg);

    return 0;
}
/* PSRLW mm, mm/m64 (32-bit addressing): logical right shift of each
   packed word; counts above 15 clear the register. */
static int
opPSRLW_a32(uint32_t fetchdat)
{
    MMX_REG *reg;
    int      shift;

    MMX_ENTER();

    fetch_ea_32(fetchdat);
    reg = MMX_GETREGP(cpu_reg);

    MMX_GETSHIFT();

    if (shift <= 15) {
        for (int i = 0; i < 4; i++)
            reg->w[i] >>= shift;
    } else
        reg->q = 0;

    MMX_SETEXP(cpu_reg);

    return 0;
}
/* PSRAW mm, mm/m64 (16-bit addressing): arithmetic right shift of each
   packed word; the count saturates at 15 so sign bits fill the result. */
static int
opPSRAW_a16(uint32_t fetchdat)
{
    MMX_REG *reg;
    int      shift;

    MMX_ENTER();

    fetch_ea_16(fetchdat);
    reg = MMX_GETREGP(cpu_reg);

    MMX_GETSHIFT();

    if (shift > 15)
        shift = 15;
    for (int i = 0; i < 4; i++)
        reg->sw[i] >>= shift;

    MMX_SETEXP(cpu_reg);

    return 0;
}
/* PSRAW mm, mm/m64 (32-bit addressing): arithmetic right shift of each
   packed word; the count saturates at 15 so sign bits fill the result. */
static int
opPSRAW_a32(uint32_t fetchdat)
{
    MMX_REG *reg;
    int      shift;

    MMX_ENTER();

    fetch_ea_32(fetchdat);
    reg = MMX_GETREGP(cpu_reg);

    MMX_GETSHIFT();

    if (shift > 15)
        shift = 15;
    for (int i = 0; i < 4; i++)
        reg->sw[i] >>= shift;

    MMX_SETEXP(cpu_reg);

    return 0;
}
/* Doubleword shift group with immediate count (0F 72 /r ib): ModRM bits
   3-5 select PSRLD (/2), PSRAD (/4) or PSLLD (/6); other encodings
   raise #UD. Logical shifts zero the register for counts > 31; the
   arithmetic shift saturates the count at 31. */
static int
opPSxxD_imm(uint32_t fetchdat)
{
    int      reg   = fetchdat & 7;           /* MMX register in ModRM rm */
    int      op    = fetchdat & 0x38;        /* ModRM reg field selects the op */
    int      shift = (fetchdat >> 8) & 0xff; /* imm8 count */
    MMX_REG *dst;

    cpu_state.pc += 2;
    MMX_ENTER();

    dst = MMX_GETREGP(reg);

    switch (op) {
        case 0x10: /*PSRLD*/
            if (shift > 31)
                dst->q = 0;
            else {
                dst->l[0] >>= shift;
                dst->l[1] >>= shift;
            }
            break;
        case 0x20: /*PSRAD*/
            if (shift > 31)
                shift = 31;
            dst->sl[0] >>= shift;
            dst->sl[1] >>= shift;
            break;
        case 0x30: /*PSLLD*/
            if (shift > 31)
                dst->q = 0;
            else {
                dst->l[0] <<= shift;
                dst->l[1] <<= shift;
            }
            break;

        default:
            cpu_state.pc = cpu_state.oldpc;
            x86illegal();
            return 0;
    }
    MMX_SETEXP(reg);
    CLOCK_CYCLES(1);

    return 0;
}
/* PSLLD mm, mm/m64 (16-bit addressing): shift each packed dword left;
   counts above 31 clear the register. */
static int
opPSLLD_a16(uint32_t fetchdat)
{
    MMX_REG *reg;
    int      shift;

    MMX_ENTER();

    fetch_ea_16(fetchdat);
    reg = MMX_GETREGP(cpu_reg);

    MMX_GETSHIFT();

    if (shift <= 31) {
        for (int i = 0; i < 2; i++)
            reg->l[i] <<= shift;
    } else
        reg->q = 0;

    MMX_SETEXP(cpu_reg);

    return 0;
}
/* PSLLD mm, mm/m64 (32-bit addressing): shift each packed dword left;
   counts above 31 clear the register. */
static int
opPSLLD_a32(uint32_t fetchdat)
{
    MMX_REG *reg;
    int      shift;

    MMX_ENTER();

    fetch_ea_32(fetchdat);
    reg = MMX_GETREGP(cpu_reg);

    MMX_GETSHIFT();

    if (shift <= 31) {
        for (int i = 0; i < 2; i++)
            reg->l[i] <<= shift;
    } else
        reg->q = 0;

    MMX_SETEXP(cpu_reg);

    return 0;
}
/* PSRLD mm, mm/m64 (16-bit addressing): logical right shift of each
   packed dword; counts above 31 clear the register. */
static int
opPSRLD_a16(uint32_t fetchdat)
{
    MMX_REG *reg;
    int      shift;

    MMX_ENTER();

    fetch_ea_16(fetchdat);
    reg = MMX_GETREGP(cpu_reg);

    MMX_GETSHIFT();

    if (shift <= 31) {
        for (int i = 0; i < 2; i++)
            reg->l[i] >>= shift;
    } else
        reg->q = 0;

    MMX_SETEXP(cpu_reg);

    return 0;
}
/* PSRLD mm, mm/m64 (32-bit addressing): logical right shift of each
   packed dword; counts above 31 clear the register. */
static int
opPSRLD_a32(uint32_t fetchdat)
{
    MMX_REG *reg;
    int      shift;

    MMX_ENTER();

    fetch_ea_32(fetchdat);
    reg = MMX_GETREGP(cpu_reg);

    MMX_GETSHIFT();

    if (shift <= 31) {
        for (int i = 0; i < 2; i++)
            reg->l[i] >>= shift;
    } else
        reg->q = 0;

    MMX_SETEXP(cpu_reg);

    return 0;
}
/* PSRAD mm, mm/m64 (16-bit addressing): arithmetic right shift of each
   packed dword; the count saturates at 31. */
static int
opPSRAD_a16(uint32_t fetchdat)
{
    MMX_REG *reg;
    int      shift;

    MMX_ENTER();

    fetch_ea_16(fetchdat);
    reg = MMX_GETREGP(cpu_reg);

    MMX_GETSHIFT();

    if (shift > 31)
        shift = 31;
    for (int i = 0; i < 2; i++)
        reg->sl[i] >>= shift;

    MMX_SETEXP(cpu_reg);

    return 0;
}
/* PSRAD mm, mm/m64 (32-bit addressing): arithmetic right shift of each
   packed dword; the count saturates at 31. */
static int
opPSRAD_a32(uint32_t fetchdat)
{
    MMX_REG *reg;
    int      shift;

    MMX_ENTER();

    fetch_ea_32(fetchdat);
    reg = MMX_GETREGP(cpu_reg);

    MMX_GETSHIFT();

    if (shift > 31)
        shift = 31;
    for (int i = 0; i < 2; i++)
        reg->sl[i] >>= shift;

    MMX_SETEXP(cpu_reg);

    return 0;
}
/* Quadword shift group with immediate count (0F 73 /r ib): ModRM bits
   3-5 select PSRLQ (/2), an arithmetic variant (/4) or PSLLQ (/6).
   NOTE(review): /4 implements an arithmetic quadword shift even though
   MMX defines no PSRAQ - confirm this matches the modeled hardware. */
static int
opPSxxQ_imm(uint32_t fetchdat)
{
    int      reg   = fetchdat & 7;           /* MMX register in ModRM rm */
    int      op    = fetchdat & 0x38;        /* ModRM reg field selects the op */
    int      shift = (fetchdat >> 8) & 0xff; /* imm8 count */
    MMX_REG *dst;

    cpu_state.pc += 2;
    MMX_ENTER();

    dst = MMX_GETREGP(reg);

    switch (op) {
        case 0x10: /*PSRLQ*/
            if (shift > 63)
                dst->q = 0;
            else
                dst->q >>= shift;
            break;
        case 0x20: /*PSRAQ (no MMX mnemonic; see note above)*/
            if (shift > 63)
                shift = 63;
            dst->sq >>= shift;
            break;
        case 0x30: /*PSLLQ*/
            if (shift > 63)
                dst->q = 0;
            else
                dst->q <<= shift;
            break;

        default:
            cpu_state.pc = cpu_state.oldpc;
            x86illegal();
            return 0;
    }
    MMX_SETEXP(reg);
    CLOCK_CYCLES(1);

    return 0;
}
/* PSLLQ mm, mm/m64 (16-bit addressing): shift the whole quadword left;
   counts above 63 clear the register. */
static int
opPSLLQ_a16(uint32_t fetchdat)
{
    MMX_REG *reg;
    int      shift;

    MMX_ENTER();

    fetch_ea_16(fetchdat);
    reg = MMX_GETREGP(cpu_reg);

    MMX_GETSHIFT();

    if (shift <= 63)
        reg->q <<= shift;
    else
        reg->q = 0;

    MMX_SETEXP(cpu_reg);

    return 0;
}
/* PSLLQ mm, mm/m64 (32-bit addressing): shift the whole quadword left;
   counts above 63 clear the register. */
static int
opPSLLQ_a32(uint32_t fetchdat)
{
    MMX_REG *reg;
    int      shift;

    MMX_ENTER();

    fetch_ea_32(fetchdat);
    reg = MMX_GETREGP(cpu_reg);

    MMX_GETSHIFT();

    if (shift <= 63)
        reg->q <<= shift;
    else
        reg->q = 0;

    MMX_SETEXP(cpu_reg);

    return 0;
}
/* PSRLQ mm, mm/m64 (16-bit addressing): logical right shift of the
   whole quadword; counts above 63 clear the register. */
static int
opPSRLQ_a16(uint32_t fetchdat)
{
    MMX_REG *reg;
    int      shift;

    MMX_ENTER();

    fetch_ea_16(fetchdat);
    reg = MMX_GETREGP(cpu_reg);

    MMX_GETSHIFT();

    if (shift <= 63)
        reg->q >>= shift;
    else
        reg->q = 0;

    MMX_SETEXP(cpu_reg);

    return 0;
}
/* PSRLQ mm, mm/m64 (32-bit addressing): logical right shift of the
   whole quadword; counts above 63 clear the register. */
static int
opPSRLQ_a32(uint32_t fetchdat)
{
    MMX_REG *reg;
    int      shift;

    MMX_ENTER();

    fetch_ea_32(fetchdat);
    reg = MMX_GETREGP(cpu_reg);

    MMX_GETSHIFT();

    if (shift <= 63)
        reg->q >>= shift;
    else
        reg->q = 0;

    MMX_SETEXP(cpu_reg);

    return 0;
}
``` | /content/code_sandbox/src/cpu/x86_ops_mmx_shift.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 2,900 |
```objective-c
#define REP_OPS(size, CNT_REG, SRC_REG, DEST_REG) \
static int opREP_INSB_##size(uint32_t fetchdat) \
{ \
addr64 = 0x00000000; \
\
if (CNT_REG > 0) { \
uint8_t temp; \
\
SEG_CHECK_WRITE(&cpu_state.seg_es); \
check_io_perm(DX, 1); \
CHECK_WRITE(&cpu_state.seg_es, DEST_REG, DEST_REG); \
high_page = 0; \
do_mmut_wb(es, DEST_REG, &addr64); \
if (cpu_state.abrt) \
return 1; \
temp = inb(DX); \
writememb_n(es, DEST_REG, addr64, temp); \
if (cpu_state.abrt) \
return 1; \
\
if (cpu_state.flags & D_FLAG) \
DEST_REG--; \
else \
DEST_REG++; \
CNT_REG--; \
cycles -= 15; \
} \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
static int opREP_INSW_##size(uint32_t fetchdat) \
{ \
addr64a[0] = addr64a[1] = 0x00000000; \
\
if (CNT_REG > 0) { \
uint16_t temp; \
\
SEG_CHECK_WRITE(&cpu_state.seg_es); \
check_io_perm(DX, 2); \
CHECK_WRITE(&cpu_state.seg_es, DEST_REG, DEST_REG + 1UL); \
high_page = 0; \
do_mmut_ww(es, DEST_REG, addr64a); \
if (cpu_state.abrt) \
return 1; \
temp = inw(DX); \
writememw_n(es, DEST_REG, addr64a, temp); \
if (cpu_state.abrt) \
return 1; \
\
if (cpu_state.flags & D_FLAG) \
DEST_REG -= 2; \
else \
DEST_REG += 2; \
CNT_REG--; \
cycles -= 15; \
} \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
static int opREP_INSL_##size(uint32_t fetchdat) \
{ \
addr64a[0] = addr64a[1] = addr64a[2] = addr64a[3] = 0x00000000; \
\
if (CNT_REG > 0) { \
uint32_t temp; \
\
SEG_CHECK_WRITE(&cpu_state.seg_es); \
check_io_perm(DX, 4); \
CHECK_WRITE(&cpu_state.seg_es, DEST_REG, DEST_REG + 3UL); \
high_page = 0; \
do_mmut_wl(es, DEST_REG, addr64a); \
if (cpu_state.abrt) \
return 1; \
temp = inl(DX); \
writememl_n(es, DEST_REG, addr64a, temp); \
if (cpu_state.abrt) \
return 1; \
\
if (cpu_state.flags & D_FLAG) \
DEST_REG -= 4; \
else \
DEST_REG += 4; \
CNT_REG--; \
cycles -= 15; \
} \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
\
static int opREP_OUTSB_##size(uint32_t fetchdat) \
{ \
if (CNT_REG > 0) { \
uint8_t temp; \
SEG_CHECK_READ(cpu_state.ea_seg); \
CHECK_READ(cpu_state.ea_seg, SRC_REG, SRC_REG); \
temp = readmemb(cpu_state.ea_seg->base, SRC_REG); \
if (cpu_state.abrt) \
return 1; \
check_io_perm(DX, 1); \
outb(DX, temp); \
if (cpu_state.flags & D_FLAG) \
SRC_REG--; \
else \
SRC_REG++; \
CNT_REG--; \
cycles -= 14; \
} \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
static int opREP_OUTSW_##size(uint32_t fetchdat) \
{ \
if (CNT_REG > 0) { \
uint16_t temp; \
SEG_CHECK_READ(cpu_state.ea_seg); \
CHECK_READ(cpu_state.ea_seg, SRC_REG, SRC_REG + 1UL); \
temp = readmemw(cpu_state.ea_seg->base, SRC_REG); \
if (cpu_state.abrt) \
return 1; \
check_io_perm(DX, 2); \
outw(DX, temp); \
if (cpu_state.flags & D_FLAG) \
SRC_REG -= 2; \
else \
SRC_REG += 2; \
CNT_REG--; \
cycles -= 14; \
} \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
static int opREP_OUTSL_##size(uint32_t fetchdat) \
{ \
if (CNT_REG > 0) { \
uint32_t temp; \
SEG_CHECK_READ(cpu_state.ea_seg); \
CHECK_READ(cpu_state.ea_seg, SRC_REG, SRC_REG + 3UL); \
temp = readmeml(cpu_state.ea_seg->base, SRC_REG); \
if (cpu_state.abrt) \
return 1; \
check_io_perm(DX, 4); \
outl(DX, temp); \
if (cpu_state.flags & D_FLAG) \
SRC_REG -= 4; \
else \
SRC_REG += 4; \
CNT_REG--; \
cycles -= 14; \
} \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
\
static int opREP_MOVSB_##size(uint32_t fetchdat) \
{ \
int cycles_end = cycles - ((is386 && cpu_use_dynarec) ? 1000 : 100); \
addr64 = addr64_2 = 0x00000000; \
if (trap) \
cycles_end = cycles + 1; /*Force the instruction to end after only one iteration when trap flag set*/ \
if (CNT_REG > 0) { \
SEG_CHECK_READ(cpu_state.ea_seg); \
SEG_CHECK_WRITE(&cpu_state.seg_es); \
} \
while (CNT_REG > 0) { \
uint8_t temp; \
\
CHECK_READ_REP(cpu_state.ea_seg, SRC_REG, SRC_REG); \
CHECK_WRITE_REP(&cpu_state.seg_es, DEST_REG, DEST_REG); \
high_page = 0; \
do_mmut_rb(cpu_state.ea_seg->base, SRC_REG, &addr64); \
if (cpu_state.abrt) \
break; \
do_mmut_wb(es, DEST_REG, &addr64_2); \
if (cpu_state.abrt) \
break; \
temp = readmemb_n(cpu_state.ea_seg->base, SRC_REG, addr64); \
if (cpu_state.abrt) \
return 1; \
writememb_n(es, DEST_REG, addr64_2, temp); \
if (cpu_state.abrt) \
return 1; \
\
if (cpu_state.flags & D_FLAG) { \
DEST_REG--; \
SRC_REG--; \
} else { \
DEST_REG++; \
SRC_REG++; \
} \
CNT_REG--; \
cycles -= is486 ? 3 : 4; \
if (cycles < cycles_end) \
break; \
} \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
static int opREP_MOVSW_##size(uint32_t fetchdat) \
{ \
int cycles_end = cycles - ((is386 && cpu_use_dynarec) ? 1000 : 100); \
addr64a[0] = addr64a[1] = 0x00000000; \
addr64a_2[0] = addr64a_2[1] = 0x00000000; \
if (trap) \
cycles_end = cycles + 1; /*Force the instruction to end after only one iteration when trap flag set*/ \
if (CNT_REG > 0) { \
SEG_CHECK_READ(cpu_state.ea_seg); \
SEG_CHECK_WRITE(&cpu_state.seg_es); \
} \
while (CNT_REG > 0) { \
uint16_t temp; \
\
CHECK_READ_REP(cpu_state.ea_seg, SRC_REG, SRC_REG + 1UL); \
CHECK_WRITE_REP(&cpu_state.seg_es, DEST_REG, DEST_REG + 1UL); \
high_page = 0; \
do_mmut_rw(cpu_state.ea_seg->base, SRC_REG, addr64a); \
if (cpu_state.abrt) \
break; \
do_mmut_ww(es, DEST_REG, addr64a_2); \
if (cpu_state.abrt) \
break; \
temp = readmemw_n(cpu_state.ea_seg->base, SRC_REG, addr64a); \
if (cpu_state.abrt) \
return 1; \
writememw_n(es, DEST_REG, addr64a_2, temp); \
if (cpu_state.abrt) \
return 1; \
\
if (cpu_state.flags & D_FLAG) { \
DEST_REG -= 2; \
SRC_REG -= 2; \
} else { \
DEST_REG += 2; \
SRC_REG += 2; \
} \
CNT_REG--; \
cycles -= is486 ? 3 : 4; \
if (cycles < cycles_end) \
break; \
} \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
static int opREP_MOVSL_##size(uint32_t fetchdat) \
{ \
int cycles_end = cycles - ((is386 && cpu_use_dynarec) ? 1000 : 100); \
addr64a[0] = addr64a[1] = addr64a[2] = addr64a[3] = 0x00000000; \
addr64a_2[0] = addr64a_2[1] = addr64a_2[2] = addr64a_2[3] = 0x00000000; \
if (trap) \
cycles_end = cycles + 1; /*Force the instruction to end after only one iteration when trap flag set*/ \
if (CNT_REG > 0) { \
SEG_CHECK_READ(cpu_state.ea_seg); \
SEG_CHECK_WRITE(&cpu_state.seg_es); \
} \
while (CNT_REG > 0) { \
uint32_t temp; \
\
CHECK_READ_REP(cpu_state.ea_seg, SRC_REG, SRC_REG + 3UL); \
CHECK_WRITE_REP(&cpu_state.seg_es, DEST_REG, DEST_REG + 3UL); \
high_page = 0; \
do_mmut_rl(cpu_state.ea_seg->base, SRC_REG, addr64a); \
if (cpu_state.abrt) \
break; \
do_mmut_wl(es, DEST_REG, addr64a_2); \
if (cpu_state.abrt) \
break; \
temp = readmeml_n(cpu_state.ea_seg->base, SRC_REG, addr64a); \
if (cpu_state.abrt) \
return 1; \
writememl_n(es, DEST_REG, addr64a_2, temp); \
if (cpu_state.abrt) \
return 1; \
\
if (cpu_state.flags & D_FLAG) { \
DEST_REG -= 4; \
SRC_REG -= 4; \
} else { \
DEST_REG += 4; \
SRC_REG += 4; \
} \
CNT_REG--; \
cycles -= is486 ? 3 : 4; \
if (cycles < cycles_end) \
break; \
} \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
\
static int opREP_STOSB_##size(uint32_t fetchdat) \
{ \
int cycles_end = cycles - ((is386 && cpu_use_dynarec) ? 1000 : 100); \
if (trap) \
cycles_end = cycles + 1; /*Force the instruction to end after only one iteration when trap flag set*/ \
if (CNT_REG > 0) \
SEG_CHECK_WRITE(&cpu_state.seg_es); \
while (CNT_REG > 0) { \
CHECK_WRITE_REP(&cpu_state.seg_es, DEST_REG, DEST_REG); \
writememb(es, DEST_REG, AL); \
if (cpu_state.abrt) \
return 1; \
if (cpu_state.flags & D_FLAG) \
DEST_REG--; \
else \
DEST_REG++; \
CNT_REG--; \
cycles -= is486 ? 4 : 5; \
if (cycles < cycles_end) \
break; \
} \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
static int opREP_STOSW_##size(uint32_t fetchdat) \
{ \
int cycles_end = cycles - ((is386 && cpu_use_dynarec) ? 1000 : 100); \
if (trap) \
cycles_end = cycles + 1; /*Force the instruction to end after only one iteration when trap flag set*/ \
if (CNT_REG > 0) \
SEG_CHECK_WRITE(&cpu_state.seg_es); \
while (CNT_REG > 0) { \
CHECK_WRITE_REP(&cpu_state.seg_es, DEST_REG, DEST_REG + 1UL); \
writememw(es, DEST_REG, AX); \
if (cpu_state.abrt) \
return 1; \
if (cpu_state.flags & D_FLAG) \
DEST_REG -= 2; \
else \
DEST_REG += 2; \
CNT_REG--; \
cycles -= is486 ? 4 : 5; \
if (cycles < cycles_end) \
break; \
} \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
static int opREP_STOSL_##size(uint32_t fetchdat) \
{ \
int cycles_end = cycles - ((is386 && cpu_use_dynarec) ? 1000 : 100); \
if (trap) \
cycles_end = cycles + 1; /*Force the instruction to end after only one iteration when trap flag set*/ \
if (CNT_REG > 0) \
SEG_CHECK_WRITE(&cpu_state.seg_es); \
while (CNT_REG > 0) { \
CHECK_WRITE_REP(&cpu_state.seg_es, DEST_REG, DEST_REG + 3UL); \
writememl(es, DEST_REG, EAX); \
if (cpu_state.abrt) \
return 1; \
if (cpu_state.flags & D_FLAG) \
DEST_REG -= 4; \
else \
DEST_REG += 4; \
CNT_REG--; \
cycles -= is486 ? 4 : 5; \
if (cycles < cycles_end) \
break; \
} \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
\
static int opREP_LODSB_##size(uint32_t fetchdat) \
{ \
int cycles_end = cycles - ((is386 && cpu_use_dynarec) ? 1000 : 100); \
if (trap) \
cycles_end = cycles + 1; /*Force the instruction to end after only one iteration when trap flag set*/ \
if (CNT_REG > 0) \
SEG_CHECK_READ(cpu_state.ea_seg); \
while (CNT_REG > 0) { \
CHECK_READ_REP(cpu_state.ea_seg, SRC_REG, SRC_REG); \
AL = readmemb(cpu_state.ea_seg->base, SRC_REG); \
if (cpu_state.abrt) \
return 1; \
if (cpu_state.flags & D_FLAG) \
SRC_REG--; \
else \
SRC_REG++; \
CNT_REG--; \
cycles -= is486 ? 4 : 5; \
if (cycles < cycles_end) \
break; \
} \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
static int opREP_LODSW_##size(uint32_t fetchdat) \
{ \
int cycles_end = cycles - ((is386 && cpu_use_dynarec) ? 1000 : 100); \
if (trap) \
cycles_end = cycles + 1; /*Force the instruction to end after only one iteration when trap flag set*/ \
if (CNT_REG > 0) \
SEG_CHECK_READ(cpu_state.ea_seg); \
while (CNT_REG > 0) { \
CHECK_READ_REP(cpu_state.ea_seg, SRC_REG, SRC_REG + 1UL); \
AX = readmemw(cpu_state.ea_seg->base, SRC_REG); \
if (cpu_state.abrt) \
return 1; \
if (cpu_state.flags & D_FLAG) \
SRC_REG -= 2; \
else \
SRC_REG += 2; \
CNT_REG--; \
cycles -= is486 ? 4 : 5; \
if (cycles < cycles_end) \
break; \
} \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
static int opREP_LODSL_##size(uint32_t fetchdat) \
{ \
int cycles_end = cycles - ((is386 && cpu_use_dynarec) ? 1000 : 100); \
if (trap) \
cycles_end = cycles + 1; /*Force the instruction to end after only one iteration when trap flag set*/ \
if (CNT_REG > 0) \
SEG_CHECK_READ(cpu_state.ea_seg); \
while (CNT_REG > 0) { \
CHECK_READ_REP(cpu_state.ea_seg, SRC_REG, SRC_REG + 3UL); \
EAX = readmeml(cpu_state.ea_seg->base, SRC_REG); \
if (cpu_state.abrt) \
return 1; \
if (cpu_state.flags & D_FLAG) \
SRC_REG -= 4; \
else \
SRC_REG += 4; \
CNT_REG--; \
cycles -= is486 ? 4 : 5; \
if (cycles < cycles_end) \
break; \
} \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
}
/* NOTE(review): apparently a typo for CHECK_READ, but it is never referenced
 * below and expands to nothing. Left as-is on purpose: renaming it to
 * CHECK_READ would redefine the real macro and stub out the access checks. */
#define CHEK_READ(a, b, c)
#define REP_OPS_CMPS_SCAS(size, CNT_REG, SRC_REG, DEST_REG, FV) \
static int opREP_CMPSB_##size(uint32_t fetchdat) \
{ \
int tempz; \
\
addr64 = addr64_2 = 0x00000000; \
\
tempz = FV; \
if ((CNT_REG > 0) && (FV == tempz)) { \
uint8_t temp, temp2; \
SEG_CHECK_READ(cpu_state.ea_seg); \
SEG_CHECK_READ(&cpu_state.seg_es); \
CHECK_READ(cpu_state.ea_seg, SRC_REG, SRC_REG); \
CHECK_READ(&cpu_state.seg_es, DEST_REG, DEST_REG); \
high_page = uncached = 0; \
do_mmut_rb(cpu_state.ea_seg->base, SRC_REG, &addr64); \
if (cpu_state.abrt) \
return 1; \
do_mmut_rb2(es, DEST_REG, &addr64_2); \
if (cpu_state.abrt) \
return 1; \
temp = readmemb_n(cpu_state.ea_seg->base, SRC_REG, addr64); \
if (cpu_state.abrt) \
return 1; \
if (uncached) \
readlookup2[(uint32_t) (es + DEST_REG) >> 12] = old_rl2; \
temp2 = readmemb_n(es, DEST_REG, addr64_2); \
if (cpu_state.abrt) \
return 1; \
if (uncached) \
readlookup2[(uint32_t) (es + DEST_REG) >> 12] = (uintptr_t) LOOKUP_INV; \
\
if (cpu_state.flags & D_FLAG) { \
DEST_REG--; \
SRC_REG--; \
} else { \
DEST_REG++; \
SRC_REG++; \
} \
CNT_REG--; \
cycles -= is486 ? 7 : 9; \
setsub8(temp, temp2); \
tempz = (ZF_SET()) ? 1 : 0; \
} \
if ((CNT_REG > 0) && (FV == tempz)) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
static int opREP_CMPSW_##size(uint32_t fetchdat) \
{ \
int tempz; \
\
addr64a[0] = addr64a[1] = 0x00000000; \
addr64a_2[0] = addr64a_2[1] = 0x00000000; \
\
tempz = FV; \
if ((CNT_REG > 0) && (FV == tempz)) { \
uint16_t temp, temp2; \
SEG_CHECK_READ(cpu_state.ea_seg); \
SEG_CHECK_READ(&cpu_state.seg_es); \
CHECK_READ(cpu_state.ea_seg, SRC_REG, SRC_REG + 1UL); \
CHECK_READ(&cpu_state.seg_es, DEST_REG, DEST_REG + 1UL); \
high_page = uncached = 0; \
do_mmut_rw(cpu_state.ea_seg->base, SRC_REG, addr64a); \
if (cpu_state.abrt) \
return 1; \
do_mmut_rw2(es, DEST_REG, addr64a_2); \
if (cpu_state.abrt) \
return 1; \
temp = readmemw_n(cpu_state.ea_seg->base, SRC_REG, addr64a); \
if (cpu_state.abrt) \
return 1; \
if (uncached) \
readlookup2[(uint32_t) (es + DEST_REG) >> 12] = old_rl2; \
temp2 = readmemw_n(es, DEST_REG, addr64a_2); \
if (cpu_state.abrt) \
return 1; \
if (uncached) \
readlookup2[(uint32_t) (es + DEST_REG) >> 12] = (uintptr_t) LOOKUP_INV; \
\
if (cpu_state.flags & D_FLAG) { \
DEST_REG -= 2; \
SRC_REG -= 2; \
} else { \
DEST_REG += 2; \
SRC_REG += 2; \
} \
CNT_REG--; \
cycles -= is486 ? 7 : 9; \
setsub16(temp, temp2); \
tempz = (ZF_SET()) ? 1 : 0; \
} \
if ((CNT_REG > 0) && (FV == tempz)) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
static int opREP_CMPSL_##size(uint32_t fetchdat) \
{ \
int tempz; \
\
addr64a[0] = addr64a[1] = addr64a[2] = addr64a[3] = 0x00000000; \
addr64a_2[0] = addr64a_2[1] = addr64a_2[2] = addr64a_2[3] = 0x00000000; \
\
tempz = FV; \
if ((CNT_REG > 0) && (FV == tempz)) { \
uint32_t temp, temp2; \
SEG_CHECK_READ(cpu_state.ea_seg); \
SEG_CHECK_READ(&cpu_state.seg_es); \
CHECK_READ(cpu_state.ea_seg, SRC_REG, SRC_REG + 3UL); \
CHECK_READ(&cpu_state.seg_es, DEST_REG, DEST_REG + 3UL); \
high_page = uncached = 0; \
do_mmut_rl(cpu_state.ea_seg->base, SRC_REG, addr64a); \
if (cpu_state.abrt) \
return 1; \
do_mmut_rl2(es, DEST_REG, addr64a_2); \
if (cpu_state.abrt) \
return 1; \
temp = readmeml_n(cpu_state.ea_seg->base, SRC_REG, addr64a); \
if (cpu_state.abrt) \
return 1; \
if (uncached) \
readlookup2[(uint32_t) (es + DEST_REG) >> 12] = old_rl2; \
temp2 = readmeml_n(es, DEST_REG, addr64a_2); \
if (cpu_state.abrt) \
return 1; \
if (uncached) \
readlookup2[(uint32_t) (es + DEST_REG) >> 12] = (uintptr_t) LOOKUP_INV; \
\
if (cpu_state.flags & D_FLAG) { \
DEST_REG -= 4; \
SRC_REG -= 4; \
} else { \
DEST_REG += 4; \
SRC_REG += 4; \
} \
CNT_REG--; \
cycles -= is486 ? 7 : 9; \
setsub32(temp, temp2); \
tempz = (ZF_SET()) ? 1 : 0; \
} \
if ((CNT_REG > 0) && (FV == tempz)) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
\
static int opREP_SCASB_##size(uint32_t fetchdat) \
{ \
int tempz; \
int cycles_end = cycles - 1000; \
if (trap) \
cycles_end = cycles + 1; /*Force the instruction to end after only one iteration when trap flag set*/ \
tempz = FV; \
if ((CNT_REG > 0) && (FV == tempz)) \
SEG_CHECK_READ(&cpu_state.seg_es); \
while ((CNT_REG > 0) && (FV == tempz)) { \
CHECK_READ_REP(&cpu_state.seg_es, DEST_REG, DEST_REG); \
uint8_t temp = readmemb(es, DEST_REG); \
if (cpu_state.abrt) \
break; \
setsub8(AL, temp); \
tempz = (ZF_SET()) ? 1 : 0; \
if (cpu_state.flags & D_FLAG) \
DEST_REG--; \
else \
DEST_REG++; \
CNT_REG--; \
cycles -= is486 ? 5 : 8; \
if (cycles < cycles_end) \
break; \
} \
if ((CNT_REG > 0) && (FV == tempz)) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
static int opREP_SCASW_##size(uint32_t fetchdat) \
{ \
int tempz; \
int cycles_end = cycles - 1000; \
if (trap) \
cycles_end = cycles + 1; /*Force the instruction to end after only one iteration when trap flag set*/ \
tempz = FV; \
if ((CNT_REG > 0) && (FV == tempz)) \
SEG_CHECK_READ(&cpu_state.seg_es); \
while ((CNT_REG > 0) && (FV == tempz)) { \
CHECK_READ_REP(&cpu_state.seg_es, DEST_REG, DEST_REG + 1UL); \
uint16_t temp = readmemw(es, DEST_REG); \
if (cpu_state.abrt) \
break; \
setsub16(AX, temp); \
tempz = (ZF_SET()) ? 1 : 0; \
if (cpu_state.flags & D_FLAG) \
DEST_REG -= 2; \
else \
DEST_REG += 2; \
CNT_REG--; \
cycles -= is486 ? 5 : 8; \
if (cycles < cycles_end) \
break; \
} \
if ((CNT_REG > 0) && (FV == tempz)) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
static int opREP_SCASL_##size(uint32_t fetchdat) \
{ \
int tempz; \
int cycles_end = cycles - 1000; \
if (trap) \
cycles_end = cycles + 1; /*Force the instruction to end after only one iteration when trap flag set*/ \
tempz = FV; \
if ((CNT_REG > 0) && (FV == tempz)) \
SEG_CHECK_READ(&cpu_state.seg_es); \
while ((CNT_REG > 0) && (FV == tempz)) { \
CHECK_READ_REP(&cpu_state.seg_es, DEST_REG, DEST_REG + 3UL); \
uint32_t temp = readmeml(es, DEST_REG); \
if (cpu_state.abrt) \
break; \
setsub32(EAX, temp); \
tempz = (ZF_SET()) ? 1 : 0; \
if (cpu_state.flags & D_FLAG) \
DEST_REG -= 4; \
else \
DEST_REG += 4; \
CNT_REG--; \
cycles -= is486 ? 5 : 8; \
if (cycles < cycles_end) \
break; \
} \
if ((CNT_REG > 0) && (FV == tempz)) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
}
/* Instantiate the string handlers for both address sizes: "a16" uses
 * CX/SI/DI, "a32" uses ECX/ESI/EDI. CMPS/SCAS variants are split by REP
 * termination condition: _NE (FV=0) iterates while ZF is clear (REPNE),
 * _E (FV=1) iterates while ZF is set (REPE). */
REP_OPS(a16, CX, SI, DI)
REP_OPS(a32, ECX, ESI, EDI)
REP_OPS_CMPS_SCAS(a16_NE, CX, SI, DI, 0)
REP_OPS_CMPS_SCAS(a16_E, CX, SI, DI, 1)
REP_OPS_CMPS_SCAS(a32_NE, ECX, ESI, EDI, 0)
REP_OPS_CMPS_SCAS(a32_E, ECX, ESI, EDI, 1)
/* F2 (REPNE) prefix handler: fetch the next opcode byte and dispatch it
 * through the REPNE-specific table, falling back to the plain opcode table
 * when no REPNE-aware handler exists for that opcode. */
static int
opREPNE(uint32_t fetchdat)
{
    uint32_t idx;

    fetchdat = fastreadl(cs + cpu_state.pc);
    if (cpu_state.abrt)
        return 1;
    cpu_state.pc++;
    CLOCK_CYCLES(2);

    idx = (fetchdat & 0xff) | cpu_state.op32;
    if (x86_opcodes_REPNE[idx])
        return x86_opcodes_REPNE[idx](fetchdat >> 8);
    return x86_opcodes[idx](fetchdat >> 8);
}
/* F3 (REPE/REP) prefix handler: fetch the next opcode byte and dispatch it
 * through the REPE-specific table, falling back to the plain opcode table
 * when no REPE-aware handler exists for that opcode. */
static int
opREPE(uint32_t fetchdat)
{
    uint32_t idx;

    fetchdat = fastreadl(cs + cpu_state.pc);
    if (cpu_state.abrt)
        return 1;
    cpu_state.pc++;
    CLOCK_CYCLES(2);

    idx = (fetchdat & 0xff) | cpu_state.op32;
    if (x86_opcodes_REPE[idx])
        return x86_opcodes_REPE[idx](fetchdat >> 8);
    return x86_opcodes[idx](fetchdat >> 8);
}
``` | /content/code_sandbox/src/cpu/x86_ops_rep_dyn.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 8,710 |
```objective-c
/* MOV reg32, CRx (0F 20; 16-bit addressing). Privileged: #GP(0) when
 * executed in protected mode from CPL > 0 or from V86 mode. */
static int
opMOV_r_CRx_a16(uint32_t fetchdat)
{
    if ((CPL || (cpu_state.eflags & VM_FLAG)) && (cr0 & 1)) {
        x86gpf(NULL, 0);
        return 1;
    }
    fetch_ea_16(fetchdat);
    switch (cpu_reg) {
        case 0:
            cpu_state.regs[cpu_rm].l = cr0;
            if (is486 || isibm486)
                cpu_state.regs[cpu_rm].l |= 0x10; /*ET hardwired on 486*/
            else {
                /* Pre-486 parts read back the undefined upper CR0 bits as 1s. */
                if (is386)
                    cpu_state.regs[cpu_rm].l |= 0x7fffffe0;
                else
                    cpu_state.regs[cpu_rm].l |= 0x7ffffff0;
            }
            break;
        case 2: /* CR2: page-fault linear address */
            cpu_state.regs[cpu_rm].l = cr2;
            break;
        case 3: /* CR3: page directory base */
            cpu_state.regs[cpu_rm].l = cr3;
            break;
        case 4:
            /* CR4 is only readable on CPUs that implement it; without the
               feature, deliberately fall through to the #UD path below. */
            if (cpu_has_feature(CPU_FEATURE_CR4)) {
                cpu_state.regs[cpu_rm].l = cr4;
                break;
            }
        default: /* CR1 and other encodings: undefined opcode */
            cpu_state.pc = cpu_state.oldpc;
            x86illegal();
            break;
    }
    CLOCK_CYCLES(6);
    PREFETCH_RUN(6, 2, rmdat, 0, 0, 0, 0, 0);
    return 0;
}
/* MOV reg32, CRx (0F 20; 32-bit addressing). Identical to opMOV_r_CRx_a16
 * except the effective address is decoded with 32-bit rules. */
static int
opMOV_r_CRx_a32(uint32_t fetchdat)
{
    if ((CPL || (cpu_state.eflags & VM_FLAG)) && (cr0 & 1)) {
        x86gpf(NULL, 0);
        return 1;
    }
    fetch_ea_32(fetchdat);
    switch (cpu_reg) {
        case 0:
            cpu_state.regs[cpu_rm].l = cr0;
            if (is486 || isibm486)
                cpu_state.regs[cpu_rm].l |= 0x10; /*ET hardwired on 486*/
            else {
                /* Pre-486 parts read back the undefined upper CR0 bits as 1s. */
                if (is386)
                    cpu_state.regs[cpu_rm].l |= 0x7fffffe0;
                else
                    cpu_state.regs[cpu_rm].l |= 0x7ffffff0;
            }
            break;
        case 2: /* CR2: page-fault linear address */
            cpu_state.regs[cpu_rm].l = cr2;
            break;
        case 3: /* CR3: page directory base */
            cpu_state.regs[cpu_rm].l = cr3;
            break;
        case 4:
            /* CR4 is only readable on CPUs that implement it; without the
               feature, deliberately fall through to the #UD path below. */
            if (cpu_has_feature(CPU_FEATURE_CR4)) {
                cpu_state.regs[cpu_rm].l = cr4;
                break;
            }
        default: /* CR1 and other encodings: undefined opcode */
            cpu_state.pc = cpu_state.oldpc;
            x86illegal();
            break;
    }
    CLOCK_CYCLES(6);
    PREFETCH_RUN(6, 2, rmdat, 0, 0, 0, 0, 1);
    return 0;
}
/* MOV reg32, DRx (0F 21; 16-bit addressing). Privileged in protected mode.
 * With DR7.GD (bit 13) set, any debug-register access raises a debug trap
 * instead (unless resuming with EFLAGS.RF). DR4/DR5 alias DR6/DR7 unless
 * CR4.DE (bit 3) is set, in which case they are undefined opcodes. */
static int
opMOV_r_DRx_a16(uint32_t fetchdat)
{
    if ((CPL > 0) && (cr0 & 1)) {
        x86gpf(NULL, 0);
        return 1;
    }
    if ((dr[7] & 0x2000) && !(cpu_state.eflags & RF_FLAG)) {
        trap |= 1; /* general-detect: defer to the debug exception path */
        return 1;
    }
    fetch_ea_16(fetchdat);
    switch (cpu_reg) {
        case 0 ... 3: /* DR0-DR3: breakpoint linear addresses */
            cpu_state.regs[cpu_rm].l = dr[cpu_reg];
            break;
        case 4: /* DR4 aliases DR6 only when CR4.DE is clear */
            if (cr4 & 0x8) {
                x86illegal();
                return 1;
            }
            fallthrough;
        case 6:
            cpu_state.regs[cpu_rm].l = dr[6];
            break;
        case 5: /* DR5 aliases DR7 only when CR4.DE is clear */
            if (cr4 & 0x8) {
                x86illegal();
                return 1;
            }
            fallthrough;
        case 7:
            cpu_state.regs[cpu_rm].l = dr[7];
            break;
        default: /* unreachable: cpu_reg is a 3-bit field */
            x86illegal();
            return 1;
    }
    CLOCK_CYCLES(6);
    PREFETCH_RUN(6, 2, rmdat, 0, 0, 0, 0, 0);
    return 0;
}
/* MOV reg32, DRx (0F 21; 32-bit addressing). Identical to opMOV_r_DRx_a16
 * except the effective address is decoded with 32-bit rules. */
static int
opMOV_r_DRx_a32(uint32_t fetchdat)
{
    if ((CPL > 0) && (cr0 & 1)) {
        x86gpf(NULL, 0);
        return 1;
    }
    if ((dr[7] & 0x2000) && !(cpu_state.eflags & RF_FLAG)) {
        trap |= 1; /* general-detect: defer to the debug exception path */
        return 1;
    }
    fetch_ea_32(fetchdat);
    switch (cpu_reg) {
        case 0 ... 3: /* DR0-DR3: breakpoint linear addresses */
            cpu_state.regs[cpu_rm].l = dr[cpu_reg];
            break;
        case 4: /* DR4 aliases DR6 only when CR4.DE is clear */
            if (cr4 & 0x8) {
                x86illegal();
                return 1;
            }
            fallthrough;
        case 6:
            cpu_state.regs[cpu_rm].l = dr[6];
            break;
        case 5: /* DR5 aliases DR7 only when CR4.DE is clear */
            if (cr4 & 0x8) {
                x86illegal();
                return 1;
            }
            fallthrough;
        case 7:
            cpu_state.regs[cpu_rm].l = dr[7];
            break;
        default: /* unreachable: cpu_reg is a 3-bit field */
            x86illegal();
            return 1;
    }
    CLOCK_CYCLES(6);
    PREFETCH_RUN(6, 2, rmdat, 0, 0, 0, 0, 1);
    return 0;
}
/* MOV CRx, reg32 (0F 22; 16-bit addressing). Privileged: #GP(0) when in
 * protected mode at CPL > 0 or in V86 mode. Writing CR0/CR3/CR4 has MMU
 * and cache side effects handled inline below. */
static int
opMOV_CRx_r_a16(uint32_t fetchdat)
{
    uint32_t old_cr0 = cr0;

    if ((CPL || (cpu_state.eflags & VM_FLAG)) && (cr0 & 1)) {
        x86gpf(NULL, 0);
        return 1;
    }
    fetch_ea_16(fetchdat);
    switch (cpu_reg) {
        case 0:
            /* Flush the TLB when PE (bit 0) or PG (bit 31) toggles. */
            if ((cpu_state.regs[cpu_rm].l ^ cr0) & 0x80000001)
                flushmmucache();
            /* Make sure CPL = 0 when switching from real mode to protected mode. */
            if ((cpu_state.regs[cpu_rm].l & 0x01) && !(cr0 & 0x01))
                cpu_state.seg_cs.access &= 0x9f;
            cr0 = cpu_state.regs[cpu_rm].l;
            if (cpu_16bitbus)
                cr0 |= 0x10; /* NOTE(review): forces bit 4 on 16-bit-bus parts - confirm intent */
            if (!(cr0 & 0x80000000))
                mmu_perm = 4; /* paging off: full access */
            /* CD (bit 30) gates the internal cache and wait states. */
            if (hascache && !(cr0 & (1 << 30)))
                cpu_cache_int_enabled = 1;
            else
                cpu_cache_int_enabled = 0;
            if (hascache && ((cr0 ^ old_cr0) & (1 << 30)))
                cpu_update_waitstates();
            if (cr0 & 1)
                cpu_cur_status |= CPU_STATUS_PMODE;
            else
                cpu_cur_status &= ~CPU_STATUS_PMODE;
            break;
        case 2:
            cr2 = cpu_state.regs[cpu_rm].l;
            break;
        case 3:
            cr3 = cpu_state.regs[cpu_rm].l;
            flushmmucache(); /* new page directory base invalidates the TLB */
            break;
        case 4:
            /* CR4 writes are masked by the CPU's supported-bit mask; PAE/PGE
               changes invalidate the TLB. Without CR4 support, fall through
               to the #UD path below. */
            if (cpu_has_feature(CPU_FEATURE_CR4)) {
                if (((cpu_state.regs[cpu_rm].l ^ cr4) & cpu_CR4_mask) & (CR4_PAE | CR4_PGE))
                    flushmmucache();
                cr4 = cpu_state.regs[cpu_rm].l & cpu_CR4_mask;
                break;
            }
        default:
            cpu_state.pc = cpu_state.oldpc;
            x86illegal();
            break;
    }
    CLOCK_CYCLES(10);
    PREFETCH_RUN(10, 2, rmdat, 0, 0, 0, 0, 0);
    return 0;
}
/* MOV CRx, reg32 (0F 22; 32-bit addressing). Identical to opMOV_CRx_r_a16
 * except the effective address is decoded with 32-bit rules. */
static int
opMOV_CRx_r_a32(uint32_t fetchdat)
{
    uint32_t old_cr0 = cr0;

    if ((CPL || (cpu_state.eflags & VM_FLAG)) && (cr0 & 1)) {
        x86gpf(NULL, 0);
        return 1;
    }
    fetch_ea_32(fetchdat);
    switch (cpu_reg) {
        case 0:
            /* Flush the TLB when PE (bit 0) or PG (bit 31) toggles. */
            if ((cpu_state.regs[cpu_rm].l ^ cr0) & 0x80000001)
                flushmmucache();
            /* Make sure CPL = 0 when switching from real mode to protected mode. */
            if ((cpu_state.regs[cpu_rm].l & 0x01) && !(cr0 & 0x01))
                cpu_state.seg_cs.access &= 0x9f;
            cr0 = cpu_state.regs[cpu_rm].l;
            if (cpu_16bitbus)
                cr0 |= 0x10; /* NOTE(review): forces bit 4 on 16-bit-bus parts - confirm intent */
            if (!(cr0 & 0x80000000))
                mmu_perm = 4; /* paging off: full access */
            /* CD (bit 30) gates the internal cache and wait states. */
            if (hascache && !(cr0 & (1 << 30)))
                cpu_cache_int_enabled = 1;
            else
                cpu_cache_int_enabled = 0;
            if (hascache && ((cr0 ^ old_cr0) & (1 << 30)))
                cpu_update_waitstates();
            if (cr0 & 1)
                cpu_cur_status |= CPU_STATUS_PMODE;
            else
                cpu_cur_status &= ~CPU_STATUS_PMODE;
            break;
        case 2:
            cr2 = cpu_state.regs[cpu_rm].l;
            break;
        case 3:
            cr3 = cpu_state.regs[cpu_rm].l;
            flushmmucache(); /* new page directory base invalidates the TLB */
            break;
        case 4:
            /* CR4 writes are masked by the CPU's supported-bit mask; PAE/PGE
               changes invalidate the TLB. Without CR4 support, fall through
               to the #UD path below. */
            if (cpu_has_feature(CPU_FEATURE_CR4)) {
                if (((cpu_state.regs[cpu_rm].l ^ cr4) & cpu_CR4_mask) & (CR4_PAE | CR4_PGE))
                    flushmmucache();
                cr4 = cpu_state.regs[cpu_rm].l & cpu_CR4_mask;
                break;
            }
        default:
            cpu_state.pc = cpu_state.oldpc;
            x86illegal();
            break;
    }
    CLOCK_CYCLES(10);
    PREFETCH_RUN(10, 2, rmdat, 0, 0, 0, 0, 1);
    return 0;
}
/* MOV DRx, reg32 (0F 23; 16-bit addressing). Same privilege and DR7.GD
 * rules as the read direction. DR6 writes only take bits 0x0000f00f from
 * the source (the rest read back as preserved); DR7 always reads back with
 * bit 10 set. Ends the current block, presumably so the new breakpoint
 * configuration takes effect on the next translated block. */
static int
opMOV_DRx_r_a16(uint32_t fetchdat)
{
    if ((CPL > 0) && (cr0 & 1)) {
        x86gpf(NULL, 0);
        return 1;
    }
    if ((dr[7] & 0x2000) && !(cpu_state.eflags & RF_FLAG)) {
        trap |= 1; /* general-detect: defer to the debug exception path */
        return 1;
    }
    fetch_ea_16(fetchdat);
    switch (cpu_reg) {
        case 0 ... 3: /* DR0-DR3: breakpoint linear addresses */
            dr[cpu_reg] = cpu_state.regs[cpu_rm].l;
            break;
        case 4: /* DR4 aliases DR6 only when CR4.DE is clear */
            if (cr4 & 0x8) {
                x86illegal();
                return 1;
            }
            fallthrough;
        case 6:
            dr[6] = (dr[6] & 0xffff0ff0) | (cpu_state.regs[cpu_rm].l & 0x0000f00f);
            break;
        case 5: /* DR5 aliases DR7 only when CR4.DE is clear */
            if (cr4 & 0x8) {
                x86illegal();
                return 1;
            }
            fallthrough;
        case 7:
            dr[7] = cpu_state.regs[cpu_rm].l | 0x00000400;
            break;
        default: /* unreachable: cpu_reg is a 3-bit field */
            x86illegal();
            return 1;
    }
    CLOCK_CYCLES(6);
    PREFETCH_RUN(6, 2, rmdat, 0, 0, 0, 0, 0);
    CPU_BLOCK_END();
    return 0;
}
/* MOV DRx, reg32 (0F 23; 32-bit addressing). Identical to opMOV_DRx_r_a16
 * except the effective address is decoded with 32-bit rules. */
static int
opMOV_DRx_r_a32(uint32_t fetchdat)
{
    if ((CPL > 0) && (cr0 & 1)) {
        x86gpf(NULL, 0);
        return 1;
    }
    if ((dr[7] & 0x2000) && !(cpu_state.eflags & RF_FLAG)) {
        trap |= 1; /* general-detect: defer to the debug exception path */
        return 1;
    }
    fetch_ea_32(fetchdat);
    switch (cpu_reg) {
        case 0 ... 3: /* DR0-DR3: breakpoint linear addresses */
            dr[cpu_reg] = cpu_state.regs[cpu_rm].l;
            break;
        case 4: /* DR4 aliases DR6 only when CR4.DE is clear */
            if (cr4 & 0x8) {
                x86illegal();
                return 1;
            }
            fallthrough;
        case 6:
            dr[6] = (dr[6] & 0xffff0ff0) | (cpu_state.regs[cpu_rm].l & 0x0000f00f);
            break;
        case 5: /* DR5 aliases DR7 only when CR4.DE is clear */
            if (cr4 & 0x8) {
                x86illegal();
                return 1;
            }
            fallthrough;
        case 7:
            dr[7] = cpu_state.regs[cpu_rm].l | 0x00000400;
            break;
        default: /* unreachable: cpu_reg is a 3-bit field */
            x86illegal();
            return 1;
    }
    CLOCK_CYCLES(6);
    PREFETCH_RUN(6, 2, rmdat, 0, 0, 0, 0, 1);
    CPU_BLOCK_END();
    return 0;
}
/* Common tail of MOV reg32, TRx (0F 24): copy test register cpu_reg into
 * the destination GPR. Reading TR3 first refills it with the next dword
 * from the 16-byte internal cache-test buffer (_cache), advancing the
 * buffer index with wrap-around within the line. */
static void
opMOV_r_TRx(void)
{
#if 0
    uint32_t base;

    base = _tr[4] & 0xfffff800;
#endif
    switch (cpu_reg) {
        case 3:
#if 0
            pclog("[R] %08X cache = %08X\n", base + cache_index, _tr[3]);
#endif
            _tr[3] = *(uint32_t *) &(_cache[cache_index]);
            cache_index = (cache_index + 4) & 0xf; /* wrap within the 16-byte line */
            break;
    }
    cpu_state.regs[cpu_rm].l = _tr[cpu_reg];
    CLOCK_CYCLES(6);
}
/* MOV reg32, TRx (0F 24; 16-bit addressing). Test registers do not exist
 * on the Pentium, and access is privileged in protected mode - either
 * condition raises #GP(0). */
static int
opMOV_r_TRx_a16(uint32_t fetchdat)
{
    int fault = (cpu_s->cpu_type == CPU_PENTIUM);

    if (!fault)
        fault = (CPL || (cpu_state.eflags & VM_FLAG)) && (cr0 & 1);

    if (fault) {
        x86gpf(NULL, 0);
        return 1;
    }

    fetch_ea_16(fetchdat);
    opMOV_r_TRx();
    PREFETCH_RUN(6, 2, rmdat, 0, 0, 0, 0, 0);
    return 0;
}
/* MOV reg32, TRx (0F 24; 32-bit addressing). Test registers do not exist
 * on the Pentium, and access is privileged in protected mode - either
 * condition raises #GP(0). */
static int
opMOV_r_TRx_a32(uint32_t fetchdat)
{
    int fault = (cpu_s->cpu_type == CPU_PENTIUM);

    if (!fault)
        fault = (CPL || (cpu_state.eflags & VM_FLAG)) && (cr0 & 1);

    if (fault) {
        x86gpf(NULL, 0);
        return 1;
    }

    fetch_ea_32(fetchdat);
    opMOV_r_TRx();
    PREFETCH_RUN(6, 2, rmdat, 0, 0, 0, 0, 1);
    return 0;
}
/* Common tail of MOV TRx, reg32 (0F 26): implements the 386/486-style
 * cache-test register interface. TR3 is the 32-bit cache data port (feeds
 * the internal 16-byte _cache buffer), TR4 holds the tag/address, and TR5
 * is the control register: bits 0-1 select the operation, bits 4-10 the
 * set/offset, and bit 19 (per the disabled pclog traces, an "EXT" flag)
 * suppresses the memory transfer entirely. */
static void
opMOV_TRx_r(void)
{
    uint32_t base;
    int      i;
    int      ctl;

    _tr[cpu_reg] = cpu_state.regs[cpu_rm].l;

    base = _tr[4] & 0xfffff800;
    ctl  = _tr[5] & 3;

    switch (cpu_reg) {
        case 3: /* cache data port: push a dword into the fill buffer */
#if 0
            pclog("[W] %08X cache = %08X\n", base + cache_index, _tr[3]);
#endif
            *(uint32_t *) &(_cache[cache_index]) = _tr[3];
            cache_index = (cache_index + 4) & 0xf; /* wrap within the 16-byte line */
            break;
        case 4: /* tag register write: no immediate side effect */
#if 0
            if (!(cr0 & 1) && !(_tr[5] & (1 << 19)))
                pclog("TAG = %08X, DEST = %08X\n", base, base + cache_index - 16);
#endif
            break;
        case 5: /* control register write triggers the selected operation */
#if 0
            pclog("[16] EXT = %i (%i), SET = %04X\n", !!(_tr[5] & (1 << 19)), _tr[5] & 0x03, _tr[5] & 0x7f0);
#endif
            if (!(_tr[5] & (1 << 19))) {
                switch (ctl) {
                    case 0: /* cache fill or read - no memory transfer here */
#if 0
                        pclog("  Cache fill or read...\n", base);
#endif
                        break;
                    case 1: /* write the 16-byte buffer to physical memory */
                        base += (_tr[5] & 0x7f0);
#if 0
                        pclog("  Writing 16 bytes to %08X...\n", base);
#endif
                        for (i = 0; i < 16; i += 4)
                            mem_writel_phys(base + i, *(uint32_t *) &(_cache[i]));
                        break;
                    case 2: /* fill the 16-byte buffer from physical memory */
                        base += (_tr[5] & 0x7f0);
#if 0
                        pclog("  Reading 16 bytes from %08X...\n", base);
#endif
                        for (i = 0; i < 16; i += 4)
                            *(uint32_t *) &(_cache[i]) = mem_readl_phys(base + i);
                        break;
                    case 3: /* cache invalidate/flush - nothing to emulate */
#if 0
                        pclog("  Cache invalidate/flush...\n", base);
#endif
                        break;
                }
            }
            break;
    }
    CLOCK_CYCLES(6);
}
/* MOV TRx, reg32 (0F 26; 16-bit addressing). Test registers do not exist
 * on the Pentium, and access is privileged in protected mode - either
 * condition raises #GP(0). */
static int
opMOV_TRx_r_a16(uint32_t fetchdat)
{
    int fault = (cpu_s->cpu_type == CPU_PENTIUM);

    if (!fault)
        fault = (CPL || (cpu_state.eflags & VM_FLAG)) && (cr0 & 1);

    if (fault) {
        x86gpf(NULL, 0);
        return 1;
    }

    fetch_ea_16(fetchdat);
    opMOV_TRx_r();
    PREFETCH_RUN(6, 2, rmdat, 0, 0, 0, 0, 0);
    return 0;
}
/* MOV TRx, reg32 (0F 26; 32-bit addressing). Test registers do not exist
 * on the Pentium, and access is privileged in protected mode - either
 * condition raises #GP(0). */
static int
opMOV_TRx_r_a32(uint32_t fetchdat)
{
    int fault = (cpu_s->cpu_type == CPU_PENTIUM);

    if (!fault)
        fault = (CPL || (cpu_state.eflags & VM_FLAG)) && (cr0 & 1);

    if (fault) {
        x86gpf(NULL, 0);
        return 1;
    }

    fetch_ea_32(fetchdat);
    opMOV_TRx_r();
    PREFETCH_RUN(6, 2, rmdat, 0, 0, 0, 0, 1);
    return 0;
}
``` | /content/code_sandbox/src/cpu/x86_ops_mov_ctrl_2386.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 4,199 |
```objective-c
#define OP_ARITH(name, operation, setflags, flagops, gettempc) \
static int op##name##_b_rmw_a16(uint32_t fetchdat) \
{ \
uint8_t dst; \
uint8_t src; \
if (gettempc) \
tempc = CF_SET() ? 1 : 0; \
fetch_ea_16(fetchdat); \
if (cpu_mod == 3) { \
dst = getr8(cpu_rm); \
src = getr8(cpu_reg); \
setflags##8 flagops; \
setr8(cpu_rm, operation); \
CLOCK_CYCLES(timing_rr); \
PREFETCH_RUN(timing_rr, 2, rmdat, 0, 0, 0, 0, 0); \
} else { \
SEG_CHECK_WRITE(cpu_state.ea_seg); \
dst = geteab(); \
if (cpu_state.abrt) \
return 1; \
src = getr8(cpu_reg); \
seteab(operation); \
if (cpu_state.abrt) \
return 1; \
setflags##8 flagops; \
CLOCK_CYCLES(timing_mr); \
PREFETCH_RUN(timing_mr, 2, rmdat, 1, 0, 1, 0, 0); \
} \
return 0; \
} \
static int op##name##_b_rmw_a32(uint32_t fetchdat) \
{ \
uint8_t dst; \
uint8_t src; \
if (gettempc) \
tempc = CF_SET() ? 1 : 0; \
fetch_ea_32(fetchdat); \
if (cpu_mod == 3) { \
dst = getr8(cpu_rm); \
src = getr8(cpu_reg); \
setflags##8 flagops; \
setr8(cpu_rm, operation); \
CLOCK_CYCLES(timing_rr); \
PREFETCH_RUN(timing_rr, 2, rmdat, 0, 0, 0, 0, 1); \
} else { \
SEG_CHECK_WRITE(cpu_state.ea_seg); \
dst = geteab(); \
if (cpu_state.abrt) \
return 1; \
src = getr8(cpu_reg); \
seteab(operation); \
if (cpu_state.abrt) \
return 1; \
setflags##8 flagops; \
CLOCK_CYCLES(timing_mr); \
PREFETCH_RUN(timing_mr, 2, rmdat, 1, 0, 1, 0, 1); \
} \
return 0; \
} \
\
static int op##name##_w_rmw_a16(uint32_t fetchdat) \
{ \
uint16_t dst; \
uint16_t src; \
if (gettempc) \
tempc = CF_SET() ? 1 : 0; \
fetch_ea_16(fetchdat); \
if (cpu_mod == 3) { \
dst = cpu_state.regs[cpu_rm].w; \
src = cpu_state.regs[cpu_reg].w; \
setflags##16 flagops; \
cpu_state.regs[cpu_rm].w = operation; \
CLOCK_CYCLES(timing_rr); \
PREFETCH_RUN(timing_rr, 2, rmdat, 0, 0, 0, 0, 0); \
} else { \
SEG_CHECK_WRITE(cpu_state.ea_seg); \
dst = geteaw(); \
if (cpu_state.abrt) \
return 1; \
src = cpu_state.regs[cpu_reg].w; \
seteaw(operation); \
if (cpu_state.abrt) \
return 1; \
setflags##16 flagops; \
CLOCK_CYCLES(timing_mr); \
PREFETCH_RUN(timing_rr, 2, rmdat, 1, 0, 1, 0, 0); \
} \
return 0; \
} \
static int op##name##_w_rmw_a32(uint32_t fetchdat) \
{ \
uint16_t dst; \
uint16_t src; \
if (gettempc) \
tempc = CF_SET() ? 1 : 0; \
fetch_ea_32(fetchdat); \
if (cpu_mod == 3) { \
dst = cpu_state.regs[cpu_rm].w; \
src = cpu_state.regs[cpu_reg].w; \
setflags##16 flagops; \
cpu_state.regs[cpu_rm].w = operation; \
CLOCK_CYCLES(timing_rr); \
PREFETCH_RUN(timing_rr, 2, rmdat, 0, 0, 0, 0, 1); \
} else { \
SEG_CHECK_WRITE(cpu_state.ea_seg); \
dst = geteaw(); \
if (cpu_state.abrt) \
return 1; \
src = cpu_state.regs[cpu_reg].w; \
seteaw(operation); \
if (cpu_state.abrt) \
return 1; \
setflags##16 flagops; \
CLOCK_CYCLES(timing_mr); \
PREFETCH_RUN(timing_rr, 2, rmdat, 1, 0, 1, 0, 1); \
} \
return 0; \
} \
\
static int op##name##_l_rmw_a16(uint32_t fetchdat) \
{ \
uint32_t dst; \
uint32_t src; \
if (gettempc) \
tempc = CF_SET() ? 1 : 0; \
fetch_ea_16(fetchdat); \
if (cpu_mod == 3) { \
dst = cpu_state.regs[cpu_rm].l; \
src = cpu_state.regs[cpu_reg].l; \
setflags##32 flagops; \
cpu_state.regs[cpu_rm].l = operation; \
CLOCK_CYCLES(timing_rr); \
PREFETCH_RUN(timing_rr, 2, rmdat, 0, 0, 0, 0, 0); \
} else { \
SEG_CHECK_WRITE(cpu_state.ea_seg); \
dst = geteal(); \
if (cpu_state.abrt) \
return 1; \
src = cpu_state.regs[cpu_reg].l; \
seteal(operation); \
if (cpu_state.abrt) \
return 1; \
setflags##32 flagops; \
CLOCK_CYCLES(timing_mr); \
PREFETCH_RUN(timing_rr, 2, rmdat, 0, 1, 0, 1, 0); \
} \
return 0; \
} \
static int op##name##_l_rmw_a32(uint32_t fetchdat) \
{ \
uint32_t dst; \
uint32_t src; \
if (gettempc) \
tempc = CF_SET() ? 1 : 0; \
fetch_ea_32(fetchdat); \
if (cpu_mod == 3) { \
dst = cpu_state.regs[cpu_rm].l; \
src = cpu_state.regs[cpu_reg].l; \
setflags##32 flagops; \
cpu_state.regs[cpu_rm].l = operation; \
CLOCK_CYCLES(timing_rr); \
PREFETCH_RUN(timing_rr, 2, rmdat, 0, 0, 0, 0, 1); \
} else { \
SEG_CHECK_WRITE(cpu_state.ea_seg); \
dst = geteal(); \
if (cpu_state.abrt) \
return 1; \
src = cpu_state.regs[cpu_reg].l; \
seteal(operation); \
if (cpu_state.abrt) \
return 1; \
setflags##32 flagops; \
CLOCK_CYCLES(timing_mr); \
PREFETCH_RUN(timing_rr, 2, rmdat, 0, 1, 0, 1, 1); \
} \
return 0; \
} \
\
static int op##name##_b_rm_a16(uint32_t fetchdat) \
{ \
uint8_t dst, src; \
if (gettempc) \
tempc = CF_SET() ? 1 : 0; \
fetch_ea_16(fetchdat); \
if (cpu_mod != 3) \
SEG_CHECK_READ(cpu_state.ea_seg); \
dst = getr8(cpu_reg); \
src = geteab(); \
if (cpu_state.abrt) \
return 1; \
setflags##8 flagops; \
setr8(cpu_reg, operation); \
CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_rm); \
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_rm, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 0); \
return 0; \
} \
static int op##name##_b_rm_a32(uint32_t fetchdat) \
{ \
uint8_t dst, src; \
if (gettempc) \
tempc = CF_SET() ? 1 : 0; \
fetch_ea_32(fetchdat); \
if (cpu_mod != 3) \
SEG_CHECK_READ(cpu_state.ea_seg); \
dst = getr8(cpu_reg); \
src = geteab(); \
if (cpu_state.abrt) \
return 1; \
setflags##8 flagops; \
setr8(cpu_reg, operation); \
CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_rm); \
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_rm, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 1); \
return 0; \
} \
\
static int op##name##_w_rm_a16(uint32_t fetchdat) \
{ \
uint16_t dst, src; \
if (gettempc) \
tempc = CF_SET() ? 1 : 0; \
fetch_ea_16(fetchdat); \
if (cpu_mod != 3) \
SEG_CHECK_READ(cpu_state.ea_seg); \
dst = cpu_state.regs[cpu_reg].w; \
src = geteaw(); \
if (cpu_state.abrt) \
return 1; \
setflags##16 flagops; \
cpu_state.regs[cpu_reg].w = operation; \
CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_rm); \
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_rm, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 0); \
return 0; \
} \
static int op##name##_w_rm_a32(uint32_t fetchdat) \
{ \
uint16_t dst, src; \
if (gettempc) \
tempc = CF_SET() ? 1 : 0; \
fetch_ea_32(fetchdat); \
if (cpu_mod != 3) \
SEG_CHECK_READ(cpu_state.ea_seg); \
dst = cpu_state.regs[cpu_reg].w; \
src = geteaw(); \
if (cpu_state.abrt) \
return 1; \
setflags##16 flagops; \
cpu_state.regs[cpu_reg].w = operation; \
CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_rm); \
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_rm, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 1); \
return 0; \
} \
\
static int op##name##_l_rm_a16(uint32_t fetchdat) \
{ \
uint32_t dst, src; \
if (gettempc) \
tempc = CF_SET() ? 1 : 0; \
fetch_ea_16(fetchdat); \
if (cpu_mod != 3) \
SEG_CHECK_READ(cpu_state.ea_seg); \
dst = cpu_state.regs[cpu_reg].l; \
src = geteal(); \
if (cpu_state.abrt) \
return 1; \
setflags##32 flagops; \
cpu_state.regs[cpu_reg].l = operation; \
CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_rml); \
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_rm, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, 0, 0); \
return 0; \
} \
static int op##name##_l_rm_a32(uint32_t fetchdat) \
{ \
uint32_t dst, src; \
if (gettempc) \
tempc = CF_SET() ? 1 : 0; \
fetch_ea_32(fetchdat); \
if (cpu_mod != 3) \
SEG_CHECK_READ(cpu_state.ea_seg); \
dst = cpu_state.regs[cpu_reg].l; \
src = geteal(); \
if (cpu_state.abrt) \
return 1; \
setflags##32 flagops; \
cpu_state.regs[cpu_reg].l = operation; \
CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_rml); \
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_rm, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, 0, 1); \
return 0; \
} \
\
static int op##name##_AL_imm(uint32_t fetchdat) \
{ \
uint8_t dst = AL; \
uint8_t src = getbytef(); \
if (gettempc) \
tempc = CF_SET() ? 1 : 0; \
setflags##8 flagops; \
AL = operation; \
CLOCK_CYCLES(timing_rr); \
PREFETCH_RUN(timing_rr, 2, -1, 0, 0, 0, 0, 0); \
return 0; \
} \
\
static int op##name##_AX_imm(uint32_t fetchdat) \
{ \
uint16_t dst = AX; \
uint16_t src = getwordf(); \
if (gettempc) \
tempc = CF_SET() ? 1 : 0; \
setflags##16 flagops; \
AX = operation; \
CLOCK_CYCLES(timing_rr); \
PREFETCH_RUN(timing_rr, 3, -1, 0, 0, 0, 0, 0); \
return 0; \
} \
\
static int op##name##_EAX_imm(uint32_t fetchdat) \
{ \
uint32_t dst = EAX; \
uint32_t src = getlong(); \
if (cpu_state.abrt) \
return 1; \
if (gettempc) \
tempc = CF_SET() ? 1 : 0; \
setflags##32 flagops; \
EAX = operation; \
CLOCK_CYCLES(timing_rr); \
PREFETCH_RUN(timing_rr, 5, -1, 0, 0, 0, 0, 0); \
return 0; \
}
OP_ARITH(ADD, dst + src, setadd, (dst, src), 0)
OP_ARITH(ADC, dst + src + tempc, setadc, (dst, src), 1)
OP_ARITH(SUB, dst - src, setsub, (dst, src), 0)
OP_ARITH(SBB, dst - (src + tempc), setsbc, (dst, src), 1)
OP_ARITH(OR, dst | src, setznp, (dst | src), 0)
OP_ARITH(AND, dst &src, setznp, (dst & src), 0)
OP_ARITH(XOR, dst ^ src, setznp, (dst ^ src), 0)
static int
opCMP_b_rmw_a16(uint32_t fetchdat)
{
uint8_t dst;
fetch_ea_16(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_READ(cpu_state.ea_seg);
dst = geteab();
if (cpu_state.abrt)
return 1;
setsub8(dst, getr8(cpu_reg));
if (is486) {
CLOCK_CYCLES((cpu_mod == 3) ? 1 : 2);
} else {
CLOCK_CYCLES((cpu_mod == 3) ? 2 : 5);
}
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_rm, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 0);
return 0;
}
static int
opCMP_b_rmw_a32(uint32_t fetchdat)
{
uint8_t dst;
fetch_ea_32(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_READ(cpu_state.ea_seg);
dst = geteab();
if (cpu_state.abrt)
return 1;
setsub8(dst, getr8(cpu_reg));
if (is486) {
CLOCK_CYCLES((cpu_mod == 3) ? 1 : 2);
} else {
CLOCK_CYCLES((cpu_mod == 3) ? 2 : 5);
}
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_rm, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 1);
return 0;
}
static int
opCMP_w_rmw_a16(uint32_t fetchdat)
{
uint16_t dst;
fetch_ea_16(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_READ(cpu_state.ea_seg);
dst = geteaw();
if (cpu_state.abrt)
return 1;
setsub16(dst, cpu_state.regs[cpu_reg].w);
if (is486) {
CLOCK_CYCLES((cpu_mod == 3) ? 1 : 2);
} else {
CLOCK_CYCLES((cpu_mod == 3) ? 2 : 5);
}
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_rm, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 0);
return 0;
}
static int
opCMP_w_rmw_a32(uint32_t fetchdat)
{
uint16_t dst;
fetch_ea_32(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_READ(cpu_state.ea_seg);
dst = geteaw();
if (cpu_state.abrt)
return 1;
setsub16(dst, cpu_state.regs[cpu_reg].w);
if (is486) {
CLOCK_CYCLES((cpu_mod == 3) ? 1 : 2);
} else {
CLOCK_CYCLES((cpu_mod == 3) ? 2 : 5);
}
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_rm, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 1);
return 0;
}
static int
opCMP_l_rmw_a16(uint32_t fetchdat)
{
uint32_t dst;
fetch_ea_16(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_READ(cpu_state.ea_seg);
dst = geteal();
if (cpu_state.abrt)
return 1;
setsub32(dst, cpu_state.regs[cpu_reg].l);
if (is486) {
CLOCK_CYCLES((cpu_mod == 3) ? 1 : 2);
} else {
CLOCK_CYCLES((cpu_mod == 3) ? 2 : 5);
}
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_rm, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, 0, 0);
return 0;
}
static int
opCMP_l_rmw_a32(uint32_t fetchdat)
{
uint32_t dst;
fetch_ea_32(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_READ(cpu_state.ea_seg);
dst = geteal();
if (cpu_state.abrt)
return 1;
setsub32(dst, cpu_state.regs[cpu_reg].l);
if (is486) {
CLOCK_CYCLES((cpu_mod == 3) ? 1 : 2);
} else {
CLOCK_CYCLES((cpu_mod == 3) ? 2 : 5);
}
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_rm, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, 0, 1);
return 0;
}
static int
opCMP_b_rm_a16(uint32_t fetchdat)
{
uint8_t src;
fetch_ea_16(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_READ(cpu_state.ea_seg);
src = geteab();
if (cpu_state.abrt)
return 1;
setsub8(getr8(cpu_reg), src);
CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_rm);
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_rm, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 0);
return 0;
}
static int
opCMP_b_rm_a32(uint32_t fetchdat)
{
uint8_t src;
fetch_ea_32(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_READ(cpu_state.ea_seg);
src = geteab();
if (cpu_state.abrt)
return 1;
setsub8(getr8(cpu_reg), src);
CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_rm);
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_rm, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 1);
return 0;
}
static int
opCMP_w_rm_a16(uint32_t fetchdat)
{
uint16_t src;
fetch_ea_16(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_READ(cpu_state.ea_seg);
src = geteaw();
if (cpu_state.abrt)
return 1;
setsub16(cpu_state.regs[cpu_reg].w, src);
CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_rm);
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_rm, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 0);
return 0;
}
static int
opCMP_w_rm_a32(uint32_t fetchdat)
{
uint16_t src;
fetch_ea_32(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_READ(cpu_state.ea_seg);
src = geteaw();
if (cpu_state.abrt)
return 1;
setsub16(cpu_state.regs[cpu_reg].w, src);
CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_rm);
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_rm, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 1);
return 0;
}
static int
opCMP_l_rm_a16(uint32_t fetchdat)
{
uint32_t src;
fetch_ea_16(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_READ(cpu_state.ea_seg);
src = geteal();
if (cpu_state.abrt)
return 1;
setsub32(cpu_state.regs[cpu_reg].l, src);
CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_rml);
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_rm, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, 0, 0);
return 0;
}
static int
opCMP_l_rm_a32(uint32_t fetchdat)
{
uint32_t src;
fetch_ea_32(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_READ(cpu_state.ea_seg);
src = geteal();
if (cpu_state.abrt)
return 1;
setsub32(cpu_state.regs[cpu_reg].l, src);
CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_rml);
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_rm, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, 0, 1);
return 0;
}
static int
opCMP_AL_imm(uint32_t fetchdat)
{
uint8_t src = getbytef();
setsub8(AL, src);
CLOCK_CYCLES(timing_rr);
PREFETCH_RUN(timing_rr, 2, -1, 0, 0, 0, 0, 0);
return 0;
}
static int
opCMP_AX_imm(uint32_t fetchdat)
{
uint16_t src = getwordf();
setsub16(AX, src);
CLOCK_CYCLES(timing_rr);
PREFETCH_RUN(timing_rr, 3, -1, 0, 0, 0, 0, 0);
return 0;
}
static int
opCMP_EAX_imm(uint32_t fetchdat)
{
uint32_t src = getlong();
if (cpu_state.abrt)
return 1;
setsub32(EAX, src);
CLOCK_CYCLES(timing_rr);
PREFETCH_RUN(timing_rr, 5, -1, 0, 0, 0, 0, 0);
return 0;
}
static int
opTEST_b_a16(uint32_t fetchdat)
{
uint8_t temp;
uint8_t temp2;
fetch_ea_16(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_READ(cpu_state.ea_seg);
temp = geteab();
if (cpu_state.abrt)
return 1;
temp2 = getr8(cpu_reg);
setznp8(temp & temp2);
if (is486) {
CLOCK_CYCLES((cpu_mod == 3) ? 1 : 2);
} else {
CLOCK_CYCLES((cpu_mod == 3) ? 2 : 5);
}
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_rm, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 0);
return 0;
}
static int
opTEST_b_a32(uint32_t fetchdat)
{
uint8_t temp;
uint8_t temp2;
fetch_ea_32(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_READ(cpu_state.ea_seg);
temp = geteab();
if (cpu_state.abrt)
return 1;
temp2 = getr8(cpu_reg);
setznp8(temp & temp2);
if (is486) {
CLOCK_CYCLES((cpu_mod == 3) ? 1 : 2);
} else {
CLOCK_CYCLES((cpu_mod == 3) ? 2 : 5);
}
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_rm, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 1);
return 0;
}
static int
opTEST_w_a16(uint32_t fetchdat)
{
uint16_t temp;
uint16_t temp2;
fetch_ea_16(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_READ(cpu_state.ea_seg);
temp = geteaw();
if (cpu_state.abrt)
return 1;
temp2 = cpu_state.regs[cpu_reg].w;
setznp16(temp & temp2);
if (is486) {
CLOCK_CYCLES((cpu_mod == 3) ? 1 : 2);
} else {
CLOCK_CYCLES((cpu_mod == 3) ? 2 : 5);
}
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_rm, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 0);
return 0;
}
static int
opTEST_w_a32(uint32_t fetchdat)
{
uint16_t temp;
uint16_t temp2;
fetch_ea_32(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_READ(cpu_state.ea_seg);
temp = geteaw();
if (cpu_state.abrt)
return 1;
temp2 = cpu_state.regs[cpu_reg].w;
setznp16(temp & temp2);
if (is486) {
CLOCK_CYCLES((cpu_mod == 3) ? 1 : 2);
} else {
CLOCK_CYCLES((cpu_mod == 3) ? 2 : 5);
}
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_rm, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 1);
return 0;
}
static int
opTEST_l_a16(uint32_t fetchdat)
{
uint32_t temp;
uint32_t temp2;
fetch_ea_16(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_READ(cpu_state.ea_seg);
temp = geteal();
if (cpu_state.abrt)
return 1;
temp2 = cpu_state.regs[cpu_reg].l;
setznp32(temp & temp2);
if (is486) {
CLOCK_CYCLES((cpu_mod == 3) ? 1 : 2);
} else {
CLOCK_CYCLES((cpu_mod == 3) ? 2 : 5);
}
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_rm, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, 0, 0);
return 0;
}
static int
opTEST_l_a32(uint32_t fetchdat)
{
uint32_t temp;
uint32_t temp2;
fetch_ea_32(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_READ(cpu_state.ea_seg);
temp = geteal();
if (cpu_state.abrt)
return 1;
temp2 = cpu_state.regs[cpu_reg].l;
setznp32(temp & temp2);
if (is486) {
CLOCK_CYCLES((cpu_mod == 3) ? 1 : 2);
} else {
CLOCK_CYCLES((cpu_mod == 3) ? 2 : 5);
}
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_rm, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, 0, 1);
return 0;
}
static int
opTEST_AL(uint32_t fetchdat)
{
uint8_t temp = getbytef();
setznp8(AL & temp);
CLOCK_CYCLES(timing_rr);
PREFETCH_RUN(timing_rr, 2, -1, 0, 0, 0, 0, 0);
return 0;
}
static int
opTEST_AX(uint32_t fetchdat)
{
uint16_t temp = getwordf();
setznp16(AX & temp);
CLOCK_CYCLES(timing_rr);
PREFETCH_RUN(timing_rr, 3, -1, 0, 0, 0, 0, 0);
return 0;
}
static int
opTEST_EAX(uint32_t fetchdat)
{
uint32_t temp = getlong();
if (cpu_state.abrt)
return 1;
setznp32(EAX & temp);
CLOCK_CYCLES(timing_rr);
PREFETCH_RUN(timing_rr, 5, -1, 0, 0, 0, 0, 0);
return 0;
}
#define ARITH_MULTI(ea_width, flag_width) \
dst = getea##ea_width(); \
if (cpu_state.abrt) \
return 1; \
switch (rmdat & 0x38) { \
case 0x00: /*ADD ea, #*/ \
setea##ea_width(dst + src); \
if (cpu_state.abrt) \
return 1; \
setadd##flag_width(dst, src); \
CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_mr); \
break; \
case 0x08: /*OR ea, #*/ \
dst |= src; \
setea##ea_width(dst); \
if (cpu_state.abrt) \
return 1; \
setznp##flag_width(dst); \
CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_mr); \
break; \
case 0x10: /*ADC ea, #*/ \
tempc = CF_SET() ? 1 : 0; \
setea##ea_width(dst + src + tempc); \
if (cpu_state.abrt) \
return 1; \
setadc##flag_width(dst, src); \
CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_mr); \
break; \
case 0x18: /*SBB ea, #*/ \
tempc = CF_SET() ? 1 : 0; \
setea##ea_width(dst - (src + tempc)); \
if (cpu_state.abrt) \
return 1; \
setsbc##flag_width(dst, src); \
CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_mr); \
break; \
case 0x20: /*AND ea, #*/ \
dst &= src; \
setea##ea_width(dst); \
if (cpu_state.abrt) \
return 1; \
setznp##flag_width(dst); \
CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_mr); \
break; \
case 0x28: /*SUB ea, #*/ \
setea##ea_width(dst - src); \
if (cpu_state.abrt) \
return 1; \
setsub##flag_width(dst, src); \
CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_mr); \
break; \
case 0x30: /*XOR ea, #*/ \
dst ^= src; \
setea##ea_width(dst); \
if (cpu_state.abrt) \
return 1; \
setznp##flag_width(dst); \
CLOCK_CYCLES((cpu_mod == 3) ? timing_rr : timing_mr); \
break; \
case 0x38: /*CMP ea, #*/ \
setsub##flag_width(dst, src); \
if (is486) { \
CLOCK_CYCLES((cpu_mod == 3) ? 1 : 2); \
} else { \
CLOCK_CYCLES((cpu_mod == 3) ? 2 : 7); \
} \
break; \
}
static int
op80_a16(uint32_t fetchdat)
{
uint8_t src;
uint8_t dst;
fetch_ea_16(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_WRITE(cpu_state.ea_seg);
src = getbyte();
if (cpu_state.abrt)
return 1;
ARITH_MULTI(b, 8);
if ((rmdat & 0x38) == 0x38) {
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_mr, 3, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 0);
} else {
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_rm, 3, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, 0);
}
return 0;
}
static int
op80_a32(uint32_t fetchdat)
{
uint8_t src;
uint8_t dst;
fetch_ea_32(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_WRITE(cpu_state.ea_seg);
src = getbyte();
if (cpu_state.abrt)
return 1;
ARITH_MULTI(b, 8);
if ((rmdat & 0x38) == 0x38) {
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_mr, 3, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 1);
} else {
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_rm, 3, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, 1);
}
return 0;
}
static int
op81_w_a16(uint32_t fetchdat)
{
uint16_t src;
uint16_t dst;
fetch_ea_16(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_WRITE(cpu_state.ea_seg);
src = getword();
if (cpu_state.abrt)
return 1;
ARITH_MULTI(w, 16);
if ((rmdat & 0x38) == 0x38) {
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_mr, 4, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 0);
} else {
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_rm, 4, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, 0);
}
return 0;
}
static int
op81_w_a32(uint32_t fetchdat)
{
uint16_t src;
uint16_t dst;
fetch_ea_32(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_WRITE(cpu_state.ea_seg);
src = getword();
if (cpu_state.abrt)
return 1;
ARITH_MULTI(w, 16);
if ((rmdat & 0x38) == 0x38) {
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_mr, 4, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 1);
} else {
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_rm, 4, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, 1);
}
return 0;
}
static int
op81_l_a16(uint32_t fetchdat)
{
uint32_t src;
uint32_t dst;
fetch_ea_16(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_WRITE(cpu_state.ea_seg);
src = getlong();
if (cpu_state.abrt)
return 1;
ARITH_MULTI(l, 32);
if ((rmdat & 0x38) == 0x38) {
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_mr, 6, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, 0, 0);
} else {
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_rm, 6, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0);
}
return 0;
}
static int
op81_l_a32(uint32_t fetchdat)
{
uint32_t src;
uint32_t dst;
fetch_ea_32(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_WRITE(cpu_state.ea_seg);
src = getlong();
if (cpu_state.abrt)
return 1;
ARITH_MULTI(l, 32);
if ((rmdat & 0x38) == 0x38) {
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_mr, 6, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, 0, 1);
} else {
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_rm, 6, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 1);
}
return 0;
}
static int
op83_w_a16(uint32_t fetchdat)
{
uint16_t src;
uint16_t dst;
fetch_ea_16(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_WRITE(cpu_state.ea_seg);
src = getbyte();
if (cpu_state.abrt)
return 1;
if (src & 0x80)
src |= 0xff00;
ARITH_MULTI(w, 16);
if ((rmdat & 0x38) == 0x38) {
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_mr, 3, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 0);
} else {
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_rm, 3, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, 0);
}
return 0;
}
static int
op83_w_a32(uint32_t fetchdat)
{
uint16_t src;
uint16_t dst;
fetch_ea_32(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_WRITE(cpu_state.ea_seg);
src = getbyte();
if (cpu_state.abrt)
return 1;
if (src & 0x80)
src |= 0xff00;
ARITH_MULTI(w, 16);
if ((rmdat & 0x38) == 0x38) {
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_mr, 3, rmdat, (cpu_mod == 3) ? 0 : 1, 0, 0, 0, 1);
} else {
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_rm, 3, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, 1);
}
return 0;
}
static int
op83_l_a16(uint32_t fetchdat)
{
uint32_t src;
uint32_t dst;
fetch_ea_16(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_WRITE(cpu_state.ea_seg);
src = getbyte();
if (cpu_state.abrt)
return 1;
if (src & 0x80)
src |= 0xffffff00;
ARITH_MULTI(l, 32);
if ((rmdat & 0x38) == 0x38) {
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_mr, 3, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, 0, 0);
} else {
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_rm, 3, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0);
}
return 0;
}
static int
op83_l_a32(uint32_t fetchdat)
{
uint32_t src;
uint32_t dst;
fetch_ea_32(fetchdat);
if (cpu_mod != 3)
SEG_CHECK_WRITE(cpu_state.ea_seg);
src = getbyte();
if (cpu_state.abrt)
return 1;
if (src & 0x80)
src |= 0xffffff00;
ARITH_MULTI(l, 32);
if ((rmdat & 0x38) == 0x38) {
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_mr, 3, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, 0, 1);
} else {
PREFETCH_RUN((cpu_mod == 3) ? timing_rr : timing_rm, 3, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 1);
}
return 0;
}
``` | /content/code_sandbox/src/cpu/x86_ops_arith.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 11,074 |
```objective-c
#ifdef USE_NEW_DYNAREC
/* OP_SHIFT_b: expands into the body of a GRP2 8-bit shift/rotate handler
 * (dispatch on rmdat bits 3..5: ROL/ROR/RCL/RCR/SHL/SHR/SAR).
 *   c    - shift/rotate count (assumed already masked by the caller -
 *          TODO confirm against the instruction dispatchers that use this)
 *   ea32 - 1 for 32-bit addressing; forwarded to PREFETCH_RUN accounting only
 * Expects `temp` to already hold the operand fetched from the EA and
 * `temp2`/`tempc` to be declared by the enclosing function. The result is
 * written back with seteab(); flags go through the new-dynarec lazy-flag
 * helpers set_flags_rotate()/set_flags_shift(), except RCL/RCR which update
 * cpu_state.flags directly (hence the flags_rebuild() up front).
 * Returns 0 when c == 0 (no write-back, no flag change) and 1 when the
 * write-back aborts (cpu_state.abrt). */
# define OP_SHIFT_b(c, ea32) \
    { \
        uint8_t temp_orig = temp; /* unshifted operand, needed by the shift flag helpers */ \
        if (!c) \
            return 0; /* zero count: leave operand and flags untouched */ \
        flags_rebuild(); \
        switch (rmdat & 0x38) { \
            case 0x00: /*ROL b, c*/ \
                temp = (temp << (c & 7)) | (temp >> (8 - (c & 7))); /* rotate count taken mod 8 */ \
                seteab(temp); \
                if (cpu_state.abrt) \
                    return 1; \
                set_flags_rotate(FLAGS_ROL8, temp); \
                CLOCK_CYCLES((cpu_mod == 3) ? 3 : 7); \
                PREFETCH_RUN((cpu_mod == 3) ? 3 : 7, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, ea32); \
                break; \
            case 0x08: /*ROR b,CL*/ \
                temp = (temp >> (c & 7)) | (temp << (8 - (c & 7))); \
                seteab(temp); \
                if (cpu_state.abrt) \
                    return 1; \
                set_flags_rotate(FLAGS_ROR8, temp); \
                CLOCK_CYCLES((cpu_mod == 3) ? 3 : 7); \
                PREFETCH_RUN((cpu_mod == 3) ? 3 : 7, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, ea32); \
                break; \
            case 0x10: /*RCL b,CL*/ \
                temp2 = cpu_state.flags & C_FLAG; \
                if (is486) \
                    CLOCK_CYCLES_ALWAYS(c); \
                while (c > 0) { /* rotate through CF one bit at a time */ \
                    tempc = temp2 ? 1 : 0; \
                    temp2 = temp & 0x80; \
                    temp  = (temp << 1) | tempc; \
                    c--; \
                } \
                seteab(temp); \
                if (cpu_state.abrt) \
                    return 1; \
                cpu_state.flags &= ~(C_FLAG | V_FLAG); \
                if (temp2) \
                    cpu_state.flags |= C_FLAG; \
                if ((cpu_state.flags & C_FLAG) ^ (temp >> 7)) /* OF = new CF XOR result bit 7 */ \
                    cpu_state.flags |= V_FLAG; \
                CLOCK_CYCLES((cpu_mod == 3) ? 9 : 10); \
                PREFETCH_RUN((cpu_mod == 3) ? 9 : 10, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, ea32); \
                break; \
            case 0x18: /*RCR b,CL*/ \
                temp2 = cpu_state.flags & C_FLAG; \
                if (is486) \
                    CLOCK_CYCLES_ALWAYS(c); \
                while (c > 0) { /* rotate through CF one bit at a time */ \
                    tempc = temp2 ? 0x80 : 0; \
                    temp2 = temp & 1; \
                    temp  = (temp >> 1) | tempc; \
                    c--; \
                } \
                seteab(temp); \
                if (cpu_state.abrt) \
                    return 1; \
                cpu_state.flags &= ~(C_FLAG | V_FLAG); \
                if (temp2) \
                    cpu_state.flags |= C_FLAG; \
                if ((temp ^ (temp >> 1)) & 0x40) /* OF = result bit 7 XOR bit 6 */ \
                    cpu_state.flags |= V_FLAG; \
                CLOCK_CYCLES((cpu_mod == 3) ? 9 : 10); \
                PREFETCH_RUN((cpu_mod == 3) ? 9 : 10, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, ea32); \
                break; \
            case 0x20: /* reg 4 and reg 6 both decode as SHL */ \
            case 0x30: /*SHL b,CL*/ \
                seteab(temp << c); \
                if (cpu_state.abrt) \
                    return 1; \
                set_flags_shift(FLAGS_SHL8, temp_orig, c, (temp << c) & 0xff); \
                CLOCK_CYCLES((cpu_mod == 3) ? 3 : 7); \
                PREFETCH_RUN((cpu_mod == 3) ? 3 : 7, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, ea32); \
                break; \
            case 0x28: /*SHR b,CL*/ \
                seteab(temp >> c); \
                if (cpu_state.abrt) \
                    return 1; \
                set_flags_shift(FLAGS_SHR8, temp_orig, c, temp >> c); \
                CLOCK_CYCLES((cpu_mod == 3) ? 3 : 7); \
                PREFETCH_RUN((cpu_mod == 3) ? 3 : 7, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, ea32); \
                break; \
            case 0x38: /*SAR b,CL*/ \
                temp = (int8_t) temp >> c; /* arithmetic shift: sign bit replicated */ \
                seteab(temp); \
                if (cpu_state.abrt) \
                    return 1; \
                set_flags_shift(FLAGS_SAR8, temp_orig, c, temp); \
                CLOCK_CYCLES((cpu_mod == 3) ? 3 : 7); \
                PREFETCH_RUN((cpu_mod == 3) ? 3 : 7, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, ea32); \
                break; \
        } \
    }
/* OP_SHIFT_w: GRP2 shift/rotate body for 16-bit operands (dispatch on rmdat
 * bits 3..5: ROL/ROR/RCL/RCR/SHL/SHR/SAR).
 *   c    - shift/rotate count (assumed already masked by the caller -
 *          TODO confirm against the instruction dispatchers that use this)
 *   ea32 - 1 for 32-bit addressing; forwarded to PREFETCH_RUN accounting only
 * Expects `temp` to already hold the operand fetched from the EA and
 * `temp2`/`tempc` to be declared by the enclosing function. Writes back with
 * seteaw(); ROL/ROR/SHL/SHR/SAR set flags via the new-dynarec lazy-flag
 * helpers, RCL/RCR update cpu_state.flags directly (hence flags_rebuild()).
 * Returns 0 when c == 0 (no write-back, no flag change) and 1 when the
 * write-back aborts (cpu_state.abrt). */
# define OP_SHIFT_w(c, ea32) \
    { \
        uint16_t temp_orig = temp; /* unshifted operand, needed by the shift flag helpers */ \
        if (!c) \
            return 0; /* zero count: leave operand and flags untouched */ \
        flags_rebuild(); \
        switch (rmdat & 0x38) { \
            case 0x00: /*ROL w, c*/ \
                temp = (temp << (c & 15)) | (temp >> (16 - (c & 15))); /* rotate count taken mod 16 */ \
                seteaw(temp); \
                if (cpu_state.abrt) \
                    return 1; \
                set_flags_rotate(FLAGS_ROL16, temp); \
                CLOCK_CYCLES((cpu_mod == 3) ? 3 : 7); \
                PREFETCH_RUN((cpu_mod == 3) ? 3 : 7, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, ea32); \
                break; \
            case 0x08: /*ROR w,CL*/ \
                temp = (temp >> (c & 15)) | (temp << (16 - (c & 15))); \
                seteaw(temp); \
                if (cpu_state.abrt) \
                    return 1; \
                set_flags_rotate(FLAGS_ROR16, temp); \
                CLOCK_CYCLES((cpu_mod == 3) ? 3 : 7); \
                PREFETCH_RUN((cpu_mod == 3) ? 3 : 7, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, ea32); \
                break; \
            case 0x10: /*RCL w, c*/ \
                temp2 = cpu_state.flags & C_FLAG; \
                if (is486) \
                    CLOCK_CYCLES_ALWAYS(c); \
                while (c > 0) { /* rotate through CF one bit at a time */ \
                    tempc = temp2 ? 1 : 0; \
                    temp2 = temp & 0x8000; \
                    temp  = (temp << 1) | tempc; \
                    c--; \
                } \
                seteaw(temp); \
                if (cpu_state.abrt) \
                    return 1; \
                cpu_state.flags &= ~(C_FLAG | V_FLAG); \
                if (temp2) \
                    cpu_state.flags |= C_FLAG; \
                if ((cpu_state.flags & C_FLAG) ^ (temp >> 15)) /* OF = new CF XOR result bit 15 */ \
                    cpu_state.flags |= V_FLAG; \
                CLOCK_CYCLES((cpu_mod == 3) ? 9 : 10); \
                PREFETCH_RUN((cpu_mod == 3) ? 9 : 10, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, ea32); \
                break; \
            case 0x18: /*RCR w, c*/ \
                temp2 = cpu_state.flags & C_FLAG; \
                if (is486) \
                    CLOCK_CYCLES_ALWAYS(c); \
                while (c > 0) { /* rotate through CF one bit at a time */ \
                    tempc = temp2 ? 0x8000 : 0; \
                    temp2 = temp & 1; \
                    temp  = (temp >> 1) | tempc; \
                    c--; \
                } \
                seteaw(temp); \
                if (cpu_state.abrt) \
                    return 1; \
                cpu_state.flags &= ~(C_FLAG | V_FLAG); \
                if (temp2) \
                    cpu_state.flags |= C_FLAG; \
                if ((temp ^ (temp >> 1)) & 0x4000) /* OF = result bit 15 XOR bit 14 */ \
                    cpu_state.flags |= V_FLAG; \
                CLOCK_CYCLES((cpu_mod == 3) ? 9 : 10); \
                PREFETCH_RUN((cpu_mod == 3) ? 9 : 10, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, ea32); \
                break; \
            case 0x20: /* reg 4 and reg 6 both decode as SHL */ \
            case 0x30: /*SHL w, c*/ \
                seteaw(temp << c); \
                if (cpu_state.abrt) \
                    return 1; \
                set_flags_shift(FLAGS_SHL16, temp_orig, c, (temp << c) & 0xffff); \
                CLOCK_CYCLES((cpu_mod == 3) ? 3 : 7); \
                PREFETCH_RUN((cpu_mod == 3) ? 3 : 7, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, ea32); \
                break; \
            case 0x28: /*SHR w, c*/ \
                seteaw(temp >> c); \
                if (cpu_state.abrt) \
                    return 1; \
                set_flags_shift(FLAGS_SHR16, temp_orig, c, temp >> c); \
                CLOCK_CYCLES((cpu_mod == 3) ? 3 : 7); \
                PREFETCH_RUN((cpu_mod == 3) ? 3 : 7, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, ea32); \
                break; \
            case 0x38: /*SAR w, c*/ \
                temp = (int16_t) temp >> c; /* arithmetic shift: sign bit replicated */ \
                seteaw(temp); \
                if (cpu_state.abrt) \
                    return 1; \
                set_flags_shift(FLAGS_SAR16, temp_orig, c, temp); \
                CLOCK_CYCLES((cpu_mod == 3) ? 3 : 7); \
                PREFETCH_RUN((cpu_mod == 3) ? 3 : 7, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, ea32); \
                break; \
        } \
    }
# define OP_SHIFT_l(c, ea32) \
{ \
uint32_t temp_orig = temp; \
if (!c) \
return 0; \
flags_rebuild(); \
switch (rmdat & 0x38) { \
case 0x00: /*ROL l, c*/ \
temp = (temp << c) | (temp >> (32 - c)); \
seteal(temp); \
if (cpu_state.abrt) \
return 1; \
set_flags_rotate(FLAGS_ROL32, temp); \
CLOCK_CYCLES((cpu_mod == 3) ? 3 : 7); \
PREFETCH_RUN((cpu_mod == 3) ? 3 : 7, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, ea32); \
break; \
case 0x08: /*ROR l,CL*/ \
temp = (temp >> c) | (temp << (32 - c)); \
seteal(temp); \
if (cpu_state.abrt) \
return 1; \
set_flags_rotate(FLAGS_ROR32, temp); \
CLOCK_CYCLES((cpu_mod == 3) ? 3 : 7); \
PREFETCH_RUN((cpu_mod == 3) ? 3 : 7, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, ea32); \
break; \
case 0x10: /*RCL l, c*/ \
temp2 = CF_SET(); \
if (is486) \
CLOCK_CYCLES_ALWAYS(c); \
while (c > 0) { \
tempc = temp2 ? 1 : 0; \
temp2 = temp & 0x80000000; \
temp = (temp << 1) | tempc; \
c--; \
} \
seteal(temp); \
if (cpu_state.abrt) \
return 1; \
cpu_state.flags &= ~(C_FLAG | V_FLAG); \
if (temp2) \
cpu_state.flags |= C_FLAG; \
if ((cpu_state.flags & C_FLAG) ^ (temp >> 31)) \
cpu_state.flags |= V_FLAG; \
CLOCK_CYCLES((cpu_mod == 3) ? 9 : 10); \
PREFETCH_RUN((cpu_mod == 3) ? 9 : 10, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, ea32); \
break; \
case 0x18: /*RCR l, c*/ \
temp2 = cpu_state.flags & C_FLAG; \
if (is486) \
CLOCK_CYCLES_ALWAYS(c); \
while (c > 0) { \
tempc = temp2 ? 0x80000000 : 0; \
temp2 = temp & 1; \
temp = (temp >> 1) | tempc; \
c--; \
} \
seteal(temp); \
if (cpu_state.abrt) \
return 1; \
cpu_state.flags &= ~(C_FLAG | V_FLAG); \
if (temp2) \
cpu_state.flags |= C_FLAG; \
if ((temp ^ (temp >> 1)) & 0x40000000) \
cpu_state.flags |= V_FLAG; \
CLOCK_CYCLES((cpu_mod == 3) ? 9 : 10); \
PREFETCH_RUN((cpu_mod == 3) ? 9 : 10, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, ea32); \
break; \
case 0x20: \
case 0x30: /*SHL l, c*/ \
seteal(temp << c); \
if (cpu_state.abrt) \
return 1; \
set_flags_shift(FLAGS_SHL32, temp_orig, c, temp << c); \
CLOCK_CYCLES((cpu_mod == 3) ? 3 : 7); \
PREFETCH_RUN((cpu_mod == 3) ? 3 : 7, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, ea32); \
break; \
case 0x28: /*SHR l, c*/ \
seteal(temp >> c); \
if (cpu_state.abrt) \
return 1; \
set_flags_shift(FLAGS_SHR32, temp_orig, c, temp >> c); \
CLOCK_CYCLES((cpu_mod == 3) ? 3 : 7); \
PREFETCH_RUN((cpu_mod == 3) ? 3 : 7, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, ea32); \
break; \
case 0x38: /*SAR l, c*/ \
temp = (int32_t) temp >> c; \
seteal(temp); \
if (cpu_state.abrt) \
return 1; \
set_flags_shift(FLAGS_SAR32, temp_orig, c, temp); \
CLOCK_CYCLES((cpu_mod == 3) ? 3 : 7); \
PREFETCH_RUN((cpu_mod == 3) ? 3 : 7, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, ea32); \
break; \
} \
}
#else
/* Grp2 (C0/C1/D0-D3) shift/rotate bodies.  Each OP_SHIFT_* macro expects the
   caller to declare `temp` (the operand, already read), `temp2` and `tempc`
   (scratch), and provide the count `c` and `rmdat`; it dispatches on the /reg
   field of the ModRM byte, writes the result back through seteab/seteaw/seteal
   (making the caller return 1 on abort) and updates the arithmetic flags.
   NOTE(review): with `c` up to 31, `temp << c` in the 8- and 16-bit SHL cases
   promotes `temp` to signed int and can shift into/past the sign bit, which is
   undefined behavior in C — worth auditing.
   NOTE(review): in OP_SHIFT_l the PREFETCH_RUN read/write argument positions
   differ between ROL/ROR and the remaining cases — confirm which ordering is
   intended for 32-bit operands. */
# define OP_SHIFT_b(c, ea32) \
    { \
        uint8_t temp_orig = temp; \
        if (!c) \
            return 0; \
        flags_rebuild(); \
        switch (rmdat & 0x38) { \
            case 0x00: /*ROL b, c*/ \
                temp = (temp << (c & 7)) | (temp >> (8 - (c & 7))); \
                seteab(temp); \
                if (cpu_state.abrt) \
                    return 1; \
                cpu_state.flags &= ~(C_FLAG | V_FLAG); \
                if (temp & 1) \
                    cpu_state.flags |= C_FLAG; \
                if ((temp ^ (temp >> 7)) & 1) \
                    cpu_state.flags |= V_FLAG; \
                CLOCK_CYCLES((cpu_mod == 3) ? 3 : 7); \
                PREFETCH_RUN((cpu_mod == 3) ? 3 : 7, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, ea32); \
                break; \
            case 0x08: /*ROR b,CL*/ \
                temp = (temp >> (c & 7)) | (temp << (8 - (c & 7))); \
                seteab(temp); \
                if (cpu_state.abrt) \
                    return 1; \
                cpu_state.flags &= ~(C_FLAG | V_FLAG); \
                if (temp & 0x80) \
                    cpu_state.flags |= C_FLAG; \
                if ((temp ^ (temp >> 1)) & 0x40) \
                    cpu_state.flags |= V_FLAG; \
                CLOCK_CYCLES((cpu_mod == 3) ? 3 : 7); \
                PREFETCH_RUN((cpu_mod == 3) ? 3 : 7, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, ea32); \
                break; \
            case 0x10: /*RCL b,CL*/ \
                temp2 = cpu_state.flags & C_FLAG; \
                if (is486) \
                    CLOCK_CYCLES_ALWAYS(c); \
                while (c > 0) { \
                    tempc = temp2 ? 1 : 0; \
                    temp2 = temp & 0x80; \
                    temp  = (temp << 1) | tempc; \
                    c--; \
                } \
                seteab(temp); \
                if (cpu_state.abrt) \
                    return 1; \
                cpu_state.flags &= ~(C_FLAG | V_FLAG); \
                if (temp2) \
                    cpu_state.flags |= C_FLAG; \
                if ((cpu_state.flags & C_FLAG) ^ (temp >> 7)) \
                    cpu_state.flags |= V_FLAG; \
                CLOCK_CYCLES((cpu_mod == 3) ? 9 : 10); \
                PREFETCH_RUN((cpu_mod == 3) ? 9 : 10, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, ea32); \
                break; \
            case 0x18: /*RCR b,CL*/ \
                temp2 = cpu_state.flags & C_FLAG; \
                if (is486) \
                    CLOCK_CYCLES_ALWAYS(c); \
                while (c > 0) { \
                    tempc = temp2 ? 0x80 : 0; \
                    temp2 = temp & 1; \
                    temp  = (temp >> 1) | tempc; \
                    c--; \
                } \
                seteab(temp); \
                if (cpu_state.abrt) \
                    return 1; \
                cpu_state.flags &= ~(C_FLAG | V_FLAG); \
                if (temp2) \
                    cpu_state.flags |= C_FLAG; \
                if ((temp ^ (temp >> 1)) & 0x40) \
                    cpu_state.flags |= V_FLAG; \
                CLOCK_CYCLES((cpu_mod == 3) ? 9 : 10); \
                PREFETCH_RUN((cpu_mod == 3) ? 9 : 10, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, ea32); \
                break; \
            case 0x20: \
            case 0x30: /*SHL b,CL*/ \
                seteab(temp << c); \
                if (cpu_state.abrt) \
                    return 1; \
                set_flags_shift(FLAGS_SHL8, temp_orig, c, (temp << c) & 0xff); \
                CLOCK_CYCLES((cpu_mod == 3) ? 3 : 7); \
                PREFETCH_RUN((cpu_mod == 3) ? 3 : 7, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, ea32); \
                break; \
            case 0x28: /*SHR b,CL*/ \
                seteab(temp >> c); \
                if (cpu_state.abrt) \
                    return 1; \
                set_flags_shift(FLAGS_SHR8, temp_orig, c, temp >> c); \
                CLOCK_CYCLES((cpu_mod == 3) ? 3 : 7); \
                PREFETCH_RUN((cpu_mod == 3) ? 3 : 7, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, ea32); \
                break; \
            case 0x38: /*SAR b,CL*/ \
                temp = (int8_t) temp >> c; \
                seteab(temp); \
                if (cpu_state.abrt) \
                    return 1; \
                set_flags_shift(FLAGS_SAR8, temp_orig, c, temp); \
                CLOCK_CYCLES((cpu_mod == 3) ? 3 : 7); \
                PREFETCH_RUN((cpu_mod == 3) ? 3 : 7, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, ea32); \
                break; \
        } \
    }
/* 16-bit variant of the Grp2 shift/rotate body; see OP_SHIFT_b above. */
# define OP_SHIFT_w(c, ea32) \
    { \
        uint16_t temp_orig = temp; \
        if (!c) \
            return 0; \
        flags_rebuild(); \
        switch (rmdat & 0x38) { \
            case 0x00: /*ROL w, c*/ \
                temp = (temp << (c & 15)) | (temp >> (16 - (c & 15))); \
                seteaw(temp); \
                if (cpu_state.abrt) \
                    return 1; \
                cpu_state.flags &= ~(C_FLAG | V_FLAG); \
                if (temp & 1) \
                    cpu_state.flags |= C_FLAG; \
                if ((temp ^ (temp >> 15)) & 1) \
                    cpu_state.flags |= V_FLAG; \
                CLOCK_CYCLES((cpu_mod == 3) ? 3 : 7); \
                PREFETCH_RUN((cpu_mod == 3) ? 3 : 7, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, ea32); \
                break; \
            case 0x08: /*ROR w,CL*/ \
                temp = (temp >> (c & 15)) | (temp << (16 - (c & 15))); \
                seteaw(temp); \
                if (cpu_state.abrt) \
                    return 1; \
                cpu_state.flags &= ~(C_FLAG | V_FLAG); \
                if (temp & 0x8000) \
                    cpu_state.flags |= C_FLAG; \
                if ((temp ^ (temp >> 1)) & 0x4000) \
                    cpu_state.flags |= V_FLAG; \
                CLOCK_CYCLES((cpu_mod == 3) ? 3 : 7); \
                PREFETCH_RUN((cpu_mod == 3) ? 3 : 7, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, ea32); \
                break; \
            case 0x10: /*RCL w, c*/ \
                temp2 = cpu_state.flags & C_FLAG; \
                if (is486) \
                    CLOCK_CYCLES_ALWAYS(c); \
                while (c > 0) { \
                    tempc = temp2 ? 1 : 0; \
                    temp2 = temp & 0x8000; \
                    temp  = (temp << 1) | tempc; \
                    c--; \
                } \
                seteaw(temp); \
                if (cpu_state.abrt) \
                    return 1; \
                cpu_state.flags &= ~(C_FLAG | V_FLAG); \
                if (temp2) \
                    cpu_state.flags |= C_FLAG; \
                if ((cpu_state.flags & C_FLAG) ^ (temp >> 15)) \
                    cpu_state.flags |= V_FLAG; \
                CLOCK_CYCLES((cpu_mod == 3) ? 9 : 10); \
                PREFETCH_RUN((cpu_mod == 3) ? 9 : 10, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, ea32); \
                break; \
            case 0x18: /*RCR w, c*/ \
                temp2 = cpu_state.flags & C_FLAG; \
                if (is486) \
                    CLOCK_CYCLES_ALWAYS(c); \
                while (c > 0) { \
                    tempc = temp2 ? 0x8000 : 0; \
                    temp2 = temp & 1; \
                    temp  = (temp >> 1) | tempc; \
                    c--; \
                } \
                seteaw(temp); \
                if (cpu_state.abrt) \
                    return 1; \
                cpu_state.flags &= ~(C_FLAG | V_FLAG); \
                if (temp2) \
                    cpu_state.flags |= C_FLAG; \
                if ((temp ^ (temp >> 1)) & 0x4000) \
                    cpu_state.flags |= V_FLAG; \
                CLOCK_CYCLES((cpu_mod == 3) ? 9 : 10); \
                PREFETCH_RUN((cpu_mod == 3) ? 9 : 10, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, ea32); \
                break; \
            case 0x20: \
            case 0x30: /*SHL w, c*/ \
                seteaw(temp << c); \
                if (cpu_state.abrt) \
                    return 1; \
                set_flags_shift(FLAGS_SHL16, temp_orig, c, (temp << c) & 0xffff); \
                CLOCK_CYCLES((cpu_mod == 3) ? 3 : 7); \
                PREFETCH_RUN((cpu_mod == 3) ? 3 : 7, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, ea32); \
                break; \
            case 0x28: /*SHR w, c*/ \
                seteaw(temp >> c); \
                if (cpu_state.abrt) \
                    return 1; \
                set_flags_shift(FLAGS_SHR16, temp_orig, c, temp >> c); \
                CLOCK_CYCLES((cpu_mod == 3) ? 3 : 7); \
                PREFETCH_RUN((cpu_mod == 3) ? 3 : 7, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, ea32); \
                break; \
            case 0x38: /*SAR w, c*/ \
                temp = (int16_t) temp >> c; \
                seteaw(temp); \
                if (cpu_state.abrt) \
                    return 1; \
                set_flags_shift(FLAGS_SAR16, temp_orig, c, temp); \
                CLOCK_CYCLES((cpu_mod == 3) ? 3 : 7); \
                PREFETCH_RUN((cpu_mod == 3) ? 3 : 7, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, ea32); \
                break; \
        } \
    }
/* 32-bit variant of the Grp2 shift/rotate body; see OP_SHIFT_b above.
   Unlike the 8/16-bit forms, ROL/ROR use the raw count (c is already
   masked to 5 bits by the callers) and RCL samples the carry via CF_SET(). */
# define OP_SHIFT_l(c, ea32) \
    { \
        uint32_t temp_orig = temp; \
        if (!c) \
            return 0; \
        flags_rebuild(); \
        switch (rmdat & 0x38) { \
            case 0x00: /*ROL l, c*/ \
                temp = (temp << c) | (temp >> (32 - c)); \
                seteal(temp); \
                if (cpu_state.abrt) \
                    return 1; \
                cpu_state.flags &= ~(C_FLAG | V_FLAG); \
                if (temp & 1) \
                    cpu_state.flags |= C_FLAG; \
                if ((temp ^ (temp >> 31)) & 1) \
                    cpu_state.flags |= V_FLAG; \
                CLOCK_CYCLES((cpu_mod == 3) ? 3 : 7); \
                PREFETCH_RUN((cpu_mod == 3) ? 3 : 7, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, ea32); \
                break; \
            case 0x08: /*ROR l,CL*/ \
                temp = (temp >> c) | (temp << (32 - c)); \
                seteal(temp); \
                if (cpu_state.abrt) \
                    return 1; \
                cpu_state.flags &= ~(C_FLAG | V_FLAG); \
                if (temp & 0x80000000) \
                    cpu_state.flags |= C_FLAG; \
                if ((temp ^ (temp >> 1)) & 0x40000000) \
                    cpu_state.flags |= V_FLAG; \
                CLOCK_CYCLES((cpu_mod == 3) ? 3 : 7); \
                PREFETCH_RUN((cpu_mod == 3) ? 3 : 7, 2, rmdat, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0, ea32); \
                break; \
            case 0x10: /*RCL l, c*/ \
                temp2 = CF_SET(); \
                if (is486) \
                    CLOCK_CYCLES_ALWAYS(c); \
                while (c > 0) { \
                    tempc = temp2 ? 1 : 0; \
                    temp2 = temp & 0x80000000; \
                    temp  = (temp << 1) | tempc; \
                    c--; \
                } \
                seteal(temp); \
                if (cpu_state.abrt) \
                    return 1; \
                cpu_state.flags &= ~(C_FLAG | V_FLAG); \
                if (temp2) \
                    cpu_state.flags |= C_FLAG; \
                if ((cpu_state.flags & C_FLAG) ^ (temp >> 31)) \
                    cpu_state.flags |= V_FLAG; \
                CLOCK_CYCLES((cpu_mod == 3) ? 9 : 10); \
                PREFETCH_RUN((cpu_mod == 3) ? 9 : 10, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, ea32); \
                break; \
            case 0x18: /*RCR l, c*/ \
                temp2 = cpu_state.flags & C_FLAG; \
                if (is486) \
                    CLOCK_CYCLES_ALWAYS(c); \
                while (c > 0) { \
                    tempc = temp2 ? 0x80000000 : 0; \
                    temp2 = temp & 1; \
                    temp  = (temp >> 1) | tempc; \
                    c--; \
                } \
                seteal(temp); \
                if (cpu_state.abrt) \
                    return 1; \
                cpu_state.flags &= ~(C_FLAG | V_FLAG); \
                if (temp2) \
                    cpu_state.flags |= C_FLAG; \
                if ((temp ^ (temp >> 1)) & 0x40000000) \
                    cpu_state.flags |= V_FLAG; \
                CLOCK_CYCLES((cpu_mod == 3) ? 9 : 10); \
                PREFETCH_RUN((cpu_mod == 3) ? 9 : 10, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, ea32); \
                break; \
            case 0x20: \
            case 0x30: /*SHL l, c*/ \
                seteal(temp << c); \
                if (cpu_state.abrt) \
                    return 1; \
                set_flags_shift(FLAGS_SHL32, temp_orig, c, temp << c); \
                CLOCK_CYCLES((cpu_mod == 3) ? 3 : 7); \
                PREFETCH_RUN((cpu_mod == 3) ? 3 : 7, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, ea32); \
                break; \
            case 0x28: /*SHR l, c*/ \
                seteal(temp >> c); \
                if (cpu_state.abrt) \
                    return 1; \
                set_flags_shift(FLAGS_SHR32, temp_orig, c, temp >> c); \
                CLOCK_CYCLES((cpu_mod == 3) ? 3 : 7); \
                PREFETCH_RUN((cpu_mod == 3) ? 3 : 7, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, ea32); \
                break; \
            case 0x38: /*SAR l, c*/ \
                temp = (int32_t) temp >> c; \
                seteal(temp); \
                if (cpu_state.abrt) \
                    return 1; \
                set_flags_shift(FLAGS_SAR32, temp_orig, c, temp); \
                CLOCK_CYCLES((cpu_mod == 3) ? 3 : 7); \
                PREFETCH_RUN((cpu_mod == 3) ? 3 : 7, 2, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, ea32); \
                break; \
        } \
    }
#endif
/* C0 /reg ib: byte shift/rotate group, count from an immediate byte masked
   to 5 bits; 16-bit addressing.  `tempc` and `temp2` are declared for use
   inside the OP_SHIFT_b macro body. */
static int
opC0_a16(uint32_t fetchdat)
{
    int c;
    int tempc;
    uint8_t temp;
    uint8_t temp2 = 0;
    fetch_ea_16(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_WRITE(cpu_state.ea_seg);
    /* The count byte follows the ModRM/displacement bytes. */
    c = readmemb(cs, cpu_state.pc) & 31;
    cpu_state.pc++;
    PREFETCH_PREFIX();
    temp = geteab();
    if (cpu_state.abrt)
        return 1;
    OP_SHIFT_b(c, 0);
    return 0;
}
/* C0 /reg ib with 32-bit addressing. */
static int
opC0_a32(uint32_t fetchdat)
{
    int c;
    int tempc;
    uint8_t temp;
    uint8_t temp2 = 0;
    fetch_ea_32(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_WRITE(cpu_state.ea_seg);
    c = readmemb(cs, cpu_state.pc) & 31;
    cpu_state.pc++;
    PREFETCH_PREFIX();
    temp = geteab();
    if (cpu_state.abrt)
        return 1;
    OP_SHIFT_b(c, 0);
    return 0;
}
/* C1 /reg ib: word shift/rotate group, immediate count; 16-bit addressing. */
static int
opC1_w_a16(uint32_t fetchdat)
{
    int c;
    int tempc;
    uint16_t temp;
    uint16_t temp2 = 0;
    fetch_ea_16(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_WRITE(cpu_state.ea_seg);
    c = readmemb(cs, cpu_state.pc) & 31;
    cpu_state.pc++;
    PREFETCH_PREFIX();
    temp = geteaw();
    if (cpu_state.abrt)
        return 1;
    OP_SHIFT_w(c, 0);
    return 0;
}
/* C1 /reg ib, word operand, 32-bit addressing. */
static int
opC1_w_a32(uint32_t fetchdat)
{
    int c;
    int tempc;
    uint16_t temp;
    uint16_t temp2 = 0;
    fetch_ea_32(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_WRITE(cpu_state.ea_seg);
    c = readmemb(cs, cpu_state.pc) & 31;
    cpu_state.pc++;
    PREFETCH_PREFIX();
    temp = geteaw();
    if (cpu_state.abrt)
        return 1;
    OP_SHIFT_w(c, 1);
    return 0;
}
/* C1 /reg ib, dword operand, 16-bit addressing. */
static int
opC1_l_a16(uint32_t fetchdat)
{
    int c;
    int tempc;
    uint32_t temp;
    uint32_t temp2 = 0;
    fetch_ea_16(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_WRITE(cpu_state.ea_seg);
    c = readmemb(cs, cpu_state.pc) & 31;
    cpu_state.pc++;
    PREFETCH_PREFIX();
    temp = geteal();
    if (cpu_state.abrt)
        return 1;
    OP_SHIFT_l(c, 0);
    return 0;
}
/* C1 /reg ib, dword operand, 32-bit addressing. */
static int
opC1_l_a32(uint32_t fetchdat)
{
    int c;
    int tempc;
    uint32_t temp;
    uint32_t temp2 = 0;
    fetch_ea_32(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_WRITE(cpu_state.ea_seg);
    c = readmemb(cs, cpu_state.pc) & 31;
    cpu_state.pc++;
    PREFETCH_PREFIX();
    temp = geteal();
    if (cpu_state.abrt)
        return 1;
    OP_SHIFT_l(c, 1);
    return 0;
}
/* D0 /reg: byte shift/rotate group with an implicit count of 1; 16-bit
   addressing.  `tempc`/`temp2` are consumed inside the OP_SHIFT_b macro. */
static int
opD0_a16(uint32_t fetchdat)
{
    int c = 1;
    int tempc;
    uint8_t temp;
    uint8_t temp2 = 0;
    fetch_ea_16(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_WRITE(cpu_state.ea_seg);
    temp = geteab();
    if (cpu_state.abrt)
        return 1;
    OP_SHIFT_b(c, 0);
    return 0;
}
/* D0 /reg with 32-bit addressing. */
static int
opD0_a32(uint32_t fetchdat)
{
    int c = 1;
    int tempc;
    uint8_t temp;
    uint8_t temp2 = 0;
    fetch_ea_32(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_WRITE(cpu_state.ea_seg);
    temp = geteab();
    if (cpu_state.abrt)
        return 1;
    OP_SHIFT_b(c, 1);
    return 0;
}
/* D1 /reg: word shift/rotate by 1; 16-bit addressing. */
static int
opD1_w_a16(uint32_t fetchdat)
{
    int c = 1;
    int tempc;
    uint16_t temp;
    uint16_t temp2 = 0;
    fetch_ea_16(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_WRITE(cpu_state.ea_seg);
    temp = geteaw();
    if (cpu_state.abrt)
        return 1;
    OP_SHIFT_w(c, 0);
    return 0;
}
/* D1 /reg, word operand, 32-bit addressing. */
static int
opD1_w_a32(uint32_t fetchdat)
{
    int c = 1;
    int tempc;
    uint16_t temp;
    uint16_t temp2 = 0;
    fetch_ea_32(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_WRITE(cpu_state.ea_seg);
    temp = geteaw();
    if (cpu_state.abrt)
        return 1;
    OP_SHIFT_w(c, 1);
    return 0;
}
/* D1 /reg, dword operand, 16-bit addressing. */
static int
opD1_l_a16(uint32_t fetchdat)
{
    int c = 1;
    int tempc;
    uint32_t temp;
    uint32_t temp2 = 0;
    fetch_ea_16(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_WRITE(cpu_state.ea_seg);
    temp = geteal();
    if (cpu_state.abrt)
        return 1;
    OP_SHIFT_l(c, 0);
    return 0;
}
/* D1 /reg, dword operand, 32-bit addressing. */
static int
opD1_l_a32(uint32_t fetchdat)
{
    int c = 1;
    int tempc;
    uint32_t temp;
    uint32_t temp2 = 0;
    fetch_ea_32(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_WRITE(cpu_state.ea_seg);
    temp = geteal();
    if (cpu_state.abrt)
        return 1;
    OP_SHIFT_l(c, 1);
    return 0;
}
/* D2 /reg: byte shift/rotate group, count taken from CL masked to 5 bits;
   16-bit addressing.  `tempc`/`temp2` are consumed inside OP_SHIFT_b. */
static int
opD2_a16(uint32_t fetchdat)
{
    int c;
    int tempc;
    uint8_t temp;
    uint8_t temp2 = 0;
    fetch_ea_16(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_WRITE(cpu_state.ea_seg);
    c = CL & 31;
    temp = geteab();
    if (cpu_state.abrt)
        return 1;
    OP_SHIFT_b(c, 0);
    return 0;
}
/* D2 /reg with 32-bit addressing. */
static int
opD2_a32(uint32_t fetchdat)
{
    int c;
    int tempc;
    uint8_t temp;
    uint8_t temp2 = 0;
    fetch_ea_32(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_WRITE(cpu_state.ea_seg);
    c = CL & 31;
    temp = geteab();
    if (cpu_state.abrt)
        return 1;
    OP_SHIFT_b(c, 1);
    return 0;
}
/* D3 /reg: word shift/rotate by CL; 16-bit addressing. */
static int
opD3_w_a16(uint32_t fetchdat)
{
    int c;
    int tempc;
    uint16_t temp;
    uint16_t temp2 = 0;
    fetch_ea_16(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_WRITE(cpu_state.ea_seg);
    c = CL & 31;
    temp = geteaw();
    if (cpu_state.abrt)
        return 1;
    OP_SHIFT_w(c, 0);
    return 0;
}
/* D3 /reg, word operand, 32-bit addressing. */
static int
opD3_w_a32(uint32_t fetchdat)
{
    int c;
    int tempc;
    uint16_t temp;
    uint16_t temp2 = 0;
    fetch_ea_32(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_WRITE(cpu_state.ea_seg);
    c = CL & 31;
    temp = geteaw();
    if (cpu_state.abrt)
        return 1;
    OP_SHIFT_w(c, 1);
    return 0;
}
/* D3 /reg, dword operand, 16-bit addressing. */
static int
opD3_l_a16(uint32_t fetchdat)
{
    int c;
    int tempc;
    uint32_t temp;
    uint32_t temp2 = 0;
    fetch_ea_16(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_WRITE(cpu_state.ea_seg);
    c = CL & 31;
    temp = geteal();
    if (cpu_state.abrt)
        return 1;
    OP_SHIFT_l(c, 0);
    return 0;
}
/* D3 /reg, dword operand, 32-bit addressing. */
static int
opD3_l_a32(uint32_t fetchdat)
{
    int c;
    int tempc;
    uint32_t temp;
    uint32_t temp2 = 0;
    fetch_ea_32(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_WRITE(cpu_state.ea_seg);
    c = CL & 31;
    temp = geteal();
    if (cpu_state.abrt)
        return 1;
    OP_SHIFT_l(c, 1);
    return 0;
}
/* SHLD r/m16: shift the EA word left by `count`, filling from the top of the
   paired register; C is set to the last bit shifted out, Z/N/P from the
   result.  A count of 0 leaves operand and flags untouched. */
#define SHLD_w() \
    if (count) { \
        int tempc; \
        uint32_t templ; \
        uint16_t tempw = geteaw(); \
        if (cpu_state.abrt) \
            return 1; \
        /* Do the shifts in uint32_t: a bare uint16_t promotes to *signed* \
           int, and left-shifting into/past the sign bit (count can reach \
           31) is undefined behavior in C. */ \
        tempc = (((uint32_t) tempw << (count - 1)) & (1UL << 15)) ? 1 : 0; \
        templ = ((uint32_t) tempw << 16) | cpu_state.regs[cpu_reg].w; \
        if (count <= 16) \
            tempw = templ >> (16 - count); \
        else \
            tempw = (templ << count) >> 16; \
        seteaw(tempw); \
        if (cpu_state.abrt) \
            return 1; \
        setznp16(tempw); \
        flags_rebuild(); \
        if (tempc) \
            cpu_state.flags |= C_FLAG; \
    }
/* SHLD r/m32: shift the EA dword left by `count`, filling from the top of
   the paired register; C is set to the last bit shifted out, Z/N/P from the
   result.  A count of 0 leaves operand and flags untouched. */
#define SHLD_l() \
    if (count) { \
        int tempc; \
        uint32_t templ = geteal(); \
        if (cpu_state.abrt) \
            return 1; \
        /* Use an unsigned mask: the old (1 << 31) shifts a 1 into the sign \
           bit of a signed int, which is undefined behavior in C. */ \
        tempc = ((templ << (count - 1)) & (1UL << 31)) ? 1 : 0; \
        templ = (templ << count) | (cpu_state.regs[cpu_reg].l >> (32 - count)); \
        seteal(templ); \
        if (cpu_state.abrt) \
            return 1; \
        setznp32(templ); \
        flags_rebuild(); \
        if (tempc) \
            cpu_state.flags |= C_FLAG; \
    }
/* SHRD r/m16: shift the EA word right by `count`, filling from the bottom
   of the paired register; C is set to the last bit shifted out, Z/N/P from
   the result.  A count of 0 leaves operand and flags untouched. */
#define SHRD_w() \
    if (count) { \
        int tempc; \
        uint32_t templ; \
        uint16_t tempw = geteaw(); \
        if (cpu_state.abrt) \
            return 1; \
        tempc = (tempw >> (count - 1)) & 1; \
        /* Cast before the left shift: the uint16_t register half promotes \
           to signed int, and shifting a set bit 15 into the sign bit is \
           undefined behavior in C. */ \
        templ = tempw | ((uint32_t) cpu_state.regs[cpu_reg].w << 16); \
        tempw = templ >> count; \
        seteaw(tempw); \
        if (cpu_state.abrt) \
            return 1; \
        setznp16(tempw); \
        flags_rebuild(); \
        if (tempc) \
            cpu_state.flags |= C_FLAG; \
    }
/* SHRD r/m32: shift the EA dword right by `count`, filling from the bottom
   of the paired register; C is set to the last bit shifted out, Z/N/P from
   the result.  A count of 0 leaves operand and flags untouched.  All shifts
   here operate on uint32_t, so no promotion issues arise. */
#define SHRD_l() \
    if (count) { \
        int tempc; \
        uint32_t templ = geteal(); \
        if (cpu_state.abrt) \
            return 1; \
        tempc = (templ >> (count - 1)) & 1; \
        templ = (templ >> count) | (cpu_state.regs[cpu_reg].l << (32 - count)); \
        seteal(templ); \
        if (cpu_state.abrt) \
            return 1; \
        setznp32(templ); \
        flags_rebuild(); \
        if (tempc) \
            cpu_state.flags |= C_FLAG; \
    }
/* Generates the four opcode handlers for one double-precision shift body
   (SHLD_w/SHLD_l/SHRD_w/SHRD_l): immediate-count and CL-count variants,
   each with 16- and 32-bit addressing.  The count is masked to 5 bits and
   bound to the local `count` the operation macro reads. */
#define opSHxD(operation) \
    static int op##operation##_i_a16(uint32_t fetchdat) \
    { \
        int count; \
\
        fetch_ea_16(fetchdat); \
        if (cpu_mod != 3) \
            SEG_CHECK_WRITE(cpu_state.ea_seg); \
        count = getbyte() & 31; \
        operation(); \
\
        CLOCK_CYCLES(3); \
        PREFETCH_RUN(3, 3, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0); \
        return 0; \
    } \
    static int op##operation##_CL_a16(uint32_t fetchdat) \
    { \
        int count; \
\
        fetch_ea_16(fetchdat); \
        if (cpu_mod != 3) \
            SEG_CHECK_WRITE(cpu_state.ea_seg); \
        count = CL & 31; \
        operation(); \
\
        CLOCK_CYCLES(3); \
        PREFETCH_RUN(3, 3, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 0); \
        return 0; \
    } \
    static int op##operation##_i_a32(uint32_t fetchdat) \
    { \
        int count; \
\
        fetch_ea_32(fetchdat); \
        if (cpu_mod != 3) \
            SEG_CHECK_WRITE(cpu_state.ea_seg); \
        count = getbyte() & 31; \
        operation(); \
\
        CLOCK_CYCLES(3); \
        PREFETCH_RUN(3, 3, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 1); \
        return 0; \
    } \
    static int op##operation##_CL_a32(uint32_t fetchdat) \
    { \
        int count; \
\
        fetch_ea_32(fetchdat); \
        if (cpu_mod != 3) \
            SEG_CHECK_WRITE(cpu_state.ea_seg); \
        count = CL & 31; \
        operation(); \
\
        CLOCK_CYCLES(3); \
        PREFETCH_RUN(3, 3, rmdat, 0, (cpu_mod == 3) ? 0 : 1, 0, (cpu_mod == 3) ? 0 : 1, 1); \
        return 0; \
    }
// clang-format off
/* Instantiate the imm8- and CL-count, a16 and a32 handlers for each
   double-precision shift body. */
opSHxD(SHLD_w)
opSHxD(SHLD_l)
opSHxD(SHRD_w)
opSHxD(SHRD_l)
// clang-format on
``` | /content/code_sandbox/src/cpu/x86_ops_shift.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 12,829 |
```objective-c
/* 0F B0: CMPXCHG r/m8, r8 (16-bit addressing).  Compares AL with the EA
   byte; on match the source register is written to the EA, otherwise AL is
   loaded from the EA.  Flags are set as for CMP using the original AL
   (saved in temp2) and the EA value.
   NOTE(review): the destination is written only on a match here; confirm
   whether an unconditional write-back (as documented for real CPUs) is
   needed for accuracy. */
static int
opCMPXCHG_b_a16(uint32_t fetchdat)
{
    uint8_t temp;
    uint8_t temp2 = AL;
    fetch_ea_16(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    temp = geteab();
    if (cpu_state.abrt)
        return 1;
    if (AL == temp)
        seteab(getr8(cpu_reg));
    else
        AL = temp;
    if (cpu_state.abrt)
        return 1;
    setsub8(temp2, temp);
    CLOCK_CYCLES((cpu_mod == 3) ? 6 : 10);
    return 0;
}
/* CMPXCHG r/m8, r8 with 32-bit addressing. */
static int
opCMPXCHG_b_a32(uint32_t fetchdat)
{
    uint8_t temp;
    uint8_t temp2 = AL;
    fetch_ea_32(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    temp = geteab();
    if (cpu_state.abrt)
        return 1;
    if (AL == temp)
        seteab(getr8(cpu_reg));
    else
        AL = temp;
    if (cpu_state.abrt)
        return 1;
    setsub8(temp2, temp);
    CLOCK_CYCLES((cpu_mod == 3) ? 6 : 10);
    return 0;
}
/* 0F B1: CMPXCHG r/m16, r16 — AX is the comparand/accumulator. */
static int
opCMPXCHG_w_a16(uint32_t fetchdat)
{
    uint16_t temp;
    uint16_t temp2 = AX;
    fetch_ea_16(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    temp = geteaw();
    if (cpu_state.abrt)
        return 1;
    if (AX == temp)
        seteaw(cpu_state.regs[cpu_reg].w);
    else
        AX = temp;
    if (cpu_state.abrt)
        return 1;
    setsub16(temp2, temp);
    CLOCK_CYCLES((cpu_mod == 3) ? 6 : 10);
    return 0;
}
/* CMPXCHG r/m16, r16 with 32-bit addressing. */
static int
opCMPXCHG_w_a32(uint32_t fetchdat)
{
    uint16_t temp;
    uint16_t temp2 = AX;
    fetch_ea_32(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    temp = geteaw();
    if (cpu_state.abrt)
        return 1;
    if (AX == temp)
        seteaw(cpu_state.regs[cpu_reg].w);
    else
        AX = temp;
    if (cpu_state.abrt)
        return 1;
    setsub16(temp2, temp);
    CLOCK_CYCLES((cpu_mod == 3) ? 6 : 10);
    return 0;
}
/* 0F B1 with operand-size 32: CMPXCHG r/m32, r32 — EAX is the comparand. */
static int
opCMPXCHG_l_a16(uint32_t fetchdat)
{
    uint32_t temp;
    uint32_t temp2 = EAX;
    fetch_ea_16(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    temp = geteal();
    if (cpu_state.abrt)
        return 1;
    if (EAX == temp)
        seteal(cpu_state.regs[cpu_reg].l);
    else
        EAX = temp;
    if (cpu_state.abrt)
        return 1;
    setsub32(temp2, temp);
    CLOCK_CYCLES((cpu_mod == 3) ? 6 : 10);
    return 0;
}
/* CMPXCHG r/m32, r32 with 32-bit addressing. */
static int
opCMPXCHG_l_a32(uint32_t fetchdat)
{
    uint32_t temp;
    uint32_t temp2 = EAX;
    fetch_ea_32(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    temp = geteal();
    if (cpu_state.abrt)
        return 1;
    if (EAX == temp)
        seteal(cpu_state.regs[cpu_reg].l);
    else
        EAX = temp;
    if (cpu_state.abrt)
        return 1;
    setsub32(temp2, temp);
    CLOCK_CYCLES((cpu_mod == 3) ? 6 : 10);
    return 0;
}
#ifndef OPS_286_386
/* 0F C7 /1: CMPXCHG8B m64 (16-bit addressing).  Compares EDX:EAX with the
   64-bit memory operand; on match stores ECX:EBX there, otherwise loads
   EDX:EAX from memory.  Only ZF is affected, set iff the comparison
   matched.
   NOTE(review): aborts here return 0 while every other handler in this
   file returns 1 on cpu_state.abrt — confirm whether that difference is
   intentional. */
static int
opCMPXCHG8B_a16(uint32_t fetchdat)
{
    uint32_t temp;
    uint32_t temp_hi;
    uint32_t temp2 = EAX;
    uint32_t temp2_hi = EDX;
    fetch_ea_16(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    /* Low dword through the EA helpers, high dword directly at eaaddr+4. */
    temp = geteal();
    temp_hi = readmeml(easeg, cpu_state.eaaddr + 4);
    if (cpu_state.abrt)
        return 0;
    if (EAX == temp && EDX == temp_hi) {
        seteal(EBX);
        writememl(easeg, cpu_state.eaaddr + 4, ECX);
    } else {
        EAX = temp;
        EDX = temp_hi;
    }
    if (cpu_state.abrt)
        return 0;
    flags_rebuild();
    if (temp == temp2 && temp_hi == temp2_hi)
        cpu_state.flags |= Z_FLAG;
    else
        cpu_state.flags &= ~Z_FLAG;
    cycles -= (cpu_mod == 3) ? 6 : 10;
    return 0;
}
/* CMPXCHG8B m64 with 32-bit addressing. */
static int
opCMPXCHG8B_a32(uint32_t fetchdat)
{
    uint32_t temp;
    uint32_t temp_hi;
    uint32_t temp2 = EAX;
    uint32_t temp2_hi = EDX;
    fetch_ea_32(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    temp = geteal();
    temp_hi = readmeml(easeg, cpu_state.eaaddr + 4);
    if (cpu_state.abrt)
        return 0;
    if (EAX == temp && EDX == temp_hi) {
        seteal(EBX);
        writememl(easeg, cpu_state.eaaddr + 4, ECX);
    } else {
        EAX = temp;
        EDX = temp_hi;
    }
    if (cpu_state.abrt)
        return 0;
    flags_rebuild();
    if (temp == temp2 && temp_hi == temp2_hi)
        cpu_state.flags |= Z_FLAG;
    else
        cpu_state.flags &= ~Z_FLAG;
    cycles -= (cpu_mod == 3) ? 6 : 10;
    return 0;
}
#endif
/* 0F C0: XADD r/m8, r8 (dest = EA byte, src = r8; 16-bit addressing).
   Stores src + dest to the EA, loads the previous dest into the source
   register, and sets flags as for ADD src, dest. */
static int
opXADD_b_a16(uint32_t fetchdat)
{
    uint8_t temp;
    uint8_t src;
    uint8_t dest;
    fetch_ea_16(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    src = getr8(cpu_reg);
    dest = geteab();
    if (cpu_state.abrt)
        return 1;
    temp = src + dest;
    seteab(temp);
    if (cpu_state.abrt)
        return 1;
    setadd8(src, dest);
    /* The register receives the old destination value. */
    setr8(cpu_reg, dest);
    CLOCK_CYCLES((cpu_mod == 3) ? 3 : 4);
    return 0;
}
/* XADD r/m8, r8 with 32-bit addressing. */
static int
opXADD_b_a32(uint32_t fetchdat)
{
    uint8_t temp;
    uint8_t src;
    uint8_t dest;
    fetch_ea_32(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    src = getr8(cpu_reg);
    dest = geteab();
    if (cpu_state.abrt)
        return 1;
    temp = src + dest;
    seteab(temp);
    if (cpu_state.abrt)
        return 1;
    setadd8(src, dest);
    setr8(cpu_reg, dest);
    CLOCK_CYCLES((cpu_mod == 3) ? 3 : 4);
    return 0;
}
/* 0F C1: XADD r/m16, r16; 16-bit addressing. */
static int
opXADD_w_a16(uint32_t fetchdat)
{
    uint16_t temp;
    uint16_t src;
    uint16_t dest;
    fetch_ea_16(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    src = cpu_state.regs[cpu_reg].w;
    dest = geteaw();
    if (cpu_state.abrt)
        return 1;
    temp = src + dest;
    seteaw(temp);
    if (cpu_state.abrt)
        return 1;
    setadd16(src, dest);
    cpu_state.regs[cpu_reg].w = dest;
    CLOCK_CYCLES((cpu_mod == 3) ? 3 : 4);
    return 0;
}
/* XADD r/m16, r16 with 32-bit addressing. */
static int
opXADD_w_a32(uint32_t fetchdat)
{
    uint16_t temp;
    uint16_t src;
    uint16_t dest;
    fetch_ea_32(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    src = cpu_state.regs[cpu_reg].w;
    dest = geteaw();
    if (cpu_state.abrt)
        return 1;
    temp = src + dest;
    seteaw(temp);
    if (cpu_state.abrt)
        return 1;
    setadd16(src, dest);
    cpu_state.regs[cpu_reg].w = dest;
    CLOCK_CYCLES((cpu_mod == 3) ? 3 : 4);
    return 0;
}
/* XADD r/m32, r32; 16-bit addressing. */
static int
opXADD_l_a16(uint32_t fetchdat)
{
    uint32_t temp;
    uint32_t src;
    uint32_t dest;
    fetch_ea_16(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    src = cpu_state.regs[cpu_reg].l;
    dest = geteal();
    if (cpu_state.abrt)
        return 1;
    temp = src + dest;
    seteal(temp);
    if (cpu_state.abrt)
        return 1;
    setadd32(src, dest);
    cpu_state.regs[cpu_reg].l = dest;
    CLOCK_CYCLES((cpu_mod == 3) ? 3 : 4);
    return 0;
}
/* XADD r/m32, r32 with 32-bit addressing. */
static int
opXADD_l_a32(uint32_t fetchdat)
{
    uint32_t temp;
    uint32_t src;
    uint32_t dest;
    fetch_ea_32(fetchdat);
    SEG_CHECK_WRITE(cpu_state.ea_seg);
    src = cpu_state.regs[cpu_reg].l;
    dest = geteal();
    if (cpu_state.abrt)
        return 1;
    temp = src + dest;
    seteal(temp);
    if (cpu_state.abrt)
        return 1;
    setadd32(src, dest);
    cpu_state.regs[cpu_reg].l = dest;
    CLOCK_CYCLES((cpu_mod == 3) ? 3 : 4);
    return 0;
}
``` | /content/code_sandbox/src/cpu/x86_ops_atomic.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 2,349 |
```c
/*
* 86Box A hypervisor and IBM PC system emulator that specializes in
* running old operating systems and software designed for IBM
* PC systems and compatibles from 1981 through fairly recent
* system designs based on the PCI bus.
*
* This file is part of the 86Box distribution.
*
* 808x CPU emulation, mostly ported from reenigne's XTCE, which
* is cycle-accurate.
*
* Authors: Andrew Jenner, <path_to_url
* Miran Grca, <mgrca8@gmail.com>
*
*/
#include <math.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <wchar.h>
#define HAVE_STDARG_H
#include <86box/86box.h>
#include "cpu.h"
#include "x86.h"
#include <86box/machine.h>
#include <86box/io.h>
#include <86box/mem.h>
#include <86box/rom.h>
#include <86box/nmi.h>
#include <86box/pic.h>
#include <86box/ppi.h>
#include <86box/timer.h>
#include <86box/gdbstub.h>
/* Is the CPU 8088 or 8086. */
int is8086 = 0;
uint8_t use_custom_nmi_vector = 0;
uint32_t custom_nmi_vector = 0x00000000;
/* The prefetch queue (4 bytes for 8088, 6 bytes for 8086). */
static uint8_t pfq[6];
/* Variables to aid with the prefetch queue operation. */
static int biu_cycles = 0, pfq_pos = 0;
/* The IP equivalent of the current prefetch queue position. */
static uint16_t pfq_ip;
/* Pointer tables needed for segment overrides. */
static uint32_t *opseg[4];
static x86seg *_opseg[4];
static int noint = 0;
static int cpu_alu_op, pfq_size;
static uint32_t cpu_src = 0, cpu_dest = 0;
static uint32_t cpu_data = 0;
static uint16_t last_addr = 0x0000;
static uint32_t *ovr_seg = NULL;
static int prefetching = 1, completed = 1;
static int in_rep = 0, repeating = 0, rep_c_flag = 0;
static int oldc, clear_lock = 0;
static int refresh = 0, cycdiff;
/* Various things needed for 8087. */
#define OP_TABLE(name) ops_##name
#define CPU_BLOCK_END()
#define SEG_CHECK_READ(seg)
#define SEG_CHECK_WRITE(seg)
#define CHECK_READ(a, b, c)
#define CHECK_WRITE(a, b, c)
#define UN_USED(x) (void) (x)
#define fetch_ea_16(val)
#define fetch_ea_32(val)
#define PREFETCH_RUN(a, b, c, d, e, f, g, h)
#define CYCLES(val) \
{ \
wait(val, 0); \
}
#define CLOCK_CYCLES_ALWAYS(val) \
{ \
wait(val, 0); \
}
#if 0
# define CLOCK_CYCLES_FPU(val) \
{ \
wait(val, 0); \
}
# define CLOCK_CYCLES(val) \
{ \
if (fpu_cycles > 0) { \
fpu_cycles -= (val); \
if (fpu_cycles < 0) { \
wait(val, 0); \
} \
} else { \
wait(val, 0); \
} \
}
# define CONCURRENCY_CYCLES(c) fpu_cycles = (c)
#else
# define CLOCK_CYCLES(val) \
{ \
wait(val, 0); \
}
# define CLOCK_CYCLES_FPU(val) \
{ \
wait(val, 0); \
}
# define CONCURRENCY_CYCLES(c)
#endif
typedef int (*OpFn)(uint32_t fetchdat);
static int tempc_fpu = 0;
#ifdef ENABLE_808X_LOG
#if 0
void dumpregs(int);
#endif
int x808x_do_log = ENABLE_808X_LOG;
/* printf-style debug logger for this core; routed through pclog_ex and
   active only while x808x_do_log is non-zero.  Compiles to nothing when
   ENABLE_808X_LOG is undefined. */
static void
x808x_log(const char *fmt, ...)
{
    va_list ap;
    if (x808x_do_log) {
        va_start(ap, fmt);
        pclog_ex(fmt, ap);
        va_end(ap);
    }
}
#else
#    define x808x_log(fmt, ...)
#endif
static void pfq_add(int c, int add);
static void set_pzs(int bits);
/* Returns the offset of the most recent write that landed in the
   0xF0000-0xFFFFF range (recorded by writememb/writememw). */
uint16_t
get_last_addr(void)
{
    return last_addr;
}
/* Open a cycle-accounting window: remember the cycle counter so clock_end()
   can convert the cycles burnt since this point into TSC ticks. */
static void
clock_start(void)
{
    cycdiff = cycles;
}
/* Close the window opened by clock_start(): convert the consumed cycles
   into TSC ticks using the multiplier held in the upper 32 bits of
   xt_cpu_multi, then fire the timer machinery if the target was passed. */
static void
clock_end(void)
{
    int diff = cycdiff - cycles;
    /* On 808x systems, clock speed is usually crystal frequency divided by an integer. */
    tsc += (uint64_t) diff * ((uint64_t) xt_cpu_multi >> 32ULL); /* Shift xt_cpu_multi by 32 bits to the right and then multiply. */
    if (TIMER_VAL_LESS_THAN_VAL(timer_target, (uint32_t) tsc))
        timer_process();
}
/* Advance the BIU by c cycles.  If a DRAM-refresh DMA cycle is pending, the
   in-flight prefetch is first padded out to a 4-cycle boundary and 4 extra
   memory-access cycles are inserted.  `bus` selects the accounting mode:
   0 = prefetching may run during these cycles, 1 = the bus is busy with a
   data access (no prefetch), 2 = externally consumed cycles (no prefetch
   and no clock_end()/clock_start() reseal). */
static void
fetch_and_bus(int c, int bus)
{
    if (refresh > 0) {
        /* Finish the current fetch, if any. */
        cycles -= ((4 - (biu_cycles & 3)) & 3);
        pfq_add((4 - (biu_cycles & 3)) & 3, 1);
        /* Add 4 memory access cycles. */
        cycles -= 4;
        pfq_add(4, 0);
        refresh--;
    }
    pfq_add(c, !bus);
    if (bus < 2) {
        clock_end();
        clock_start();
    }
}
/* Burn c CPU cycles and run them through the BIU.  bus = 0 allows
   prefetching during the wait; bus = 1 marks the bus as busy. */
static void
wait(int c, int bus)
{
    cycles -= c;
    fetch_and_bus(c, bus);
}
/* This is for external subtraction of cycles, e.g. by device emulation.
   On non-286 CPUs the subtracted cycles are also pushed through the BIU
   with bus mode 2 (no prefetch, no clock reseal). */
void
sub_cycles(int c)
{
    if (c <= 0)
        return;
    cycles -= c;
    if (!is286)
        fetch_and_bus(c, 2);
}
/* Re-account cycles that an external handler already took off `cycles`
   directly: restore the counter to its earlier value and subtract the
   difference again via sub_cycles() so the BIU stays in sync. */
void
resub_cycles(int old_cycles)
{
    int cyc_diff = 0;
    if (old_cycles > cycles) {
        cyc_diff = old_cycles - cycles;
        cycles = old_cycles;
        sub_cycles(cyc_diff);
    }
}
#undef readmemb
#undef readmemw
#undef readmeml
#undef readmemq
/* Perform a port read (out = 0) or write (out = 1) of 8 or 16 bits with bus
   timing.  A 16-bit access on an 8086 to an even port is a single word
   transfer; otherwise it is split into two byte transfers with an extra
   4-cycle bus penalty.  Cycles consumed inside the inb/outb handlers
   themselves are re-run through the BIU by resub_cycles(). */
static void
cpu_io(int bits, int out, uint16_t port)
{
    int old_cycles = cycles;
    if (out) {
        wait(4, 1);
        if (bits == 16) {
            if (is8086 && !(port & 1)) {
                old_cycles = cycles;
                outw(port, AX);
            } else {
                wait(4, 1);
                old_cycles = cycles;
                outb(port++, AL);
                outb(port, AH);
            }
        } else {
            old_cycles = cycles;
            outb(port, AL);
        }
    } else {
        wait(4, 1);
        if (bits == 16) {
            if (is8086 && !(port & 1)) {
                old_cycles = cycles;
                AX = inw(port);
            } else {
                wait(4, 1);
                old_cycles = cycles;
                AL = inb(port++);
                AH = inb(port);
            }
        } else {
            old_cycles = cycles;
            AL = inb(port);
        }
    }
    resub_cycles(old_cycles);
}
/* Reads a byte from the given linear address and advances the BIU by one
   4-cycle bus access. */
static uint8_t
readmemb(uint32_t a)
{
    uint8_t ret;
    wait(4, 1);
    ret = read_mem_b(a);
    return ret;
}
/* Reads a code byte at CS:a (offset wrapped to 16 bits) without advancing
   the BIU — used by the prefetcher, whose cycles are counted elsewhere. */
static uint8_t
readmembf(uint32_t a)
{
    uint8_t ret;
    a = cs + (a & 0xffff);
    ret = read_mem_b(a);
    return ret;
}
/* Reads a word from segment base s, offset a, advancing the BIU.  On an
   8086 with an even offset this is one 16-bit bus access; otherwise it is
   two byte accesses costing an extra 4 cycles.  On a 186 (non-NEC) the
   second byte's offset is NOT wrapped at 64K; everywhere else (a + 1) wraps
   to 0 past 0xffff. */
static uint16_t
readmemw(uint32_t s, uint16_t a)
{
    uint16_t ret;
    wait(4, 1);
    if (is8086 && !(a & 1))
        ret = read_mem_w(s + a);
    else {
        wait(4, 1);
        ret = read_mem_b(s + a);
        ret |= read_mem_b(s + ((is186 && !is_nec) ? (a + 1) : (a + 1) & 0xffff)) << 8;
    }
    return ret;
}
/* Reads a code word at CS:a (offset wrapped to 16 bits) without advancing
   the BIU — prefetcher counterpart of readmemw(). */
static uint16_t
readmemwf(uint16_t a)
{
    uint16_t ret;
    ret = read_mem_w(cs + (a & 0xffff));
    return ret;
}
/* Operand read at s:eaaddr, sized by bit 0 of the current opcode:
   word when set, zero-extended byte when clear. */
static uint16_t
readmem(uint32_t s)
{
    return (opcode & 1) ? readmemw(s, cpu_state.eaaddr)
                        : (uint16_t) readmemb(s + cpu_state.eaaddr);
}
/* Reads a dword as two word accesses.  Note the high word (a + 2) is read
   FIRST — the bus-access order is part of the emulated behavior, so do not
   reorder. */
static uint32_t
readmeml(uint32_t s, uint16_t a)
{
    uint32_t temp;
    temp = (uint32_t) (readmemw(s, a + 2)) << 16;
    temp |= readmemw(s, a);
    return temp;
}
/* Reads a qword as two dword accesses, high dword (a + 4) first —
   mirrors readmeml()'s access order. */
static uint64_t
readmemq(uint32_t s, uint16_t a)
{
    uint64_t temp;
    temp = (uint64_t) (readmeml(s, a + 4)) << 32;
    temp |= readmeml(s, a);
    return temp;
}
/* Writes a byte at segment base s, offset a, advancing the BIU; writes into
   the 0xF0000-0xFFFFF range also record last_addr for get_last_addr(). */
static void
writememb(uint32_t s, uint32_t a, uint8_t v)
{
    uint32_t addr = s + a;
    wait(4, 1);
    write_mem_b(addr, v);
    if ((addr >= 0xf0000) && (addr <= 0xfffff))
        last_addr = addr & 0xffff;
}
/* Writes a word at s:a, advancing the BIU.  8086 + even offset is a single
   16-bit access; otherwise two byte accesses with an extra 4 cycles, the
   second offset wrapping at 64K except on a 186 (non-NEC).  F-segment
   writes record last_addr. */
static void
writememw(uint32_t s, uint32_t a, uint16_t v)
{
    uint32_t addr = s + a;
    wait(4, 1);
    if (is8086 && !(a & 1))
        write_mem_w(addr, v);
    else {
        write_mem_b(addr, v & 0xff);
        wait(4, 1);
        addr = s + ((is186 && !is_nec) ? (a + 1) : ((a + 1) & 0xffff));
        write_mem_b(addr, v >> 8);
    }
    if ((addr >= 0xf0000) && (addr <= 0xfffff))
        last_addr = addr & 0xffff;
}
/* Operand write at s:eaaddr, sized by bit 0 of the current opcode:
   word when set, low byte only when clear. */
static void
writemem(uint32_t s, uint16_t v)
{
    if (!(opcode & 1))
        writememb(s, cpu_state.eaaddr, (uint8_t) (v & 0xff));
    else
        writememw(s, cpu_state.eaaddr, v);
}
/* Writes a dword as two word accesses, low word first. */
static void
writememl(uint32_t s, uint32_t a, uint32_t v)
{
    writememw(s, a, v & 0xffff);
    writememw(s, a + 2, v >> 16);
}
/* Writes a qword as two dword accesses, low dword first. */
static void
writememq(uint32_t s, uint32_t a, uint64_t v)
{
    writememl(s, a, v & 0xffffffff);
    writememl(s, a + 4, v >> 32);
}
/* BIU prefetch step: append the next instruction byte(s) at CS:pfq_ip to
   the prefetch queue, if there is room. */
static void
pfq_write(void)
{
    uint16_t tempw;
    if (is8086 && (pfq_pos < (pfq_size - 1))) {
        /* The 8086 fetches 2 bytes at a time, and only if there's at least 2 bytes
           free in the queue. */
        tempw = readmemwf(pfq_ip);
        /* Store the word one byte at a time, low byte first (instruction-
           stream order).  The previous "*(uint16_t *) &pfq[pfq_pos]" pun
           violated strict aliasing and assumed a little-endian host. */
        pfq[pfq_pos] = tempw & 0xff;
        pfq[pfq_pos + 1] = (tempw >> 8) & 0xff;
        pfq_ip += 2;
        pfq_pos += 2;
    } else if (!is8086 && (pfq_pos < pfq_size)) {
        /* The 8088 fetches 1 byte at a time, and only if there's at least 1 byte
           free in the queue. */
        pfq[pfq_pos] = readmembf(pfq_ip);
        pfq_ip++;
        pfq_pos++;
    }
}
/* Pop the head byte off the prefetch queue, sliding the remaining bytes
   toward the front, and advance IP past the consumed byte. */
static uint8_t
pfq_read(void)
{
    uint8_t head = pfq[0];
    memmove(pfq, pfq + 1, pfq_size - 1);
    pfq_pos--;
    cpu_state.pc = (cpu_state.pc + 1) & 0xffff;
    return head;
}
/* Fetches a byte from the prefetch queue, or from memory if the queue has
   been drained. */
static uint8_t
pfq_fetchb_common(void)
{
    uint8_t temp;
    if (pfq_pos == 0) {
        /* Reset prefetch queue internal position. */
        pfq_ip = cpu_state.pc;
        /* Fill the queue: stall until the in-progress 4-cycle fetch slot
           completes, which triggers a queue write via pfq_add(). */
        wait(4 - (biu_cycles & 3), 0);
    }
    /* Fetch. */
    temp = pfq_read();
    return temp;
}
/* Fetches one instruction byte and charges the 1-cycle decode overhead. */
static uint8_t
pfq_fetchb(void)
{
    uint8_t ret;
    ret = pfq_fetchb_common();
    wait(1, 0);
    return ret;
}
/* Fetches a word from the prefetch queue, or from memory if the queue has
   been drained — low byte first, with a 1-cycle charge between the two
   byte fetches. */
static uint16_t
pfq_fetchw(void)
{
    uint16_t temp;
    temp = pfq_fetchb_common();
    wait(1, 0);
    temp |= (pfq_fetchb_common() << 8);
    return temp;
}
/* Fetch a byte or a word from the instruction stream, sized by bit 0 of
   the current opcode. */
static uint16_t
pfq_fetch(void)
{
    return (opcode & 1) ? pfq_fetchw() : (uint16_t) pfq_fetchb();
}
/* Adds bytes to the prefetch queue based on the instruction's cycle count:
   advances the 2-bit BIU phase counter one cycle at a time, and every time
   it wraps to 0 — with prefetching enabled and `add` set — performs one
   queue fill.  Bails out early if the queue is already full. */
static void
pfq_add(int c, int add)
{
    int d;
    if ((c <= 0) || (pfq_pos >= pfq_size))
        return;
    for (d = 0; d < c; d++) {
        biu_cycles = (biu_cycles + 1) & 0x03;
        if (prefetching && add && (biu_cycles == 0x00))
            pfq_write();
    }
}
/* Clear the prefetch queue - called on reset and on anything that affects either CS or IP.
   Also stops the BIU from prefetching until set_ip() restarts it. */
static void
pfq_clear(void)
{
    pfq_pos = 0;
    prefetching = 0;
}
/* Loads CS with a real-mode segment value and recomputes its base. */
static void
load_cs(uint16_t seg)
{
    cpu_state.seg_cs.base = seg << 4;
    cpu_state.seg_cs.seg = seg & 0xffff;
}
/* Loads an arbitrary segment register with a real-mode segment value. */
static void
load_seg(uint16_t seg, x86seg *s)
{
    s->base = seg << 4;
    s->seg = seg & 0xffff;
}
/* Resets the 808x core; on a hard reset it also rebuilds the segment lookup
   tables and sets the prefetch queue size (6 bytes on the 8086, 4 on the
   8088). */
void
reset_808x(int hard)
{
    biu_cycles = 0;
    in_rep = 0;
    completed = 1;
    repeating = 0;
    clear_lock = 0;
    refresh = 0;
    ovr_seg = NULL;
    if (hard) {
        opseg[0] = &es;
        opseg[1] = &cs;
        opseg[2] = &ss;
        opseg[3] = &ds;
        _opseg[0] = &cpu_state.seg_es;
        _opseg[1] = &cpu_state.seg_cs;
        _opseg[2] = &cpu_state.seg_ss;
        _opseg[3] = &cpu_state.seg_ds;
        pfq_size = (is8086) ? 6 : 4;
    }
    pfq_clear();
    /* Execution restarts at FFFF:0000, the 8086/8088 reset vector. */
    load_cs(0xFFFF);
    cpu_state.pc = 0;
    /* NEC CPUs: make sure the mode (MD) flag is set. */
    if (is_nec)
        cpu_state.flags |= MD_FLAG;
    rammask = 0xfffff;
    prefetching = 1;
    cpu_alu_op = 0;
    use_custom_nmi_vector = 0x00;
    custom_nmi_vector = 0x00000000;
}
/* Sets IP to a new value and (re)starts prefetching from there. */
static void
set_ip(uint16_t new_ip)
{
    pfq_ip = cpu_state.pc = new_ip;
    prefetching = 1;
}
/* Memory refresh read - called by reads and writes on DMA channel 0.
   Bumps the count of pending refresh cycles. */
void
refreshread(void)
{
    refresh++;
}
/* Returns the accumulator for the given operand width (AX or AL). */
static uint16_t
get_accum(int bits)
{
    if (bits == 16)
        return AX;
    return AL;
}
/* Stores a value into the accumulator for the given operand width. */
static void
set_accum(int bits, uint16_t val)
{
    if (bits != 16)
        AL = val;
    else
        AX = val;
}
/* Sign-extends an 8-bit value to 16 bits. */
static uint16_t
sign_extend(uint8_t data)
{
    if (data & 0x80)
        return 0xff00 | data;
    return data;
}
/* Fetches the effective address from the prefetch queue according to MOD and R/M,
   spending the cycle-accurate EA calculation time along the way. */
static void
do_mod_rm(void)
{
    rmdat = pfq_fetchb();
    cpu_reg = (rmdat >> 3) & 7;
    cpu_mod = (rmdat >> 6) & 3;
    cpu_rm = rmdat & 7;
    /* MOD 11: register operand, no EA calculation needed. */
    if (cpu_mod == 3)
        return;
    wait(1, 0);
    /* MOD 00 with R/M 110: direct 16-bit address follows the ModR/M byte. */
    if ((rmdat & 0xc7) == 0x06) {
        wait(1, 0);
        cpu_state.eaaddr = pfq_fetchw();
        easeg = ovr_seg ? *ovr_seg : ds;
        wait(1, 0);
        return;
    } else
        /* The base+index combinations have different EA times
           (cases 0/3: 2 extra cycles, cases 1/2: 3). */
        switch (cpu_rm) {
            case 0:
            case 3:
                wait(2, 0);
                break;
            case 1:
            case 2:
                wait(3, 0);
                break;
        }
    cpu_state.eaaddr = (*mod1add[0][cpu_rm]) + (*mod1add[1][cpu_rm]);
    easeg = ovr_seg ? *ovr_seg : *mod1seg[cpu_rm];
    switch (rmdat & 0xc0) {
        case 0x40:
            /* MOD 01: sign-extended 8-bit displacement. */
            wait(3, 0);
            cpu_state.eaaddr += sign_extend(pfq_fetchb());
            break;
        case 0x80:
            /* MOD 10: 16-bit displacement. */
            wait(3, 0);
            cpu_state.eaaddr += pfq_fetchw();
            break;
    }
    cpu_state.eaaddr &= 0xffff;
    wait(2, 0);
}
/* Access to the 8-bit register file: r < 4 selects the low byte, r >= 4 the
   high byte of the corresponding 16-bit general register. */
#undef getr8
#define getr8(r) ((r & 4) ? cpu_state.regs[r & 3].b.h : cpu_state.regs[r & 3].b.l)
#undef setr8
#define setr8(r, v) \
    if (r & 4) \
        cpu_state.regs[r & 3].b.h = v; \
    else \
        cpu_state.regs[r & 3].b.l = v;
/* Reads a byte from the effective address: a register for MOD 11,
   memory otherwise. */
static uint8_t
geteab(void)
{
    if (cpu_mod == 3)
        return (getr8(cpu_rm));
    return readmemb(easeg + cpu_state.eaaddr);
}
/* Reads a word from the effective address: a register for MOD 11,
   memory otherwise. */
static uint16_t
geteaw(void)
{
    if (cpu_mod == 3)
        return cpu_state.regs[cpu_rm].w;
    return readmemw(easeg, cpu_state.eaaddr);
}
/* Needed for 8087 - memory only; a register operand is a fatal emulation
   error here. */
static uint32_t
geteal(void)
{
    if (cpu_mod == 3) {
        fatal("808x register geteal()\n");
        return 0xffffffff;
    }
    return readmeml(easeg, cpu_state.eaaddr);
}
/* Needed for 8087 - memory only; a register operand is a fatal emulation
   error here. */
static uint64_t
geteaq(void)
{
    if (cpu_mod == 3) {
        fatal("808x register geteaq()\n");
        return 0xffffffff;
    }
    return readmemq(easeg, cpu_state.eaaddr);
}
/* Reads the instruction operand into cpu_data; when memory_only is set,
   register operands (MOD 11) are skipped and cpu_data is left untouched. */
static void
read_ea(int memory_only, int bits)
{
    if (cpu_mod != 3) {
        if (bits == 16)
            cpu_data = readmemw(easeg, cpu_state.eaaddr);
        else
            cpu_data = readmemb(easeg + cpu_state.eaaddr);
        return;
    }
    if (!memory_only) {
        if (bits == 8) {
            cpu_data = getr8(cpu_rm);
        } else
            cpu_data = cpu_state.regs[cpu_rm].w;
    }
}
/* Advances the effective address by 2 (with 64 KB wrap) and reads the next
   part of a multi-word memory operand into cpu_data. */
static void
read_ea2(int bits)
{
    cpu_state.eaaddr = (cpu_state.eaaddr + 2) & 0xffff;
    if (bits == 16)
        cpu_data = readmemw(easeg, cpu_state.eaaddr);
    else
        cpu_data = readmemb(easeg + cpu_state.eaaddr);
}
/* Writes a byte to the effective address: a register for MOD 11,
   memory otherwise. */
static void
seteab(uint8_t val)
{
    if (cpu_mod == 3) {
        setr8(cpu_rm, val);
    } else
        writememb(easeg, cpu_state.eaaddr, val);
}
/* Writes a word to the effective address: a register for MOD 11,
   memory otherwise. */
static void
seteaw(uint16_t val)
{
    if (cpu_mod == 3)
        cpu_state.regs[cpu_rm].w = val;
    else
        writememw(easeg, cpu_state.eaaddr, val);
}
/* Writes a doubleword to the effective address - memory only (8087);
   a register operand is a fatal emulation error. */
static void
seteal(uint32_t val)
{
    if (cpu_mod == 3) {
        fatal("808x register seteal()\n");
        return;
    } else
        writememl(easeg, cpu_state.eaaddr, val);
}
/* Writes a quadword to the effective address - memory only (8087);
   a register operand is a fatal emulation error. */
static void
seteaq(uint64_t val)
{
    if (cpu_mod == 3) {
        fatal("808x register seteaq()\n");
        return;
    } else
        writememq(easeg, cpu_state.eaaddr, val);
}
/* Leave out the 686 stuff as it's not needed and
complicates compiling. */
#define FPU_8087
#define tempc tempc_fpu
#include "x87_sf.h"
#include "x87.h"
#include "x87_ops.h"
#undef tempc
#undef FPU_8087
/* Pushes a word to the stack. */
static void
push(uint16_t *val)
{
    /* 80186 (non-NEC) quirk: pushing with SP == 1 wraps SP to 0xFFFF.
       NOTE(review): the word is written with base (ss - 1) and offset 0 -
       confirm this matches the intended segment-wrap behavior. */
    if ((is186 && !is_nec) && (SP == 1)) {
        writememw(ss - 1, 0, *val);
        SP = cpu_state.eaaddr = 0xFFFF;
        return;
    }
    SP -= 2;
    cpu_state.eaaddr = (SP & 0xffff);
    writememw(ss, cpu_state.eaaddr, *val);
}
/* Pops a word from the stack: reads at the current SP, then increments SP. */
static uint16_t
pop(void)
{
    cpu_state.eaaddr = (SP & 0xffff);
    SP += 2;
    return readmemw(ss, cpu_state.eaaddr);
}
/* Spends the wait states associated with the numbered access type before a
   bus access; some types also flush the prefetch queue (control transfers).
   NOTE: the bits argument is currently unused. */
static void
access(int num, int bits)
{
    switch (num) {
        /* 0 wait states. */
        case 0:
        case 61:
        case 63:
        case 64:
        case 67:
        case 69:
        case 71:
        case 72:
        default:
            break;
        /* 1 wait state. */
        case 1:
        case 6:
        case 7:
        case 8:
        case 9:
        case 17:
        case 20:
        case 21:
        case 24:
        case 28:
        case 47:
        case 48:
        case 49:
        case 50:
        case 51:
        case 55:
        case 56:
        case 62:
        case 66:
        case 68:
            wait(1, 0);
            break;
        /* 2 wait states. */
        case 3:
        case 11:
        case 15:
        case 22:
        case 23:
        case 25:
        case 26:
        case 35:
        case 44:
        case 45:
        case 46:
        case 52:
        case 53:
        case 54:
            wait(2, 0);
            break;
        /* 3 wait states. */
        case 16:
        case 18:
        case 19:
        case 27:
        case 32:
        case 37:
        case 42:
            wait(3, 0);
            break;
        /* 4 wait states. */
        case 10:
        case 12:
        case 13:
        case 14:
        case 29:
        case 30:
        case 33:
        case 34:
        case 39:
        case 41:
        case 60:
            wait(4, 0);
            break;
        /* 5 wait states. */
        case 4:
        case 70:
            wait(5, 0);
            break;
        /* 6 wait states. */
        case 31:
        case 38:
        case 40:
            wait(6, 0);
            break;
        /* Interrupt vector read: INT3 (CC) takes longer. */
        case 5:
            if (opcode == 0xcc)
                wait(7, 0);
            else
                wait(4, 0);
            break;
        /* The remaining types flush the prefetch queue mid-sequence. */
        case 36:
            wait(1, 0);
            pfq_clear();
            wait(1, 0);
            if (cpu_mod != 3)
                wait(1, 0);
            wait(3, 0);
            break;
        case 43:
            wait(2, 0);
            pfq_clear();
            wait(1, 0);
            break;
        case 57:
            if (cpu_mod != 3)
                wait(2, 0);
            wait(4, 0);
            break;
        case 58:
            if (cpu_mod != 3)
                wait(1, 0);
            wait(4, 0);
            break;
        case 59:
            wait(2, 0);
            pfq_clear();
            if (cpu_mod != 3)
                wait(1, 0);
            wait(3, 0);
            break;
        case 65:
            wait(1, 0);
            pfq_clear();
            wait(2, 0);
            if (cpu_mod != 3)
                wait(1, 0);
            break;
    }
}
/* Calls an interrupt: reads the handler address from the IVT, pushes FLAGS,
   CS and IP, clears IF and TF, and transfers control to the handler. */
static void
interrupt(uint16_t addr)
{
    uint16_t old_cs, old_ip;
    uint16_t new_cs, new_ip;
    uint16_t tempf;
    /* Each IVT entry is 4 bytes: offset word, then segment word. */
    addr <<= 2;
    cpu_state.eaaddr = addr;
    old_cs = CS;
    access(5, 16);
    new_ip = readmemw(0, cpu_state.eaaddr);
    wait(1, 0);
    cpu_state.eaaddr = (cpu_state.eaaddr + 2) & 0xffff;
    access(6, 16);
    new_cs = readmemw(0, cpu_state.eaaddr);
    prefetching = 0;
    pfq_clear();
    ovr_seg = NULL;
    access(39, 16);
    /* Push FLAGS with the undefined bits masked off (NEC keeps bit 15). */
    tempf = cpu_state.flags & (is_nec ? 0x8fd7 : 0x0fd7);
    push(&tempf);
    cpu_state.flags &= ~(I_FLAG | T_FLAG);
    access(40, 16);
    push(&old_cs);
    old_ip = cpu_state.pc;
    load_cs(new_cs);
    access(68, 16);
    set_ip(new_ip);
    access(41, 16);
    push(&old_ip);
}
/* External entry point for raising an interrupt on the 808x core. */
void
interrupt_808x(uint16_t addr)
{
    interrupt(addr);
}
/* NMI delivery with a machine-supplied vector: behaves like interrupt(2),
   but the target CS:IP comes from custom_nmi_vector; the IVT words are
   still read (and discarded) to preserve bus timing. */
static void
custom_nmi(void)
{
    uint16_t old_cs, old_ip;
    uint16_t new_cs, new_ip;
    uint16_t tempf;
    cpu_state.eaaddr = 0x0002;
    old_cs = CS;
    access(5, 16);
    (void) readmemw(0, cpu_state.eaaddr);
    new_ip = custom_nmi_vector & 0xffff;
    wait(1, 0);
    cpu_state.eaaddr = (cpu_state.eaaddr + 2) & 0xffff;
    access(6, 16);
    (void) readmemw(0, cpu_state.eaaddr);
    new_cs = custom_nmi_vector >> 16;
    prefetching = 0;
    pfq_clear();
    ovr_seg = NULL;
    access(39, 16);
    /* Push FLAGS with the undefined bits masked off (NEC keeps bit 15). */
    tempf = cpu_state.flags & (is_nec ? 0x8fd7 : 0x0fd7);
    push(&tempf);
    cpu_state.flags &= ~(I_FLAG | T_FLAG);
    access(40, 16);
    push(&old_cs);
    old_ip = cpu_state.pc;
    load_cs(new_cs);
    access(68, 16);
    set_ip(new_ip);
    access(41, 16);
    push(&old_ip);
}
/* Returns non-zero if an NMI, a single-step trap, or a deliverable maskable
   IRQ is pending. */
static int
irq_pending(void)
{
    uint8_t temp;
    temp = (nmi && nmi_enable && nmi_mask) || ((cpu_state.flags & T_FLAG) && !noint) || ((cpu_state.flags & I_FLAG) && pic.int_pending && !noint);
    return temp;
}
/* Services pending interrupts in priority order: single-step trap first,
   then NMI, then maskable IRQs from the PIC. */
static void
check_interrupts(void)
{
    int temp;
    if (irq_pending()) {
        /* Single-step trap (TF) has the highest priority. */
        if ((cpu_state.flags & T_FLAG) && !noint) {
            interrupt(1);
            return;
        }
        if (nmi && nmi_enable && nmi_mask) {
            nmi_enable = 0;
            if (use_custom_nmi_vector)
                custom_nmi();
            else
                interrupt(2);
#ifndef OLD_NMI_BEHAVIOR
            nmi = 0;
#endif
            return;
        }
        if ((cpu_state.flags & I_FLAG) && pic.int_pending && !noint) {
            repeating = 0;
            completed = 1;
            ovr_seg = NULL;
            wait(3, 0);
            /* ACK to PIC (first INTA pulse; its result is discarded). */
            temp = pic_irq_ack();
            wait(4, 1);
            wait(1, 0);
            /* ACK to PIC (second INTA pulse supplies the vector used). */
            temp = pic_irq_ack();
            wait(4, 1);
            wait(1, 0);
            in_lock = 0;
            clear_lock = 0;
            wait(1, 0);
            /* Here is where temp should be filled, but we cheat. */
            wait(3, 0);
            opcode = 0x00;
            interrupt(temp);
        }
    }
}
/* Per-iteration bookkeeping for REP-prefixed string instructions: services
   pending interrupts (rewinding IP to the prefix so the instruction resumes
   afterwards), decrements CX, and returns 1 when the repetition is done. */
static int
rep_action(int bits)
{
    uint16_t t;
    if (in_rep == 0)
        return 0;
    wait(2, 0);
    t = CX;
    if (irq_pending() && (repeating != 0)) {
        access(71, bits);
        pfq_clear();
        /* Rewind IP past the REP prefix (and the segment override on NEC). */
        if (is_nec && (ovr_seg != NULL))
            set_ip(cpu_state.pc - 3);
        else
            set_ip(cpu_state.pc - 2);
        t = 0;
    }
    if (t == 0) {
        wait(1, 0);
        completed = 1;
        repeating = 0;
        return 1;
    }
    --CX;
    completed = 0;
    wait(2, 0);
    if (!repeating)
        wait(2, 0);
    return 0;
}
/* Flushes the prefetch queue and adds delta to IP; returns the previous IP. */
static uint16_t
jump(uint16_t delta)
{
    uint16_t old_ip;
    access(67, 8);
    pfq_clear();
    wait(5, 0);
    old_ip = cpu_state.pc;
    set_ip((cpu_state.pc + delta) & 0xffff);
    return old_ip;
}
/* Performs a short jump using the sign-extended 8-bit displacement held in
   cpu_data. */
static void
jump_short(void)
{
    uint16_t delta = sign_extend((uint8_t) cpu_data);

    jump(delta);
}
/* Jumps by a 16-bit displacement fetched from the instruction stream and
   returns the previous IP. */
static uint16_t
jump_near(void)
{
    return jump(pfq_fetchw());
}
/* Performs a conditional jump: even opcodes jump when cond is true, odd
   opcodes (the negated Jcc forms) jump when it is false.
   NOTE: the opcode parameter shadows the global of the same name. */
static void
jcc(uint8_t opcode, int cond)
{
    /* int8_t offset; */
    wait(1, 0);
    cpu_data = pfq_fetchb();
    wait(1, 0);
    if ((!cond) == !!(opcode & 0x01))
        jump_short();
}
/* Sets or clears the carry flag according to cond. */
static void
set_cf(int cond)
{
    if (cond)
        cpu_state.flags |= C_FLAG;
    else
        cpu_state.flags &= ~C_FLAG;
}
/* Sets or clears the interrupt flag according to cond. */
static void
set_if(int cond)
{
    if (cond)
        cpu_state.flags |= I_FLAG;
    else
        cpu_state.flags &= ~I_FLAG;
}
/* Sets or clears the direction flag according to cond. */
static void
set_df(int cond)
{
    if (cond)
        cpu_state.flags |= D_FLAG;
    else
        cpu_state.flags &= ~D_FLAG;
}
/* Stores a bitwise result into cpu_data and sets flags accordingly:
   CF, AF and OF are cleared, PF/ZF/SF are set from the result. */
static void
bitwise(int bits, uint16_t data)
{
    cpu_data = data;
    cpu_state.flags &= ~(C_FLAG | A_FLAG | V_FLAG);
    set_pzs(bits);
}
/* Computes dest & src into cpu_data and sets the flags for it
   (shared by the AND and TEST implementations). */
static void
test(int bits, uint16_t dest, uint16_t src)
{
    cpu_dest = dest;
    cpu_src = src;
    bitwise(bits, (cpu_dest & cpu_src));
}
/* Sets or clears the overflow flag (bit 11) according to of. */
static void
set_of(int of)
{
    if (of)
        cpu_state.flags |= 0x800;
    else
        cpu_state.flags &= ~0x800;
}
/* Returns the top (sign) bit of w for the given operand width:
   non-zero when set, 0 otherwise. */
static int
top_bit(uint16_t w, int bits)
{
    uint16_t mask = 1 << (bits - 1);

    return w & mask;
}
/* Sets OF for an addition: overflow occurs when both operands have the same
   sign and the result's sign differs from it. */
static void
set_of_add(int bits)
{
    set_of(top_bit((cpu_data ^ cpu_src) & (cpu_data ^ cpu_dest), bits));
}
/* Sets OF for a subtraction: overflow occurs when the operands' signs differ
   and the result's sign differs from the minuend's. */
static void
set_of_sub(int bits)
{
    set_of(top_bit((cpu_dest ^ cpu_src) & (cpu_data ^ cpu_dest), bits));
}
/* Sets or clears the auxiliary-carry flag (bit 4) according to af. */
static void
set_af(int af)
{
    if (af)
        cpu_state.flags |= 0x10;
    else
        cpu_state.flags &= ~0x10;
}
/* Sets AF from bit 4 of the carry/borrow chain (dest ^ src ^ result). */
static void
do_af(void)
{
    set_af(((cpu_data ^ cpu_src ^ cpu_dest) & 0x10) != 0);
}
/* Sets AF, PF, ZF and SF after an arithmetic operation. */
static void
set_apzs(int bits)
{
    set_pzs(bits);
    do_af();
}
/* Common core of ADD/ADC (cpu_alu_op == 2 means ADC; alu_op() has already
   folded the carry into cpu_src by the time we get here). */
static void
add(int bits)
{
    int size_mask = (1 << bits) - 1;
    int special_case = 0;
    uint32_t temp_src = cpu_src;
    /* ADC of 0xFF/0xFFFF with carry set: cpu_src has wrapped to 0. */
    if ((cpu_alu_op == 2) && !(cpu_src & size_mask) && (cpu_state.flags & C_FLAG))
        special_case = 1;
    cpu_data = cpu_dest + cpu_src;
    /* Undo the carry fold so the flag computations see the real source. */
    if ((cpu_alu_op == 2) && (cpu_state.flags & C_FLAG))
        cpu_src--;
    set_apzs(bits);
    set_of_add(bits);
    /* Anything - FF with carry on is basically anything + 0x100: value stays
       unchanged but carry goes on. */
    if (special_case)
        cpu_state.flags |= C_FLAG;
    else
        set_cf((temp_src & size_mask) > (cpu_data & size_mask));
}
/* Common core of SUB/SBB/CMP (cpu_alu_op == 3 means SBB; alu_op() has
   already folded the borrow into cpu_src by the time we get here). */
static void
sub(int bits)
{
    int size_mask = (1 << bits) - 1;
    int special_case = 0;
    uint32_t temp_src = cpu_src;
    /* SBB of 0xFF/0xFFFF with carry set: cpu_src has wrapped to 0. */
    if ((cpu_alu_op == 3) && !(cpu_src & size_mask) && (cpu_state.flags & C_FLAG))
        special_case = 1;
    cpu_data = cpu_dest - cpu_src;
    /* Undo the borrow fold so the flag computations see the real source. */
    if ((cpu_alu_op == 3) && (cpu_state.flags & C_FLAG))
        cpu_src--;
    set_apzs(bits);
    set_of_sub(bits);
    /* Anything - FF with carry on is basically anything - 0x100: value stays
       unchanged but carry goes on. */
    if (special_case)
        cpu_state.flags |= C_FLAG;
    else
        set_cf((temp_src & size_mask) > (cpu_dest & size_mask));
}
/* Dispatches the ALU operation selected by cpu_alu_op (the x86 ALU group
   encoding: 0 ADD, 1 OR, 2 ADC, 3 SBB, 4 AND, 5 SUB, 6 XOR, 7 CMP).
   CMP (7) sets flags only; callers skip the write-back for it. */
static void
alu_op(int bits)
{
    switch (cpu_alu_op) {
        case 1:
            bitwise(bits, (cpu_dest | cpu_src));
            break;
        case 2:
            /* ADC: fold the carry into the source, then add. */
            if (cpu_state.flags & C_FLAG)
                cpu_src++;
            /* Fall through. */
        case 0:
            add(bits);
            break;
        case 3:
            /* SBB: fold the borrow into the source, then subtract. */
            if (cpu_state.flags & C_FLAG)
                cpu_src++;
            /* Fall through. */
        case 5:
        case 7:
            sub(bits);
            break;
        case 4:
            test(bits, cpu_dest, cpu_src);
            break;
        case 6:
            bitwise(bits, (cpu_dest ^ cpu_src));
            break;
    }
}
/* Sets SF (bit 7) from the top bit of the result in cpu_data. */
static void
set_sf(int bits)
{
    cpu_state.flags = (cpu_state.flags & ~0x80) | (top_bit(cpu_data, bits) ? 0x80 : 0);
}
/* Sets PF (bit 2): set when the low byte of the result has an even number
   of set bits. */
static void
set_pf(void)
{
    cpu_state.flags = (cpu_state.flags & ~4) | (!__builtin_parity(cpu_data & 0xFF) << 2);
}
/* Emulates the 8086/8088 bit-serial multiplier with per-bit timing.
   Handles MUL/IMUL ((rmdat & 0x38) == 0x28 selects IMUL) and the AAD
   multiply (opcode 0xd5, which skips the sign/timing preamble).
   On return the low half of the product is in cpu_data, the high half in
   cpu_dest; SF/PF are set from the low half and AF is cleared. */
static void
mul(uint16_t a, uint16_t b)
{
    int negate = 0;
    int bit_count = 8;
    int carry, i;
    uint16_t high_bit = 0x80;
    uint16_t size_mask;
    uint16_t c, r;
    size_mask = (1 << bit_count) - 1;
    if (opcode != 0xd5) {
        if (opcode & 1) {
            bit_count = 16;
            high_bit = 0x8000;
        } else
            wait(8, 0);
        size_mask = (1 << bit_count) - 1;
        if ((rmdat & 0x38) == 0x28) {
            /* IMUL: negate the operands as needed and remember whether the
               result must be negated back afterwards. */
            if (!top_bit(a, bit_count)) {
                if (top_bit(b, bit_count)) {
                    wait(1, 0);
                    if ((b & size_mask) != ((opcode & 1) ? 0x8000 : 0x80))
                        wait(1, 0);
                    b = ~b + 1;
                    negate = 1;
                }
            } else {
                wait(1, 0);
                a = ~a + 1;
                negate = 1;
                if (top_bit(b, bit_count)) {
                    b = ~b + 1;
                    negate = 0;
                } else
                    wait(4, 0);
            }
            wait(10, 0);
        }
        wait(3, 0);
    }
    c = 0;
    a &= size_mask;
    carry = (a & 1) != 0;
    a >>= 1;
    /* Shift-and-add loop: one pass (and one timing chunk) per result bit. */
    for (i = 0; i < bit_count; ++i) {
        wait(7, 0);
        if (carry) {
            cpu_src = c;
            cpu_dest = b;
            add(bit_count);
            c = cpu_data & size_mask;
            wait(1, 0);
            carry = !!(cpu_state.flags & C_FLAG);
        }
        r = (c >> 1) + (carry ? high_bit : 0);
        carry = (c & 1) != 0;
        c = r;
        r = (a >> 1) + (carry ? high_bit : 0);
        carry = (a & 1) != 0;
        a = r;
    }
    if (negate) {
        /* Two's-complement the full double-width result c:a. */
        c = ~c;
        a = (~a + 1) & size_mask;
        if (a == 0)
            ++c;
        wait(9, 0);
    }
    cpu_data = a;
    cpu_dest = c;
    set_sf(bit_count);
    set_pf();
    set_af(0);
}
/* Sets OF after a rotate/shift: set when the operation changed the top bit. */
static void
set_of_rotate(int bits)
{
    set_of(top_bit(cpu_data ^ cpu_dest, bits));
}
/* Sets or clears the zero flag (bit 6) according to zf. */
static void
set_zf_ex(int zf)
{
    if (zf)
        cpu_state.flags |= 0x40;
    else
        cpu_state.flags &= ~0x40;
}
/* Sets ZF from the result in cpu_data, masked to the operand width. */
static void
set_zf(int bits)
{
    int size_mask = (1 << bits) - 1;

    set_zf_ex(!(cpu_data & size_mask));
}
/* Sets PF, ZF and SF from the result in cpu_data. */
static void
set_pzs(int bits)
{
    set_pf();
    set_zf(bits);
    set_sf(bits);
}
/* Sets CF/OF/ZF after a multiply: carry/overflow set when the high half of
   the product is significant; ZF is set to the inverse of carry, as modeled
   here for the 8086 family. */
static void
set_co_mul(int bits, int carry)
{
    set_cf(carry);
    set_of(carry);
    set_zf_ex(!carry);
    if (!carry)
        wait(1, 0);
}
/* Was div(), renamed to avoid conflicts with stdlib div(). */
/* Emulates the 8086/8088 bit-serial restoring divider with per-bit timing.
   Handles DIV/IDIV ((rmdat & 0x38) == 0x38 selects IDIV) and the AAM divide
   (opcode 0xd4). Returns 0 when a divide-error interrupt was raised. */
static int
x86_div(uint16_t l, uint16_t h)
{
    int b, bit_count = 8;
    int negative = 0;
    int dividend_negative = 0;
    int size_mask, carry;
    uint16_t r;
    /* Word-sized divides always take the dividend from DX:AX. */
    if (opcode & 1) {
        l = AX;
        h = DX;
        bit_count = 16;
    }
    size_mask = (1 << bit_count) - 1;
    if (opcode != 0xd4) {
        if ((rmdat & 0x38) == 0x38) {
            /* IDIV: take absolute values, remembering the signs. */
            if (top_bit(h, bit_count)) {
                h = ~h;
                l = (~l + 1) & size_mask;
                if (l == 0)
                    ++h;
                h &= size_mask;
                negative = 1;
                dividend_negative = 1;
                wait(4, 0);
            }
            if (top_bit(cpu_src, bit_count)) {
                cpu_src = ~cpu_src + 1;
                negative = !negative;
            } else
                wait(1, 0);
            wait(9, 0);
        }
        wait(3, 0);
    }
    wait(8, 0);
    cpu_src &= size_mask;
    /* Quotient would not fit: raise a divide error. */
    if (h >= cpu_src) {
        if (opcode != 0xd4)
            wait(1, 0);
        interrupt(0);
        return 0;
    }
    if (opcode != 0xd4)
        wait(1, 0);
    wait(2, 0);
    carry = 1;
    for (b = 0; b < bit_count; ++b) {
        /* Shift the dividend left one bit through l:h... */
        r = (l << 1) + (carry ? 1 : 0);
        carry = top_bit(l, bit_count);
        l = r;
        r = (h << 1) + (carry ? 1 : 0);
        carry = top_bit(h, bit_count);
        h = r;
        wait(8, 0);
        /* ...and subtract the divisor wherever it fits. */
        if (carry) {
            carry = 0;
            h -= cpu_src;
            if (b == bit_count - 1)
                wait(2, 0);
        } else {
            carry = cpu_src > h;
            if (!carry) {
                h -= cpu_src;
                wait(1, 0);
                if (b == bit_count - 1)
                    wait(2, 0);
            }
        }
    }
    l = ~((l << 1) + (carry ? 1 : 0));
    if (opcode != 0xd4 && (rmdat & 0x38) == 0x38) {
        /* IDIV: signed quotient overflow check, then restore the signs. */
        wait(4, 0);
        if (top_bit(l, bit_count)) {
            if (cpu_mod == 3)
                wait(1, 0);
            interrupt(0);
            return 0;
        }
        wait(7, 0);
        if (negative)
            l = ~l + 1;
        if (dividend_negative)
            h = ~h + 1;
    }
    if (opcode == 0xd4) {
        /* AAM: AH gets the quotient (l), AL the remainder (h). */
        AL = h & 0xff;
        AH = l & 0xff;
    } else {
        AH = h & 0xff;
        AL = l & 0xff;
        if (opcode & 1) {
            DX = h;
            AX = l;
        }
    }
    return 1;
}
/* Advances the effective address by one operand (1 or 2 bytes), in the
   direction selected by DF, wrapping within 64 KB; returns the new EA. */
static uint16_t
string_increment(int bits)
{
    int delta = bits >> 3;

    if (cpu_state.flags & D_FLAG)
        delta = -delta;
    cpu_state.eaaddr = (cpu_state.eaaddr + delta) & 0xffff;
    return cpu_state.eaaddr;
}
/* LODS: reads a byte/word from DS:SI (honoring a segment override) into
   cpu_data and advances SI according to DF. */
static void
lods(int bits)
{
    cpu_state.eaaddr = SI;
    if (bits == 16)
        cpu_data = readmemw((ovr_seg ? *ovr_seg : ds), cpu_state.eaaddr);
    else
        cpu_data = readmemb((ovr_seg ? *ovr_seg : ds) + cpu_state.eaaddr);
    SI = string_increment(bits);
}
/* STOS: writes cpu_data to ES:DI (ES is not overridable) and advances DI
   according to DF. */
static void
stos(int bits)
{
    cpu_state.eaaddr = DI;
    if (bits == 16)
        writememw(es, cpu_state.eaaddr, cpu_data);
    else
        writememb(es, cpu_state.eaaddr, (uint8_t) (cpu_data & 0xff));
    DI = string_increment(bits);
}
/* Common tail of AAA/AAS: sets PF/ZF/SF from the 8-bit result and keeps
   only the low nibble of AL. */
static void
aa(void)
{
    set_pzs(8);
    AL = cpu_data & 0x0f;
    wait(6, 0);
}
/* Sets both the carry and the auxiliary-carry flags. */
static void
set_ca(void)
{
    set_af(1);
    set_cf(1);
}
/* Clears both the carry and the auxiliary-carry flags. */
static void
clear_ca(void)
{
    set_af(0);
    set_cf(0);
}
/* Reads the current effective-address operand, word-sized when bit 0 of
   the executing opcode is set. */
static uint16_t
get_ea(void)
{
    return (opcode & 1) ? geteaw() : ((uint16_t) geteab());
}
/* Reads the instruction's register operand: a 16-bit register when bit 0
   of the opcode is set, an 8-bit register otherwise. */
static uint16_t
get_reg(uint8_t reg)
{
    if (opcode & 1)
        return cpu_state.regs[reg].w;
    else
        return (uint16_t) getr8(reg);
}
/* Writes the current effective-address operand, word-sized when bit 0 of
   the executing opcode is set. */
static void
set_ea(uint16_t val)
{
    if ((opcode & 1) == 0)
        seteab((uint8_t) (val & 0xff));
    else
        seteaw(val);
}
/* Writes the instruction's register operand: a 16-bit register when bit 0
   of the opcode is set, an 8-bit register otherwise. */
static void
set_reg(uint8_t reg, uint16_t val)
{
    if (opcode & 1)
        cpu_state.regs[reg].w = val;
    else
        setr8(reg, (uint8_t) (val & 0xff));
}
/* For byte-sized operations: with a memory operand the high byte of
   cpu_data is forced to 0xFF, with a register operand the full 16-bit
   register is read instead. NOTE(review): this models an observed
   8088/8086 quirk of the FE/FF opcode group - confirm against hardware
   documentation. */
static void
cpu_data_opff_rm(void)
{
    if (!(opcode & 1)) {
        if (cpu_mod != 3)
            cpu_data |= 0xff00;
        else
            cpu_data = cpu_state.regs[cpu_rm].w;
    }
}
/* Reads a word from an I/O port, spending the bus time first: 4 cycles for
   an even-addressed word on the 8086, 8 cycles (two byte transfers)
   otherwise. */
uint16_t
cpu_inw(uint16_t port)
{
    int fast = is8086 && !(port & 1);

    wait(fast ? 4 : 8, 0);
    return inw(port);
}
/* Writes a word to an I/O port, spending the bus time first: 4 cycles for
   an even-addressed word on the 8086, 8 cycles (two byte transfers)
   otherwise. */
void
cpu_outw(uint16_t port, uint16_t val)
{
    if (is8086 && !(port & 1)) {
        wait(4, 0);
    } else {
        wait(8, 0);
    }
    /* Fixed: the original `return outw(port, val);` returned a void
       expression from a void function, which is a constraint violation in
       standard C (C11 6.8.6.4). */
    outw(port, val);
}
/* Executes instructions up to the specified number of cycles. */
void
execx86(int cycs)
{
uint8_t temp = 0, temp2, old_af, nests;
uint8_t temp_val, temp_al, bit, handled = 0;
uint8_t odd, zero, nibbles_count, destcmp;
uint8_t destbyte, srcbyte, nibble_result, bit_length;
uint8_t bit_offset;
int8_t nibble_result_s;
uint16_t addr, tempw, new_cs, new_ip;
uint16_t tempw_int, size, tempbp, lowbound;
uint16_t highbound, regval, orig_sp, wordtopush;
uint16_t immediate, old_flags;
int bits;
uint32_t dest_seg, i, carry, nibble;
uint32_t srcseg, byteaddr;
cycles += cycs;
while (cycles > 0) {
clock_start();
if (!repeating) {
cpu_state.oldpc = cpu_state.pc;
opcode = pfq_fetchb();
handled = 0;
oldc = cpu_state.flags & C_FLAG;
if (clear_lock) {
in_lock = 0;
clear_lock = 0;
}
wait(1, 0);
}
completed = 1;
// pclog("[%04X:%04X] Opcode: %02X\n", CS, cpu_state.pc, opcode);
if (is186) {
switch (opcode) {
case 0x60: /*PUSHA/PUSH R*/
orig_sp = SP;
wait(1, 0);
push(&AX);
push(&CX);
push(&DX);
push(&BX);
push(&orig_sp);
push(&BP);
push(&SI);
push(&DI);
handled = 1;
break;
case 0x61: /*POPA/POP R*/
wait(9, 0);
DI = pop();
SI = pop();
BP = pop();
(void) pop(); /* former orig_sp */
BX = pop();
DX = pop();
CX = pop();
AX = pop();
handled = 1;
break;
case 0x62: /* BOUND r/m */
lowbound = 0;
highbound = 0;
regval = 0;
do_mod_rm();
lowbound = readmemw(easeg, cpu_state.eaaddr);
highbound = readmemw(easeg, cpu_state.eaaddr + 2);
regval = get_reg(cpu_reg);
if (lowbound > regval || highbound < regval) {
cpu_state.pc = cpu_state.oldpc;
interrupt(5);
}
handled = 1;
break;
case 0x64:
case 0x65:
if (is_nec) {
/* REPC/REPNC */
wait(1, 0);
in_rep = (opcode == 0x64 ? 1 : 2);
rep_c_flag = 1;
completed = 0;
handled = 1;
}
break;
case 0x68:
wordtopush = pfq_fetchw();
wait(1, 0);
push(&wordtopush);
handled = 1;
break;
case 0x69:
immediate = 0;
bits = 16;
do_mod_rm();
read_ea(0, 16);
immediate = pfq_fetchw();
mul(cpu_data & 0xFFFF, immediate);
set_reg(cpu_reg, cpu_data);
set_co_mul(16, cpu_dest != 0);
handled = 1;
break;
case 0x6a:
wordtopush = sign_extend(pfq_fetchb());
push(&wordtopush);
handled = 1;
break;
case 0x6b: /* IMUL reg16,reg16/mem16,imm8 */
immediate = 0;
bits = 16;
do_mod_rm();
read_ea(0, 16);
immediate = pfq_fetchb();
mul(cpu_data & 0xFFFF, immediate);
set_reg(cpu_reg, cpu_data);
set_co_mul(16, cpu_dest != 0);
handled = 1;
break;
case 0x6c:
case 0x6d: /* INM dst, DW/INS dst, DX */
bits = 8 << (opcode & 1);
handled = 1;
if (!repeating)
wait(2, 0);
if (rep_action(bits))
break;
else if (!repeating)
wait(7, 0);
if (bits == 16) {
writememw(es, DI, cpu_inw(DX));
DI += (cpu_state.flags & D_FLAG) ? -2 : 2;
} else {
wait(4, 0);
writememb(es, DI, inb(DX));
DI += (cpu_state.flags & D_FLAG) ? -1 : 1;
}
if (in_rep == 0)
break;
repeating = 1;
clock_end();
break;
case 0x6e:
case 0x6f: /* OUTM DW, src/OUTS DX, src */
dest_seg = ovr_seg ? *ovr_seg : ds;
bits = 8 << (opcode & 1);
handled = 1;
if (!repeating)
wait(2, 0);
if (rep_action(bits))
break;
else if (!repeating)
wait(7, 0);
if (bits == 16) {
cpu_outw(DX, readmemw(dest_seg, SI));
SI += (cpu_state.flags & D_FLAG) ? -2 : 2;
} else {
wait(4, 0);
outb(DX, readmemb(dest_seg + SI));
SI += (cpu_state.flags & D_FLAG) ? -1 : 1;
}
if (in_rep == 0)
break;
repeating = 1;
clock_end();
break;
case 0xc8: /* ENTER/PREPARE */
tempw_int = 0;
size = pfq_fetchw();
nests = pfq_fetchb();
i = 0;
push(&BP);
tempw_int = SP;
if (nests > 0) {
while (--nests) {
tempbp = 0;
BP -= 2;
tempbp = readmemw(ss, BP);
push(&tempbp);
}
push(&tempw_int);
}
BP = tempw_int;
SP -= size;
handled = 1;
break;
case 0xc0:
case 0xc1: /*rot imm8 */
bits = 8 << (opcode & 1);
do_mod_rm();
if (cpu_mod == 3)
wait(1, 0);
access(53, bits);
cpu_data = get_ea();
cpu_src = pfq_fetchb();
wait((cpu_mod != 3) ? 9 : 6, 0);
if (!is_nec)
cpu_src &= 0x1F;
while (cpu_src != 0) {
cpu_dest = cpu_data;
oldc = cpu_state.flags & C_FLAG;
switch (rmdat & 0x38) {
case 0x00: /* ROL */
set_cf(top_bit(cpu_data, bits));
cpu_data <<= 1;
cpu_data |= ((cpu_state.flags & C_FLAG) ? 1 : 0);
set_of_rotate(bits);
set_af(0);
break;
case 0x08: /* ROR */
set_cf((cpu_data & 1) != 0);
cpu_data >>= 1;
if (cpu_state.flags & C_FLAG)
cpu_data |= (!(opcode & 1) ? 0x80 : 0x8000);
set_of_rotate(bits);
set_af(0);
break;
case 0x10: /* RCL */
set_cf(top_bit(cpu_data, bits));
cpu_data = (cpu_data << 1) | (oldc ? 1 : 0);
set_of_rotate(bits);
set_af(0);
break;
case 0x18: /* RCR */
set_cf((cpu_data & 1) != 0);
cpu_data >>= 1;
if (oldc)
cpu_data |= (!(opcode & 0x01) ? 0x80 : 0x8000);
set_cf((cpu_dest & 1) != 0);
set_of_rotate(bits);
set_af(0);
break;
case 0x20: /* SHL */
set_cf(top_bit(cpu_data, bits));
cpu_data <<= 1;
set_of_rotate(bits);
set_af((cpu_data & 0x10) != 0);
set_pzs(bits);
break;
case 0x28: /* SHR */
set_cf((cpu_data & 1) != 0);
cpu_data >>= 1;
set_of_rotate(bits);
set_af(0);
set_pzs(bits);
break;
case 0x30: /* SETMO - undocumented? */
bitwise(bits, 0xffff);
set_cf(0);
set_of_rotate(bits);
set_af(0);
set_pzs(bits);
break;
case 0x38: /* SAR */
set_cf((cpu_data & 1) != 0);
cpu_data >>= 1;
if (!(opcode & 1))
cpu_data |= (cpu_dest & 0x80);
else
cpu_data |= (cpu_dest & 0x8000);
set_of_rotate(bits);
set_af(0);
set_pzs(bits);
break;
}
if ((opcode & 2) != 0)
wait(4, 0);
--cpu_src;
}
access(17, bits);
set_ea(cpu_data);
handled = 1;
break;
case 0xc9: /* LEAVE/DISPOSE */
SP = BP;
BP = pop();
handled = 1;
break;
}
}
if (!handled) {
switch (opcode) {
case 0x06:
case 0x0E:
case 0x16:
case 0x1E: /* PUSH seg */
access(29, 16);
push(&(_opseg[(opcode >> 3) & 0x03]->seg));
break;
case 0x07:
case 0x0F:
case 0x17:
case 0x1F: /* POP seg */
if (is_nec && (opcode == 0x0F)) {
uint8_t orig_opcode = opcode;
opcode = pfq_fetchb();
switch (opcode) {
case 0x28: /* ROL4 r/m */
do_mod_rm();
wait(21, 0);
temp_val = geteab();
temp_al = AL;
temp_al &= 0xF;
temp_al |= (temp_val & 0xF0);
temp_val = (temp_al & 0xF) | ((temp_val & 0xF) << 4);
temp_al >>= 4;
temp_al &= 0xF;
seteab(temp_val);
AL = temp_al;
handled = 1;
break;
case 0x2a: /* ROR4 r/m */
do_mod_rm();
wait(21, 0);
temp_val = geteab();
temp_al = AL;
AL = temp_val & 0xF;
temp_val = (temp_val >> 4) | ((temp_al & 0xF) << 4);
seteab(temp_val);
handled = 1;
break;
case 0x10: /* TEST1 r8/m8, CL*/
case 0x11: /* TEST1 r16/m16, CL*/
case 0x18: /* TEST1 r8/m8, imm3 */
case 0x19: /* TEST1 r16/m16, imm4 */
bits = 8 << (opcode & 0x1);
do_mod_rm();
wait(3, 0);
bit = (opcode & 0x8) ? (pfq_fetchb()) : (CL);
bit &= ((1 << (3 + (opcode & 0x1))) - 1);
read_ea(0, bits);
set_zf_ex(!(cpu_data & (1 << bit)));
cpu_state.flags &= ~(V_FLAG | C_FLAG);
handled = 1;
break;
case 0x16: /* NOT1 r8/m8, CL*/
case 0x17: /* NOT1 r16/m16, CL*/
case 0x1e: /* NOT1 r8/m8, imm3 */
case 0x1f: /* NOT1 r16/m16, imm4 */
bits = 8 << (opcode & 0x1);
do_mod_rm();
wait(3, 0);
bit = (opcode & 0x8) ? (pfq_fetchb()) : (CL);
bit &= ((1 << (3 + (opcode & 0x1))) - 1);
read_ea(0, bits);
if (bits == 8)
seteab((cpu_data & 0xFF) ^ (1 << bit));
else
seteaw((cpu_data & 0xFFFF) ^ (1 << bit));
handled = 1;
break;
case 0x14: /* SET1 r8/m8, CL*/
case 0x15: /* SET1 r16/m16, CL*/
case 0x1c: /* SET1 r8/m8, imm3 */
case 0x1d: /* SET1 r16/m16, imm4 */
bits = 8 << (opcode & 0x1);
do_mod_rm();
wait(3, 0);
bit = (opcode & 0x8) ? (pfq_fetchb()) : (CL);
bit &= ((1 << (3 + (opcode & 0x1))) - 1);
read_ea(0, bits);
if (bits == 8)
seteab((cpu_data & 0xFF) | (1 << bit));
else
seteaw((cpu_data & 0xFFFF) | (1 << bit));
handled = 1;
break;
case 0x12: /* CLR1 r8/m8, CL*/
case 0x13: /* CLR1 r16/m16, CL*/
case 0x1a: /* CLR1 r8/m8, imm3 */
case 0x1b: /* CLR1 r16/m16, imm4 */
bits = 8 << (opcode & 0x1);
do_mod_rm();
wait(3, 0);
bit = (opcode & 0x8) ? (pfq_fetchb()) : (CL);
bit &= ((1 << (3 + (opcode & 0x1))) - 1);
read_ea(0, bits);
if (bits == 8)
seteab((cpu_data & 0xFF) & ~(1 << bit));
else
seteaw((cpu_data & 0xFFFF) & ~(1 << bit));
handled = 1;
break;
case 0x20: /* ADD4S */
odd = !!(CL % 2);
zero = 1;
nibbles_count = CL - odd;
i = 0;
carry = 0;
nibble = 0;
srcseg = ovr_seg ? *ovr_seg : ds;
wait(5, 0);
for (i = 0; i < ((nibbles_count / 2) + odd); i++) {
wait(19, 0);
destcmp = read_mem_b((es) + DI + i);
for (nibble = 0; nibble < 2; nibble++) {
destbyte = destcmp >> (nibble ? 4 : 0);
srcbyte = read_mem_b(srcseg + SI + i) >> (nibble ? 4 : 0);
destbyte &= 0xF;
srcbyte &= 0xF;
nibble_result = (i == (nibbles_count / 2) && nibble == 1) ? (destbyte + carry) : ((uint8_t) (destbyte)) + ((uint8_t) (srcbyte)) + ((uint32_t) carry);
carry = 0;
while (nibble_result >= 10) {
nibble_result -= 10;
carry++;
}
if (zero != 0 || (i == (nibbles_count / 2) && nibble == 1))
zero = (nibble_result == 0);
destcmp = ((destcmp & (nibble ? 0x0F : 0xF0)) | (nibble_result << (4 * nibble)));
}
write_mem_b(es + DI + i, destcmp);
}
set_cf(!!carry);
set_zf(!!zero);
handled = 1;
break;
case 0x22: /* SUB4S */
odd = !!(CL % 2);
zero = 1;
nibbles_count = CL - odd;
i = 0;
carry = 0;
nibble = 0;
srcseg = ovr_seg ? *ovr_seg : ds;
wait(5, 0);
for (i = 0; i < ((nibbles_count / 2) + odd); i++) {
wait(19, 0);
destcmp = read_mem_b((es) + DI + i);
for (nibble = 0; nibble < 2; nibble++) {
destbyte = destcmp >> (nibble ? 4 : 0);
srcbyte = read_mem_b(srcseg + SI + i) >> (nibble ? 4 : 0);
destbyte &= 0xF;
srcbyte &= 0xF;
nibble_result_s = (i == (nibbles_count / 2) && nibble == 1) ? ((int8_t) destbyte - (int8_t) carry) : ((int8_t) (destbyte)) - ((int8_t) (srcbyte)) - ((int8_t) carry);
carry = 0;
while (nibble_result_s < 0) {
nibble_result_s += 10;
carry++;
}
if (zero != 0 || (i == (nibbles_count / 2) && nibble == 1))
zero = (nibble_result_s == 0);
destcmp = ((destcmp & (nibble ? 0x0F : 0xF0)) | (nibble_result_s << (4 * nibble)));
}
write_mem_b(es + DI + i, destcmp);
}
set_cf(!!carry);
set_zf(!!zero);
handled = 1;
break;
case 0x26: /* CMP4S */
odd = !!(CL % 2);
zero = 1;
nibbles_count = CL - odd;
i = 0;
carry = 0;
nibble = 0;
srcseg = ovr_seg ? *ovr_seg : ds;
wait(5, 0);
for (i = 0; i < ((nibbles_count / 2) + odd); i++) {
wait(19, 0);
destcmp = read_mem_b((es) + DI + i);
for (nibble = 0; nibble < 2; nibble++) {
destbyte = destcmp >> (nibble ? 4 : 0);
srcbyte = read_mem_b(srcseg + SI + i) >> (nibble ? 4 : 0);
destbyte &= 0xF;
srcbyte &= 0xF;
nibble_result_s = ((int8_t) (destbyte)) - ((int8_t) (srcbyte)) - ((int8_t) carry);
carry = 0;
while (nibble_result_s < 0) {
nibble_result_s += 10;
carry++;
}
if (zero != 0 || (i == (nibbles_count / 2) && nibble == 1))
zero = (nibble_result_s == 0);
destcmp = ((destcmp & (nibble ? 0x0F : 0xF0)) | (nibble_result_s << (4 * nibble)));
}
}
set_cf(!!carry);
set_zf(!!zero);
handled = 1;
break;
case 0x31: /* INS reg1, reg2 */
case 0x39: /* INS reg8, imm4 */
do_mod_rm();
wait(1, 0);
bit_length = ((opcode & 0x8) ? (pfq_fetchb() & 0xF) : (getr8(cpu_reg) & 0xF)) + 1;
bit_offset = getr8(cpu_rm) & 0xF;
byteaddr = (es) + DI;
i = 0;
if (bit_offset >= 8) {
DI++;
byteaddr++;
bit_offset -= 8;
}
for (i = 0; i < bit_length; i++) {
byteaddr = (es) + DI;
writememb(es, DI, (read_mem_b(byteaddr) & ~(1 << (bit_offset))) | ((!!(AX & (1 << i))) << bit_offset));
bit_offset++;
if (bit_offset == 8) {
DI++;
bit_offset = 0;
}
}
setr8(cpu_rm, bit_offset);
handled = 1;
break;
case 0x33: /* EXT reg1, reg2 */
case 0x3b: /* EXT reg8, imm4 */
do_mod_rm();
wait(1, 0);
bit_length = ((opcode & 0x8) ? (pfq_fetchb() & 0xF) : (getr8(cpu_reg) & 0xF)) + 1;
bit_offset = getr8(cpu_rm) & 0xF;
byteaddr = (ds) + SI;
i = 0;
if (bit_offset >= 8) {
SI++;
byteaddr++;
bit_offset -= 8;
}
AX = 0;
for (i = 0; i < bit_length; i++) {
byteaddr = (ds) + SI;
AX |= (!!(readmemb(byteaddr) & (1 << bit_offset))) << i;
bit_offset++;
if (bit_offset == 8) {
SI++;
bit_offset = 0;
}
}
setr8(cpu_rm, bit_offset);
handled = 1;
break;
case 0xFF: /* BRKEM */
/* Unimplemented for now. */
fatal("808x: Unsupported 8080 emulation mode attempted to enter into!");
break;
default:
opcode = orig_opcode;
cpu_state.pc--;
break;
}
} else
handled = 0;
if (handled)
break;
access(22, 16);
if (opcode == 0x0F) {
load_cs(pop());
pfq_pos = 0;
} else
load_seg(pop(), _opseg[(opcode >> 3) & 0x03]);
wait(1, 0);
/* All POP segment instructions suppress interrupts for one instruction. */
noint = 1;
break;
case 0x26: /*ES:*/
case 0x2E: /*CS:*/
case 0x36: /*SS:*/
case 0x3E: /*DS:*/
wait(1, 0);
ovr_seg = opseg[(opcode >> 3) & 0x03];
completed = 0;
break;
case 0x00:
case 0x01:
case 0x02:
case 0x03:
case 0x08:
case 0x09:
case 0x0a:
case 0x0b:
case 0x10:
case 0x11:
case 0x12:
case 0x13:
case 0x18:
case 0x19:
case 0x1a:
case 0x1b:
case 0x20:
case 0x21:
case 0x22:
case 0x23:
case 0x28:
case 0x29:
case 0x2a:
case 0x2b:
case 0x30:
case 0x31:
case 0x32:
case 0x33:
case 0x38:
case 0x39:
case 0x3a:
case 0x3b:
/* alu rm, r / r, rm */
bits = 8 << (opcode & 1);
do_mod_rm();
access(46, bits);
tempw = get_ea();
cpu_alu_op = (opcode >> 3) & 7;
if ((opcode & 2) == 0) {
cpu_dest = tempw;
cpu_src = get_reg(cpu_reg);
} else {
cpu_dest = get_reg(cpu_reg);
cpu_src = tempw;
}
if (cpu_mod != 3)
wait(2, 0);
wait(1, 0);
alu_op(bits);
if (cpu_alu_op != 7) {
if ((opcode & 2) == 0) {
access(10, bits);
set_ea(cpu_data);
if (cpu_mod == 3)
wait(1, 0);
} else {
set_reg(cpu_reg, cpu_data);
wait(1, 0);
}
} else
wait(1, 0);
break;
case 0x04:
case 0x05:
case 0x0c:
case 0x0d:
case 0x14:
case 0x15:
case 0x1c:
case 0x1d:
case 0x24:
case 0x25:
case 0x2c:
case 0x2d:
case 0x34:
case 0x35:
case 0x3c:
case 0x3d:
/* alu A, imm */
bits = 8 << (opcode & 1);
wait(1, 0);
cpu_data = pfq_fetch();
cpu_dest = get_accum(bits); /* AX/AL */
cpu_src = cpu_data;
cpu_alu_op = (opcode >> 3) & 7;
alu_op(bits);
if (cpu_alu_op != 7)
set_accum(bits, cpu_data);
wait(1, 0);
break;
case 0x27: /*DAA*/
cpu_dest = AL;
set_of(0);
old_af = !!(cpu_state.flags & A_FLAG);
if ((cpu_state.flags & A_FLAG) || (AL & 0x0f) > 9) {
cpu_src = 6;
cpu_data = cpu_dest + cpu_src;
set_of_add(8);
cpu_dest = cpu_data;
set_af(1);
}
if ((cpu_state.flags & C_FLAG) || AL > (old_af ? 0x9f : 0x99)) {
cpu_src = 0x60;
cpu_data = cpu_dest + cpu_src;
set_of_add(8);
cpu_dest = cpu_data;
set_cf(1);
}
AL = cpu_dest;
set_pzs(8);
wait(3, 0);
break;
case 0x2F: /*DAS*/
cpu_dest = AL;
set_of(0);
old_af = !!(cpu_state.flags & A_FLAG);
if ((cpu_state.flags & A_FLAG) || ((AL & 0xf) > 9)) {
cpu_src = 6;
cpu_data = cpu_dest - cpu_src;
set_of_sub(8);
cpu_dest = cpu_data;
set_af(1);
}
if ((cpu_state.flags & C_FLAG) || AL > (old_af ? 0x9f : 0x99)) {
cpu_src = 0x60;
cpu_data = cpu_dest - cpu_src;
set_of_sub(8);
cpu_dest = cpu_data;
set_cf(1);
}
AL = cpu_dest;
set_pzs(8);
wait(3, 0);
break;
case 0x37: /*AAA*/
wait(1, 0);
if ((cpu_state.flags & A_FLAG) || ((AL & 0xf) > 9)) {
cpu_src = 6;
++AH;
set_ca();
} else {
cpu_src = 0;
clear_ca();
wait(1, 0);
}
cpu_dest = AL;
cpu_data = cpu_dest + cpu_src;
set_of_add(8);
aa();
break;
case 0x3F: /*AAS*/
wait(1, 0);
if ((cpu_state.flags & A_FLAG) || ((AL & 0xf) > 9)) {
cpu_src = 6;
--AH;
set_ca();
} else {
cpu_src = 0;
clear_ca();
wait(1, 0);
}
cpu_dest = AL;
cpu_data = cpu_dest - cpu_src;
set_of_sub(8);
aa();
break;
case 0x40:
case 0x41:
case 0x42:
case 0x43:
case 0x44:
case 0x45:
case 0x46:
case 0x47:
case 0x48:
case 0x49:
case 0x4A:
case 0x4B:
case 0x4C:
case 0x4D:
case 0x4E:
case 0x4F:
/* INCDEC rw */
wait(1, 0);
cpu_dest = cpu_state.regs[opcode & 7].w;
cpu_src = 1;
bits = 16;
if ((opcode & 8) == 0) {
cpu_data = cpu_dest + cpu_src;
set_of_add(bits);
} else {
cpu_data = cpu_dest - cpu_src;
set_of_sub(bits);
}
do_af();
set_pzs(16);
cpu_state.regs[opcode & 7].w = cpu_data;
break;
case 0x50:
case 0x51:
case 0x52:
case 0x53: /*PUSH r16*/
case 0x54:
case 0x55:
case 0x56:
case 0x57:
access(30, 16);
push(&(cpu_state.regs[opcode & 0x07].w));
break;
case 0x58:
case 0x59:
case 0x5A:
case 0x5B: /*POP r16*/
case 0x5C:
case 0x5D:
case 0x5E:
case 0x5F:
access(23, 16);
cpu_state.regs[opcode & 0x07].w = pop();
wait(1, 0);
break;
case 0x60: /*JO alias*/
case 0x70: /*JO*/
case 0x61: /*JNO alias*/
case 0x71: /*JNO*/
jcc(opcode, cpu_state.flags & V_FLAG);
break;
case 0x62: /*JB alias*/
case 0x72: /*JB*/
case 0x63: /*JNB alias*/
case 0x73: /*JNB*/
jcc(opcode, cpu_state.flags & C_FLAG);
break;
case 0x64: /*JE alias*/
case 0x74: /*JE*/
case 0x65: /*JNE alias*/
case 0x75: /*JNE*/
jcc(opcode, cpu_state.flags & Z_FLAG);
break;
case 0x66: /*JBE alias*/
case 0x76: /*JBE*/
case 0x67: /*JNBE alias*/
case 0x77: /*JNBE*/
jcc(opcode, cpu_state.flags & (C_FLAG | Z_FLAG));
break;
case 0x68: /*JS alias*/
case 0x78: /*JS*/
case 0x69: /*JNS alias*/
case 0x79: /*JNS*/
jcc(opcode, cpu_state.flags & N_FLAG);
break;
case 0x6A: /*JP alias*/
case 0x7A: /*JP*/
case 0x6B: /*JNP alias*/
case 0x7B: /*JNP*/
jcc(opcode, cpu_state.flags & P_FLAG);
break;
case 0x6C: /*JL alias*/
case 0x7C: /*JL*/
case 0x6D: /*JNL alias*/
case 0x7D: /*JNL*/
temp = (cpu_state.flags & N_FLAG) ? 1 : 0;
temp2 = (cpu_state.flags & V_FLAG) ? 1 : 0;
jcc(opcode, temp ^ temp2);
break;
case 0x6E: /*JLE alias*/
case 0x7E: /*JLE*/
case 0x6F: /*JNLE alias*/
case 0x7F: /*JNLE*/
temp = (cpu_state.flags & N_FLAG) ? 1 : 0;
temp2 = (cpu_state.flags & V_FLAG) ? 1 : 0;
jcc(opcode, (cpu_state.flags & Z_FLAG) || (temp != temp2));
break;
case 0x80:
case 0x81:
case 0x82:
case 0x83:
/* alu rm, imm */
bits = 8 << (opcode & 1);
do_mod_rm();
access(47, bits);
cpu_data = get_ea();
cpu_dest = cpu_data;
if (cpu_mod != 3)
wait(3, 0);
if (opcode == 0x81) {
if (cpu_mod == 3)
wait(1, 0);
cpu_src = pfq_fetchw();
} else {
if (cpu_mod == 3)
wait(1, 0);
if (opcode == 0x83)
cpu_src = sign_extend(pfq_fetchb());
else
cpu_src = pfq_fetchb() | 0xff00;
}
wait(1, 0);
cpu_alu_op = (rmdat & 0x38) >> 3;
alu_op(bits);
if (cpu_alu_op != 7) {
access(11, bits);
set_ea(cpu_data);
} else {
if (cpu_mod != 3)
wait(1, 0);
}
break;
case 0x84:
case 0x85:
/* TEST rm, reg */
bits = 8 << (opcode & 1);
do_mod_rm();
access(48, bits);
cpu_data = get_ea();
test(bits, cpu_data, get_reg(cpu_reg));
if (cpu_mod == 3)
wait(2, 0);
wait(2, 0);
break;
case 0x86:
case 0x87:
/* XCHG rm, reg */
bits = 8 << (opcode & 1);
do_mod_rm();
access(49, bits);
cpu_data = get_ea();
cpu_src = get_reg(cpu_reg);
set_reg(cpu_reg, cpu_data);
wait(3, 0);
access(12, bits);
set_ea(cpu_src);
break;
case 0x88:
case 0x89:
/* MOV rm, reg */
bits = 8 << (opcode & 1);
do_mod_rm();
wait(1, 0);
access(13, bits);
set_ea(get_reg(cpu_reg));
break;
case 0x8A:
case 0x8B:
/* MOV reg, rm */
bits = 8 << (opcode & 1);
do_mod_rm();
access(50, bits);
set_reg(cpu_reg, get_ea());
wait(1, 0);
if (cpu_mod != 3)
wait(2, 0);
break;
case 0x8C: /*MOV w,sreg*/
do_mod_rm();
if (cpu_mod == 3)
wait(1, 0);
access(14, 16);
seteaw(_opseg[(rmdat & 0x18) >> 3]->seg);
break;
case 0x8D: /*LEA*/
do_mod_rm();
cpu_state.regs[cpu_reg].w = cpu_state.eaaddr;
wait(1, 0);
if (cpu_mod != 3)
wait(2, 0);
break;
case 0x8E: /*MOV sreg,w*/
do_mod_rm();
access(51, 16);
tempw = geteaw();
if ((rmdat & 0x18) == 0x08) {
load_cs(tempw);
pfq_pos = 0;
} else
load_seg(tempw, _opseg[(rmdat & 0x18) >> 3]);
wait(1, 0);
if (cpu_mod != 3)
wait(2, 0);
if (((rmdat & 0x18) >> 3) == 2)
noint = 1;
break;
case 0x8F: /*POPW*/
do_mod_rm();
wait(1, 0);
cpu_src = cpu_state.eaaddr;
access(24, 16);
if (cpu_mod != 3)
wait(2, 0);
cpu_data = pop();
cpu_state.eaaddr = cpu_src;
wait(2, 0);
access(15, 16);
seteaw(cpu_data);
break;
case 0x90:
case 0x91:
case 0x92:
case 0x93:
case 0x94:
case 0x95:
case 0x96:
case 0x97:
/* XCHG AX, rw */
wait(1, 0);
cpu_data = cpu_state.regs[opcode & 7].w;
cpu_state.regs[opcode & 7].w = AX;
AX = cpu_data;
wait(1, 0);
break;
case 0x98: /*CBW*/
wait(1, 0);
AX = sign_extend(AL);
break;
case 0x99: /*CWD*/
wait(4, 0);
if (!top_bit(AX, 16))
DX = 0;
else {
wait(1, 0);
DX = 0xffff;
}
break;
case 0x9A: /*CALL FAR*/
wait(1, 0);
new_ip = pfq_fetchw();
wait(1, 0);
new_cs = pfq_fetchw();
pfq_clear();
access(31, 16);
push(&(CS));
access(60, 16);
cpu_state.oldpc = cpu_state.pc;
load_cs(new_cs);
set_ip(new_ip);
access(32, 16);
push((uint16_t *) &(cpu_state.oldpc));
break;
case 0x9B: /*WAIT*/
if (!repeating)
wait(2, 0);
wait(5, 0);
#ifdef NO_HACK
if (irq_pending()) {
wait(7, 0);
check_interrupts();
} else {
repeating = 1;
completed = 0;
clock_end();
}
#else
wait(7, 0);
check_interrupts();
#endif
break;
case 0x9C: /*PUSHF*/
access(33, 16);
if (is_nec)
tempw = (cpu_state.flags & 0x8fd7) | 0x7000;
else
tempw = (cpu_state.flags & 0x0fd7) | 0xf000;
push(&tempw);
break;
case 0x9D: /*POPF*/
access(25, 16);
if (is_nec)
cpu_state.flags = pop() | 0x8002;
else
cpu_state.flags = pop() | 0x0002;
wait(1, 0);
break;
case 0x9E: /*SAHF*/
wait(1, 0);
cpu_state.flags = (cpu_state.flags & 0xff02) | AH;
wait(2, 0);
break;
case 0x9F: /*LAHF*/
wait(1, 0);
AH = cpu_state.flags & 0xd7;
break;
case 0xA0:
case 0xA1:
/* MOV A, [iw] */
bits = 8 << (opcode & 1);
wait(1, 0);
cpu_state.eaaddr = pfq_fetchw();
access(1, bits);
set_accum(bits, readmem((ovr_seg ? *ovr_seg : ds)));
wait(1, 0);
break;
case 0xA2:
case 0xA3:
/* MOV [iw], A */
bits = 8 << (opcode & 1);
wait(1, 0);
cpu_state.eaaddr = pfq_fetchw();
access(7, bits);
writemem((ovr_seg ? *ovr_seg : ds), get_accum(bits));
break;
case 0xA4:
case 0xA5: /* MOVS */
case 0xAC:
case 0xAD: /* LODS */
bits = 8 << (opcode & 1);
if (!repeating) {
wait(1, 0);
if ((opcode & 8) == 0 && in_rep != 0)
wait(1, 0);
}
if (rep_action(bits)) {
wait(1, 0);
if ((opcode & 8) != 0)
wait(1, 0);
break;
}
if (in_rep != 0 && (opcode & 8) != 0)
wait(1, 0);
access(20, bits);
lods(bits);
if ((opcode & 8) == 0) {
access(27, bits);
stos(bits);
} else {
set_accum(bits, cpu_data);
if (in_rep != 0)
wait(2, 0);
}
if (in_rep == 0) {
wait(3, 0);
if ((opcode & 8) != 0)
wait(1, 0);
break;
}
repeating = 1;
clock_end();
break;
case 0xA6:
case 0xA7: /* CMPS */
case 0xAE:
case 0xAF: /* SCAS */
bits = 8 << (opcode & 1);
if (!repeating)
wait(1, 0);
if (rep_action(bits)) {
wait(2, 0);
break;
}
if (in_rep != 0)
wait(1, 0);
wait(1, 0);
cpu_dest = get_accum(bits);
if ((opcode & 8) == 0) {
access(21, bits);
lods(bits);
wait(1, 0);
cpu_dest = cpu_data;
}
access(2, bits);
cpu_state.eaaddr = DI;
cpu_data = readmem(es);
DI = string_increment(bits);
cpu_src = cpu_data;
sub(bits);
wait(2, 0);
if (in_rep == 0) {
wait(3, 0);
break;
}
if ((!!(cpu_state.flags & (rep_c_flag ? C_FLAG : Z_FLAG))) == (in_rep == 1)) {
completed = 1;
wait(4, 0);
break;
}
repeating = 1;
clock_end();
break;
case 0xA8:
case 0xA9:
/* TEST A, imm */
bits = 8 << (opcode & 1);
wait(1, 0);
cpu_data = pfq_fetch();
test(bits, get_accum(bits), cpu_data);
wait(1, 0);
break;
case 0xAA:
case 0xAB: /* STOS */
bits = 8 << (opcode & 1);
if (!repeating) {
wait(1, 0);
if (in_rep != 0)
wait(1, 0);
}
if (rep_action(bits)) {
wait(1, 0);
break;
}
cpu_data = AX;
access(28, bits);
stos(bits);
if (in_rep == 0) {
wait(3, 0);
break;
}
repeating = 1;
clock_end();
break;
case 0xB0:
case 0xB1:
case 0xB2:
case 0xB3: /*MOV cpu_reg,#8*/
case 0xB4:
case 0xB5:
case 0xB6:
case 0xB7:
wait(1, 0);
if (opcode & 0x04)
cpu_state.regs[opcode & 0x03].b.h = pfq_fetchb();
else
cpu_state.regs[opcode & 0x03].b.l = pfq_fetchb();
wait(1, 0);
break;
case 0xB8:
case 0xB9:
case 0xBA:
case 0xBB: /*MOV cpu_reg,#16*/
case 0xBC:
case 0xBD:
case 0xBE:
case 0xBF:
wait(1, 0);
cpu_state.regs[opcode & 0x07].w = pfq_fetchw();
wait(1, 0);
break;
case 0xC0:
case 0xC1:
case 0xC2:
case 0xC3:
case 0xC8:
case 0xC9:
case 0xCA:
case 0xCB:
/* RET */
bits = 8 + (opcode & 0x08);
if ((opcode & 9) != 1)
wait(1, 0);
if (!(opcode & 1)) {
cpu_src = pfq_fetchw();
wait(1, 0);
}
if ((opcode & 9) == 9)
wait(1, 0);
pfq_clear();
access(26, bits);
new_ip = pop();
wait(2, 0);
if ((opcode & 8) == 0)
new_cs = CS;
else {
access(42, bits);
new_cs = pop();
if (opcode & 1)
wait(1, 0);
}
if (!(opcode & 1)) {
SP += cpu_src;
wait(1, 0);
}
load_cs(new_cs);
access(72, bits);
set_ip(new_ip);
break;
case 0xC4:
case 0xC5:
/* LsS rw, rmd */
do_mod_rm();
bits = 16;
access(52, bits);
read_ea(1, bits);
cpu_state.regs[cpu_reg].w = cpu_data;
access(57, bits);
read_ea2(bits);
load_seg(cpu_data, (opcode & 0x01) ? &cpu_state.seg_ds : &cpu_state.seg_es);
wait(1, 0);
break;
case 0xC6:
case 0xC7:
/* MOV rm, imm */
bits = 8 << (opcode & 1);
do_mod_rm();
wait(1, 0);
if (cpu_mod != 3)
wait(2, 0);
cpu_data = pfq_fetch();
if (cpu_mod == 3)
wait(1, 0);
access(16, bits);
set_ea(cpu_data);
break;
case 0xCC: /*INT 3*/
interrupt(3);
break;
case 0xCD: /*INT*/
wait(1, 0);
interrupt(pfq_fetchb());
break;
case 0xCE: /*INTO*/
wait(3, 0);
if (cpu_state.flags & V_FLAG) {
wait(2, 0);
interrupt(4);
}
break;
case 0xCF: /*IRET*/
access(43, 8);
new_ip = pop();
wait(3, 0);
access(44, 8);
new_cs = pop();
load_cs(new_cs);
access(62, 8);
set_ip(new_ip);
access(45, 8);
if (is_nec)
cpu_state.flags = pop() | 0x8002;
else
cpu_state.flags = pop() | 0x0002;
wait(5, 0);
noint = 1;
nmi_enable = 1;
break;
case 0xD0:
case 0xD1:
case 0xD2:
case 0xD3:
/* rot rm */
bits = 8 << (opcode & 1);
do_mod_rm();
if (cpu_mod == 3)
wait(1, 0);
access(53, bits);
cpu_data = get_ea();
if ((opcode & 2) == 0) {
cpu_src = 1;
wait((cpu_mod != 3) ? 4 : 0, 0);
} else {
cpu_src = CL;
wait((cpu_mod != 3) ? 9 : 6, 0);
}
if (is186 && !is_nec)
cpu_src &= 0x1F;
while (cpu_src != 0) {
cpu_dest = cpu_data;
oldc = cpu_state.flags & C_FLAG;
switch (rmdat & 0x38) {
case 0x00: /* ROL */
set_cf(top_bit(cpu_data, bits));
cpu_data <<= 1;
cpu_data |= ((cpu_state.flags & C_FLAG) ? 1 : 0);
set_of_rotate(bits);
set_af(0);
break;
case 0x08: /* ROR */
set_cf((cpu_data & 1) != 0);
cpu_data >>= 1;
if (cpu_state.flags & C_FLAG)
cpu_data |= (!(opcode & 1) ? 0x80 : 0x8000);
set_of_rotate(bits);
set_af(0);
break;
case 0x10: /* RCL */
set_cf(top_bit(cpu_data, bits));
cpu_data = (cpu_data << 1) | (oldc ? 1 : 0);
set_of_rotate(bits);
set_af(0);
break;
case 0x18: /* RCR */
set_cf((cpu_data & 1) != 0);
cpu_data >>= 1;
if (oldc)
cpu_data |= (!(opcode & 0x01) ? 0x80 : 0x8000);
set_cf((cpu_dest & 1) != 0);
set_of_rotate(bits);
set_af(0);
break;
case 0x20: /* SHL */
set_cf(top_bit(cpu_data, bits));
cpu_data <<= 1;
set_of_rotate(bits);
set_af((cpu_data & 0x10) != 0);
set_pzs(bits);
break;
case 0x28: /* SHR */
set_cf((cpu_data & 1) != 0);
cpu_data >>= 1;
set_of_rotate(bits);
set_af(0);
set_pzs(bits);
break;
case 0x30: /* SETMO - undocumented? */
bitwise(bits, 0xffff);
set_cf(0);
set_of_rotate(bits);
set_af(0);
set_pzs(bits);
break;
case 0x38: /* SAR */
set_cf((cpu_data & 1) != 0);
cpu_data >>= 1;
if (!(opcode & 1))
cpu_data |= (cpu_dest & 0x80);
else
cpu_data |= (cpu_dest & 0x8000);
set_of_rotate(bits);
set_af(0);
set_pzs(bits);
break;
}
if ((opcode & 2) != 0)
wait(4, 0);
--cpu_src;
}
access(17, bits);
set_ea(cpu_data);
break;
case 0xD4: /*AAM*/
wait(1, 0);
#ifdef NO_VARIANT_ON_NEC
if (is_nec) {
(void) pfq_fetchb();
cpu_src = 10;
} else
cpu_src = pfq_fetchb();
#else
cpu_src = pfq_fetchb();
#endif
if (x86_div(AL, 0))
set_pzs(16);
break;
case 0xD5: /*AAD*/
wait(1, 0);
if (is_nec) {
(void) pfq_fetchb();
mul(10, AH);
} else
mul(pfq_fetchb(), AH);
cpu_dest = AL;
cpu_src = cpu_data;
add(8);
AL = cpu_data;
AH = 0x00;
break;
case 0xD6: /*SALC*/
wait(1, 0);
AL = (cpu_state.flags & C_FLAG) ? 0xff : 0x00;
wait(1, 0);
break;
case 0xD7: /*XLATB*/
cpu_state.eaaddr = (BX + AL) & 0xffff;
access(4, 8);
AL = readmemb((ovr_seg ? *ovr_seg : ds) + cpu_state.eaaddr);
wait(1, 0);
break;
case 0xD8:
case 0xD9:
case 0xDA:
case 0xDB:
case 0xDD:
case 0xDC:
case 0xDE:
case 0xDF:
/* esc i, r, rm */
do_mod_rm();
access(54, 16);
tempw = cpu_state.pc;
if (!hasfpu)
geteaw();
else
if (fpu_softfloat) {
switch (opcode) {
case 0xD8:
ops_sf_fpu_8087_d8[(rmdat >> 3) & 0x1f](rmdat);
break;
case 0xD9:
ops_sf_fpu_8087_d9[rmdat & 0xff](rmdat);
break;
case 0xDA:
ops_sf_fpu_8087_da[rmdat & 0xff](rmdat);
break;
case 0xDB:
ops_sf_fpu_8087_db[rmdat & 0xff](rmdat);
break;
case 0xDC:
ops_sf_fpu_8087_dc[(rmdat >> 3) & 0x1f](rmdat);
break;
case 0xDD:
ops_sf_fpu_8087_dd[rmdat & 0xff](rmdat);
break;
case 0xDE:
ops_sf_fpu_8087_de[rmdat & 0xff](rmdat);
break;
case 0xDF:
ops_sf_fpu_8087_df[rmdat & 0xff](rmdat);
break;
default:
break;
}
} else {
switch (opcode) {
case 0xD8:
ops_fpu_8087_d8[(rmdat >> 3) & 0x1f](rmdat);
break;
case 0xD9:
ops_fpu_8087_d9[rmdat & 0xff](rmdat);
break;
case 0xDA:
ops_fpu_8087_da[rmdat & 0xff](rmdat);
break;
case 0xDB:
ops_fpu_8087_db[rmdat & 0xff](rmdat);
break;
case 0xDC:
ops_fpu_8087_dc[(rmdat >> 3) & 0x1f](rmdat);
break;
case 0xDD:
ops_fpu_8087_dd[rmdat & 0xff](rmdat);
break;
case 0xDE:
ops_fpu_8087_de[rmdat & 0xff](rmdat);
break;
case 0xDF:
ops_fpu_8087_df[rmdat & 0xff](rmdat);
break;
default:
break;
}
}
cpu_state.pc = tempw; /* Do this as the x87 code advances it, which is needed on
the 286+ core, but not here. */
wait(1, 0);
if (cpu_mod != 3)
wait(2, 0);
break;
case 0xE0:
case 0xE1:
case 0xE2:
case 0xE3:
/* LOOP */
wait(3, 0);
cpu_data = pfq_fetchb();
if (opcode != 0xe2)
wait(1, 0);
if (opcode != 0xe3) {
--CX;
oldc = (CX != 0);
switch (opcode) {
case 0xE0:
if (cpu_state.flags & Z_FLAG)
oldc = 0;
break;
case 0xE1:
if (!(cpu_state.flags & Z_FLAG))
oldc = 0;
break;
}
} else
oldc = (CX == 0);
if (oldc)
jump_short();
break;
case 0xE4:
case 0xE5:
case 0xE6:
case 0xE7:
case 0xEC:
case 0xED:
case 0xEE:
case 0xEF:
bits = 8 << (opcode & 1);
if ((opcode & 0x0e) != 0x0c)
wait(1, 0);
if ((opcode & 8) == 0)
cpu_data = pfq_fetchb();
else
cpu_data = DX;
cpu_state.eaaddr = cpu_data;
if ((opcode & 2) == 0) {
access(3, bits);
if (opcode & 1)
cpu_io(16, 0, cpu_data);
else
cpu_io(8, 0, cpu_data);
wait(1, 0);
} else {
if ((opcode & 8) == 0)
access(8, bits);
else
access(9, bits);
if (opcode & 1)
cpu_io(16, 1, cpu_data);
else
cpu_io(8, 1, cpu_data);
}
break;
case 0xE8: /*CALL rel 16*/
wait(1, 0);
cpu_state.oldpc = jump_near();
access(34, 8);
push((uint16_t *) &(cpu_state.oldpc));
break;
case 0xE9: /*JMP rel 16*/
wait(1, 0);
jump_near();
break;
case 0xEA: /*JMP far*/
wait(1, 0);
addr = pfq_fetchw();
wait(1, 0);
tempw = pfq_fetchw();
load_cs(tempw);
access(70, 8);
pfq_clear();
set_ip(addr);
break;
case 0xEB: /*JMP rel*/
wait(1, 0);
cpu_data = (int8_t) pfq_fetchb();
jump_short();
wait(1, 0);
break;
case 0xF0:
case 0xF1: /*LOCK - F1 is alias*/
in_lock = 1;
wait(1, 0);
completed = 0;
break;
case 0xF2: /*REPNE*/
case 0xF3: /*REPE*/
wait(1, 0);
in_rep = (opcode == 0xf2 ? 1 : 2);
completed = 0;
rep_c_flag = 0;
break;
case 0xF4: /*HLT*/
if (!repeating) {
wait(1, 0);
pfq_clear();
}
wait(1, 0);
if (irq_pending()) {
wait(cycles & 1, 0);
check_interrupts();
} else {
repeating = 1;
completed = 0;
clock_end();
}
break;
case 0xF5: /*CMC*/
wait(1, 0);
cpu_state.flags ^= C_FLAG;
break;
case 0xF6:
case 0xF7:
bits = 8 << (opcode & 1);
do_mod_rm();
access(55, bits);
cpu_data = get_ea();
switch (rmdat & 0x38) {
case 0x00:
case 0x08:
/* TEST */
wait(2, 0);
if (cpu_mod != 3)
wait(1, 0);
cpu_src = pfq_fetch();
wait(1, 0);
test(bits, cpu_data, cpu_src);
if (cpu_mod != 3)
wait(1, 0);
break;
case 0x10: /* NOT */
case 0x18: /* NEG */
wait(2, 0);
if ((rmdat & 0x38) == 0x10)
cpu_data = ~cpu_data;
else {
cpu_src = cpu_data;
cpu_dest = 0;
sub(bits);
}
access(18, bits);
set_ea(cpu_data);
break;
case 0x20: /* MUL */
case 0x28: /* IMUL */
old_flags = cpu_state.flags;
wait(1, 0);
mul(get_accum(bits), cpu_data);
if (opcode & 1) {
AX = cpu_data;
DX = cpu_dest;
set_co_mul(bits, DX != ((AX & 0x8000) == 0 || (rmdat & 0x38) == 0x20 ? 0 : 0xffff));
cpu_data = DX;
} else {
AL = (uint8_t) cpu_data;
AH = (uint8_t) cpu_dest;
set_co_mul(bits, AH != ((AL & 0x80) == 0 || (rmdat & 0x38) == 0x20 ? 0 : 0xff));
if (!is_nec)
cpu_data = AH;
}
set_sf(bits);
set_pf();
if (cpu_mod != 3)
wait(1, 0);
/* NOTE: When implementing the V20, care should be taken to not change
the zero flag. */
if (is_nec)
cpu_state.flags = (cpu_state.flags & ~Z_FLAG) | (old_flags & Z_FLAG);
break;
case 0x30: /* DIV */
case 0x38: /* IDIV */
if (cpu_mod != 3)
wait(1, 0);
cpu_src = cpu_data;
if (x86_div(AL, AH))
wait(1, 0);
break;
}
break;
case 0xF8:
case 0xF9:
/* CLCSTC */
wait(1, 0);
set_cf(opcode & 1);
break;
case 0xFA:
case 0xFB:
/* CLISTI */
wait(1, 0);
set_if(opcode & 1);
break;
case 0xFC:
case 0xFD:
/* CLDSTD */
wait(1, 0);
set_df(opcode & 1);
break;
case 0xFE:
case 0xFF:
/* misc */
bits = 8 << (opcode & 1);
do_mod_rm();
access(56, bits);
read_ea(((rmdat & 0x38) == 0x18) || ((rmdat & 0x38) == 0x28), bits);
switch (rmdat & 0x38) {
case 0x00: /* INC rm */
case 0x08: /* DEC rm */
cpu_dest = cpu_data;
cpu_src = 1;
if ((rmdat & 0x38) == 0x00) {
cpu_data = cpu_dest + cpu_src;
set_of_add(bits);
} else {
cpu_data = cpu_dest - cpu_src;
set_of_sub(bits);
}
do_af();
set_pzs(bits);
wait(2, 0);
access(19, bits);
set_ea(cpu_data);
break;
case 0x10: /* CALL rm */
cpu_data_opff_rm();
access(63, bits);
wait(1, 0);
pfq_clear();
wait(4, 0);
if (cpu_mod != 3)
wait(1, 0);
wait(1, 0); /* Wait. */
cpu_state.oldpc = cpu_state.pc;
set_ip(cpu_data);
wait(2, 0);
access(35, bits);
push((uint16_t *) &(cpu_state.oldpc));
break;
case 0x18: /* CALL rmd */
new_ip = cpu_data;
access(58, bits);
read_ea2(bits);
if (!(opcode & 1))
cpu_data |= 0xff00;
new_cs = cpu_data;
access(36, bits);
push(&(CS));
access(64, bits);
wait(4, 0);
cpu_state.oldpc = cpu_state.pc;
load_cs(new_cs);
set_ip(new_ip);
access(37, bits);
push((uint16_t *) &(cpu_state.oldpc));
break;
case 0x20: /* JMP rm */
cpu_data_opff_rm();
access(65, bits);
set_ip(cpu_data);
break;
case 0x28: /* JMP rmd */
new_ip = cpu_data;
access(59, bits);
read_ea2(bits);
if (!(opcode & 1))
cpu_data |= 0xff00;
new_cs = cpu_data;
load_cs(new_cs);
access(66, bits);
set_ip(new_ip);
break;
case 0x30: /* PUSH rm */
case 0x38:
if (cpu_mod != 3)
wait(1, 0);
access(38, bits);
push((uint16_t *) &(cpu_data));
break;
}
break;
default:
x808x_log("Illegal opcode: %02X\n", opcode);
pfq_fetchb();
wait(8, 0);
break;
}
}
if (completed) {
repeating = 0;
ovr_seg = NULL;
in_rep = 0;
rep_c_flag = 0;
if (in_lock)
clear_lock = 1;
clock_end();
check_interrupts();
if (noint)
noint = 0;
cpu_alu_op = 0;
}
#ifdef USE_GDBSTUB
if (gdbstub_instruction())
return;
#endif
}
}
``` | /content/code_sandbox/src/cpu/808x.c | c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 26,001 |
```objective-c
/* Generator macros for the single-register PUSH/POP opcode handlers
   (PUSH r16/r32: opcodes 50h-57h, POP r16/r32: opcodes 58h-5Fh).
   Each expansion defines one static handler whose name embeds the
   register; the PUSH_W/PUSH_L/POP_W/POP_L helpers set cpu_state.abrt
   if the stack access faults, and that flag is returned to the
   dispatcher. */
#define PUSH_W_OP(reg)                             \
    static int opPUSH_##reg(uint32_t fetchdat)     \
    {                                              \
        PUSH_W(reg);                               \
        CLOCK_CYCLES((is486) ? 1 : 2);             \
        PREFETCH_RUN(2, 1, -1, 0, 0, 1, 0, 0);     \
        return cpu_state.abrt;                     \
    }
#define PUSH_L_OP(reg)                             \
    static int opPUSH_##reg(uint32_t fetchdat)     \
    {                                              \
        PUSH_L(reg);                               \
        CLOCK_CYCLES((is486) ? 1 : 2);             \
        PREFETCH_RUN(2, 1, -1, 0, 0, 0, 1, 0);     \
        return cpu_state.abrt;                     \
    }
#define POP_W_OP(reg)                              \
    static int opPOP_##reg(uint32_t fetchdat)      \
    {                                              \
        reg = POP_W();                             \
        CLOCK_CYCLES((is486) ? 1 : 4);             \
        PREFETCH_RUN(4, 1, -1, 1, 0, 0, 0, 0);     \
        return cpu_state.abrt;                     \
    }
#define POP_L_OP(reg)                              \
    static int opPOP_##reg(uint32_t fetchdat)      \
    {                                              \
        reg = POP_L();                             \
        CLOCK_CYCLES((is486) ? 1 : 4);             \
        PREFETCH_RUN(4, 1, -1, 0, 1, 0, 0, 0);     \
        return cpu_state.abrt;                     \
    }

/* PUSH r16 handlers (16-bit operand size). */
PUSH_W_OP(AX)
PUSH_W_OP(BX)
PUSH_W_OP(CX)
PUSH_W_OP(DX)
PUSH_W_OP(SI)
PUSH_W_OP(DI)
PUSH_W_OP(BP)
PUSH_W_OP(SP)

/* PUSH r32 handlers (32-bit operand size). */
PUSH_L_OP(EAX)
PUSH_L_OP(EBX)
PUSH_L_OP(ECX)
PUSH_L_OP(EDX)
PUSH_L_OP(ESI)
PUSH_L_OP(EDI)
PUSH_L_OP(EBP)
PUSH_L_OP(ESP)

/* POP r16 handlers. */
POP_W_OP(AX)
POP_W_OP(BX)
POP_W_OP(CX)
POP_W_OP(DX)
POP_W_OP(SI)
POP_W_OP(DI)
POP_W_OP(BP)
POP_W_OP(SP)

/* POP r32 handlers. */
POP_L_OP(EAX)
POP_L_OP(EBX)
POP_L_OP(ECX)
POP_L_OP(EDX)
POP_L_OP(ESI)
POP_L_OP(EDI)
POP_L_OP(EBP)
POP_L_OP(ESP)
/* PUSHA (16-bit operand size): push AX, CX, DX, BX, SP, BP, SI and DI.
   The pushed SP value is the one from before PUSHA started.  All eight
   words are written before the stack pointer is committed, so a fault
   on any write leaves (E)SP unchanged and the instruction can be
   restarted after the fault is serviced. */
static int
opPUSHA_w(uint32_t fetchdat)
{
    if (stack32) {
        writememw(ss, ESP - 2, AX);
        writememw(ss, ESP - 4, CX);
        writememw(ss, ESP - 6, DX);
        writememw(ss, ESP - 8, BX);
        writememw(ss, ESP - 10, SP);
        writememw(ss, ESP - 12, BP);
        writememw(ss, ESP - 14, SI);
        writememw(ss, ESP - 16, DI);
        if (!cpu_state.abrt)
            ESP -= 16;
    } else {
        /* 16-bit stack: every offset wraps within the 64 KB segment. */
        writememw(ss, ((SP - 2) & 0xFFFF), AX);
        writememw(ss, ((SP - 4) & 0xFFFF), CX);
        writememw(ss, ((SP - 6) & 0xFFFF), DX);
        writememw(ss, ((SP - 8) & 0xFFFF), BX);
        writememw(ss, ((SP - 10) & 0xFFFF), SP);
        writememw(ss, ((SP - 12) & 0xFFFF), BP);
        writememw(ss, ((SP - 14) & 0xFFFF), SI);
        writememw(ss, ((SP - 16) & 0xFFFF), DI);
        if (!cpu_state.abrt)
            SP -= 16;
    }
    CLOCK_CYCLES((is486) ? 11 : 18);
    PREFETCH_RUN(18, 1, -1, 0, 0, 8, 0, 0);
    return cpu_state.abrt;
}
/* PUSHAD (32-bit operand size): push EAX, ECX, EDX, EBX, ESP, EBP, ESI
   and EDI.  The pushed ESP is the pre-PUSHAD value; (E)SP is only
   decremented after all eight dword writes have succeeded, so a fault
   leaves the stack pointer untouched. */
static int
opPUSHA_l(uint32_t fetchdat)
{
    if (stack32) {
        writememl(ss, ESP - 4, EAX);
        writememl(ss, ESP - 8, ECX);
        writememl(ss, ESP - 12, EDX);
        writememl(ss, ESP - 16, EBX);
        writememl(ss, ESP - 20, ESP);
        writememl(ss, ESP - 24, EBP);
        writememl(ss, ESP - 28, ESI);
        writememl(ss, ESP - 32, EDI);
        if (!cpu_state.abrt)
            ESP -= 32;
    } else {
        /* 16-bit stack: offsets wrap within the 64 KB segment. */
        writememl(ss, ((SP - 4) & 0xFFFF), EAX);
        writememl(ss, ((SP - 8) & 0xFFFF), ECX);
        writememl(ss, ((SP - 12) & 0xFFFF), EDX);
        writememl(ss, ((SP - 16) & 0xFFFF), EBX);
        writememl(ss, ((SP - 20) & 0xFFFF), ESP);
        writememl(ss, ((SP - 24) & 0xFFFF), EBP);
        writememl(ss, ((SP - 28) & 0xFFFF), ESI);
        writememl(ss, ((SP - 32) & 0xFFFF), EDI);
        if (!cpu_state.abrt)
            SP -= 32;
    }
    CLOCK_CYCLES((is486) ? 11 : 18);
    PREFETCH_RUN(18, 1, -1, 0, 0, 0, 8, 0);
    return cpu_state.abrt;
}
/* POPA (16-bit operand size): pop DI, SI, BP, BX, DX, CX and AX.
   The word at stack offset +6 (the SP image pushed by PUSHA) is
   deliberately skipped - POPA discards the stored stack pointer.
   Each read aborts immediately on a fault, before later registers are
   touched; (E)SP is only adjusted once all seven reads succeed, so the
   instruction is restartable. */
static int
opPOPA_w(uint32_t fetchdat)
{
    if (stack32) {
        DI = readmemw(ss, ESP);
        if (cpu_state.abrt)
            return 1;
        SI = readmemw(ss, ESP + 2);
        if (cpu_state.abrt)
            return 1;
        BP = readmemw(ss, ESP + 4);
        if (cpu_state.abrt)
            return 1;
        /* +6 skipped: the saved SP is not restored. */
        BX = readmemw(ss, ESP + 8);
        if (cpu_state.abrt)
            return 1;
        DX = readmemw(ss, ESP + 10);
        if (cpu_state.abrt)
            return 1;
        CX = readmemw(ss, ESP + 12);
        if (cpu_state.abrt)
            return 1;
        AX = readmemw(ss, ESP + 14);
        if (cpu_state.abrt)
            return 1;
        ESP += 16;
    } else {
        DI = readmemw(ss, ((SP) &0xFFFF));
        if (cpu_state.abrt)
            return 1;
        SI = readmemw(ss, ((SP + 2) & 0xFFFF));
        if (cpu_state.abrt)
            return 1;
        BP = readmemw(ss, ((SP + 4) & 0xFFFF));
        if (cpu_state.abrt)
            return 1;
        /* +6 skipped: the saved SP is not restored. */
        BX = readmemw(ss, ((SP + 8) & 0xFFFF));
        if (cpu_state.abrt)
            return 1;
        DX = readmemw(ss, ((SP + 10) & 0xFFFF));
        if (cpu_state.abrt)
            return 1;
        CX = readmemw(ss, ((SP + 12) & 0xFFFF));
        if (cpu_state.abrt)
            return 1;
        AX = readmemw(ss, ((SP + 14) & 0xFFFF));
        if (cpu_state.abrt)
            return 1;
        SP += 16;
    }
    CLOCK_CYCLES((is486) ? 9 : 24);
    PREFETCH_RUN(24, 1, -1, 7, 0, 0, 0, 0);
    return 0;
}
/* POPAD (32-bit operand size): pop EDI, ESI, EBP, EBX, EDX, ECX and
   EAX.  The dword at stack offset +12 (the ESP image pushed by PUSHAD)
   is deliberately skipped.  Reads abort on fault before later
   registers are modified; (E)SP is adjusted only after all seven reads
   succeed. */
static int
opPOPA_l(uint32_t fetchdat)
{
    if (stack32) {
        EDI = readmeml(ss, ESP);
        if (cpu_state.abrt)
            return 1;
        ESI = readmeml(ss, ESP + 4);
        if (cpu_state.abrt)
            return 1;
        EBP = readmeml(ss, ESP + 8);
        if (cpu_state.abrt)
            return 1;
        /* +12 skipped: the saved ESP is not restored. */
        EBX = readmeml(ss, ESP + 16);
        if (cpu_state.abrt)
            return 1;
        EDX = readmeml(ss, ESP + 20);
        if (cpu_state.abrt)
            return 1;
        ECX = readmeml(ss, ESP + 24);
        if (cpu_state.abrt)
            return 1;
        EAX = readmeml(ss, ESP + 28);
        if (cpu_state.abrt)
            return 1;
        ESP += 32;
    } else {
        EDI = readmeml(ss, ((SP) &0xFFFF));
        if (cpu_state.abrt)
            return 1;
        ESI = readmeml(ss, ((SP + 4) & 0xFFFF));
        if (cpu_state.abrt)
            return 1;
        EBP = readmeml(ss, ((SP + 8) & 0xFFFF));
        if (cpu_state.abrt)
            return 1;
        /* +12 skipped: the saved ESP is not restored. */
        EBX = readmeml(ss, ((SP + 16) & 0xFFFF));
        if (cpu_state.abrt)
            return 1;
        EDX = readmeml(ss, ((SP + 20) & 0xFFFF));
        if (cpu_state.abrt)
            return 1;
        ECX = readmeml(ss, ((SP + 24) & 0xFFFF));
        if (cpu_state.abrt)
            return 1;
        EAX = readmeml(ss, ((SP + 28) & 0xFFFF));
        if (cpu_state.abrt)
            return 1;
        SP += 32;
    }
    CLOCK_CYCLES((is486) ? 9 : 24);
    PREFETCH_RUN(24, 1, -1, 0, 7, 0, 0, 0);
    return 0;
}
/* PUSH imm16: fetch a word immediate from the instruction stream and
   push it onto the stack.  Returns non-zero if either access faults. */
static int
opPUSH_imm_w(uint32_t fetchdat)
{
    const uint16_t imm = getwordf();

    PUSH_W(imm);

    CLOCK_CYCLES(2);
    PREFETCH_RUN(2, 3, -1, 0, 0, 1, 0, 0);
    return cpu_state.abrt;
}
/* PUSH imm32: fetch a dword immediate and push it onto the stack. */
static int
opPUSH_imm_l(uint32_t fetchdat)
{
    const uint32_t imm = getlong();

    /* getlong() may fault while fetching the immediate. */
    if (cpu_state.abrt)
        return 1;

    PUSH_L(imm);

    CLOCK_CYCLES(2);
    PREFETCH_RUN(2, 3, -1, 0, 0, 0, 1, 0);
    return cpu_state.abrt;
}
/* PUSH imm8 (16-bit operand size): fetch a byte immediate, sign-extend
   it to 16 bits, and push the result. */
static int
opPUSH_imm_bw(uint32_t fetchdat)
{
    uint16_t imm = getbytef();

    /* Replicate bit 7 into the high byte (sign extension). */
    imm |= (imm & 0x80) ? 0xFF00 : 0x0000;
    PUSH_W(imm);

    CLOCK_CYCLES(2);
    PREFETCH_RUN(2, 2, -1, 0, 0, 1, 0, 0);
    return cpu_state.abrt;
}
/* PUSH imm8 (32-bit operand size): fetch a byte immediate, sign-extend
   it to 32 bits, and push the result. */
static int
opPUSH_imm_bl(uint32_t fetchdat)
{
    uint32_t imm = getbytef();

    /* Replicate bit 7 into bits 8-31 (sign extension). */
    imm |= (imm & 0x80) ? 0xFFFFFF00 : 0x00000000;
    PUSH_L(imm);

    CLOCK_CYCLES(2);
    PREFETCH_RUN(2, 2, -1, 0, 0, 0, 1, 0);
    return cpu_state.abrt;
}
/* POP r/m16 (opcode 8Fh /0), 16-bit addressing: pop a word, then store
   it to the memory or register operand.  The pop happens before the
   ModR/M decode; if the store to the effective address faults, the
   stack pointer is rewound by 2 so the popped word is still on the
   stack when the instruction restarts. */
static int
opPOPW_a16(uint32_t fetchdat)
{
    uint16_t temp;

    temp = POP_W();
    if (cpu_state.abrt)
        return 1;

    fetch_ea_16(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_WRITE(cpu_state.ea_seg);
    seteaw(temp);
    if (cpu_state.abrt) {
        /* Store faulted: undo the pop so it can be retried. */
        if (stack32)
            ESP -= 2;
        else
            SP -= 2;
    }

    if (is486) {
        CLOCK_CYCLES((cpu_mod == 3) ? 1 : 6);
    } else {
        CLOCK_CYCLES((cpu_mod == 3) ? 4 : 5);
    }
    PREFETCH_RUN((cpu_mod == 3) ? 4 : 5, 2, rmdat, 1, 0, (cpu_mod == 3) ? 0 : 1, 0, 0);
    return cpu_state.abrt;
}
/* POP r/m16 (opcode 8Fh /0), 32-bit addressing: identical to the
   16-bit-addressing variant except the ModR/M byte is decoded with
   fetch_ea_32().  On a faulting store the stack pointer is rewound by
   2 so the pop can be retried. */
static int
opPOPW_a32(uint32_t fetchdat)
{
    uint16_t temp;

    temp = POP_W();
    if (cpu_state.abrt)
        return 1;

    fetch_ea_32(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_WRITE(cpu_state.ea_seg);
    seteaw(temp);
    if (cpu_state.abrt) {
        /* Store faulted: undo the pop so it can be retried. */
        if (stack32)
            ESP -= 2;
        else
            SP -= 2;
    }

    if (is486) {
        CLOCK_CYCLES((cpu_mod == 3) ? 1 : 6);
    } else {
        CLOCK_CYCLES((cpu_mod == 3) ? 4 : 5);
    }
    PREFETCH_RUN((cpu_mod == 3) ? 4 : 5, 2, rmdat, 1, 0, (cpu_mod == 3) ? 0 : 1, 0, 1);
    return cpu_state.abrt;
}
/* POP r/m32 (opcode 8Fh /0), 16-bit addressing: pop a dword, then
   store it to the memory or register operand.  On a faulting store the
   stack pointer is rewound by 4 so the pop can be retried. */
static int
opPOPL_a16(uint32_t fetchdat)
{
    uint32_t temp;

    temp = POP_L();
    if (cpu_state.abrt)
        return 1;

    fetch_ea_16(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_WRITE(cpu_state.ea_seg);
    seteal(temp);
    if (cpu_state.abrt) {
        /* Store faulted: undo the pop so it can be retried. */
        if (stack32)
            ESP -= 4;
        else
            SP -= 4;
    }

    if (is486) {
        CLOCK_CYCLES((cpu_mod == 3) ? 1 : 6);
    } else {
        CLOCK_CYCLES((cpu_mod == 3) ? 4 : 5);
    }
    PREFETCH_RUN((cpu_mod == 3) ? 4 : 5, 2, rmdat, 0, 1, 0, (cpu_mod == 3) ? 0 : 1, 0);
    return cpu_state.abrt;
}
/* POP r/m32 (opcode 8Fh /0), 32-bit addressing: pop a dword, then
   store it to the memory or register operand.  On a faulting store the
   stack pointer is rewound by 4 so the pop can be retried. */
static int
opPOPL_a32(uint32_t fetchdat)
{
    uint32_t temp;

    temp = POP_L();
    if (cpu_state.abrt)
        return 1;

    fetch_ea_32(fetchdat);
    if (cpu_mod != 3)
        SEG_CHECK_WRITE(cpu_state.ea_seg);
    seteal(temp);
    if (cpu_state.abrt) {
        /* Store faulted: undo the pop so it can be retried. */
        if (stack32)
            ESP -= 4;
        else
            SP -= 4;
    }

    if (is486) {
        CLOCK_CYCLES((cpu_mod == 3) ? 1 : 6);
    } else {
        CLOCK_CYCLES((cpu_mod == 3) ? 4 : 5);
    }
    PREFETCH_RUN((cpu_mod == 3) ? 4 : 5, 2, rmdat, 0, 1, 0, (cpu_mod == 3) ? 0 : 1, 1);
    return cpu_state.abrt;
}
/* ENTER imm16, imm8 (16-bit operand size): build a stack frame.
   'offset' is the number of bytes of local storage to reserve and
   'count' the lexical nesting level (third instruction byte).  Pushes
   BP, copies count-1 display words from the enclosing frames, pushes
   the new frame pointer when count >= 1, then lowers the stack pointer
   by 'offset'.  A fault in the copy phase restores the original
   (E)SP/(E)BP before aborting. */
static int
opENTER_w(uint32_t fetchdat)
{
    uint16_t offset;
    int count;
    uint32_t tempEBP;
    uint32_t tempESP;
    uint32_t frame_ptr;
#ifndef IS_DYNAREC
    /* Prefetch/cycle accounting, kept only in the interpreter build. */
    int reads = 0;
    int writes = 1;
    int instr_cycles = 0;
#endif
    uint16_t tempw;

    offset = getwordf();
    count = (fetchdat >> 16) & 0xff; /* nesting level from the third opcode byte */
    cpu_state.pc++;
    tempEBP = EBP; /* saved for rollback on fault */
    tempESP = ESP;

    PUSH_W(BP);
    if (cpu_state.abrt)
        return 1;
    frame_ptr = ESP;

    if (count > 0) {
        /* Copy count-1 frame pointers from the enclosing frames. */
        while (--count) {
            BP -= 2;
            tempw = readmemw(ss, BP);
            if (cpu_state.abrt) {
                ESP = tempESP;
                EBP = tempEBP;
                return 1;
            }
            PUSH_W(tempw);
            if (cpu_state.abrt) {
                ESP = tempESP;
                EBP = tempEBP;
                return 1;
            }
            CLOCK_CYCLES((is486) ? 3 : 4);
#ifndef IS_DYNAREC
            reads++;
            writes++;
            instr_cycles += (is486) ? 3 : 4;
#endif
        }
        /* Push the new frame pointer itself (low 16 bits). */
        PUSH_W(frame_ptr);
        if (cpu_state.abrt) {
            ESP = tempESP;
            EBP = tempEBP;
            return 1;
        }
        CLOCK_CYCLES((is486) ? 3 : 5);
#ifndef IS_DYNAREC
        writes++;
        instr_cycles += (is486) ? 3 : 5;
#endif
    }

    BP = frame_ptr;

    /* Reserve the local storage. */
    if (stack32)
        ESP -= offset;
    else
        SP -= offset;
    CLOCK_CYCLES((is486) ? 14 : 10);
#ifndef IS_DYNAREC
    instr_cycles += (is486) ? 14 : 10;
    PREFETCH_RUN(instr_cycles, 3, -1, reads, 0, writes, 0, 0);
#endif
    return 0;
}
/* ENTER imm16, imm8 (32-bit operand size): dword variant of opENTER_w.
   Pushes EBP, copies count-1 display dwords from the enclosing frames,
   pushes the new frame pointer when count >= 1, then lowers the stack
   pointer by 'offset'.  A fault in the copy phase restores the
   original (E)SP/(E)BP before aborting. */
static int
opENTER_l(uint32_t fetchdat)
{
    uint16_t offset;
    int count;
    uint32_t tempEBP;
    uint32_t tempESP;
    uint32_t frame_ptr;
#ifndef IS_DYNAREC
    /* Prefetch/cycle accounting, kept only in the interpreter build. */
    int reads = 0;
    int writes = 1;
    int instr_cycles = 0;
#endif
    uint32_t templ;

    offset = getwordf();
    count = (fetchdat >> 16) & 0xff; /* nesting level from the third opcode byte */
    cpu_state.pc++;
    tempEBP = EBP; /* saved for rollback on fault */
    tempESP = ESP;

    PUSH_L(EBP);
    if (cpu_state.abrt)
        return 1;
    frame_ptr = ESP;

    if (count > 0) {
        /* Copy count-1 frame pointers from the enclosing frames. */
        while (--count) {
            EBP -= 4;
            templ = readmeml(ss, EBP);
            if (cpu_state.abrt) {
                ESP = tempESP;
                EBP = tempEBP;
                return 1;
            }
            PUSH_L(templ);
            if (cpu_state.abrt) {
                ESP = tempESP;
                EBP = tempEBP;
                return 1;
            }
            CLOCK_CYCLES((is486) ? 3 : 4);
#ifndef IS_DYNAREC
            reads++;
            writes++;
            instr_cycles += (is486) ? 3 : 4;
#endif
        }
        /* Push the new frame pointer itself. */
        PUSH_L(frame_ptr);
        if (cpu_state.abrt) {
            ESP = tempESP;
            EBP = tempEBP;
            return 1;
        }
        CLOCK_CYCLES((is486) ? 3 : 5);
#ifndef IS_DYNAREC
        writes++;
        instr_cycles += (is486) ? 3 : 5;
#endif
    }

    EBP = frame_ptr;

    /* Reserve the local storage. */
    if (stack32)
        ESP -= offset;
    else
        SP -= offset;
    CLOCK_CYCLES((is486) ? 14 : 10);
#ifndef IS_DYNAREC
    instr_cycles += (is486) ? 14 : 10;
    PREFETCH_RUN(instr_cycles, 3, -1, reads, 0, writes, 0, 0);
#endif
    return 0;
}
/* LEAVE (16-bit operand size): SP <- BP, then BP <- word popped from
   the stack.  If the pop faults, the saved stack pointer is restored
   so the instruction can be restarted. */
static int
opLEAVE_w(uint32_t fetchdat)
{
    uint32_t saved_esp = ESP; /* for rollback if the pop faults */
    uint16_t new_bp;

    SP = BP;
    new_bp = POP_W();
    if (cpu_state.abrt) {
        ESP = saved_esp;
        return 1;
    }
    BP = new_bp;

    CLOCK_CYCLES(4);
    PREFETCH_RUN(4, 1, -1, 1, 0, 0, 0, 0);
    return 0;
}
/* LEAVE (32-bit operand size): ESP <- EBP, then EBP <- dword popped
   from the stack.  If the pop faults, the saved stack pointer is
   restored so the instruction can be restarted. */
static int
opLEAVE_l(uint32_t fetchdat)
{
    uint32_t saved_esp = ESP; /* for rollback if the pop faults */
    uint32_t new_ebp;

    ESP = EBP;
    new_ebp = POP_L();
    if (cpu_state.abrt) {
        ESP = saved_esp;
        return 1;
    }
    EBP = new_ebp;

    CLOCK_CYCLES(4);
    PREFETCH_RUN(4, 1, -1, 0, 1, 0, 0, 0);
    return 0;
}
/* Generator macros for the segment-register PUSH/POP handlers.  The
   POP variants save (E)SP before the pop so it can be restored if
   op_loadseg() faults on the new selector; the 32-bit POP pops a full
   dword but loads only its low 16 bits into the segment register. */
#define PUSH_SEG_OPS(seg)                              \
    static int opPUSH_##seg##_w(uint32_t fetchdat)     \
    {                                                  \
        PUSH_W(seg);                                   \
        CLOCK_CYCLES(2);                               \
        PREFETCH_RUN(2, 1, -1, 0, 0, 1, 0, 0);         \
        return cpu_state.abrt;                         \
    }                                                  \
    static int opPUSH_##seg##_l(uint32_t fetchdat)     \
    {                                                  \
        PUSH_L(seg);                                   \
        CLOCK_CYCLES(2);                               \
        PREFETCH_RUN(2, 1, -1, 0, 0, 0, 1, 0);         \
        return cpu_state.abrt;                         \
    }
#define POP_SEG_OPS(seg, realseg)                          \
    static int opPOP_##seg##_w(uint32_t fetchdat)          \
    {                                                      \
        uint16_t temp_seg;                                 \
        uint32_t temp_esp = ESP;                           \
        temp_seg = POP_W();                                \
        if (cpu_state.abrt)                                \
            return 1;                                      \
        op_loadseg(temp_seg, realseg);                     \
        if (cpu_state.abrt)                                \
            ESP = temp_esp;                                \
        CLOCK_CYCLES(is486 ? 3 : 7);                       \
        PREFETCH_RUN(is486 ? 3 : 7, 1, -1, 0, 0, 1, 0, 0); \
        return cpu_state.abrt;                             \
    }                                                      \
    static int opPOP_##seg##_l(uint32_t fetchdat)          \
    {                                                      \
        uint32_t temp_seg;                                 \
        uint32_t temp_esp = ESP;                           \
        temp_seg = POP_L();                                \
        if (cpu_state.abrt)                                \
            return 1;                                      \
        op_loadseg(temp_seg & 0xffff, realseg);            \
        if (cpu_state.abrt)                                \
            ESP = temp_esp;                                \
        CLOCK_CYCLES(is486 ? 3 : 7);                       \
        PREFETCH_RUN(is486 ? 3 : 7, 1, -1, 0, 0, 1, 0, 0); \
        return cpu_state.abrt;                             \
    }

/* PUSH handlers exist for every segment register. */
PUSH_SEG_OPS(CS)
PUSH_SEG_OPS(DS)
PUSH_SEG_OPS(ES)
PUSH_SEG_OPS(FS)
PUSH_SEG_OPS(GS)
PUSH_SEG_OPS(SS)

/* POP SS is not generated here: it has dedicated handlers that also
   execute the following instruction (interrupt-shadow handling). */
POP_SEG_OPS(DS, &cpu_state.seg_ds)
POP_SEG_OPS(ES, &cpu_state.seg_es)
POP_SEG_OPS(FS, &cpu_state.seg_fs)
POP_SEG_OPS(GS, &cpu_state.seg_gs)
/* POP SS (16-bit operand size): pop a word and load it into SS.  If
   the segment load faults, the stack pointer is restored and the
   handler aborts.  After a successful load, the next instruction is
   fetched and dispatched immediately within this handler, so no
   interrupt can be sampled between POP SS and the following
   instruction (the x86 SS-load interrupt shadow). */
static int
opPOP_SS_w(uint32_t fetchdat)
{
    uint16_t temp_seg;
    uint32_t temp_esp = ESP; /* for rollback if the segment load faults */

    temp_seg = POP_W();
    if (cpu_state.abrt)
        return 1;
    op_loadseg(temp_seg, &cpu_state.seg_ss);
    if (cpu_state.abrt) {
        ESP = temp_esp;
        return 1;
    }
    CLOCK_CYCLES(is486 ? 3 : 7);
    PREFETCH_RUN(is486 ? 3 : 7, 1, -1, 0, 0, 1, 0, 0);

    /* Decode and execute the following instruction inline. */
    cpu_state.oldpc = cpu_state.pc;
    cpu_state.op32 = use32;
    cpu_state.ssegs = 0;
    cpu_state.ea_seg = &cpu_state.seg_ds;
    fetchdat = fastreadl(cs + cpu_state.pc);
    cpu_state.pc++;
    if (cpu_state.abrt)
        return 1;
#ifdef OPS_286_386
    x86_2386_opcodes[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
#else
    x86_opcodes[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
#endif

    return 1;
}
/* POP SS (32-bit operand size): pop a dword and load its low 16 bits
   into SS.  If the segment load faults, the stack pointer is restored
   and the handler aborts.  After a successful load, the next
   instruction is fetched and dispatched immediately within this
   handler, so no interrupt can be sampled between POP SS and the
   following instruction (the x86 SS-load interrupt shadow). */
static int
opPOP_SS_l(uint32_t fetchdat)
{
    uint32_t temp_seg;
    uint32_t temp_esp = ESP; /* for rollback if the segment load faults */

    temp_seg = POP_L();
    if (cpu_state.abrt)
        return 1;
    op_loadseg(temp_seg & 0xffff, &cpu_state.seg_ss);
    if (cpu_state.abrt) {
        ESP = temp_esp;
        return 1;
    }
    CLOCK_CYCLES(is486 ? 3 : 7);
    PREFETCH_RUN(is486 ? 3 : 7, 1, -1, 0, 0, 1, 0, 0);

    /* Decode and execute the following instruction inline. */
    cpu_state.oldpc = cpu_state.pc;
    cpu_state.op32 = use32;
    cpu_state.ssegs = 0;
    cpu_state.ea_seg = &cpu_state.seg_ds;
    fetchdat = fastreadl(cs + cpu_state.pc);
    cpu_state.pc++;
    if (cpu_state.abrt)
        return 1;
#ifdef OPS_286_386
    x86_2386_opcodes[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
#else
    x86_opcodes[(fetchdat & 0xff) | cpu_state.op32](fetchdat >> 8);
#endif

    return 1;
}
``` | /content/code_sandbox/src/cpu/x86_ops_stack.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 5,895 |
```c
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <wchar.h>
#include <86box/86box.h>
#include "cpu.h"
#include <86box/mem.h>
#include "codegen_timing_common.h"
/* Per-opcode dependency flags for the one-byte opcode map, memory-operand
   (mod != 3) forms. Used by the codegen timing model to track which
   architectural registers each instruction reads (SRCDEP_*) and writes
   (DSTDEP_*), plus decode properties (MODRM, HAS_IMM8/HAS_IMM1632, IMPL_ESP).
   Indexed by the opcode byte. */
uint64_t opcode_deps[256] = {
    // clang-format off
/* ADD ADD ADD ADD*/
/*00*/ SRCDEP_REG | MODRM, SRCDEP_REG | MODRM, SRCDEP_REG | DSTDEP_REG | MODRM, SRCDEP_REG | DSTDEP_REG | MODRM,
/* ADD ADD PUSH ES POP ES*/
SRCDEP_EAX | DSTDEP_EAX | HAS_IMM8, SRCDEP_EAX | DSTDEP_EAX | HAS_IMM1632, IMPL_ESP, IMPL_ESP,
/* OR OR OR OR*/
SRCDEP_REG | MODRM, SRCDEP_REG | MODRM, SRCDEP_REG | DSTDEP_REG | MODRM, SRCDEP_REG | DSTDEP_REG | MODRM,
/* OR OR PUSH CS*/
SRCDEP_EAX | DSTDEP_EAX | HAS_IMM8, SRCDEP_EAX | DSTDEP_EAX | HAS_IMM1632, IMPL_ESP, 0,
/* ADC ADC ADC ADC*/
/*10*/ SRCDEP_REG | MODRM, SRCDEP_REG | MODRM, SRCDEP_REG | DSTDEP_REG | MODRM, SRCDEP_REG | DSTDEP_REG | MODRM,
/* ADC ADC PUSH SS POP SS*/
SRCDEP_EAX | DSTDEP_EAX | HAS_IMM8, SRCDEP_EAX | DSTDEP_EAX | HAS_IMM1632, IMPL_ESP, IMPL_ESP,
/* SBB SBB SBB SBB*/
SRCDEP_REG | MODRM, SRCDEP_REG | MODRM, SRCDEP_REG | DSTDEP_REG | MODRM, SRCDEP_REG | DSTDEP_REG | MODRM,
/* SBB SBB PUSH DS POP DS*/
SRCDEP_EAX | DSTDEP_EAX | HAS_IMM8, SRCDEP_EAX | DSTDEP_EAX | HAS_IMM1632, IMPL_ESP, IMPL_ESP,
/* AND AND AND AND*/
/*20*/ SRCDEP_REG | MODRM, SRCDEP_REG | MODRM, SRCDEP_REG | DSTDEP_REG | MODRM, SRCDEP_REG | DSTDEP_REG | MODRM,
/* AND AND DAA*/
SRCDEP_EAX | DSTDEP_EAX | HAS_IMM8, SRCDEP_EAX | DSTDEP_EAX | HAS_IMM1632, 0, SRCDEP_EAX | DSTDEP_EAX,
/* SUB SUB SUB SUB*/
SRCDEP_REG | MODRM, SRCDEP_REG | MODRM, SRCDEP_REG | DSTDEP_REG | MODRM, SRCDEP_REG | DSTDEP_REG | MODRM,
/* SUB SUB DAS*/
SRCDEP_EAX | DSTDEP_EAX | HAS_IMM8, SRCDEP_EAX | DSTDEP_EAX | HAS_IMM1632, 0, SRCDEP_EAX | DSTDEP_EAX,
/* XOR XOR XOR XOR*/
/*30*/ SRCDEP_REG | MODRM, SRCDEP_REG | MODRM, SRCDEP_REG | DSTDEP_REG | MODRM, SRCDEP_REG | DSTDEP_REG | MODRM,
/* XOR XOR AAA*/
SRCDEP_EAX | DSTDEP_EAX | HAS_IMM8, SRCDEP_EAX | DSTDEP_EAX | HAS_IMM1632, 0, SRCDEP_EAX | DSTDEP_EAX,
/* CMP CMP CMP CMP*/
SRCDEP_REG | MODRM, SRCDEP_REG | MODRM, SRCDEP_REG | MODRM, SRCDEP_REG | MODRM,
/* CMP CMP AAS*/
SRCDEP_EAX | HAS_IMM8, SRCDEP_EAX | HAS_IMM1632, 0, SRCDEP_EAX | DSTDEP_EAX,
/* INC EAX INC ECX INC EDX INC EBX*/
/*40*/ SRCDEP_EAX | DSTDEP_EAX, SRCDEP_ECX | DSTDEP_ECX, SRCDEP_EDX | DSTDEP_EDX, SRCDEP_EBX | DSTDEP_EBX,
/* INC ESP INC EBP INC ESI INC EDI*/
SRCDEP_ESP | DSTDEP_ESP, SRCDEP_EBP | DSTDEP_EBP, SRCDEP_ESI | DSTDEP_ESI, SRCDEP_EDI | DSTDEP_EDI,
/* DEC EAX DEC ECX DEC EDX DEC EBX*/
SRCDEP_EAX | DSTDEP_EAX, SRCDEP_ECX | DSTDEP_ECX, SRCDEP_EDX | DSTDEP_EDX, SRCDEP_EBX | DSTDEP_EBX,
/* DEC ESP DEC EBP DEC ESI DEC EDI*/
SRCDEP_ESP | DSTDEP_ESP, SRCDEP_EBP | DSTDEP_EBP, SRCDEP_ESI | DSTDEP_ESI, SRCDEP_EDI | DSTDEP_EDI,
/* PUSH EAX PUSH ECX PUSH EDX PUSH EBX*/
/*50*/ SRCDEP_EAX | IMPL_ESP, SRCDEP_ECX | IMPL_ESP, SRCDEP_EDX | IMPL_ESP, SRCDEP_EBX | IMPL_ESP,
/* PUSH ESP PUSH EBP PUSH ESI PUSH EDI*/
SRCDEP_ESP | IMPL_ESP, SRCDEP_EBP | IMPL_ESP, SRCDEP_ESI | IMPL_ESP, SRCDEP_EDI | IMPL_ESP,
/* POP EAX POP ECX POP EDX POP EBX*/
DSTDEP_EAX | IMPL_ESP, DSTDEP_ECX | IMPL_ESP, DSTDEP_EDX | IMPL_ESP, DSTDEP_EBX | IMPL_ESP,
/* POP ESP POP EBP POP ESI POP EDI*/
DSTDEP_ESP | IMPL_ESP, DSTDEP_EBP | IMPL_ESP, DSTDEP_ESI | IMPL_ESP, DSTDEP_EDI | IMPL_ESP,
/* PUSHA POPA BOUND ARPL*/
/*60*/ IMPL_ESP, IMPL_ESP, 0, 0,
0, 0, 0, 0,
/* PUSH imm IMUL PUSH imm IMUL*/
IMPL_ESP | HAS_IMM1632,DSTDEP_REG | MODRM, IMPL_ESP | HAS_IMM8, DSTDEP_REG | MODRM,
/* INSB INSW OUTSB OUTSW*/
0, 0, 0, 0,
/* Jxx*/
/*70*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/*80*/ 0, 0, 0, 0,
/* TEST TEST XCHG XCHG*/
SRCDEP_REG | MODRM, SRCDEP_REG | MODRM, SRCDEP_REG | DSTDEP_REG | MODRM, SRCDEP_REG | DSTDEP_REG | MODRM,
/* MOV MOV MOV MOV*/
SRCDEP_REG | MODRM, SRCDEP_REG | MODRM, DSTDEP_REG | MODRM, DSTDEP_REG | MODRM,
/* MOV from seg LEA MOV to seg POP*/
MODRM, DSTDEP_REG | MODRM, MODRM, IMPL_ESP | MODRM,
/* NOP XCHG XCHG XCHG*/
/*90*/ 0, SRCDEP_EAX | DSTDEP_EAX | SRCDEP_ECX | DSTDEP_ECX, SRCDEP_EAX | DSTDEP_EAX | SRCDEP_EDX | DSTDEP_EDX, SRCDEP_EAX | DSTDEP_EAX | SRCDEP_EBX | DSTDEP_EBX,
/* XCHG XCHG XCHG XCHG*/
SRCDEP_EAX | DSTDEP_EAX | SRCDEP_ESP | DSTDEP_ESP, SRCDEP_EAX | DSTDEP_EAX | SRCDEP_EBP | DSTDEP_EBP, SRCDEP_EAX | DSTDEP_EAX | SRCDEP_ESI | DSTDEP_ESI, SRCDEP_EAX | DSTDEP_EAX | SRCDEP_EDI | DSTDEP_EDI,
/* CBW CWD CALL far WAIT*/
SRCDEP_EAX | DSTDEP_EAX, SRCDEP_EAX | DSTDEP_EDX, 0, 0,
/* PUSHF POPF SAHF LAHF*/
IMPL_ESP, IMPL_ESP, SRCDEP_EAX, DSTDEP_EAX,
/* MOV MOV MOV MOV*/
/*a0*/ DSTDEP_EAX, DSTDEP_EAX, SRCDEP_EAX, SRCDEP_EAX,
/* MOVSB MOVSW CMPSB CMPSW*/
0, 0, 0, 0,
/* TEST TEST STOSB STOSW*/
SRCDEP_EAX, SRCDEP_EAX, 0, 0,
/* LODSB LODSW SCASB SCASW*/
0, 0, 0, 0,
/* MOV*/
/*b0*/ DSTDEP_EAX | HAS_IMM8, DSTDEP_ECX | HAS_IMM8, DSTDEP_EDX | HAS_IMM8, DSTDEP_EBX | HAS_IMM8,
DSTDEP_EAX | HAS_IMM8, DSTDEP_ECX | HAS_IMM8, DSTDEP_EDX | HAS_IMM8, DSTDEP_EBX | HAS_IMM8,
DSTDEP_EAX | HAS_IMM1632, DSTDEP_ECX | HAS_IMM1632, DSTDEP_EDX | HAS_IMM1632, DSTDEP_EBX | HAS_IMM1632,
DSTDEP_ESP | HAS_IMM1632, DSTDEP_EBP | HAS_IMM1632, DSTDEP_ESI | HAS_IMM1632, DSTDEP_EDI | HAS_IMM1632,
/* RET imm RET*/
/*c0*/ 0, 0, SRCDEP_ESP | DSTDEP_ESP, IMPL_ESP,
/* LES LDS MOV MOV*/
DSTDEP_REG | MODRM, DSTDEP_REG | MODRM, MODRM | HAS_IMM8, MODRM | HAS_IMM1632,
/* ENTER LEAVE RETF RETF*/
IMPL_ESP, IMPL_ESP, IMPL_ESP, IMPL_ESP,
/* INT3 INT INTO IRET*/
0, 0, 0, 0,
/*d0*/ 0, 0, 0, 0,
/* AAM AAD SETALC XLAT*/
SRCDEP_EAX | DSTDEP_EAX, SRCDEP_EAX | DSTDEP_EAX, SRCDEP_EAX, SRCDEP_EAX | SRCDEP_EBX,
0, 0, 0, 0,
0, 0, 0, 0,
/* LOOPNE LOOPE LOOP JCXZ*/
/*e0*/ SRCDEP_ECX | DSTDEP_ECX, SRCDEP_ECX | DSTDEP_ECX, SRCDEP_ECX | DSTDEP_ECX, SRCDEP_ECX,
/* IN AL IN AX OUT_AL OUT_AX*/
DSTDEP_EAX, DSTDEP_EAX, SRCDEP_EAX, SRCDEP_EAX,
/* CALL JMP JMP JMP*/
IMPL_ESP, 0, 0, 0,
/* IN AL IN AX OUT_AL OUT_AX*/
SRCDEP_EDX | DSTDEP_EAX, SRCDEP_EDX | DSTDEP_EAX, SRCDEP_EDX | SRCDEP_EAX, SRCDEP_EDX | SRCDEP_EAX,
/* REPNE REPE*/
/*f0*/ 0, 0, 0, 0,
/* HLT CMC*/
0, 0, 0, 0,
/* CLC STC CLI STI*/
0, 0, 0, 0,
/* CLD STD INCDEC*/
0, 0, MODRM, 0
    // clang-format on
};
/* Same one-byte opcode map as opcode_deps, but for the register-operand
   (mod == 3) forms: the r/m field names a register, so SRCDEP_RM/DSTDEP_RM
   dependencies are tracked in addition to the fixed-register ones. */
uint64_t opcode_deps_mod3[256] = {
    // clang-format off
/* ADD ADD ADD ADD*/
/*00*/ SRCDEP_REG | SRCDEP_RM | DSTDEP_RM | MODRM, SRCDEP_REG | SRCDEP_RM | DSTDEP_RM | MODRM, SRCDEP_REG | DSTDEP_REG | SRCDEP_RM | MODRM, SRCDEP_REG | DSTDEP_REG | SRCDEP_RM | MODRM,
/* ADD ADD PUSH ES POP ES*/
SRCDEP_EAX | DSTDEP_EAX | HAS_IMM8, SRCDEP_EAX | DSTDEP_EAX | HAS_IMM1632, IMPL_ESP, IMPL_ESP,
/* OR OR OR OR*/
SRCDEP_REG | SRCDEP_RM | DSTDEP_RM | MODRM, SRCDEP_REG | SRCDEP_RM | DSTDEP_RM | MODRM, SRCDEP_REG | DSTDEP_REG | SRCDEP_RM | MODRM, SRCDEP_REG | DSTDEP_REG | SRCDEP_RM | MODRM,
/* OR OR PUSH CS*/
SRCDEP_EAX | DSTDEP_EAX | HAS_IMM8, SRCDEP_EAX | DSTDEP_EAX | HAS_IMM1632, IMPL_ESP, 0,
/* ADC ADC ADC ADC*/
/*10*/ SRCDEP_REG | SRCDEP_RM | DSTDEP_RM | MODRM, SRCDEP_REG | SRCDEP_RM | DSTDEP_RM | MODRM, SRCDEP_REG | DSTDEP_REG | SRCDEP_RM | MODRM, SRCDEP_REG | DSTDEP_REG | SRCDEP_RM | MODRM,
/* ADC ADC PUSH SS POP SS*/
SRCDEP_EAX | DSTDEP_EAX | HAS_IMM8, SRCDEP_EAX | DSTDEP_EAX | HAS_IMM1632, IMPL_ESP, IMPL_ESP,
/* SBB SBB SBB SBB*/
SRCDEP_REG |SRCDEP_RM | DSTDEP_RM | MODRM, SRCDEP_REG | SRCDEP_RM | DSTDEP_RM | MODRM, SRCDEP_REG | DSTDEP_REG | SRCDEP_RM | MODRM, SRCDEP_REG | DSTDEP_REG | SRCDEP_RM | MODRM,
/* SBB SBB PUSH DS POP DS*/
SRCDEP_EAX | DSTDEP_EAX | HAS_IMM8, SRCDEP_EAX | DSTDEP_EAX | HAS_IMM1632, IMPL_ESP, IMPL_ESP,
/* AND AND AND AND*/
/*20*/ SRCDEP_REG | SRCDEP_RM | DSTDEP_RM | MODRM, SRCDEP_REG | SRCDEP_RM | DSTDEP_RM | MODRM, SRCDEP_REG | DSTDEP_REG | SRCDEP_RM | MODRM, SRCDEP_REG | DSTDEP_REG | SRCDEP_RM | MODRM,
/* AND AND DAA*/
SRCDEP_EAX | DSTDEP_EAX | HAS_IMM8, SRCDEP_EAX | DSTDEP_EAX | HAS_IMM1632, 0, SRCDEP_EAX | DSTDEP_EAX,
/* SUB SUB SUB SUB*/
SRCDEP_REG | SRCDEP_RM | DSTDEP_RM | MODRM, SRCDEP_REG | SRCDEP_RM | DSTDEP_RM | MODRM, SRCDEP_REG | DSTDEP_REG | SRCDEP_RM | MODRM, SRCDEP_REG | DSTDEP_REG | SRCDEP_RM | MODRM,
/* SUB SUB DAS*/
SRCDEP_EAX | DSTDEP_EAX | HAS_IMM8, SRCDEP_EAX | DSTDEP_EAX | HAS_IMM1632, 0, SRCDEP_EAX | DSTDEP_EAX,
/* XOR XOR XOR XOR*/
/*30*/ SRCDEP_REG | SRCDEP_RM | DSTDEP_RM | MODRM, SRCDEP_REG | SRCDEP_RM | DSTDEP_RM | MODRM, SRCDEP_REG | DSTDEP_REG | SRCDEP_RM | MODRM, SRCDEP_REG | DSTDEP_REG | SRCDEP_RM | MODRM,
/* XOR XOR AAA*/
SRCDEP_EAX | DSTDEP_EAX | HAS_IMM8, SRCDEP_EAX | DSTDEP_EAX | HAS_IMM1632, 0, SRCDEP_EAX | DSTDEP_EAX,
/* CMP CMP CMP CMP*/
SRCDEP_REG | SRCDEP_RM | MODRM, SRCDEP_REG | SRCDEP_RM | MODRM, SRCDEP_REG | SRCDEP_RM | MODRM, SRCDEP_REG | SRCDEP_RM | MODRM,
/* CMP CMP AAS*/
SRCDEP_EAX | HAS_IMM8, SRCDEP_EAX | HAS_IMM1632, 0, SRCDEP_EAX | DSTDEP_EAX,
/* INC EAX INC ECX INC EDX INC EBX*/
/*40*/ SRCDEP_EAX | DSTDEP_EAX, SRCDEP_ECX | DSTDEP_ECX, SRCDEP_EDX | DSTDEP_EDX, SRCDEP_EBX | DSTDEP_EBX,
/* INC ESP INC EBP INC ESI INC EDI*/
SRCDEP_ESP | DSTDEP_ESP, SRCDEP_EBP | DSTDEP_EBP, SRCDEP_ESI | DSTDEP_ESI, SRCDEP_EDI | DSTDEP_EDI,
/* DEC EAX DEC ECX DEC EDX DEC EBX*/
SRCDEP_EAX | DSTDEP_EAX, SRCDEP_ECX | DSTDEP_ECX, SRCDEP_EDX | DSTDEP_EDX, SRCDEP_EBX | DSTDEP_EBX,
/* DEC ESP DEC EBP DEC ESI DEC EDI*/
SRCDEP_ESP | DSTDEP_ESP, SRCDEP_EBP | DSTDEP_EBP, SRCDEP_ESI | DSTDEP_ESI, SRCDEP_EDI | DSTDEP_EDI,
/* PUSH EAX PUSH ECX PUSH EDX PUSH EBX*/
/*50*/ SRCDEP_EAX | IMPL_ESP, SRCDEP_ECX | IMPL_ESP, SRCDEP_EDX | IMPL_ESP, SRCDEP_EBX | IMPL_ESP,
/* PUSH ESP PUSH EBP PUSH ESI PUSH EDI*/
SRCDEP_ESP | IMPL_ESP, SRCDEP_EBP | IMPL_ESP, SRCDEP_ESI | IMPL_ESP, SRCDEP_EDI | IMPL_ESP,
/* POP EAX POP ECX POP EDX POP EBX*/
DSTDEP_EAX | IMPL_ESP, DSTDEP_ECX | IMPL_ESP, DSTDEP_EDX | IMPL_ESP, DSTDEP_EBX | IMPL_ESP,
/* POP ESP POP EBP POP ESI POP EDI*/
DSTDEP_ESP | IMPL_ESP, DSTDEP_EBP | IMPL_ESP, DSTDEP_ESI | IMPL_ESP, DSTDEP_EDI | IMPL_ESP,
/* PUSHA POPA BOUND ARPL*/
/*60*/ IMPL_ESP, IMPL_ESP, 0, 0,
0, 0, 0, 0,
/* PUSH imm IMUL PUSH imm IMUL*/
IMPL_ESP | HAS_IMM1632,DSTDEP_REG | SRCDEP_RM | MODRM, IMPL_ESP | HAS_IMM8, DSTDEP_REG | SRCDEP_RM | MODRM,
/* INSB INSW OUTSB OUTSW*/
0, 0, 0, 0,
/* Jxx*/
/*70*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/*80*/ 0, 0, 0, 0,
/* TEST TEST XCHG XCHG*/
SRCDEP_REG | SRCDEP_RM | MODRM, SRCDEP_REG | SRCDEP_RM | MODRM, SRCDEP_REG | DSTDEP_REG | SRCDEP_RM | DSTDEP_RM | MODRM, SRCDEP_REG | DSTDEP_REG | SRCDEP_RM | DSTDEP_RM | MODRM,
/* MOV MOV MOV MOV*/
SRCDEP_REG | DSTDEP_RM | MODRM, SRCDEP_REG | DSTDEP_RM | MODRM, SRCDEP_RM | DSTDEP_REG | MODRM, SRCDEP_RM | DSTDEP_REG | MODRM,
/* MOV from seg LEA MOV to seg POP*/
DSTDEP_RM | MODRM, DSTDEP_REG | MODRM, SRCDEP_RM | MODRM, IMPL_ESP | DSTDEP_RM | MODRM,
/* NOP XCHG XCHG XCHG*/
/*90*/ 0, SRCDEP_EAX | DSTDEP_EAX | SRCDEP_ECX | DSTDEP_ECX, SRCDEP_EAX | DSTDEP_EAX | SRCDEP_EDX | DSTDEP_EDX, SRCDEP_EAX | DSTDEP_EAX | SRCDEP_EBX | DSTDEP_EBX,
/* XCHG XCHG XCHG XCHG*/
SRCDEP_EAX | DSTDEP_EAX | SRCDEP_ESP | DSTDEP_ESP, SRCDEP_EAX | DSTDEP_EAX | SRCDEP_EBP | DSTDEP_EBP, SRCDEP_EAX | DSTDEP_EAX | SRCDEP_ESI | DSTDEP_ESI, SRCDEP_EAX | DSTDEP_EAX | SRCDEP_EDI | DSTDEP_EDI,
/* CBW CWD CALL far WAIT*/
SRCDEP_EAX | DSTDEP_EAX, SRCDEP_EAX | DSTDEP_EDX, 0, 0,
/* PUSHF POPF SAHF LAHF*/
IMPL_ESP, IMPL_ESP, SRCDEP_EAX, DSTDEP_EAX,
/* MOV MOV MOV MOV*/
/*a0*/ DSTDEP_EAX, DSTDEP_EAX, SRCDEP_EAX, SRCDEP_EAX,
/* MOVSB MOVSW CMPSB CMPSW*/
0, 0, 0, 0,
/* TEST TEST STOSB STOSW*/
SRCDEP_EAX, SRCDEP_EAX, 0, 0,
/* LODSB LODSW SCASB SCASW*/
0, 0, 0, 0,
/* MOV*/
/*b0*/ DSTDEP_EAX | HAS_IMM8, DSTDEP_ECX | HAS_IMM8, DSTDEP_EDX | HAS_IMM8, DSTDEP_EBX | HAS_IMM8,
DSTDEP_EAX | HAS_IMM8, DSTDEP_ECX | HAS_IMM8, DSTDEP_EDX | HAS_IMM8, DSTDEP_EBX | HAS_IMM8,
DSTDEP_EAX | HAS_IMM1632, DSTDEP_ECX | HAS_IMM1632, DSTDEP_EDX | HAS_IMM1632, DSTDEP_EBX | HAS_IMM1632,
DSTDEP_ESP | HAS_IMM1632, DSTDEP_EBP | HAS_IMM1632, DSTDEP_ESI | HAS_IMM1632, DSTDEP_EDI | HAS_IMM1632,
/* RET imm RET*/
/*c0*/ 0, 0, SRCDEP_ESP | DSTDEP_ESP, IMPL_ESP,
/* LES LDS MOV MOV*/
DSTDEP_REG | MODRM, DSTDEP_REG | MODRM, DSTDEP_RM | MODRM | HAS_IMM8, DSTDEP_RM | MODRM | HAS_IMM1632,
/* ENTER LEAVE RETF RETF*/
IMPL_ESP, IMPL_ESP, IMPL_ESP, IMPL_ESP,
/* INT3 INT INTO IRET*/
0, 0, 0, 0,
/*d0*/ 0, 0, 0, 0,
/* AAM AAD SETALC XLAT*/
SRCDEP_EAX | DSTDEP_EAX, SRCDEP_EAX | DSTDEP_EAX, SRCDEP_EAX, SRCDEP_EAX | SRCDEP_EBX,
0, 0, 0, 0,
0, 0, 0, 0,
/* LOOPNE LOOPE LOOP JCXZ*/
/*e0*/ SRCDEP_ECX | DSTDEP_ECX, SRCDEP_ECX | DSTDEP_ECX, SRCDEP_ECX | DSTDEP_ECX, SRCDEP_ECX,
/* IN AL IN AX OUT_AL OUT_AX*/
DSTDEP_EAX, DSTDEP_EAX, SRCDEP_EAX, SRCDEP_EAX,
/* CALL JMP JMP JMP*/
IMPL_ESP, 0, 0, 0,
/* IN AL IN AX OUT_AL OUT_AX*/
SRCDEP_EDX | DSTDEP_EAX, SRCDEP_EDX | DSTDEP_EAX, SRCDEP_EDX | SRCDEP_EAX, SRCDEP_EDX | SRCDEP_EAX,
/* REPNE REPE*/
/*f0*/ 0, 0, 0, 0,
/* HLT CMC*/
0, 0, 0, 0,
/* CLC STC CLI STI*/
0, 0, 0, 0,
/* CLD STD INCDEC*/
0, 0, SRCDEP_RM | DSTDEP_RM | MODRM, 0
    // clang-format on
};
/* Dependency/decode flags for the 0F-prefixed (two-byte) opcode map,
   memory-operand forms. MMX_SHIFTPACK / MMX_MULTIPLY tag which MMX
   execution resource an opcode uses for pairing/timing purposes. */
uint64_t opcode_deps_0f[256] = {
    // clang-format off
/*00*/ MODRM, MODRM, MODRM, MODRM,
0, 0, 0, 0,
0, 0, 0, 0,
0, MODRM, 0, MODRM,
/*10*/ 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
/*20*/ MODRM, MODRM, MODRM, MODRM,
MODRM, MODRM, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
/*30*/ 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
/*40*/ 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
/*50*/ 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
/*60*/ MODRM | MMX_SHIFTPACK, MODRM | MMX_SHIFTPACK, MODRM | MMX_SHIFTPACK, MODRM | MMX_SHIFTPACK,
MODRM, MODRM, MODRM, MODRM,
MODRM | MMX_SHIFTPACK, MODRM | MMX_SHIFTPACK, MODRM | MMX_SHIFTPACK, MODRM | MMX_SHIFTPACK,
0, 0, MODRM, MODRM,
/*70*/ 0, MODRM | MMX_SHIFTPACK, MODRM | MMX_SHIFTPACK, MODRM | MMX_SHIFTPACK,
MODRM, MODRM, MODRM, 0,
0, 0, 0, 0,
0, 0, MODRM, MODRM,
/*80*/ 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
/*90*/ MODRM, MODRM, MODRM, MODRM,
MODRM, MODRM, MODRM, MODRM,
MODRM, MODRM, MODRM, MODRM,
MODRM, MODRM, MODRM, MODRM,
/*a0*/ MODRM, MODRM, MODRM, MODRM,
MODRM, MODRM, 0, 0,
MODRM, MODRM, 0, MODRM,
MODRM, MODRM, MODRM, MODRM,
/*b0*/ MODRM, MODRM, MODRM, MODRM,
MODRM, MODRM, MODRM, MODRM,
0, 0, MODRM, MODRM,
MODRM, MODRM, MODRM, MODRM,
/*c0*/ MODRM, MODRM, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
/*d0*/ 0, MODRM | MMX_SHIFTPACK, MODRM | MMX_SHIFTPACK, MODRM | MMX_SHIFTPACK,
0, MODRM | MMX_MULTIPLY, 0, 0,
MODRM, MODRM, 0, MODRM,
MODRM, MODRM, 0, MODRM,
/*e0*/ 0, MODRM | MMX_SHIFTPACK, MODRM | MMX_SHIFTPACK, 0,
0, MODRM | MMX_MULTIPLY, 0, 0,
MODRM, MODRM, 0, MODRM,
MODRM, MODRM, 0, MODRM,
/*f0*/ 0, MODRM | MMX_SHIFTPACK, MODRM | MMX_SHIFTPACK, MODRM | MMX_SHIFTPACK,
0, MODRM | MMX_MULTIPLY, 0, 0,
MODRM, MODRM, MODRM, 0,
MODRM, MODRM, MODRM, 0,
    // clang-format on
};
/* 0F-prefixed opcode map, register-operand (mod == 3) forms. Currently
   identical in content to opcode_deps_0f; kept as a separate table so the
   two forms can diverge without touching the lookup code. */
uint64_t opcode_deps_0f_mod3[256] = {
    // clang-format off
/*00*/ MODRM, MODRM, MODRM, MODRM,
0, 0, 0, 0,
0, 0, 0, 0,
0, MODRM, 0, MODRM,
/*10*/ 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
/*20*/ MODRM, MODRM, MODRM, MODRM,
MODRM, MODRM, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
/*30*/ 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
/*40*/ 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
/*50*/ 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
/*60*/ MODRM | MMX_SHIFTPACK, MODRM | MMX_SHIFTPACK, MODRM | MMX_SHIFTPACK, MODRM | MMX_SHIFTPACK,
MODRM, MODRM, MODRM, MODRM,
MODRM | MMX_SHIFTPACK, MODRM | MMX_SHIFTPACK, MODRM | MMX_SHIFTPACK, MODRM | MMX_SHIFTPACK,
0, 0, MODRM, MODRM,
/*70*/ 0, MODRM | MMX_SHIFTPACK, MODRM | MMX_SHIFTPACK, MODRM | MMX_SHIFTPACK,
MODRM, MODRM, MODRM, 0,
0, 0, 0, 0,
0, 0, MODRM, MODRM,
/*80*/ 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
/*90*/ MODRM, MODRM, MODRM, MODRM,
MODRM, MODRM, MODRM, MODRM,
MODRM, MODRM, MODRM, MODRM,
MODRM, MODRM, MODRM, MODRM,
/*a0*/ MODRM, MODRM, MODRM, MODRM,
MODRM, MODRM, 0, 0,
MODRM, MODRM, 0, MODRM,
MODRM, MODRM, MODRM, MODRM,
/*b0*/ MODRM, MODRM, MODRM, MODRM,
MODRM, MODRM, MODRM, MODRM,
0, 0, MODRM, MODRM,
MODRM, MODRM, MODRM, MODRM,
/*c0*/ MODRM, MODRM, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
/*d0*/ 0, MODRM | MMX_SHIFTPACK, MODRM | MMX_SHIFTPACK, MODRM | MMX_SHIFTPACK,
0, MODRM | MMX_MULTIPLY, 0, 0,
MODRM, MODRM, 0, MODRM,
MODRM, MODRM, 0, MODRM,
/*e0*/ 0, MODRM | MMX_SHIFTPACK, MODRM | MMX_SHIFTPACK, 0,
0, MODRM | MMX_MULTIPLY, 0, 0,
MODRM, MODRM, 0, MODRM,
MODRM, MODRM, 0, MODRM,
/*f0*/ 0, MODRM | MMX_SHIFTPACK, MODRM | MMX_SHIFTPACK, MODRM | MMX_SHIFTPACK,
0, MODRM | MMX_MULTIPLY, 0, 0,
MODRM, MODRM, MODRM, 0,
MODRM, MODRM, MODRM, 0,
    // clang-format on
};
/* Dependency/decode flags for the 0F 0F (3DNow!) opcode map, indexed by the
   immediate suffix byte; memory-operand forms. Only the implemented 3DNow!
   opcodes are non-zero. */
uint64_t opcode_deps_0f0f[256] = {
    // clang-format off
/*00*/ 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, MODRM, 0, 0,
/*10*/ 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, MODRM, 0, 0,
/*20*/ 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
/*30*/ 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
/*40*/ 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
/*50*/ 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
/*60*/ 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
/*70*/ 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
/*80*/ 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
/*90*/ MODRM, 0, 0, 0,
MODRM, 0, MODRM, MODRM,
0, 0, MODRM, 0,
0, 0, MODRM, 0,
/*a0*/ MODRM, 0, 0, 0,
MODRM, 0, MODRM, MODRM,
0, 0, 0, 0,
0, 0, 0, 0,
/*b0*/ MODRM, 0, 0, 0,
MODRM, 0, MODRM, MODRM,
0, 0, 0, 0,
0, 0, 0, MODRM,
/*c0*/ 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
/*d0*/ 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
/*e0*/ 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
/*f0*/ 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
    // clang-format on
};
/* 0F 0F (3DNow!) opcode map, register-operand (mod == 3) forms. Currently
   identical in content to opcode_deps_0f0f. */
uint64_t opcode_deps_0f0f_mod3[256] = {
    // clang-format off
/*00*/ 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, MODRM, 0, 0,
/*10*/ 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, MODRM, 0, 0,
/*20*/ 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
/*30*/ 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
/*40*/ 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
/*50*/ 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
/*60*/ 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
/*70*/ 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
/*80*/ 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
/*90*/ MODRM, 0, 0, 0,
MODRM, 0, MODRM, MODRM,
0, 0, MODRM, 0,
0, 0, MODRM, 0,
/*a0*/ MODRM, 0, 0, 0,
MODRM, 0, MODRM, MODRM,
0, 0, 0, 0,
0, 0, 0, 0,
/*b0*/ MODRM, 0, 0, 0,
MODRM, 0, MODRM, MODRM,
0, 0, 0, 0,
0, 0, 0, MODRM,
/*c0*/ 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
/*d0*/ 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
/*e0*/ 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
/*f0*/ 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
    // clang-format on
};
/* Shift/rotate group tables (C0/C1/D0/D1 by count or 1, D2/D3 by CL),
   indexed by the ModRM reg field (/0 ROL ... /7 SAR). Memory forms carry
   only MODRM; register (mod == 3) forms read-modify-write the r/m register,
   and the CL variants additionally read ECX. */
uint64_t opcode_deps_shift[8] = {
    // clang-format off
    MODRM, MODRM, MODRM, MODRM,
    MODRM, MODRM, MODRM, MODRM,
    // clang-format on
};
uint64_t opcode_deps_shift_mod3[8] = {
    // clang-format off
    SRCDEP_RM | DSTDEP_RM | MODRM, SRCDEP_RM | DSTDEP_RM | MODRM, SRCDEP_RM | DSTDEP_RM | MODRM, SRCDEP_RM | DSTDEP_RM | MODRM,
    SRCDEP_RM | DSTDEP_RM | MODRM, SRCDEP_RM | DSTDEP_RM | MODRM, SRCDEP_RM | DSTDEP_RM | MODRM, SRCDEP_RM | DSTDEP_RM | MODRM,
    // clang-format on
};
uint64_t opcode_deps_shift_cl[8] = {
    // clang-format off
    MODRM | SRCDEP_ECX, MODRM | SRCDEP_ECX, MODRM | SRCDEP_ECX, MODRM | SRCDEP_ECX,
    MODRM | SRCDEP_ECX, MODRM | SRCDEP_ECX, MODRM | SRCDEP_ECX, MODRM | SRCDEP_ECX,
    // clang-format on
};
uint64_t opcode_deps_shift_cl_mod3[8] = {
    // clang-format off
    SRCDEP_RM | DSTDEP_RM | MODRM | SRCDEP_ECX, SRCDEP_RM | DSTDEP_RM | MODRM | SRCDEP_ECX, SRCDEP_RM | DSTDEP_RM | MODRM | SRCDEP_ECX, SRCDEP_RM | DSTDEP_RM | MODRM | SRCDEP_ECX,
    SRCDEP_RM | DSTDEP_RM | MODRM | SRCDEP_ECX, SRCDEP_RM | DSTDEP_RM | MODRM | SRCDEP_ECX, SRCDEP_RM | DSTDEP_RM | MODRM | SRCDEP_ECX, SRCDEP_RM | DSTDEP_RM | MODRM | SRCDEP_ECX,
    // clang-format on
};
/* Group F6 (byte-sized /0 TEST, /2 NOT, /3 NEG, /4 MUL, /5 IMUL, /6 DIV,
   /7 IDIV), memory-operand forms. Multiplies/divides implicitly use EAX and
   EDX (AL/AH for the byte forms). */
uint64_t opcode_deps_f6[8] = {
    // clang-format off
/* TST NOT NEG*/
MODRM, 0, MODRM, MODRM,
/* MUL IMUL DIV IDIV*/
SRCDEP_EAX | DSTDEP_EAX | DSTDEP_EDX | MODRM, SRCDEP_EAX | DSTDEP_EAX | DSTDEP_EDX | MODRM, SRCDEP_EAX | SRCDEP_EDX | DSTDEP_EAX | DSTDEP_EDX | MODRM, SRCDEP_EAX | SRCDEP_EDX | DSTDEP_EAX | DSTDEP_EDX | MODRM
    // clang-format on
};
/* Group F6, register-operand (mod == 3) forms.
   Fix: the IDIV (/7) entry was missing SRCDEP_RM even though IDIV reads its
   register operand exactly like DIV (/6), MUL (/4) and IMUL (/5) — added for
   consistency so the timing model sees the register dependency. */
uint64_t opcode_deps_f6_mod3[8] = {
    // clang-format off
/* TST NOT NEG*/
SRCDEP_RM | MODRM, 0, SRCDEP_RM | DSTDEP_RM | MODRM, SRCDEP_RM | DSTDEP_RM | MODRM,
/* MUL IMUL DIV IDIV*/
SRCDEP_EAX | DSTDEP_EAX | DSTDEP_EDX | SRCDEP_RM | MODRM, SRCDEP_EAX | DSTDEP_EAX | DSTDEP_EDX | SRCDEP_RM | MODRM, SRCDEP_EAX | SRCDEP_EDX | DSTDEP_EAX | DSTDEP_EDX | SRCDEP_RM | MODRM, SRCDEP_EAX | SRCDEP_EDX | DSTDEP_EAX | DSTDEP_EDX | SRCDEP_RM | MODRM
    // clang-format on
};
/* Group F7 (word/dword-sized TEST/NOT/NEG/MUL/IMUL/DIV/IDIV), memory-operand
   forms. Same layout as opcode_deps_f6. */
uint64_t opcode_deps_f7[8] = {
    // clang-format off
/* TST NOT NEG*/
MODRM, 0, MODRM, MODRM,
/* MUL IMUL DIV IDIV*/
SRCDEP_EAX | DSTDEP_EAX | DSTDEP_EDX | MODRM, SRCDEP_EAX | DSTDEP_EAX | DSTDEP_EDX | MODRM, SRCDEP_EAX | SRCDEP_EDX | DSTDEP_EAX | DSTDEP_EDX | MODRM, SRCDEP_EAX | SRCDEP_EDX | DSTDEP_EAX | DSTDEP_EDX | MODRM
    // clang-format on
};
/* Group F7, register-operand (mod == 3) forms.
   Fix: as in opcode_deps_f6_mod3, the IDIV (/7) entry was missing SRCDEP_RM
   even though IDIV reads its register operand exactly like DIV (/6) — added
   for consistency. */
uint64_t opcode_deps_f7_mod3[8] = {
    // clang-format off
/* TST NOT NEG*/
SRCDEP_RM | MODRM, 0, SRCDEP_RM | DSTDEP_RM | MODRM, SRCDEP_RM | DSTDEP_RM | MODRM,
/* MUL IMUL DIV IDIV*/
SRCDEP_EAX | DSTDEP_EAX | DSTDEP_EDX | SRCDEP_RM | MODRM, SRCDEP_EAX | DSTDEP_EAX | DSTDEP_EDX | SRCDEP_RM | MODRM, SRCDEP_EAX | SRCDEP_EDX | DSTDEP_EAX | DSTDEP_EDX | SRCDEP_RM | MODRM, SRCDEP_EAX | SRCDEP_EDX | DSTDEP_EAX | DSTDEP_EDX | SRCDEP_RM | MODRM
    // clang-format on
};
/* Group FF (/0 INC, /1 DEC, /2 CALL near, /3 CALL far, /4 JMP near,
   /5 JMP far, /6 PUSH; /7 invalid). Near CALL and PUSH implicitly touch ESP. */
uint64_t opcode_deps_ff[8] = {
    // clang-format off
/* INC DEC CALL CALL far*/
MODRM, MODRM, MODRM | IMPL_ESP, MODRM,
/* JMP JMP far PUSH*/
MODRM, MODRM, MODRM | IMPL_ESP, 0
    // clang-format on
};
uint64_t opcode_deps_ff_mod3[8] = {
    // clang-format off
/* INC DEC CALL CALL far*/
SRCDEP_RM | DSTDEP_RM | MODRM, SRCDEP_RM | DSTDEP_RM | MODRM, SRCDEP_RM | MODRM | IMPL_ESP, MODRM,
/* JMP JMP far PUSH*/
SRCDEP_RM | MODRM, MODRM, SRCDEP_RM | MODRM | IMPL_ESP, 0
    // clang-format on
};
/* FPU escape D8/D9 group tables. FPU_* flags track x87 stack effects:
   reads/writes of ST(0) (FPU_READ_ST0/FPU_RW_ST0), the ModRM-selected
   stack register (FPU_*_STREG), and pushes/pops of the register stack. */
uint64_t opcode_deps_d8[8] = {
    // clang-format off
/* FADDs FMULs FCOMs FCOMPs*/
FPU_RW_ST0 | MODRM, FPU_RW_ST0 | MODRM, FPU_READ_ST0 | MODRM, FPU_POP | FPU_READ_ST0 | MODRM,
/* FSUBs FSUBRs FDIVs FDIVRs*/
FPU_RW_ST0 | MODRM, FPU_RW_ST0 | MODRM, FPU_RW_ST0 | MODRM, FPU_RW_ST0 | MODRM
    // clang-format on
};
uint64_t opcode_deps_d8_mod3[8] = {
    // clang-format off
/* FADD FMUL FCOM FCOMP*/
FPU_RW_ST0 | FPU_READ_STREG, FPU_RW_ST0 | FPU_READ_STREG, FPU_READ_ST0 | FPU_READ_STREG, FPU_POP | FPU_READ_ST0 | FPU_READ_STREG,
/* FSUB FSUBR FDIV FDIVR*/
FPU_RW_ST0 | FPU_READ_STREG, FPU_RW_ST0 | FPU_READ_STREG, FPU_RW_ST0 | FPU_READ_STREG, FPU_RW_ST0 | FPU_READ_STREG
    // clang-format on
};
uint64_t opcode_deps_d9[8] = {
    // clang-format off
/* FLDs FSTs FSTPs*/
FPU_PUSH | MODRM, 0, FPU_READ_ST0 | MODRM, FPU_POP | MODRM,
/* FLDENV FLDCW FSTENV FSTCW*/
MODRM, MODRM, MODRM, MODRM
    // clang-format on
};
/* D9 with mod == 3 is decoded on the full low 6 bits of the ModRM byte
   (reg and r/m fields together), hence 64 entries. */
uint64_t opcode_deps_d9_mod3[64] = {
    // clang-format off
/*FLD*/
FPU_PUSH | FPU_READ_STREG, FPU_PUSH | FPU_READ_STREG, FPU_PUSH | FPU_READ_STREG, FPU_PUSH | FPU_READ_STREG,
FPU_PUSH | FPU_READ_STREG, FPU_PUSH | FPU_READ_STREG, FPU_PUSH | FPU_READ_STREG, FPU_PUSH | FPU_READ_STREG,
/*FXCH*/
FPU_FXCH, FPU_FXCH, FPU_FXCH, FPU_FXCH,
FPU_FXCH, FPU_FXCH, FPU_FXCH, FPU_FXCH,
/*FNOP*/
0, 0, 0, 0, 0, 0, 0, 0,
/*FSTP*/
FPU_READ_ST0 | FPU_WRITE_STREG | FPU_POP, FPU_READ_ST0 | FPU_WRITE_STREG | FPU_POP, FPU_READ_ST0 | FPU_WRITE_STREG | FPU_POP, FPU_READ_ST0 | FPU_WRITE_STREG | FPU_POP,
FPU_READ_ST0 | FPU_WRITE_STREG | FPU_POP, FPU_READ_ST0 | FPU_WRITE_STREG | FPU_POP, FPU_READ_ST0 | FPU_WRITE_STREG | FPU_POP, FPU_READ_ST0 | FPU_WRITE_STREG | FPU_POP,
/* opFCHS opFABS*/
0, 0, 0, 0,
/* opFTST opFXAM*/
0, 0, 0, 0,
/* opFLD1 opFLDL2T opFLDL2E opFLDPI*/
FPU_PUSH, FPU_PUSH, FPU_PUSH, FPU_PUSH,
/* opFLDEG2 opFLDLN2 opFLDZ*/
FPU_PUSH, FPU_PUSH, FPU_PUSH, 0,
/* opF2XM1 opFYL2X opFPTAN opFPATAN*/
0, 0, 0, 0,
/* opFDECSTP opFINCSTP,*/
0, 0, 0, 0,
/* opFPREM opFSQRT opFSINCOS*/
0, 0, 0, 0,
/* opFRNDINT opFSCALE opFSIN opFCOS*/
0, 0, 0, 0
    // clang-format on
};
/* FPU escape DA/DB group tables (integer dword arithmetic and
   extended-precision load/store). */
uint64_t opcode_deps_da[8] = {
    // clang-format off
/* FIADDl FIMULl FICOMl FICOMPl*/
FPU_RW_ST0 | MODRM, FPU_RW_ST0 | MODRM, FPU_READ_ST0 | MODRM, FPU_READ_ST0 | FPU_POP | MODRM,
/* FISUBl FISUBRl FIDIVl FIDIVRl*/
FPU_RW_ST0 | MODRM, FPU_RW_ST0 | MODRM, FPU_RW_ST0 | MODRM, FPU_RW_ST0 | MODRM
    // clang-format on
};
uint64_t opcode_deps_da_mod3[8] = {
    // clang-format off
0, 0, 0, 0,
/* FCOMPP*/
0, FPU_POP2, 0, 0
    // clang-format on
};
uint64_t opcode_deps_db[8] = {
    // clang-format off
/* FLDil FSTil FSTPil*/
FPU_PUSH | MODRM, 0, FPU_READ_ST0 | MODRM, FPU_READ_ST0 | FPU_POP | MODRM,
/* FLDe FSTPe*/
0, FPU_PUSH | MODRM, 0, FPU_READ_ST0 | FPU_POP | MODRM
    // clang-format on
};
/* DB with mod == 3 decodes on the full low 6 bits of the ModRM byte; all
   entries (FNCLEX/FNINIT etc.) have no tracked register-stack dependencies. */
uint64_t opcode_deps_db_mod3[64] = {
    // clang-format off
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
/* opFNOP opFCLEX opFINIT*/
0, 0, 0, 0,
/* opFNOP opFNOP*/
0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
    // clang-format on
};
/* FPU escape DC/DD group tables (double-precision arithmetic and
   load/store, plus FRSTOR/FSAVE/FSTSW control ops). */
uint64_t opcode_deps_dc[8] = {
    // clang-format off
/* FADDd FMULd FCOMd FCOMPd*/
FPU_RW_ST0 | MODRM, FPU_RW_ST0 | MODRM, FPU_READ_ST0 | MODRM, FPU_READ_ST0 | FPU_POP | MODRM,
/* FSUBd FSUBRd FDIVd FDIVRd*/
FPU_RW_ST0 | MODRM, FPU_RW_ST0 | MODRM, FPU_RW_ST0 | MODRM, FPU_RW_ST0 | MODRM
    // clang-format on
};
uint64_t opcode_deps_dc_mod3[8] = {
    // clang-format off
/* opFADDr opFMULr*/
FPU_READ_ST0 | FPU_RW_STREG, FPU_READ_ST0 | FPU_RW_STREG, 0, 0,
/* opFSUBRr opFSUBr opFDIVRr opFDIVr*/
FPU_READ_ST0 | FPU_RW_STREG, FPU_READ_ST0 | FPU_RW_STREG, FPU_READ_ST0 | FPU_RW_STREG, FPU_READ_ST0 | FPU_RW_STREG
    // clang-format on
};
uint64_t opcode_deps_dd[8] = {
    // clang-format off
/* FLDd FSTd FSTPd*/
FPU_PUSH | MODRM, 0, FPU_READ_ST0 | MODRM, FPU_READ_ST0 | FPU_POP | MODRM,
/* FRSTOR FSAVE FSTSW*/
MODRM, 0, MODRM, MODRM
    // clang-format on
};
uint64_t opcode_deps_dd_mod3[8] = {
    // clang-format off
/* FFFREE FST FSTP*/
0, 0, FPU_READ_ST0 | FPU_WRITE_STREG, FPU_READ_ST0 | FPU_WRITE_STREG | FPU_POP,
/* FUCOM FUCOMP*/
FPU_READ_ST0 | FPU_READ_STREG, FPU_READ_ST0 | FPU_READ_STREG | FPU_POP, 0, 0
    // clang-format on
};
/* FPU escape DE/DF group tables (word integer arithmetic, pop-variant
   register arithmetic, and integer/BCD load/store). */
uint64_t opcode_deps_de[8] = {
    // clang-format off
/* FIADDw FIMULw FICOMw FICOMPw*/
FPU_RW_ST0 | MODRM, FPU_RW_ST0 | MODRM, FPU_READ_ST0 | MODRM, FPU_READ_ST0 | FPU_POP | MODRM,
/* FISUBw FISUBRw FIDIVw FIDIVRw*/
FPU_RW_ST0 | MODRM, FPU_RW_ST0 | MODRM, FPU_RW_ST0 | MODRM, FPU_RW_ST0 | MODRM
    // clang-format on
};
uint64_t opcode_deps_de_mod3[8] = {
    // clang-format off
/* FADDP FMULP FCOMPP*/
FPU_READ_ST0 | FPU_RW_STREG | FPU_POP, FPU_READ_ST0 | FPU_RW_STREG | FPU_POP, 0, FPU_READ_ST0 | FPU_READ_ST1 | FPU_POP2,
/* FSUBP FSUBRP FDIVP FDIVRP*/
FPU_READ_ST0 | FPU_RW_STREG | FPU_POP, FPU_READ_ST0 | FPU_RW_STREG | FPU_POP, FPU_READ_ST0 | FPU_RW_STREG | FPU_POP, FPU_READ_ST0 | FPU_RW_STREG | FPU_POP
    // clang-format on
};
uint64_t opcode_deps_df[8] = {
    // clang-format off
/* FILDiw FISTiw FISTPiw*/
FPU_PUSH | MODRM, 0, FPU_READ_ST0 | MODRM, FPU_READ_ST0 | FPU_POP | MODRM,
/* FILDiq FBSTP FISTPiq*/
0, FPU_PUSH | MODRM, FPU_READ_ST0 | FPU_POP | MODRM, FPU_READ_ST0 | FPU_POP | MODRM
    // clang-format on
};
uint64_t opcode_deps_df_mod3[8] = {
    // clang-format off
0, 0, 0, 0,
/* FSTSW AX*/
0, 0, 0, 0
    // clang-format on
};
/* Immediate-group tables: 81 (ALU op r/m, imm16/32) and the imm8 groups
   (80/82/83), indexed by the ModRM reg field (/0 ADD ... /7 CMP). In the
   mod == 3 forms every op read-modify-writes the register operand except
   /7 CMP, which only reads it. */
uint64_t opcode_deps_81[8] = {
    // clang-format off
    MODRM | HAS_IMM1632, MODRM | HAS_IMM1632, MODRM | HAS_IMM1632, MODRM | HAS_IMM1632,
    MODRM | HAS_IMM1632, MODRM | HAS_IMM1632, MODRM | HAS_IMM1632, MODRM | HAS_IMM1632
    // clang-format on
};
uint64_t opcode_deps_81_mod3[8] = {
    // clang-format off
    SRCDEP_RM | DSTDEP_RM | MODRM | HAS_IMM1632, SRCDEP_RM | DSTDEP_RM | MODRM | HAS_IMM1632, SRCDEP_RM | DSTDEP_RM | MODRM | HAS_IMM1632, SRCDEP_RM | DSTDEP_RM | MODRM | HAS_IMM1632,
    SRCDEP_RM | DSTDEP_RM | MODRM | HAS_IMM1632, SRCDEP_RM | DSTDEP_RM | MODRM | HAS_IMM1632, SRCDEP_RM | DSTDEP_RM | MODRM | HAS_IMM1632, SRCDEP_RM | MODRM | HAS_IMM1632
    // clang-format on
};
uint64_t opcode_deps_8x[8] = {
    // clang-format off
    MODRM | HAS_IMM8, MODRM | HAS_IMM8, MODRM | HAS_IMM8, MODRM | HAS_IMM8,
    MODRM | HAS_IMM8, MODRM | HAS_IMM8, MODRM | HAS_IMM8, MODRM | HAS_IMM8
    // clang-format on
};
uint64_t opcode_deps_8x_mod3[8] = {
    // clang-format off
    SRCDEP_RM | DSTDEP_RM | MODRM | HAS_IMM8, SRCDEP_RM | DSTDEP_RM | MODRM | HAS_IMM8, SRCDEP_RM | DSTDEP_RM | MODRM | HAS_IMM8, SRCDEP_RM | DSTDEP_RM | MODRM | HAS_IMM8,
    SRCDEP_RM | DSTDEP_RM | MODRM | HAS_IMM8, SRCDEP_RM | DSTDEP_RM | MODRM | HAS_IMM8, SRCDEP_RM | DSTDEP_RM | MODRM | HAS_IMM8, SRCDEP_RM | MODRM | HAS_IMM8
    // clang-format on
};
``` | /content/code_sandbox/src/cpu/codegen_timing_common.c | c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 15,717 |
```objective-c
#define REP_OPS(size, CNT_REG, SRC_REG, DEST_REG) \
static int opREP_INSB_##size(uint32_t fetchdat) \
{ \
int reads = 0, writes = 0, total_cycles = 0; \
\
addr64 = 0x00000000; \
\
if (CNT_REG > 0) { \
uint8_t temp; \
\
SEG_CHECK_WRITE(&cpu_state.seg_es); \
check_io_perm(DX, 1); \
CHECK_WRITE(&cpu_state.seg_es, DEST_REG, DEST_REG); \
high_page = 0; \
do_mmut_wb(es, DEST_REG, &addr64); \
if (cpu_state.abrt) \
return 1; \
temp = inb(DX); \
writememb_n(es, DEST_REG, addr64, temp); \
if (cpu_state.abrt) \
return 1; \
\
if (cpu_state.flags & D_FLAG) \
DEST_REG--; \
else \
DEST_REG++; \
CNT_REG--; \
cycles -= 15; \
reads++; \
writes++; \
total_cycles += 15; \
} \
PREFETCH_RUN(total_cycles, 1, -1, reads, 0, writes, 0, 0); \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
static int opREP_INSW_##size(uint32_t fetchdat) \
{ \
int reads = 0, writes = 0, total_cycles = 0; \
\
addr64a[0] = addr64a[1] = 0x00000000; \
\
if (CNT_REG > 0) { \
uint16_t temp; \
\
SEG_CHECK_WRITE(&cpu_state.seg_es); \
check_io_perm(DX, 2); \
CHECK_WRITE(&cpu_state.seg_es, DEST_REG, DEST_REG + 1UL); \
high_page = 0; \
do_mmut_ww(es, DEST_REG, addr64a); \
if (cpu_state.abrt) \
return 1; \
temp = inw(DX); \
writememw_n(es, DEST_REG, addr64a, temp); \
if (cpu_state.abrt) \
return 1; \
\
if (cpu_state.flags & D_FLAG) \
DEST_REG -= 2; \
else \
DEST_REG += 2; \
CNT_REG--; \
cycles -= 15; \
reads++; \
writes++; \
total_cycles += 15; \
} \
PREFETCH_RUN(total_cycles, 1, -1, reads, 0, writes, 0, 0); \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
static int opREP_INSL_##size(uint32_t fetchdat) \
{ \
int reads = 0, writes = 0, total_cycles = 0; \
\
addr64a[0] = addr64a[1] = addr64a[2] = addr64a[3] = 0x00000000; \
\
if (CNT_REG > 0) { \
uint32_t temp; \
\
SEG_CHECK_WRITE(&cpu_state.seg_es); \
check_io_perm(DX, 4); \
CHECK_WRITE(&cpu_state.seg_es, DEST_REG, DEST_REG + 3UL); \
high_page = 0; \
do_mmut_wl(es, DEST_REG, addr64a); \
if (cpu_state.abrt) \
return 1; \
temp = inl(DX); \
writememl_n(es, DEST_REG, addr64a, temp); \
if (cpu_state.abrt) \
return 1; \
\
if (cpu_state.flags & D_FLAG) \
DEST_REG -= 4; \
else \
DEST_REG += 4; \
CNT_REG--; \
cycles -= 15; \
reads++; \
writes++; \
total_cycles += 15; \
} \
PREFETCH_RUN(total_cycles, 1, -1, 0, reads, 0, writes, 0); \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
\
static int opREP_OUTSB_##size(uint32_t fetchdat) \
{ \
int reads = 0, writes = 0, total_cycles = 0; \
\
if (CNT_REG > 0) { \
uint8_t temp; \
SEG_CHECK_READ(cpu_state.ea_seg); \
CHECK_READ(cpu_state.ea_seg, SRC_REG, SRC_REG); \
temp = readmemb(cpu_state.ea_seg->base, SRC_REG); \
if (cpu_state.abrt) \
return 1; \
check_io_perm(DX, 1); \
outb(DX, temp); \
if (cpu_state.flags & D_FLAG) \
SRC_REG--; \
else \
SRC_REG++; \
CNT_REG--; \
cycles -= 14; \
reads++; \
writes++; \
total_cycles += 14; \
} \
PREFETCH_RUN(total_cycles, 1, -1, reads, 0, writes, 0, 0); \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
static int opREP_OUTSW_##size(uint32_t fetchdat) \
{ \
int reads = 0, writes = 0, total_cycles = 0; \
\
if (CNT_REG > 0) { \
uint16_t temp; \
SEG_CHECK_READ(cpu_state.ea_seg); \
CHECK_READ(cpu_state.ea_seg, SRC_REG, SRC_REG + 1UL); \
temp = readmemw(cpu_state.ea_seg->base, SRC_REG); \
if (cpu_state.abrt) \
return 1; \
check_io_perm(DX, 2); \
outw(DX, temp); \
if (cpu_state.flags & D_FLAG) \
SRC_REG -= 2; \
else \
SRC_REG += 2; \
CNT_REG--; \
cycles -= 14; \
reads++; \
writes++; \
total_cycles += 14; \
} \
PREFETCH_RUN(total_cycles, 1, -1, reads, 0, writes, 0, 0); \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
static int opREP_OUTSL_##size(uint32_t fetchdat) \
{ \
int reads = 0, writes = 0, total_cycles = 0; \
\
if (CNT_REG > 0) { \
uint32_t temp; \
SEG_CHECK_READ(cpu_state.ea_seg); \
CHECK_READ(cpu_state.ea_seg, SRC_REG, SRC_REG + 3UL); \
temp = readmeml(cpu_state.ea_seg->base, SRC_REG); \
if (cpu_state.abrt) \
return 1; \
check_io_perm(DX, 4); \
outl(DX, temp); \
if (cpu_state.flags & D_FLAG) \
SRC_REG -= 4; \
else \
SRC_REG += 4; \
CNT_REG--; \
cycles -= 14; \
reads++; \
writes++; \
total_cycles += 14; \
} \
PREFETCH_RUN(total_cycles, 1, -1, 0, reads, 0, writes, 0); \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
\
static int opREP_MOVSB_##size(uint32_t fetchdat) \
{ \
int reads = 0, writes = 0, total_cycles = 0; \
int cycles_end = cycles - ((is386 && cpu_use_dynarec) ? 1000 : 100); \
addr64 = addr64_2 = 0x00000000; \
if (trap) \
cycles_end = cycles + 1; /*Force the instruction to end after only one iteration when trap flag set*/ \
if (CNT_REG > 0) { \
SEG_CHECK_READ(cpu_state.ea_seg); \
SEG_CHECK_WRITE(&cpu_state.seg_es); \
} \
while (CNT_REG > 0) { \
uint8_t temp; \
\
CHECK_READ_REP(cpu_state.ea_seg, SRC_REG, SRC_REG); \
CHECK_WRITE_REP(&cpu_state.seg_es, DEST_REG, DEST_REG); \
high_page = 0; \
do_mmut_rb(cpu_state.ea_seg->base, SRC_REG, &addr64); \
if (cpu_state.abrt) \
break; \
do_mmut_wb(es, DEST_REG, &addr64_2); \
if (cpu_state.abrt) \
break; \
temp = readmemb_n(cpu_state.ea_seg->base, SRC_REG, addr64); \
if (cpu_state.abrt) \
return 1; \
writememb_n(es, DEST_REG, addr64_2, temp); \
if (cpu_state.abrt) \
return 1; \
\
if (cpu_state.flags & D_FLAG) { \
DEST_REG--; \
SRC_REG--; \
} else { \
DEST_REG++; \
SRC_REG++; \
} \
CNT_REG--; \
cycles -= is486 ? 3 : 4; \
reads++; \
writes++; \
total_cycles += is486 ? 3 : 4; \
if (cycles < cycles_end) \
break; \
} \
PREFETCH_RUN(total_cycles, 1, -1, reads, 0, writes, 0, 0); \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
static int opREP_MOVSW_##size(uint32_t fetchdat) \
{ \
int reads = 0, writes = 0, total_cycles = 0; \
int cycles_end = cycles - ((is386 && cpu_use_dynarec) ? 1000 : 100); \
addr64a[0] = addr64a[1] = 0x00000000; \
addr64a_2[0] = addr64a_2[1] = 0x00000000; \
if (trap) \
cycles_end = cycles + 1; /*Force the instruction to end after only one iteration when trap flag set*/ \
if (CNT_REG > 0) { \
SEG_CHECK_READ(cpu_state.ea_seg); \
SEG_CHECK_WRITE(&cpu_state.seg_es); \
} \
while (CNT_REG > 0) { \
uint16_t temp; \
\
CHECK_READ_REP(cpu_state.ea_seg, SRC_REG, SRC_REG + 1UL); \
CHECK_WRITE_REP(&cpu_state.seg_es, DEST_REG, DEST_REG + 1UL); \
high_page = 0; \
do_mmut_rw(cpu_state.ea_seg->base, SRC_REG, addr64a); \
if (cpu_state.abrt) \
break; \
do_mmut_ww(es, DEST_REG, addr64a_2); \
if (cpu_state.abrt) \
break; \
temp = readmemw_n(cpu_state.ea_seg->base, SRC_REG, addr64a); \
if (cpu_state.abrt) \
return 1; \
writememw_n(es, DEST_REG, addr64a_2, temp); \
if (cpu_state.abrt) \
return 1; \
\
if (cpu_state.flags & D_FLAG) { \
DEST_REG -= 2; \
SRC_REG -= 2; \
} else { \
DEST_REG += 2; \
SRC_REG += 2; \
} \
CNT_REG--; \
cycles -= is486 ? 3 : 4; \
reads++; \
writes++; \
total_cycles += is486 ? 3 : 4; \
if (cycles < cycles_end) \
break; \
} \
PREFETCH_RUN(total_cycles, 1, -1, reads, 0, writes, 0, 0); \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
static int opREP_MOVSL_##size(uint32_t fetchdat) \
{ \
int reads = 0, writes = 0, total_cycles = 0; \
int cycles_end = cycles - ((is386 && cpu_use_dynarec) ? 1000 : 100); \
addr64a[0] = addr64a[1] = addr64a[2] = addr64a[3] = 0x00000000; \
addr64a_2[0] = addr64a_2[1] = addr64a_2[2] = addr64a_2[3] = 0x00000000; \
if (trap) \
cycles_end = cycles + 1; /*Force the instruction to end after only one iteration when trap flag set*/ \
if (CNT_REG > 0) { \
SEG_CHECK_READ(cpu_state.ea_seg); \
SEG_CHECK_WRITE(&cpu_state.seg_es); \
} \
while (CNT_REG > 0) { \
uint32_t temp; \
\
CHECK_READ_REP(cpu_state.ea_seg, SRC_REG, SRC_REG + 3UL); \
CHECK_WRITE_REP(&cpu_state.seg_es, DEST_REG, DEST_REG + 3UL); \
high_page = 0; \
do_mmut_rl(cpu_state.ea_seg->base, SRC_REG, addr64a); \
if (cpu_state.abrt) \
break; \
do_mmut_wl(es, DEST_REG, addr64a_2); \
if (cpu_state.abrt) \
break; \
temp = readmeml_n(cpu_state.ea_seg->base, SRC_REG, addr64a); \
if (cpu_state.abrt) \
return 1; \
writememl_n(es, DEST_REG, addr64a_2, temp); \
if (cpu_state.abrt) \
return 1; \
\
if (cpu_state.flags & D_FLAG) { \
DEST_REG -= 4; \
SRC_REG -= 4; \
} else { \
DEST_REG += 4; \
SRC_REG += 4; \
} \
CNT_REG--; \
cycles -= is486 ? 3 : 4; \
reads++; \
writes++; \
total_cycles += is486 ? 3 : 4; \
if (cycles < cycles_end) \
break; \
} \
PREFETCH_RUN(total_cycles, 1, -1, reads, 0, writes, 0, 0); \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
\
static int opREP_STOSB_##size(uint32_t fetchdat) \
{ \
int writes = 0, total_cycles = 0; \
int cycles_end = cycles - ((is386 && cpu_use_dynarec) ? 1000 : 100); \
if (trap) \
cycles_end = cycles + 1; /*Force the instruction to end after only one iteration when trap flag set*/ \
if (CNT_REG > 0) \
SEG_CHECK_WRITE(&cpu_state.seg_es); \
while (CNT_REG > 0) { \
CHECK_WRITE_REP(&cpu_state.seg_es, DEST_REG, DEST_REG); \
writememb(es, DEST_REG, AL); \
if (cpu_state.abrt) \
return 1; \
if (cpu_state.flags & D_FLAG) \
DEST_REG--; \
else \
DEST_REG++; \
CNT_REG--; \
cycles -= is486 ? 4 : 5; \
writes++; \
total_cycles += is486 ? 4 : 5; \
if (cycles < cycles_end) \
break; \
} \
PREFETCH_RUN(total_cycles, 1, -1, 0, 0, writes, 0, 0); \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
static int opREP_STOSW_##size(uint32_t fetchdat) \
{ \
int writes = 0, total_cycles = 0; \
int cycles_end = cycles - ((is386 && cpu_use_dynarec) ? 1000 : 100); \
if (trap) \
cycles_end = cycles + 1; /*Force the instruction to end after only one iteration when trap flag set*/ \
if (CNT_REG > 0) \
SEG_CHECK_WRITE(&cpu_state.seg_es); \
while (CNT_REG > 0) { \
CHECK_WRITE_REP(&cpu_state.seg_es, DEST_REG, DEST_REG + 1UL); \
writememw(es, DEST_REG, AX); \
if (cpu_state.abrt) \
return 1; \
if (cpu_state.flags & D_FLAG) \
DEST_REG -= 2; \
else \
DEST_REG += 2; \
CNT_REG--; \
cycles -= is486 ? 4 : 5; \
writes++; \
total_cycles += is486 ? 4 : 5; \
if (cycles < cycles_end) \
break; \
} \
PREFETCH_RUN(total_cycles, 1, -1, 0, 0, writes, 0, 0); \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
static int opREP_STOSL_##size(uint32_t fetchdat) \
{ \
int writes = 0, total_cycles = 0; \
int cycles_end = cycles - ((is386 && cpu_use_dynarec) ? 1000 : 100); \
if (trap) \
cycles_end = cycles + 1; /*Force the instruction to end after only one iteration when trap flag set*/ \
if (CNT_REG > 0) \
SEG_CHECK_WRITE(&cpu_state.seg_es); \
while (CNT_REG > 0) { \
CHECK_WRITE_REP(&cpu_state.seg_es, DEST_REG, DEST_REG + 3UL); \
writememl(es, DEST_REG, EAX); \
if (cpu_state.abrt) \
return 1; \
if (cpu_state.flags & D_FLAG) \
DEST_REG -= 4; \
else \
DEST_REG += 4; \
CNT_REG--; \
cycles -= is486 ? 4 : 5; \
writes++; \
total_cycles += is486 ? 4 : 5; \
if (cycles < cycles_end) \
break; \
} \
PREFETCH_RUN(total_cycles, 1, -1, 0, 0, 0, writes, 0); \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
\
static int opREP_LODSB_##size(uint32_t fetchdat) \
{ \
int reads = 0, total_cycles = 0; \
int cycles_end = cycles - ((is386 && cpu_use_dynarec) ? 1000 : 100); \
if (trap) \
cycles_end = cycles + 1; /*Force the instruction to end after only one iteration when trap flag set*/ \
if (CNT_REG > 0) \
SEG_CHECK_READ(cpu_state.ea_seg); \
while (CNT_REG > 0) { \
CHECK_READ_REP(cpu_state.ea_seg, SRC_REG, SRC_REG); \
AL = readmemb(cpu_state.ea_seg->base, SRC_REG); \
if (cpu_state.abrt) \
return 1; \
if (cpu_state.flags & D_FLAG) \
SRC_REG--; \
else \
SRC_REG++; \
CNT_REG--; \
cycles -= is486 ? 4 : 5; \
reads++; \
total_cycles += is486 ? 4 : 5; \
if (cycles < cycles_end) \
break; \
} \
PREFETCH_RUN(total_cycles, 1, -1, reads, 0, 0, 0, 0); \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
static int opREP_LODSW_##size(uint32_t fetchdat) \
{ \
int reads = 0, total_cycles = 0; \
int cycles_end = cycles - ((is386 && cpu_use_dynarec) ? 1000 : 100); \
if (trap) \
cycles_end = cycles + 1; /*Force the instruction to end after only one iteration when trap flag set*/ \
if (CNT_REG > 0) \
SEG_CHECK_READ(cpu_state.ea_seg); \
while (CNT_REG > 0) { \
CHECK_READ_REP(cpu_state.ea_seg, SRC_REG, SRC_REG + 1UL); \
AX = readmemw(cpu_state.ea_seg->base, SRC_REG); \
if (cpu_state.abrt) \
return 1; \
if (cpu_state.flags & D_FLAG) \
SRC_REG -= 2; \
else \
SRC_REG += 2; \
CNT_REG--; \
cycles -= is486 ? 4 : 5; \
reads++; \
total_cycles += is486 ? 4 : 5; \
if (cycles < cycles_end) \
break; \
} \
PREFETCH_RUN(total_cycles, 1, -1, reads, 0, 0, 0, 0); \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
static int opREP_LODSL_##size(uint32_t fetchdat) \
{ \
int reads = 0, total_cycles = 0; \
int cycles_end = cycles - ((is386 && cpu_use_dynarec) ? 1000 : 100); \
if (trap) \
cycles_end = cycles + 1; /*Force the instruction to end after only one iteration when trap flag set*/ \
if (CNT_REG > 0) \
SEG_CHECK_READ(cpu_state.ea_seg); \
while (CNT_REG > 0) { \
CHECK_READ_REP(cpu_state.ea_seg, SRC_REG, SRC_REG + 3UL); \
EAX = readmeml(cpu_state.ea_seg->base, SRC_REG); \
if (cpu_state.abrt) \
return 1; \
if (cpu_state.flags & D_FLAG) \
SRC_REG -= 4; \
else \
SRC_REG += 4; \
CNT_REG--; \
cycles -= is486 ? 4 : 5; \
reads++; \
total_cycles += is486 ? 4 : 5; \
if (cycles < cycles_end) \
break; \
} \
PREFETCH_RUN(total_cycles, 1, -1, 0, reads, 0, 0, 0); \
if (CNT_REG > 0) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
}
#define CHEK_READ(a, b, c)
#define REP_OPS_CMPS_SCAS(size, CNT_REG, SRC_REG, DEST_REG, FV) \
static int opREP_CMPSB_##size(uint32_t fetchdat) \
{ \
int reads = 0, total_cycles = 0, tempz; \
\
addr64 = addr64_2 = 0x00000000; \
\
tempz = FV; \
if ((CNT_REG > 0) && (FV == tempz)) { \
uint8_t temp, temp2; \
SEG_CHECK_READ(cpu_state.ea_seg); \
SEG_CHECK_READ(&cpu_state.seg_es); \
CHECK_READ(cpu_state.ea_seg, SRC_REG, SRC_REG); \
CHECK_READ(&cpu_state.seg_es, DEST_REG, DEST_REG); \
high_page = uncached = 0; \
do_mmut_rb(cpu_state.ea_seg->base, SRC_REG, &addr64); \
if (cpu_state.abrt) \
return 1; \
do_mmut_rb2(es, DEST_REG, &addr64_2); \
if (cpu_state.abrt) \
return 1; \
temp = readmemb_n(cpu_state.ea_seg->base, SRC_REG, addr64); \
if (cpu_state.abrt) \
return 1; \
temp2 = readmemb_n(es, DEST_REG, addr64_2); \
if (cpu_state.abrt) \
return 1; \
\
if (cpu_state.flags & D_FLAG) { \
DEST_REG--; \
SRC_REG--; \
} else { \
DEST_REG++; \
SRC_REG++; \
} \
CNT_REG--; \
cycles -= is486 ? 7 : 9; \
reads += 2; \
total_cycles += is486 ? 7 : 9; \
setsub8(temp, temp2); \
tempz = (ZF_SET()) ? 1 : 0; \
} \
PREFETCH_RUN(total_cycles, 1, -1, reads, 0, 0, 0, 0); \
if ((CNT_REG > 0) && (FV == tempz)) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
static int opREP_CMPSW_##size(uint32_t fetchdat) \
{ \
int reads = 0, total_cycles = 0, tempz; \
\
addr64a[0] = addr64a[1] = 0x00000000; \
addr64a_2[0] = addr64a_2[1] = 0x00000000; \
\
tempz = FV; \
if ((CNT_REG > 0) && (FV == tempz)) { \
uint16_t temp, temp2; \
SEG_CHECK_READ(cpu_state.ea_seg); \
SEG_CHECK_READ(&cpu_state.seg_es); \
CHECK_READ(cpu_state.ea_seg, SRC_REG, SRC_REG + 1UL); \
CHECK_READ(&cpu_state.seg_es, DEST_REG, DEST_REG + 1UL); \
high_page = uncached = 0; \
do_mmut_rw(cpu_state.ea_seg->base, SRC_REG, addr64a); \
if (cpu_state.abrt) \
return 1; \
do_mmut_rw2(es, DEST_REG, addr64a_2); \
if (cpu_state.abrt) \
return 1; \
temp = readmemw_n(cpu_state.ea_seg->base, SRC_REG, addr64a); \
if (cpu_state.abrt) \
return 1; \
temp2 = readmemw_n(es, DEST_REG, addr64a_2); \
if (cpu_state.abrt) \
return 1; \
\
if (cpu_state.flags & D_FLAG) { \
DEST_REG -= 2; \
SRC_REG -= 2; \
} else { \
DEST_REG += 2; \
SRC_REG += 2; \
} \
CNT_REG--; \
cycles -= is486 ? 7 : 9; \
reads += 2; \
total_cycles += is486 ? 7 : 9; \
setsub16(temp, temp2); \
tempz = (ZF_SET()) ? 1 : 0; \
} \
PREFETCH_RUN(total_cycles, 1, -1, reads, 0, 0, 0, 0); \
if ((CNT_REG > 0) && (FV == tempz)) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
static int opREP_CMPSL_##size(uint32_t fetchdat) \
{ \
int reads = 0, total_cycles = 0, tempz; \
\
addr64a[0] = addr64a[1] = addr64a[2] = addr64a[3] = 0x00000000; \
addr64a_2[0] = addr64a_2[1] = addr64a_2[2] = addr64a_2[3] = 0x00000000; \
\
tempz = FV; \
if ((CNT_REG > 0) && (FV == tempz)) { \
uint32_t temp, temp2; \
SEG_CHECK_READ(cpu_state.ea_seg); \
SEG_CHECK_READ(&cpu_state.seg_es); \
CHECK_READ(cpu_state.ea_seg, SRC_REG, SRC_REG + 3UL); \
CHECK_READ(&cpu_state.seg_es, DEST_REG, DEST_REG + 3UL); \
high_page = uncached = 0; \
do_mmut_rl(cpu_state.ea_seg->base, SRC_REG, addr64a); \
if (cpu_state.abrt) \
return 1; \
do_mmut_rl2(es, DEST_REG, addr64a_2); \
if (cpu_state.abrt) \
return 1; \
temp = readmeml_n(cpu_state.ea_seg->base, SRC_REG, addr64a); \
if (cpu_state.abrt) \
return 1; \
temp2 = readmeml_n(es, DEST_REG, addr64a_2); \
if (cpu_state.abrt) \
return 1; \
\
if (cpu_state.flags & D_FLAG) { \
DEST_REG -= 4; \
SRC_REG -= 4; \
} else { \
DEST_REG += 4; \
SRC_REG += 4; \
} \
CNT_REG--; \
cycles -= is486 ? 7 : 9; \
reads += 2; \
total_cycles += is486 ? 7 : 9; \
setsub32(temp, temp2); \
tempz = (ZF_SET()) ? 1 : 0; \
} \
PREFETCH_RUN(total_cycles, 1, -1, 0, reads, 0, 0, 0); \
if ((CNT_REG > 0) && (FV == tempz)) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
\
static int opREP_SCASB_##size(uint32_t fetchdat) \
{ \
int reads = 0, total_cycles = 0, tempz; \
int cycles_end = cycles - ((is386 && cpu_use_dynarec) ? 1000 : 100); \
if (trap) \
cycles_end = cycles + 1; /*Force the instruction to end after only one iteration when trap flag set*/ \
tempz = FV; \
if ((CNT_REG > 0) && (FV == tempz)) \
SEG_CHECK_READ(&cpu_state.seg_es); \
while ((CNT_REG > 0) && (FV == tempz)) { \
CHECK_READ_REP(&cpu_state.seg_es, DEST_REG, DEST_REG); \
uint8_t temp = readmemb(es, DEST_REG); \
if (cpu_state.abrt) \
break; \
setsub8(AL, temp); \
tempz = (ZF_SET()) ? 1 : 0; \
if (cpu_state.flags & D_FLAG) \
DEST_REG--; \
else \
DEST_REG++; \
CNT_REG--; \
cycles -= is486 ? 5 : 8; \
reads++; \
total_cycles += is486 ? 5 : 8; \
if (cycles < cycles_end) \
break; \
} \
PREFETCH_RUN(total_cycles, 1, -1, reads, 0, 0, 0, 0); \
if ((CNT_REG > 0) && (FV == tempz)) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
static int opREP_SCASW_##size(uint32_t fetchdat) \
{ \
int reads = 0, total_cycles = 0, tempz; \
int cycles_end = cycles - ((is386 && cpu_use_dynarec) ? 1000 : 100); \
if (trap) \
cycles_end = cycles + 1; /*Force the instruction to end after only one iteration when trap flag set*/ \
tempz = FV; \
if ((CNT_REG > 0) && (FV == tempz)) \
SEG_CHECK_READ(&cpu_state.seg_es); \
while ((CNT_REG > 0) && (FV == tempz)) { \
CHECK_READ_REP(&cpu_state.seg_es, DEST_REG, DEST_REG + 1UL); \
uint16_t temp = readmemw(es, DEST_REG); \
if (cpu_state.abrt) \
break; \
setsub16(AX, temp); \
tempz = (ZF_SET()) ? 1 : 0; \
if (cpu_state.flags & D_FLAG) \
DEST_REG -= 2; \
else \
DEST_REG += 2; \
CNT_REG--; \
cycles -= is486 ? 5 : 8; \
reads++; \
total_cycles += is486 ? 5 : 8; \
if (cycles < cycles_end) \
break; \
} \
PREFETCH_RUN(total_cycles, 1, -1, reads, 0, 0, 0, 0); \
if ((CNT_REG > 0) && (FV == tempz)) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
} \
static int opREP_SCASL_##size(uint32_t fetchdat) \
{ \
int reads = 0, total_cycles = 0, tempz; \
int cycles_end = cycles - ((is386 && cpu_use_dynarec) ? 1000 : 100); \
if (trap) \
cycles_end = cycles + 1; /*Force the instruction to end after only one iteration when trap flag set*/ \
tempz = FV; \
if ((CNT_REG > 0) && (FV == tempz)) \
SEG_CHECK_READ(&cpu_state.seg_es); \
while ((CNT_REG > 0) && (FV == tempz)) { \
CHECK_READ_REP(&cpu_state.seg_es, DEST_REG, DEST_REG + 3UL); \
uint32_t temp = readmeml(es, DEST_REG); \
if (cpu_state.abrt) \
break; \
setsub32(EAX, temp); \
tempz = (ZF_SET()) ? 1 : 0; \
if (cpu_state.flags & D_FLAG) \
DEST_REG -= 4; \
else \
DEST_REG += 4; \
CNT_REG--; \
cycles -= is486 ? 5 : 8; \
reads++; \
total_cycles += is486 ? 5 : 8; \
if (cycles < cycles_end) \
break; \
} \
PREFETCH_RUN(total_cycles, 1, -1, 0, reads, 0, 0, 0); \
if ((CNT_REG > 0) && (FV == tempz)) { \
CPU_BLOCK_END(); \
cpu_state.pc = cpu_state.oldpc; \
return 1; \
} \
return cpu_state.abrt; \
}
/* Instantiate the REP string-op handlers for both address sizes:
   a16 uses CX/SI/DI, a32 uses ECX/ESI/EDI.  The CMPS/SCAS family is
   additionally split by termination condition: _NE continues while ZF == 0
   (REPNE) and _E continues while ZF == 1 (REPE). */
REP_OPS(a16, CX, SI, DI)
REP_OPS(a32, ECX, ESI, EDI)
REP_OPS_CMPS_SCAS(a16_NE, CX, SI, DI, 0)
REP_OPS_CMPS_SCAS(a16_E, CX, SI, DI, 1)
REP_OPS_CMPS_SCAS(a32_NE, ECX, ESI, EDI, 0)
REP_OPS_CMPS_SCAS(a32_E, ECX, ESI, EDI, 1)
/* REPNE (0xF2) prefix handler: fetches the following opcode and dispatches
   it through the REPNE-specific table when an entry exists, otherwise
   through the plain opcode table. */
static int
opREPNE(uint32_t fetchdat)
{
    uint32_t idx;

    fetchdat = fastreadl_fetch(cs + cpu_state.pc);
    if (cpu_state.abrt)
        return 1;
    cpu_state.pc++;
    CLOCK_CYCLES(2);
    PREFETCH_PREFIX();

    /* Compute the table index once: low opcode byte plus the operand-size bit. */
    idx = (fetchdat & 0xff) | cpu_state.op32;
    if (x86_2386_opcodes_REPNE[idx])
        return x86_2386_opcodes_REPNE[idx](fetchdat >> 8);
    return x86_2386_opcodes[idx](fetchdat >> 8);
}
/* REPE (0xF3) prefix handler: fetches the following opcode and dispatches
   it through the REPE-specific table when an entry exists, otherwise
   through the plain opcode table. */
static int
opREPE(uint32_t fetchdat)
{
    uint32_t idx;

    fetchdat = fastreadl_fetch(cs + cpu_state.pc);
    if (cpu_state.abrt)
        return 1;
    cpu_state.pc++;
    CLOCK_CYCLES(2);
    PREFETCH_PREFIX();

    /* Compute the table index once: low opcode byte plus the operand-size bit. */
    idx = (fetchdat & 0xff) | cpu_state.op32;
    if (x86_2386_opcodes_REPE[idx])
        return x86_2386_opcodes_REPE[idx](fetchdat >> 8);
    return x86_2386_opcodes[idx](fetchdat >> 8);
}
``` | /content/code_sandbox/src/cpu/x86_ops_rep_2386.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 9,988 |
```objective-c
/* F2XM1: ST(0) = 2^ST(0) - 1 (softfloat path).  Forces 80-bit internal
   precision regardless of the precision-control field. */
static int
sf_F2XM1(uint32_t fetchdat)
{
    floatx80 result;
    struct softfloat_status_t status;

    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++;
    clear_C1();
    if (IS_TAG_EMPTY(0)) {
        FPU_stack_underflow(fetchdat, 0, 0);
        goto next_ins;
    }
    status = i387cw_to_softfloat_status_word(i387_get_control_word() | FPU_PR_80_BITS);
    result = f2xm1(FPU_read_regi(0), &status);
    /* Only commit the result if no unmasked exception was raised. */
    if (!FPU_exception(fetchdat, status.softfloat_exceptionFlags, 0))
        FPU_save_regi(result, 0);

next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.f2xm1) : (x87_timings.f2xm1 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.f2xm1) : (x87_concurrency.f2xm1 * cpu_multi));
    return 0;
}
/* FYL2X: ST(1) = ST(1) * log2(ST(0)), then pop -- here implemented by
   popping first and storing into the new ST(0). */
static int
sf_FYL2X(uint32_t fetchdat)
{
    floatx80 result;
    struct softfloat_status_t status;

    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++;
    clear_C1();
    if (IS_TAG_EMPTY(0) || IS_TAG_EMPTY(1)) {
        FPU_stack_underflow(fetchdat, 1, 1);
        goto next_ins;
    }
    status = i387cw_to_softfloat_status_word(i387_get_control_word() | FPU_PR_80_BITS);
    result = fyl2x(FPU_read_regi(0), FPU_read_regi(1), &status);
    if (!FPU_exception(fetchdat, status.softfloat_exceptionFlags, 0)) {
        FPU_pop();
        FPU_save_regi(result, 0);
    }

next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fyl2x) : (x87_timings.fyl2x * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fyl2x) : (x87_concurrency.fyl2x * cpu_multi));
    return 0;
}
/* FPTAN: ST(0) = tan(ST(0)), then push 1.0.  Requires one free register
   above the top of stack for the push.  If the operand is out of range
   (|ST(0)| >= 2^63, signalled by ftan() returning -1) C2 is set and the
   stack is left unchanged. */
static int
sf_FPTAN(uint32_t fetchdat)
{
    const floatx80 floatx80_default_nan = packFloatx80(0, floatx80_default_nan_exp, floatx80_default_nan_fraction);
    floatx80 y;
    struct softfloat_status_t status;

    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++;
    clear_C1();
    clear_C2();
    /* Need ST(0) valid and ST(-1) (the push target) free. */
    if (IS_TAG_EMPTY(0) || !IS_TAG_EMPTY(-1)) {
        if (IS_TAG_EMPTY(0))
            FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0);
        else
            FPU_exception(fetchdat, FPU_EX_Stack_Overflow, 0);
        /* The masked response */
        if (is_IA_masked()) {
            FPU_save_regi(floatx80_default_nan, 0);
            FPU_push();
            FPU_save_regi(floatx80_default_nan, 0);
        }
        goto next_ins;
    }
    status = i387cw_to_softfloat_status_word(i387_get_control_word() | FPU_PR_80_BITS);
    y = FPU_read_regi(0);
    if (ftan(&y, &status) == -1) {
        /* Operand out of range: flag C2, leave ST(0) untouched. */
        fpu_state.swd |= FPU_SW_C2;
        goto next_ins;
    }
    if (extF80_isNaN(y)) {
        /* NaN result: propagate the NaN into both ST(0) and the pushed slot. */
        if (!FPU_exception(fetchdat, status.softfloat_exceptionFlags, 0)) {
            FPU_save_regi(y, 0);
            FPU_push();
            FPU_save_regi(y, 0);
        }
        goto next_ins;
    }
    if (!FPU_exception(fetchdat, status.softfloat_exceptionFlags, 0)) {
        FPU_save_regi(y, 0);
        FPU_push();
        FPU_save_regi(Const_1, 0);
    }

next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fptan) : (x87_timings.fptan * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fptan) : (x87_concurrency.fptan * cpu_multi));
    return 0;
}
/* FPATAN: ST(1) = arctan(ST(1) / ST(0)), then pop.
   Fix: clear C1 on entry, matching every sibling transcendental handler in
   this file (F2XM1, FYL2X, FPTAN, FYL2XP1, FSIN, FCOS, FSCALE) and the
   documented FPATAN flag behavior (C1 cleared when no stack underflow and
   the result is not rounded up). */
static int
sf_FPATAN(uint32_t fetchdat)
{
    floatx80 a;
    floatx80 b;
    floatx80 result;
    struct softfloat_status_t status;

    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++;
    clear_C1();
    if (IS_TAG_EMPTY(0) || IS_TAG_EMPTY(1)) {
        FPU_stack_underflow(fetchdat, 1, 1);
        goto next_ins;
    }
    a = FPU_read_regi(0);
    b = FPU_read_regi(1);
    status = i387cw_to_softfloat_status_word(i387_get_control_word() | FPU_PR_80_BITS);
    result = fpatan(a, b, &status);
    /* Only commit (and pop) if no unmasked exception was raised. */
    if (!FPU_exception(fetchdat, status.softfloat_exceptionFlags, 0)) {
        FPU_pop();
        FPU_save_regi(result, 0);
    }

next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fpatan) : (x87_timings.fpatan * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fpatan) : (x87_concurrency.fpatan * cpu_multi));
    return 0;
}
/* FXTRACT: split ST(0) into exponent and significand -- the exponent
   replaces ST(0), then the significand is pushed, ending up in the new
   ST(0).  The stack overflow/underflow pre-check is present but disabled
   (see the #if 0 blocks). */
static int
sf_FXTRACT(uint32_t fetchdat)
{
    struct softfloat_status_t status;
    floatx80 a;
    floatx80 b;
#if 0
    const floatx80 floatx80_default_nan = packFloatx80(0, floatx80_default_nan_exp, floatx80_default_nan_fraction);
#endif

    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++;
    clear_C1();
#if 0 // TODO
    if ((IS_TAG_EMPTY(0) || IS_TAG_EMPTY(-1))) {
        if (IS_TAG_EMPTY(0))
            FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0);
        else
            FPU_exception(fetchdat, FPU_EX_Stack_Overflow, 0);
        /* The masked response */
        if (is_IA_masked()) {
            FPU_save_regi(floatx80_default_nan, 0);
            FPU_push();
            FPU_save_regi(floatx80_default_nan, 0);
        }
        goto next_ins;
    }
#endif
    /* Note: precision control is honored here (no FPU_PR_80_BITS override). */
    status = i387cw_to_softfloat_status_word(i387_get_control_word());
    a = FPU_read_regi(0);
    b = extF80_extract(&a, &status); /* a becomes the significand, b the exponent */
    if (!FPU_exception(fetchdat, status.softfloat_exceptionFlags, 0)) {
        FPU_save_regi(b, 0); // exponent
        FPU_push();
        FPU_save_regi(a, 0); // fraction
    }
#if 0 // TODO.
next_ins:
#endif
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fxtract) : (x87_timings.fxtract * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fxtract) : (x87_concurrency.fxtract * cpu_multi));
    return 0;
}
/* FPREM1: IEEE 754 partial remainder, ST(0) = ST(0) REM ST(1).
   When the reduction is incomplete the helper reports it via 'flags' and C2
   is set; otherwise the three low bits of the quotient are exposed in
   C1/C3/C0 as the x87 architecture requires. */
static int
sf_FPREM1(uint32_t fetchdat)
{
    floatx80 a;
    floatx80 b;
    floatx80 result;
    struct softfloat_status_t status;
    uint64_t quotient = 0;
    int flags;
    int cc;

    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++;
    clear_C1();
    clear_C2();
    if (IS_TAG_EMPTY(0) || IS_TAG_EMPTY(1)) {
        FPU_stack_underflow(fetchdat, 0, 0);
        goto next_ins;
    }
    status = i387cw_to_softfloat_status_word(i387_get_control_word());
    a = FPU_read_regi(0);
    b = FPU_read_regi(1);
    flags = floatx80_ieee754_remainder(a, b, &result, &quotient, &status);
    if (!FPU_exception(fetchdat, status.softfloat_exceptionFlags, 0)) {
        if (flags >= 0) {
            cc = 0;
            if (flags)
                cc = FPU_SW_C2; /* reduction incomplete */
            else {
                /* Quotient bits 0..2 map to C1, C3, C0 respectively. */
                if (quotient & 1)
                    cc |= FPU_SW_C1;
                if (quotient & 2)
                    cc |= FPU_SW_C3;
                if (quotient & 4)
                    cc |= FPU_SW_C0;
            }
            setcc(cc);
        }
        FPU_save_regi(result, 0);
    }

next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fprem1) : (x87_timings.fprem1 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fprem1) : (x87_concurrency.fprem1 * cpu_multi));
    return 0;
}
/* FPREM: legacy (truncating) partial remainder, ST(0) = ST(0) REM ST(1).
   Same condition-code reporting as FPREM1, but uses the 8087-compatible
   truncated quotient instead of IEEE round-to-nearest. */
static int
sf_FPREM(uint32_t fetchdat)
{
    floatx80 a;
    floatx80 b;
    floatx80 result;
    struct softfloat_status_t status;
    uint64_t quotient = 0;
    int flags;
    int cc;

    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++;
    clear_C1();
    clear_C2();
    if (IS_TAG_EMPTY(0) || IS_TAG_EMPTY(1)) {
        FPU_stack_underflow(fetchdat, 0, 0);
        goto next_ins;
    }
    status = i387cw_to_softfloat_status_word(i387_get_control_word());
    a = FPU_read_regi(0);
    b = FPU_read_regi(1);
    // handle unsupported extended double-precision floating encodings
    flags = floatx80_remainder(a, b, &result, &quotient, &status);
    if (!FPU_exception(fetchdat, status.softfloat_exceptionFlags, 0)) {
        if (flags >= 0) {
            cc = 0;
            if (flags)
                cc = FPU_SW_C2; /* reduction incomplete */
            else {
                /* Quotient bits 0..2 map to C1, C3, C0 respectively. */
                if (quotient & 1)
                    cc |= FPU_SW_C1;
                if (quotient & 2)
                    cc |= FPU_SW_C3;
                if (quotient & 4)
                    cc |= FPU_SW_C0;
            }
            setcc(cc);
        }
        FPU_save_regi(result, 0);
    }

next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fprem) : (x87_timings.fprem * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fprem) : (x87_concurrency.fprem * cpu_multi));
    return 0;
}
/* FYL2XP1: ST(1) = ST(1) * log2(ST(0) + 1), then pop -- here implemented by
   storing into ST(1) and then popping (equivalent to FYL2X's pop-then-store
   order above). */
static int
sf_FYL2XP1(uint32_t fetchdat)
{
    floatx80 result;
    struct softfloat_status_t status;

    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++;
    clear_C1();
    if (IS_TAG_EMPTY(0) || IS_TAG_EMPTY(1)) {
        FPU_stack_underflow(fetchdat, 1, 1);
        goto next_ins;
    }
    status = i387cw_to_softfloat_status_word(i387_get_control_word() | FPU_PR_80_BITS);
    result = fyl2xp1(FPU_read_regi(0), FPU_read_regi(1), &status);
    if (!FPU_exception(fetchdat, status.softfloat_exceptionFlags, 0)) {
        FPU_save_regi(result, 1);
        FPU_pop();
    }

next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fyl2xp1) : (x87_timings.fyl2xp1 * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fyl2xp1) : (x87_concurrency.fyl2xp1 * cpu_multi));
    return 0;
}
#ifndef FPU_8087
/* FSINCOS (387+): ST(0) = sin(ST(0)), then push cos of the original value.
   Needs one free register for the push; an out-of-range operand
   (fsincos() returning -1) sets C2 and leaves the stack unchanged. */
static int
sf_FSINCOS(uint32_t fetchdat)
{
    const floatx80 floatx80_default_nan = packFloatx80(0, floatx80_default_nan_exp, floatx80_default_nan_fraction);
    struct softfloat_status_t status;
    floatx80 y;
    floatx80 sin_y;
    floatx80 cos_y;

    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++;
    clear_C1();
    clear_C2();
    /* Need ST(0) valid and ST(-1) (the push target) free. */
    if (IS_TAG_EMPTY(0) || !IS_TAG_EMPTY(-1)) {
        if (IS_TAG_EMPTY(0))
            FPU_exception(fetchdat, FPU_EX_Stack_Underflow, 0);
        else
            FPU_exception(fetchdat, FPU_EX_Stack_Overflow, 0);
        /* The masked response */
        if (is_IA_masked()) {
            FPU_save_regi(floatx80_default_nan, 0);
            FPU_push();
            FPU_save_regi(floatx80_default_nan, 0);
        }
        goto next_ins;
    }
    status = i387cw_to_softfloat_status_word(i387_get_control_word() | FPU_PR_80_BITS);
    y = FPU_read_regi(0);
    if (fsincos(y, &sin_y, &cos_y, &status) == -1) {
        fpu_state.swd |= FPU_SW_C2;
        goto next_ins;
    }
    if (!FPU_exception(fetchdat, status.softfloat_exceptionFlags, 0)) {
        FPU_save_regi(sin_y, 0);
        FPU_push();
        FPU_save_regi(cos_y, 0);
    }

next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fsincos) : (x87_timings.fsincos * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fsincos) : (x87_concurrency.fsincos * cpu_multi));
    return 0;
}
#endif
/* FSCALE: ST(0) = ST(0) * 2^trunc(ST(1)).  Precision control is honored
   (no FPU_PR_80_BITS override here). */
static int
sf_FSCALE(uint32_t fetchdat)
{
    floatx80 result;
    struct softfloat_status_t status;

    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++;
    clear_C1();
    if (IS_TAG_EMPTY(0) || IS_TAG_EMPTY(1)) {
        FPU_stack_underflow(fetchdat, 0, 0);
        goto next_ins;
    }
    status = i387cw_to_softfloat_status_word(i387_get_control_word());
    result = extF80_scale(FPU_read_regi(0), FPU_read_regi(1), &status);
    if (!FPU_exception(fetchdat, status.softfloat_exceptionFlags, 0))
        FPU_save_regi(result, 0);

next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fscale) : (x87_timings.fscale * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fscale) : (x87_concurrency.fscale * cpu_multi));
    return 0;
}
#ifndef FPU_8087
/* FSIN (387+): ST(0) = sin(ST(0)).  An out-of-range operand (fsin()
   returning -1) sets C2 and leaves ST(0) unchanged. */
static int
sf_FSIN(uint32_t fetchdat)
{
    floatx80 y;
    struct softfloat_status_t status;

    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++;
    clear_C1();
    clear_C2();
    if (IS_TAG_EMPTY(0)) {
        FPU_stack_underflow(fetchdat, 0, 0);
        goto next_ins;
    }
    status = i387cw_to_softfloat_status_word(i387_get_control_word() | FPU_PR_80_BITS);
    y = FPU_read_regi(0);
    if (fsin(&y, &status) == -1) {
        fpu_state.swd |= FPU_SW_C2;
        goto next_ins;
    }
    if (!FPU_exception(fetchdat, status.softfloat_exceptionFlags, 0))
        FPU_save_regi(y, 0);

next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fsin_cos) : (x87_timings.fsin_cos * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fsin_cos) : (x87_concurrency.fsin_cos * cpu_multi));
    return 0;
}
/* FCOS (387+): ST(0) = cos(ST(0)).  An out-of-range operand (fcos()
   returning -1) sets C2 and leaves ST(0) unchanged. */
static int
sf_FCOS(uint32_t fetchdat)
{
    floatx80 y;
    struct softfloat_status_t status;

    FP_ENTER();
    FPU_check_pending_exceptions();
    cpu_state.pc++;
    clear_C1();
    clear_C2();
    if (IS_TAG_EMPTY(0)) {
        FPU_stack_underflow(fetchdat, 0, 0);
        goto next_ins;
    }
    status = i387cw_to_softfloat_status_word(i387_get_control_word() | FPU_PR_80_BITS);
    y = FPU_read_regi(0);
    if (fcos(&y, &status) == -1) {
        fpu_state.swd |= FPU_SW_C2;
        goto next_ins;
    }
    if (!FPU_exception(fetchdat, status.softfloat_exceptionFlags, 0))
        FPU_save_regi(y, 0);

next_ins:
    CLOCK_CYCLES_FPU((fpu_type >= FPU_487SX) ? (x87_timings.fsin_cos) : (x87_timings.fsin_cos * cpu_multi));
    CONCURRENCY_CYCLES((fpu_type >= FPU_487SX) ? (x87_concurrency.fsin_cos) : (x87_concurrency.fsin_cos * cpu_multi));
    return 0;
}
#endif
``` | /content/code_sandbox/src/cpu/x87_ops_sf_trans.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 4,132 |
```objective-c
/*
* 86Box A hypervisor and IBM PC system emulator that specializes in
* running old operating systems and software designed for IBM
* PC systems and compatibles from 1981 through fairly recent
* system designs based on the PCI bus.
*
* This file is part of the 86Box distribution.
*
* Second CPU header.
*
*
*
 * Authors: Sarah Walker, <https://pcem-emulator.co.uk/>
* leilei,
* Miran Grca, <mgrca8@gmail.com>
*
*/
#ifndef EMU_X86_H
#define EMU_X86_H

/* Mask extracting the abort-reason code from cpu_state.abrt. */
#define ABRT_MASK 0x3f
/*An 'expected' exception is one that would be expected to occur on every execution
  of this code path; eg a GPF due to being in v86 mode. An 'unexpected' exception is
  one that would be unlikely to occur on the next execution, eg a page fault may be
  fixed up by the exception handler and the next execution would not hit it.
  This distinction is used by the dynarec; a block that hits an 'expected' exception
  would be compiled, a block that hits an 'unexpected' exception would be rejected so
  that we don't end up with an unnecessarily short block*/
#define ABRT_EXPECTED 0x80

extern uint8_t opcode;
extern uint8_t flags_p;
extern uint8_t znptable8[256];

extern uint16_t zero;
extern uint16_t oldcs;
extern uint16_t lastcs;
extern uint16_t lastpc;
extern uint16_t *mod1add[2][8];
extern uint16_t znptable16[65536];

extern int x86_was_reset;
extern int trap;
extern int codegen_flat_ss;
extern int codegen_flat_ds;
extern int timetolive;
extern int keyboardtimer;
/* Fix: duplicate 'extern int trap;' declaration removed (it appeared twice). */
extern int optype;
extern int stack32;
extern int oldcpl;
extern int cpl_override;
extern int nmi_enable;
extern int oddeven;
extern int inttype;
extern int cpu_init;

extern uint32_t use32;
extern uint32_t rmdat;
extern uint32_t easeg;
extern uint32_t oxpc;
extern uint32_t flags_zn;
extern uint32_t abrt_error;
extern uint32_t backupregs[16];
extern uint32_t *mod1seg[8];
extern uint32_t *eal_r;
extern uint32_t *eal_w;

extern int fpu_cycles;

#define fetchdat  rmdat
#define setznp168 setznp16

/* 8-bit register accessors: indices 0-3 select the low byte (AL/CL/DL/BL),
   4-7 the high byte (AH/CH/DH/BH) of the corresponding 16-bit register. */
#define getr8(r)  ((r & 4) ? cpu_state.regs[r & 3].b.h : cpu_state.regs[r & 3].b.l)
#define getr16(r) cpu_state.regs[r].w
#define getr32(r) cpu_state.regs[r].l

/* NOTE(review): setr8 expands to a bare if/else, not a do { } while (0);
   using it unbraced as the body of an outer if would mis-bind the else.
   Left as-is because call sites outside this view may rely on the trailing
   semicolon -- confirm before hardening. */
#define setr8(r, v)                      \
    if (r & 4)                           \
        cpu_state.regs[r & 3].b.h = v;   \
    else                                 \
        cpu_state.regs[r & 3].b.l = v;
#define setr16(r, v) cpu_state.regs[r].w = v
#define setr32(r, v) cpu_state.regs[r].l = v

/* Decode a ModR/M byte at cs:pc into reg/mod/rm and, for memory operands
   (mod != 3), compute the effective address via fetcheal(). */
#define fetchea()                   \
    {                               \
        rmdat = readmemb(cs + pc);  \
        pc++;                       \
        reg = (rmdat >> 3) & 7;     \
        mod = (rmdat >> 6) & 3;     \
        rm  = rmdat & 7;            \
        if (mod != 3)               \
            fetcheal();             \
    }

extern void x86illegal(void);

#endif /*EMU_X86_H*/
``` | /content/code_sandbox/src/cpu/x86.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 819 |
```objective-c
/* INT3 (0xCC): one-byte software breakpoint.
   Optionally handed to an attached GDB stub first; in virtual-8086 mode with
   IOPL < 3 it raises #GP, otherwise it dispatches software interrupt 3. */
static int
opINT3(uint32_t fetchdat)
{
    int cycles_old = cycles;
    UN_USED(cycles_old);
#ifdef USE_GDBSTUB
    /* Give the debugger stub first claim on the breakpoint. */
    if (gdbstub_int3())
        return 1;
#endif
    /* cr0 bit 0 = PE; with VM_FLAG set we are in virtual-8086 mode. */
    if ((cr0 & 1) && (cpu_state.eflags & VM_FLAG) && (IOPL != 3)) {
        x86gpf(NULL, 0);
        return 1;
    }
    x86_int_sw(3);
    CLOCK_CYCLES((is486) ? 44 : 59);
    PREFETCH_RUN(cycles_old - cycles, 1, -1, 0, 0, 0, 0, 0);
    return 1;
}
/* INT1 (0xF1, a.k.a. ICEBP): dispatches software interrupt 1.
   Same V86-mode IOPL gate as INT3. */
static int
opINT1(uint32_t fetchdat)
{
    int cycles_old = cycles;
    UN_USED(cycles_old);
    if ((cr0 & 1) && (cpu_state.eflags & VM_FLAG) && (IOPL != 3)) {
        x86gpf(NULL, 0);
        return 1;
    }
    x86_int_sw(1);
    CLOCK_CYCLES((is486) ? 44 : 59);
    PREFETCH_RUN(cycles_old - cycles, 1, -1, 0, 0, 0, 0, 0);
    return 1;
}
/* INT imm8 (0xCD): software interrupt with an explicit vector.
   In V86 mode with IOPL < 3, the VME extensions (CR4.VME) are consulted:
   the interrupt redirection bitmap in the TSS decides whether the vector
   is reflected to the virtual-8086 monitor (#GP) or redirected directly
   through the real-mode IVT (x86_int_sw_rm). */
static int
opINT(uint32_t fetchdat)
{
    int cycles_old = cycles;
    UN_USED(cycles_old);
    uint8_t temp = getbytef(); /* interrupt vector from the instruction stream */
    if ((cr0 & 1) && (cpu_state.eflags & VM_FLAG) && (IOPL != 3)) {
        if (cr4 & CR4_VME) {
            uint16_t t;
            uint8_t d;
            /* Read the I/O bitmap base from TSS offset 0x66; the interrupt
               redirection bitmap sits 32 bytes below it. cpl_override is set
               around the access so the TSS read is not privilege-checked. */
            cpl_override = 1;
            t = readmemw(tr.base, 0x66) - 32;
            cpl_override = 0;
            if (cpu_state.abrt)
                return 1;
            t += (temp >> 3); /* byte of the bitmap that holds this vector's bit */
            if (t <= tr.limit) {
                cpl_override = 1;
                d = readmemb(tr.base, t); // + (temp >> 3));
                cpl_override = 0;
                if (cpu_state.abrt)
                    return 1;
                /* Clear bit => redirect the interrupt via the real-mode IVT. */
                if (!(d & (1 << (temp & 7)))) {
                    x86_int_sw_rm(temp);
                    PREFETCH_RUN(cycles_old - cycles, 2, -1, 0, 0, 0, 0, 0);
                    return 1;
                }
            }
        }
        /* "Expected" #GP: the V86 monitor normally handles this, so the
           dynarec may still compile blocks containing it (see ABRT_EXPECTED). */
        x86gpf_expected(NULL, 0);
        return 1;
    }
    x86_int_sw(temp);
    PREFETCH_RUN(cycles_old - cycles, 2, -1, 0, 0, 0, 0, 0);
    return 1;
}
/* INTO (0xCE): raise interrupt 4 if the overflow flag is set, else no-op.
   Same V86-mode IOPL gate as the other software interrupts. */
static int
opINTO(uint32_t fetchdat)
{
    int cycles_old = cycles;
    UN_USED(cycles_old);
    if ((cr0 & 1) && (cpu_state.eflags & VM_FLAG) && (IOPL != 3)) {
        x86gpf(NULL, 0);
        return 1;
    }
    if (VF_SET()) {
        cpu_state.oldpc = cpu_state.pc; /* fault address for the handler */
        x86_int_sw(4);
        PREFETCH_RUN(cycles_old - cycles, 1, -1, 0, 0, 0, 0, 0);
        return 1;
    }
    /* Overflow clear: fall through as a 3-cycle no-op. */
    CLOCK_CYCLES(3);
    PREFETCH_RUN(3, 1, -1, 0, 0, 0, 0, 0);
    return 0;
}
``` | /content/code_sandbox/src/cpu/x86_ops_int.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 798 |
```objective-c
/*
* 86Box A hypervisor and IBM PC system emulator that specializes in
* running old operating systems and software designed for IBM
* PC systems and compatibles from 1981 through fairly recent
* system designs based on the PCI bus.
*
* This file is part of the 86Box distribution.
*
* AMD SYSCALL and SYSRET CPU Instructions.
*
*
*
* Authors: Miran Grca, <mgrca8@gmail.com>
*/
/* SYSCALL: AMD fast system call.
   #UD unless the SCE (System Call Extensions) bit, bit 0 of EFER, is set.
   The actual transition is performed by syscall_op(); on success the
   translated block must end, since control flow has left this stream. */
static int
opSYSCALL(uint32_t fetchdat)
{
    int ret;
    ILLEGAL_ON(!(msr.amd_efer & 0x0000000000000001)); /* EFER.SCE */
    ret = syscall_op(fetchdat);
    if (ret <= 1) {
        CLOCK_CYCLES(20);
        PREFETCH_RUN(20, 7, -1, 0, 0, 0, 0, 0);
        PREFETCH_FLUSH();
        CPU_BLOCK_END(); /* control transferred; stop the current block */
    }
    return ret;
}
/* SYSRET: return from an AMD fast system call.
   Mirrors opSYSCALL: gated on EFER.SCE, delegates to sysret(), and ends
   the translated block on success. */
static int
opSYSRET(uint32_t fetchdat)
{
    int ret;
    ILLEGAL_ON(!(msr.amd_efer & 0x0000000000000001)); /* EFER.SCE */
    ret = sysret(fetchdat);
    if (ret <= 1) {
        CLOCK_CYCLES(20);
        PREFETCH_RUN(20, 7, -1, 0, 0, 0, 0, 0);
        PREFETCH_FLUSH();
        CPU_BLOCK_END(); /* control transferred; stop the current block */
    }
    return ret;
}
``` | /content/code_sandbox/src/cpu/x86_ops_amd.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 319 |
```objective-c
/* CMC: complement the carry flag. */
static int
opCMC(uint32_t fetchdat)
{
    flags_rebuild(); /* materialise the flags word before toggling CF */
    cpu_state.flags ^= C_FLAG;
    CLOCK_CYCLES(2);
    PREFETCH_RUN(2, 1, -1, 0, 0, 0, 0, 0);
    return 0;
}
/* CLC: clear the carry flag. */
static int
opCLC(uint32_t fetchdat)
{
    flags_rebuild(); /* materialise the flags word before clearing CF */
    cpu_state.flags &= ~C_FLAG;
    CLOCK_CYCLES(2);
    PREFETCH_RUN(2, 1, -1, 0, 0, 0, 0, 0);
    return 0;
}
/* CLD: clear the direction flag (string ops auto-increment).
   DF is not part of the lazily-computed arithmetic flags, so no
   flags_rebuild() is needed here. */
static int
opCLD(uint32_t fetchdat)
{
    cpu_state.flags &= ~D_FLAG;
    CLOCK_CYCLES(2);
    PREFETCH_RUN(2, 1, -1, 0, 0, 0, 0, 0);
    return 0;
}
/* CLI: clear the interrupt flag.
   Without I/O privilege (IOPLp false), the PVI (protected mode) and VME
   (virtual-8086) extensions let CLI clear the virtual interrupt flag VIF
   instead of faulting; otherwise it raises #GP. */
static int
opCLI(uint32_t fetchdat)
{
    if (!IOPLp) {
        if ((!(cpu_state.eflags & VM_FLAG) && (cr4 & CR4_PVI)) || ((cpu_state.eflags & VM_FLAG) && (cr4 & CR4_VME))) {
            cpu_state.eflags &= ~VIF_FLAG; /* virtual IF, not the real IF */
        } else {
            x86gpf(NULL, 0);
            return 1;
        }
    } else
        cpu_state.flags &= ~I_FLAG;
    CLOCK_CYCLES(3);
    PREFETCH_RUN(3, 1, -1, 0, 0, 0, 0, 0);
    return 0;
}
/* STC: set the carry flag. */
static int
opSTC(uint32_t fetchdat)
{
    flags_rebuild(); /* materialise the flags word before setting CF */
    cpu_state.flags |= C_FLAG;
    CLOCK_CYCLES(2);
    PREFETCH_RUN(2, 1, -1, 0, 0, 0, 0, 0);
    return 0;
}
/* STD: set the direction flag (string ops auto-decrement). */
static int
opSTD(uint32_t fetchdat)
{
    cpu_state.flags |= D_FLAG;
    CLOCK_CYCLES(2);
    PREFETCH_RUN(2, 1, -1, 0, 0, 0, 0, 0);
    return 0;
}
/* STI: set the interrupt flag.
   Without I/O privilege, PVI/VME allow setting the virtual interrupt flag
   VIF instead — but fault with #GP if a virtual interrupt is already
   pending (VIP set). Also implements the one-instruction STI "interrupt
   shadow" by forcing block termination after the next instruction. */
static int
opSTI(uint32_t fetchdat)
{
    if (!IOPLp) {
        if ((!(cpu_state.eflags & VM_FLAG) && (cr4 & CR4_PVI)) || ((cpu_state.eflags & VM_FLAG) && (cr4 & CR4_VME))) {
            if (cpu_state.eflags & VIP_FLAG) {
                x86gpf(NULL, 0); /* pending virtual interrupt: monitor must run */
                return 1;
            } else
                cpu_state.eflags |= VIF_FLAG;
        } else {
            x86gpf(NULL, 0);
            return 1;
        }
    } else
        cpu_state.flags |= I_FLAG;
    /*First instruction after STI will always execute, regardless of whether
      there is a pending interrupt*/
    cpu_end_block_after_ins = 2;
    CLOCK_CYCLES(2);
    PREFETCH_RUN(2, 1, -1, 0, 0, 0, 0, 0);
    return 0;
}
/* SAHF: store AH into the low flags byte.
   Mask 0xd5 keeps only SF/ZF/AF/PF/CF; |2 forces the always-set reserved
   bit 1 of FLAGS. */
static int
opSAHF(uint32_t fetchdat)
{
    flags_rebuild();
    cpu_state.flags = (cpu_state.flags & 0xff00) | (AH & 0xd5) | 2;
    CLOCK_CYCLES(3);
    PREFETCH_RUN(3, 1, -1, 0, 0, 0, 0, 0);
#if (defined(USE_DYNAREC) && defined(USE_NEW_DYNAREC))
    /* Flags were just written explicitly, so the dynarec's lazy-flag
       state is now in sync — presumably why the dirty marker is reset. */
    codegen_flags_changed = 0;
#endif
    return 0;
}
/* LAHF: load the low flags byte into AH. */
static int
opLAHF(uint32_t fetchdat)
{
    flags_rebuild(); /* flags must be materialised before reading them */
    AH = cpu_state.flags & 0xff;
    CLOCK_CYCLES(3);
    PREFETCH_RUN(3, 1, -1, 0, 0, 0, 0, 0);
    return 0;
}
/* PUSHF: push the 16-bit flags word.
   In V86 mode with IOPL < 3: fault with #GP unless CR4.VME is set, in
   which case the pushed image has IOPL forced to 3 and IF replaced by
   the virtual interrupt flag VIF. */
static int
opPUSHF(uint32_t fetchdat)
{
    if ((cpu_state.eflags & VM_FLAG) && (IOPL < 3)) {
        if (cr4 & CR4_VME) {
            uint16_t temp;
            flags_rebuild();
            temp = (cpu_state.flags & ~I_FLAG) | 0x3000; /* IOPL image = 3 */
            if (cpu_state.eflags & VIF_FLAG)
                temp |= I_FLAG; /* guest sees VIF as IF */
            PUSH_W(temp);
        } else {
            x86gpf(NULL, 0);
            return 1;
        }
    } else {
        flags_rebuild();
        PUSH_W(cpu_state.flags);
    }
    CLOCK_CYCLES(4);
    PREFETCH_RUN(4, 1, -1, 0, 0, 1, 0, 0);
    return cpu_state.abrt; /* non-zero if the push faulted */
}
/* PUSHFD: push the 32-bit EFLAGS.
   The visible high-word bits depend on CPU generation: 0x3c (AC/VIF/VIP/ID)
   when the CPU supports VME, 0x24 (AC/ID) with CPUID only, 0x04 (AC)
   otherwise. VM and RF (high-word bits 0-1) are always cleared in the
   pushed image. */
static int
opPUSHFD(uint32_t fetchdat)
{
    uint16_t tempw;
    if ((cpu_state.eflags & VM_FLAG) && (IOPL < 3)) {
        x86gpf(NULL, 0);
        return 1;
    }
    if (cpu_CR4_mask & CR4_VME)
        tempw = cpu_state.eflags & 0x3c;
    else if (CPUID)
        tempw = cpu_state.eflags & 0x24;
    else
        tempw = cpu_state.eflags & 4;
    flags_rebuild();
    PUSH_L(cpu_state.flags | (tempw << 16));
    CLOCK_CYCLES(4);
    PREFETCH_RUN(4, 1, -1, 0, 0, 0, 1, 0);
    return cpu_state.abrt; /* non-zero if the push faulted */
}
/* POPF, 186 variant: pop the 16-bit flags word.
   Which bits actually change depends on mode/privilege:
   - real mode (msw bit 0 clear): bits 12-14 (IOPL/NT) are preserved;
   - CPL 0: all modifiable bits (0x7fd5) load;
   - IOPL >= CPL: everything but IOPL (0x4fd5) loads;
   - otherwise: IF is preserved too (0x4dd5).
   NOTE(review): body is identical to opPOPF_286 below — presumably kept
   as separate entries for the per-CPU opcode tables. */
static int
opPOPF_186(uint32_t fetchdat)
{
    uint16_t tempw;
    if ((cpu_state.eflags & VM_FLAG) && (IOPL < 3)) {
        x86gpf(NULL, 0);
        return 1;
    }
    tempw = POP_W();
    if (cpu_state.abrt)
        return 1;
    if (!(msw & 1))
        cpu_state.flags = (cpu_state.flags & 0x7000) | (tempw & 0x0fd5) | 2;
    else if (!(CPL))
        cpu_state.flags = (tempw & 0x7fd5) | 2;
    else if (IOPLp)
        cpu_state.flags = (cpu_state.flags & 0x3000) | (tempw & 0x4fd5) | 2;
    else
        cpu_state.flags = (cpu_state.flags & 0x3200) | (tempw & 0x4dd5) | 2;
    flags_extract(); /* re-derive lazy flag state from the new flags word */
#ifdef USE_DEBUG_REGS_486
    rf_flag_no_clear = 1;
#endif
    CLOCK_CYCLES(5);
    PREFETCH_RUN(5, 1, -1, 1, 0, 0, 0, 0);
#if (defined(USE_DYNAREC) && defined(USE_NEW_DYNAREC))
    codegen_flags_changed = 0;
#endif
    return 0;
}
/* POPF, 286 variant: pop the 16-bit flags word.
   See opPOPF_186 for the per-mode bit-loading rules; the two handlers are
   currently byte-identical and presumably exist as separate entries for
   the per-CPU opcode tables. */
static int
opPOPF_286(uint32_t fetchdat)
{
    uint16_t tempw;
    if ((cpu_state.eflags & VM_FLAG) && (IOPL < 3)) {
        x86gpf(NULL, 0);
        return 1;
    }
    tempw = POP_W();
    if (cpu_state.abrt)
        return 1;
    if (!(msw & 1))
        cpu_state.flags = (cpu_state.flags & 0x7000) | (tempw & 0x0fd5) | 2;
    else if (!(CPL))
        cpu_state.flags = (tempw & 0x7fd5) | 2;
    else if (IOPLp)
        cpu_state.flags = (cpu_state.flags & 0x3000) | (tempw & 0x4fd5) | 2;
    else
        cpu_state.flags = (cpu_state.flags & 0x3200) | (tempw & 0x4dd5) | 2;
    flags_extract(); /* re-derive lazy flag state from the new flags word */
#ifdef USE_DEBUG_REGS_486
    rf_flag_no_clear = 1;
#endif
    CLOCK_CYCLES(5);
    PREFETCH_RUN(5, 1, -1, 1, 0, 0, 0, 0);
#if (defined(USE_DYNAREC) && defined(USE_NEW_DYNAREC))
    codegen_flags_changed = 0;
#endif
    return 0;
}
/* POPF, 386+ variant: pop the 16-bit flags word.
   Adds CR4.VME support for V86 mode with IOPL < 3: the popped IF bit maps
   onto the virtual interrupt flag VIF, while setting TF — or setting IF
   while a virtual interrupt is pending (VIP) — faults with #GP. ESP is
   restored on any faulting path so the instruction can be restarted. */
static int
opPOPF(uint32_t fetchdat)
{
    uint16_t tempw;
    if ((cpu_state.eflags & VM_FLAG) && (IOPL < 3)) {
        if (cr4 & CR4_VME) {
            uint32_t old_esp = ESP; /* for restart if we fault below */
            tempw = POP_W();
            if (cpu_state.abrt) {
                ESP = old_esp;
                return 1;
            }
            if ((tempw & T_FLAG) || ((tempw & I_FLAG) && (cpu_state.eflags & VIP_FLAG))) {
                ESP = old_esp;
                x86gpf(NULL, 0);
                return 1;
            }
            /* Under VME the guest's IF is virtualised as VIF. */
            if (tempw & I_FLAG)
                cpu_state.eflags |= VIF_FLAG;
            else
                cpu_state.eflags &= ~VIF_FLAG;
            cpu_state.flags = (cpu_state.flags & 0x3200) | (tempw & 0x4dd5) | 2;
        } else {
            x86gpf(NULL, 0);
            return 1;
        }
    } else {
        tempw = POP_W();
        if (cpu_state.abrt)
            return 1;
        /* CPL 0 or real mode: all modifiable bits load; otherwise IOPL
           (and, without I/O privilege, IF too) is preserved. */
        if (!(CPL) || !(msw & 1))
            cpu_state.flags = (tempw & 0x7fd5) | 2;
        else if (IOPLp)
            cpu_state.flags = (cpu_state.flags & 0x3000) | (tempw & 0x4fd5) | 2;
        else
            cpu_state.flags = (cpu_state.flags & 0x3200) | (tempw & 0x4dd5) | 2;
    }
    flags_extract(); /* re-derive lazy flag state from the new flags word */
#ifdef USE_DEBUG_REGS_486
    rf_flag_no_clear = 1;
#endif
    CLOCK_CYCLES(5);
    PREFETCH_RUN(5, 1, -1, 1, 0, 0, 0, 0);
#if (defined(USE_DYNAREC) && defined(USE_NEW_DYNAREC))
    codegen_flags_changed = 0;
#endif
    return 0;
}
/* POPFD: pop the 32-bit EFLAGS.
   The low word follows the same privilege rules as opPOPF. For the high
   word: on 486-class CPUs bits 18-21 (AC/VIF/VIP/ID) may load from the
   popped value, the current VM and RF bits are always preserved, and the
   final mask depends on CPU capability (VME > CPUID > 486 > 386). */
static int
opPOPFD(uint32_t fetchdat)
{
    uint32_t templ;
    if ((cpu_state.eflags & VM_FLAG) && (IOPL < 3)) {
        x86gpf(NULL, 0);
        return 1;
    }
    templ = POP_L();
    if (cpu_state.abrt)
        return 1;
    if (!(CPL) || !(msw & 1))
        cpu_state.flags = (templ & 0x7fd5) | 2;
    else if (IOPLp)
        cpu_state.flags = (cpu_state.flags & 0x3000) | (templ & 0x4fd5) | 2;
    else
        cpu_state.flags = (cpu_state.flags & 0x3200) | (templ & 0x4dd5) | 2;
    /* High word: keep popped AC/VIF/VIP/ID only on 486-class CPUs... */
    templ &= (is486 || isibm486) ? 0x3c0000 : 0;
    /* ...and always carry over the current VM/RF bits. */
    templ |= ((cpu_state.eflags & 3) << 16);
    if (cpu_CR4_mask & CR4_VME)
        cpu_state.eflags = (templ >> 16) & 0x3f;
    else if (CPUID)
        cpu_state.eflags = (templ >> 16) & 0x27;
    else if (is486 || isibm486)
        cpu_state.eflags = (templ >> 16) & 7;
    else
        cpu_state.eflags = (templ >> 16) & 3;
    flags_extract(); /* re-derive lazy flag state from the new flags word */
#ifdef USE_DEBUG_REGS_486
    rf_flag_no_clear = 1;
#endif
    CLOCK_CYCLES(5);
    PREFETCH_RUN(5, 1, -1, 0, 1, 0, 0, 0);
#if (defined(USE_DYNAREC) && defined(USE_NEW_DYNAREC))
    codegen_flags_changed = 0;
#endif
    return 0;
}
``` | /content/code_sandbox/src/cpu/x86_ops_flag.h | objective-c | 2016-06-25T22:29:10 | 2024-08-16T19:09:21 | 86Box | 86Box/86Box | 2,616 | 2,765 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.