// Guest virtual-memory access helpers: segment-limit/permission checks and
// byte/word/dword/qword read, write, and read-modify-write routines.
#include "stdafx.h"
#include "bochs.h"


// Validate a 'length'-byte WRITE at 'offset' through segment 'seg'.
// Raises an exception (#GP, or #SS for the stack segment via int_number)
// when the segment is invalid, not present, not writable, or the access
// would cross the segment limit.  On success for an expand-up writable
// segment it may cache IA32_SegAccessWOK so subsequent writes can take the
// fast path in write_virtual_byte/word/dword/qword.
void  IA32_CPU::write_virtual_checks(ia32_segment_seg_t *seg, ia32_address offset,unsigned length)
{

	Bit32u upper_limit;
	// No valid descriptor cached for this segment -> #GP(0).
	if (seg->cache.valid==0) 
	{
		exception(IA32_GP_EXCEPTION, 0, 0);
		return;
	}

	if (seg->cache.p == 0) 
	{ /* not present: #SS for SS, #GP otherwise */
		exception(int_number(seg), 0, 0);
		return;
	}

	switch (seg->cache.type) 
	{
	// Every descriptor type without write permission faults immediately.
	case 0: case 1:   // read only
	case 4: case 5:   // read only, expand down
	case 8: case 9:   // execute only
	case 10: case 11: // execute/read
	case 12: case 13: // execute only, conforming
	case 14: case 15: // execute/read-only, conforming
		exception(int_number(seg), 0, 0);
		return;
	case 2: case 3: /* read/write */
		// Unsigned compare: first term catches offset+length-1 > limit;
		// second term catches limit < length-1, where the subtraction in
		// the first term would wrap around and pass by accident.
		if (offset > (seg->cache.u.segment.limit_scaled - length + 1) || (length-1 > seg->cache.u.segment.limit_scaled))
		{
			exception(int_number(seg), 0, 0);
			return;
		}
		// Cache "writes OK" only when the limit is large enough that the
		// fast paths' unsigned 'limit-2'/'limit-7' comparisons cannot
		// underflow (see write_virtual_dword/qword).
		if (seg->cache.u.segment.limit_scaled >= 7) 
		{
			seg->cache.valid |= IA32_SegAccessWOK;
		}
		break;

	case 6: case 7: /* read write, expand down */
		// For expand-down segments the valid range is (limit, upper_limit],
		// where the upper bound depends on the descriptor's d_b (big) flag.
		if (seg->cache.u.segment.d_b)
			upper_limit = 0xffffffff;
		else
			upper_limit = 0x0000ffff;

		if ((offset <= seg->cache.u.segment.limit_scaled) || (offset > upper_limit) || ((upper_limit - offset) < (length - 1)))
		{
			exception(int_number(seg), 0, 0);
			return;
		}
		// NOTE: no WOK caching here — the fast-path 'offset <= limit'
		// test would be wrong for expand-down segments.
		break;
	}

	return;

}

// Validate a 'length'-byte READ at 'offset' through segment 'seg'.
// Raises an exception (#GP, or #SS for the stack segment via int_number)
// when the segment is invalid, not present, not readable, or the access
// would cross the segment limit.  On success for an expand-up readable
// segment it may cache IA32_SegAccessROK so subsequent reads can take the
// fast path in read_virtual_byte/word/dword/qword.
void   IA32_CPU::read_virtual_checks(ia32_segment_seg_t *seg, ia32_address offset,unsigned length)
{
	Bit32u upper_limit;
	// No valid descriptor cached for this segment -> #GP(0).
	if (seg->cache.valid==0) 
	{
		exception(IA32_GP_EXCEPTION, 0, 0);
		return;
	}

	if (seg->cache.p == 0) 
	{ /* not present: #SS for SS, #GP otherwise */
		exception(int_number(seg), 0, 0);
		return;
	}

	switch (seg->cache.type) 
	{
	case 0: case 1: /* read only */
	case 10: case 11: /* execute/read */
	case 14: case 15: /* execute/read-only, conforming */
		// Unsigned compare: first term catches offset+length-1 > limit;
		// second term catches limit < length-1, where the subtraction in
		// the first term would wrap around and pass by accident.
		if (offset > (seg->cache.u.segment.limit_scaled - length + 1) || (length-1 > seg->cache.u.segment.limit_scaled))
		{
			exception(int_number(seg), 0, 0);
			return;
		}

		// Cache "reads OK" only when the limit is large enough that the
		// fast paths' unsigned 'limit-2'/'limit-7' comparisons cannot
		// underflow (see read_virtual_dword/qword).
		if (seg->cache.u.segment.limit_scaled >= 7) 
		{
			seg->cache.valid |= IA32_SegAccessROK;
		}
		break;
	case 2: case 3: /* read/write */
		// Same expand-up limit check as above.
		if (offset > (seg->cache.u.segment.limit_scaled - length + 1) || (length-1 > seg->cache.u.segment.limit_scaled)) 
		{
			exception(int_number(seg), 0, 0);
			return;
		}

		if (seg->cache.u.segment.limit_scaled >= 7)
		{
			seg->cache.valid |= IA32_SegAccessROK;
		}
		break;
	case 4: case 5: /* read only, expand down */
		// Expand-down: valid range is (limit, upper_limit], upper bound
		// selected by the descriptor's d_b (big) flag.
		if (seg->cache.u.segment.d_b)
			upper_limit = 0xffffffff;
		else
			upper_limit = 0x0000ffff;

		if ((offset <= seg->cache.u.segment.limit_scaled) || (offset > upper_limit) || ((upper_limit - offset) < (length - 1)))
		{
			exception(int_number(seg), 0, 0);
			return;
		}
		// No ROK caching for expand-down (fast-path test would be wrong).
		break;
	case 6: case 7: /* read write, expand down */
		if (seg->cache.u.segment.d_b)
			upper_limit = 0xffffffff;
		else
			upper_limit = 0x0000ffff;

		if ((offset <= seg->cache.u.segment.limit_scaled) || (offset > upper_limit) || ((upper_limit - offset) < (length - 1)))
		{
			exception(int_number(seg), 0, 0);
			return;
		}
		break;
	case 8: case 9: /* execute only */
	case 12: case 13: /* execute only, conforming */
		// Execute-only segments are not readable.
		exception(int_number(seg), 0, 0);
		return;
	}

	return;
}

// Select the fault vector for a failed access through 'seg': accesses
// through the stack segment raise #SS, everything else raises #GP.
int IA32_CPU::int_number(ia32_segment_seg_t *seg)
{
	return (seg == &sregs[IA32_SEG_REG_SS]) ? IA32_SS_EXCEPTION
	                                        : IA32_GP_EXCEPTION;
}

// Write one byte of guest memory at s:offset.
// Fast path: write rights already cached (SegAccessWOK) and offset within
// the segment limit -> translate and write immediately.  Slow path: run
// the full segment checks (which raise an exception on failure and never
// return here), then jump back into the fast-path code at accessOK.
void  IA32_CPU::write_virtual_byte(unsigned s, ia32_address offset, Bit8u *data)
{
	ia32_address laddr;
	ia32_segment_seg_t *seg;

	seg = & sregs[s];
	if (seg->cache.valid & IA32_SegAccessWOK) 
	{

		if (offset <= seg->cache.u.segment.limit_scaled) 
		{
			unsigned pl;
accessOK:
			// Linear address = segment base + effective offset.
			laddr =  get_segment_base(s) + offset;
			// pl is 1 only for user-mode (CPL 3) accesses.
			pl = (CPL==3);
			//m_PageManager->access_address_n(this,laddr, 1, pl, IA32_WRITE, (void *) data);
			m_vm32->access_address_1(this, laddr, pl, IA32_WRITE, (void *) data);
			return;
		}
	}

	// Rights not cached or offset out of range: full check, then retry.
	write_virtual_checks(seg, offset, 1);
	goto accessOK;

}

// Write one 16-bit word of guest memory at s:offset (guest little-endian).
// Fast path requires cached write rights and 'offset < limit' so that both
// bytes (offset, offset+1) lie within the segment; otherwise the full
// checks run and control jumps back to accessOK.
void  IA32_CPU::write_virtual_word(unsigned s, ia32_address offset, Bit16u *data)
{
	ia32_address laddr;
	ia32_segment_seg_t *seg;

	seg = & sregs[s];
	if (seg->cache.valid & IA32_SegAccessWOK) 
	{
		if (offset < seg->cache.u.segment.limit_scaled) 
		{
			unsigned pl;
accessOK:
			laddr =  get_segment_base(s) + offset;
			pl = (CPL==3);
#ifdef  IA32_BIG_ENDIAN
			// Big-endian host: byte-swap into a temporary before storing,
			// since guest memory is little-endian.
			Bit16u    wData = 0;
			IA32_WriteHostWordToLittleEndian(&wData, *data);
			m_vm32->access_address_n(this,laddr, 2, pl, IA32_WRITE, (void *)&wData);
#else
			m_vm32->access_address_n(this,laddr, 2, pl, IA32_WRITE, (void *) data);
#endif
			return;
		}
	}
	// Slow path: full checks (may fault), then retry via accessOK.
	write_virtual_checks(seg, offset, 2);
	goto accessOK;
}

// Write one 32-bit dword of guest memory at s:offset (guest little-endian).
// Fast path requires cached write rights and 'offset < limit-2' so bytes
// offset..offset+3 all lie within the segment (the WOK flag is only set
// when limit >= 7, so the unsigned subtraction cannot underflow).
void  IA32_CPU::write_virtual_dword(unsigned s, ia32_address offset, Bit32u *data)
{
	ia32_address laddr;
	ia32_segment_seg_t *seg;

	seg = & sregs[s];
	if (seg->cache.valid & IA32_SegAccessWOK) 
	{
		if (offset < (seg->cache.u.segment.limit_scaled-2)) 
		{
			unsigned pl;
accessOK:
			laddr =  get_segment_base(s) + offset;
			pl = (CPL==3);
#ifdef  IA32_BIG_ENDIAN
			// Big-endian host: byte-swap into a temporary before storing.
			Bit32u    dwData = 0;
			IA32_WriteHostDWordToLittleEndian(&dwData, *data);
			m_vm32->access_address_n(this,laddr, 4, pl, IA32_WRITE, (void *)&dwData);
#else
			m_vm32->access_address_n(this,laddr, 4, pl, IA32_WRITE, (void *) data);
#endif
			return;
		}
	}
	// Slow path: full checks (may fault), then retry via accessOK.
	write_virtual_checks(seg, offset, 4);
	goto accessOK;
}


// Write one 64-bit qword of guest memory at s:offset (guest little-endian).
// Fast path requires cached write rights and 'offset <= limit-7' so bytes
// offset..offset+7 all lie within the segment (WOK implies limit >= 7, so
// the unsigned subtraction cannot underflow).
void  IA32_CPU::write_virtual_qword(unsigned s, ia32_address offset, Bit64u *data)
{
	ia32_address laddr;
	ia32_segment_seg_t *seg;

	seg = & sregs[s];
	if (seg->cache.valid & IA32_SegAccessWOK) 
	{
		if (offset <= (seg->cache.u.segment.limit_scaled-7)) 
		{
			unsigned pl;
accessOK:
			laddr =  get_segment_base(s) + offset;
			pl = (CPL==3);
#ifdef  IA32_BIG_ENDIAN
			// Big-endian host: byte-swap into a temporary before storing.
			Bit64u    qwData = 0x0LL;
			IA32_WriteHostQWordToLittleEndian(&qwData, *data);
			m_vm32->access_address_n(this,laddr, 8, pl, IA32_WRITE, (void *)&qwData);
#else
			m_vm32->access_address_n(this,laddr, 8, pl, IA32_WRITE, (void *) data);
#endif
			return;
		}
	}
	// Slow path: full checks (may fault), then retry via accessOK.
	write_virtual_checks(seg, offset, 8);
	goto accessOK;
}

// Read one byte of guest memory at s:offset into *data.
// Fast path: read rights cached (SegAccessROK) and offset within the
// segment limit.  Slow path: full segment checks (fault on failure), then
// jump back into the fast-path code at accessOK.
void   IA32_CPU::read_virtual_byte(unsigned s, ia32_address offset, Bit8u *data)
{
	ia32_address laddr;
	ia32_segment_seg_t *seg;

	seg = & sregs[s];
	if (seg->cache.valid & IA32_SegAccessROK) 
	{
		if (offset <= seg->cache.u.segment.limit_scaled) 
		{
			unsigned pl;
accessOK:
			laddr =  get_segment_base(s) + offset;
			// pl is 1 only for user-mode (CPL 3) accesses.
			pl = (CPL==3);
			//m_PageManager->access_address_n(this,laddr, 1, pl, IA32_READ, (void *) data);
			m_vm32->access_address_1(this, laddr, pl, IA32_READ, (void *) data);
			return;
		}
	}
	read_virtual_checks(seg, offset, 1);
	goto accessOK;
}

// Read one 16-bit word of guest memory at s:offset into *data.
// Fast path requires cached read rights and 'offset < limit' so both bytes
// lie within the segment; the big-endian fixup converts the loaded
// little-endian guest value into host byte order after the access.
void   IA32_CPU::read_virtual_word(unsigned s, ia32_address offset, Bit16u *data)
{
	ia32_address laddr;
	ia32_segment_seg_t *seg;

	seg = & sregs[s];
	if (seg->cache.valid & IA32_SegAccessROK) 
	{
		if (offset < seg->cache.u.segment.limit_scaled) 
		{
			unsigned pl;
accessOK:
			laddr =  get_segment_base(s) + offset;
			pl = (CPL==3);

			m_vm32->access_address_n(this,laddr, 2, pl, IA32_READ, (void *) data);
#ifdef  IA32_BIG_ENDIAN
			IA32_ReadHostWordFromLittleEndian(data, data);
#endif
			return;
		}
	}
	// Slow path: full checks (may fault), then retry via accessOK.
	read_virtual_checks(seg, offset, 2);
	goto accessOK;
}

// Read one 32-bit dword of guest memory at s:offset into *data.
// Fast path requires cached read rights and 'offset < limit-2' so bytes
// offset..offset+3 lie within the segment (ROK implies limit >= 7, so the
// unsigned subtraction cannot underflow).
void   IA32_CPU::read_virtual_dword(unsigned s, ia32_address offset, Bit32u *data)
{
	ia32_address laddr;
	ia32_segment_seg_t *seg;

	seg = & sregs[s];
	if (seg->cache.valid & IA32_SegAccessROK) 
	{
		if (offset < (seg->cache.u.segment.limit_scaled-2)) 
		{
			unsigned pl;
accessOK:
			laddr =  get_segment_base(s) + offset;
			pl = (CPL==3);

			m_vm32->access_address_n(this,laddr, 4, pl, IA32_READ, (void *) data);
#ifdef  IA32_BIG_ENDIAN
			// Convert loaded little-endian guest value to host byte order.
			IA32_ReadHostDWordFromLittleEndian(data, data);
#endif
			return;
		}
	}
	// Slow path: full checks (may fault), then retry via accessOK.
	read_virtual_checks(seg, offset, 4);
	goto accessOK;
}

// Read one 64-bit qword of guest memory at s:offset into *data.
// Fast path requires cached read rights and 'offset <= limit-7' so bytes
// offset..offset+7 lie within the segment (ROK implies limit >= 7, so the
// unsigned subtraction cannot underflow).
void   IA32_CPU::read_virtual_qword(unsigned s, ia32_address offset, Bit64u *data)
{
	ia32_address laddr;
	ia32_segment_seg_t *seg;

	seg = & sregs[s];
	if (seg->cache.valid & IA32_SegAccessROK) 
	{
		if (offset <= (seg->cache.u.segment.limit_scaled-7)) 
		{
			unsigned pl;
accessOK:
			laddr =  get_segment_base(s) + offset;
			pl = (CPL==3);
			m_vm32->access_address_n(this,laddr, 8, pl, IA32_READ, (void *) data);
#ifdef  IA32_BIG_ENDIAN
			// Convert loaded little-endian guest value to host byte order.
			IA32_ReadHostQWordFromLittleEndian(data, data);
#endif
			return;
		}
	}
	// Slow path: full checks (may fault), then retry via accessOK.
	read_virtual_checks(seg, offset, 8);
	goto accessOK;
}


//////////////////////////////////////////////////////////////
// special Read-Modify-Write operations                     //
// address translation info is kept across read/write calls //
//////////////////////////////////////////////////////////////

// Begin a read-modify-write access on one byte at s:offset.
// Uses WRITE permission checks (SegAccessWOK / write_virtual_checks)
// because the access will be written back by write_RMW_virtual_byte; the
// translation is cached across the pair by access_address_n_rmw_begin.
void   IA32_CPU::read_RMW_virtual_byte(unsigned s, ia32_address offset, Bit8u *data)
{
	ia32_address laddr;
	ia32_segment_seg_t *seg;

	seg = & sregs[s];
	if (seg->cache.valid & IA32_SegAccessWOK) 
	{
		if (offset <= seg->cache.u.segment.limit_scaled) 
		{
			unsigned pl;
accessOK:
			laddr =  get_segment_base(s) + offset;
			pl = (CPL==3);
			m_vm32->access_address_n_rmw_begin(this, laddr, 1, pl, IA32_RW, (void *) data);
			return;
		}
	}
	// Slow path: full write checks (may fault), then retry via accessOK.
	write_virtual_checks(seg, offset, 1);
	goto accessOK;
}

// Begin a read-modify-write access on one 16-bit word at s:offset.
// Write permissions are checked up front since the value will be stored
// back by write_RMW_virtual_word; 'offset < limit' keeps both bytes in
// range on the fast path.
void  IA32_CPU::read_RMW_virtual_word(unsigned s, ia32_address offset, Bit16u *data)
{
	ia32_address laddr;
	ia32_segment_seg_t *seg;

	seg = & sregs[s];
	if (seg->cache.valid & IA32_SegAccessWOK) 
	{
		if (offset < seg->cache.u.segment.limit_scaled) 
		{
			unsigned pl;
accessOK:
			laddr =  get_segment_base(s) + offset;
			pl = (CPL==3);
			m_vm32->access_address_n_rmw_begin(this,laddr, 2, pl, IA32_RW, (void *) data);
#ifdef IA32_BIG_ENDIAN
			// Convert loaded little-endian guest value to host byte order.
			IA32_ReadHostWordFromLittleEndian(data, data);
#endif//IA32_BIG_ENDIAN
			return;
		}
	}
	// Slow path: full write checks (may fault), then retry via accessOK.
	write_virtual_checks(seg, offset, 2);
	goto accessOK;
}

// Begin a read-modify-write access on one 32-bit dword at s:offset.
// Write permissions are checked up front since the value will be stored
// back by write_RMW_virtual_dword; 'offset < limit-2' keeps all four
// bytes in range (WOK implies limit >= 7, so no underflow).
void   IA32_CPU::read_RMW_virtual_dword(unsigned s, ia32_address offset, Bit32u *data)
{
	ia32_address laddr;
	ia32_segment_seg_t *seg;

	seg = & sregs[s];
	if (seg->cache.valid & IA32_SegAccessWOK) 
	{
		if (offset < (seg->cache.u.segment.limit_scaled-2)) 
		{
			unsigned pl;
accessOK:
			laddr =  get_segment_base(s) + offset;
			pl = (CPL==3);
			m_vm32->access_address_n_rmw_begin(this,laddr, 4, pl, IA32_RW, (void *) data);
#ifdef IA32_BIG_ENDIAN
			// Convert loaded little-endian guest value to host byte order.
			IA32_ReadHostDWordFromLittleEndian(data, data);
#endif//IA32_BIG_ENDIAN
			return;
		}
	}
	// Slow path: full write checks (may fault), then retry via accessOK.
	write_virtual_checks(seg, offset, 4);
	goto accessOK;
}

// Begin a read-modify-write access on one 64-bit qword at s:offset.
// Write permissions are checked up front since the value will be stored
// back by write_RMW_virtual_qword; 'offset <= limit-7' keeps all eight
// bytes in range (WOK implies limit >= 7, so no underflow).
void  IA32_CPU::read_RMW_virtual_qword(unsigned s, ia32_address offset, Bit64u *data)
{
	ia32_address laddr;
	ia32_segment_seg_t *seg;

	seg = & sregs[s];
	if (seg->cache.valid & IA32_SegAccessWOK) 
	{
		if (offset <= (seg->cache.u.segment.limit_scaled-7)) 
		{
			unsigned pl;
accessOK:
			laddr =  get_segment_base(s) + offset;
			pl = (CPL==3);
			m_vm32->access_address_n_rmw_begin(this, laddr, 8, pl, IA32_RW, (void *) data);
#ifdef IA32_BIG_ENDIAN
			// Convert loaded little-endian guest value to host byte order.
			IA32_ReadHostQWordFromLittleEndian(data, data);
#endif//IA32_BIG_ENDIAN
			return;
		}
	}
	// Slow path: full write checks (may fault), then retry via accessOK.
	write_virtual_checks(seg, offset, 8);
	goto accessOK;
}

// Complete the read-modify-write sequence opened by read_RMW_virtual_byte:
// store the modified byte back through the translation cached by the
// matching access_address_n_rmw_begin call.
void  IA32_CPU::write_RMW_virtual_byte(Bit8u val8)
{
	m_vm32->access_address_n_rmw_end(this, &val8, 1);
}


// Complete the read-modify-write sequence opened by read_RMW_virtual_word:
// store the modified word back through the translation cached by the
// matching access_address_n_rmw_begin call.
void  IA32_CPU::write_RMW_virtual_word(Bit16u val16)
{
	m_vm32->access_address_n_rmw_end(this, &val16, 2);
}


// Complete the read-modify-write sequence opened by read_RMW_virtual_dword:
// store the modified dword back through the translation cached by the
// matching access_address_n_rmw_begin call.
void  IA32_CPU::write_RMW_virtual_dword(Bit32u val32)
{
	m_vm32->access_address_n_rmw_end(this, &val32, 4);
}


// Complete the read-modify-write sequence opened by read_RMW_virtual_qword:
// store the modified qword back through the translation cached by the
// matching access_address_n_rmw_begin call.
void  IA32_CPU::write_RMW_virtual_qword(Bit64u val64)
{
	m_vm32->access_address_n_rmw_end(this, &val64, 8);
}


//
// Some macro defs to make things cleaner for endian-ness issues.
// The following routines access a double qword, ie 16-bytes.
// For the moment, I redirect these to use the single qword routines
// by splitting one access into two.
//
// Endian  Host byte order         Guest (x86) byte order
// ======================================================
// Little  0..7 8..15               0..7 8..15
// Big    15..8 7...0               0..7 8..15
//
// Below are the host memory offsets to each of 2 single quadwords, which
// are different across big and little endian machines.  The memory
// accessing routines take care of the access endian issues when accessing
// the physical memory image.
//


#ifdef IA32_LITTLE_ENDIAN
#  define Host1stDWordOffset 0
#  define Host2ndDWordOffset 8
#else
#  define Host1stDWordOffset 8
#  define Host2ndDWordOffset 0
#endif


void  IA32_CPU::read_virtual_dqword(unsigned s, ia32_address offset, Bit8u *data)
{
	// Read Double Quadword.
	Bit64u *qwords = (Bit64u*) data;
	read_virtual_qword(s, offset+Host1stDWordOffset, &qwords[0]);
	read_virtual_qword(s, offset+Host2ndDWordOffset, &qwords[1]);
}

// Read a 16-byte double quadword that must be 16-byte aligned.
// An unaligned offset raises #GP(0) before any memory is touched.
void   IA32_CPU::read_virtual_dqword_aligned(unsigned s, ia32_address offset, Bit8u *data)
{
	if ((offset & 0xf) != 0)
	{
		exception(IA32_GP_EXCEPTION, 0, 0);
	}
	read_virtual_dqword(s, offset, data);
}

void  IA32_CPU::write_virtual_dqword(unsigned s, ia32_address offset, Bit8u *data)
{
	// Write Double Quadword.
	Bit64u *qwords = (Bit64u*) data;
	write_virtual_qword(s, offset+Host1stDWordOffset, &qwords[0]);
	write_virtual_qword(s, offset+Host2ndDWordOffset, &qwords[1]);
}

// Write a 16-byte double quadword that must be 16-byte aligned.
// An unaligned offset raises #GP(0) before any memory is touched.
void  IA32_CPU::write_virtual_dqword_aligned(unsigned s, ia32_address offset, Bit8u *data)
{
	if ((offset & 0xf) != 0)
	{
		exception(IA32_GP_EXCEPTION, 0, 0);
	}
	write_virtual_dqword(s, offset, data);
}


// Load an 80-bit (ten-byte) FP value from guest memory: 8 bytes of
// fraction at 'offset' followed by 2 bytes of sign/exponent at offset+8.
void  IA32_CPU::read_virtual_tword(unsigned s, ia32_address offset, floatx80 *data)
{
	read_virtual_qword(s, offset,     &data->fraction);
	read_virtual_word (s, offset + 8, &data->exp);
}

// Store an 80-bit (ten-byte) FP value to guest memory: 8 bytes of
// fraction at 'offset' followed by 2 bytes of sign/exponent at offset+8.
void  IA32_CPU::write_virtual_tword(unsigned s, ia32_address offset, floatx80 *data)
{
	write_virtual_qword(s, offset,     &data->fraction);
	write_virtual_word (s, offset + 8, &data->exp);
}
