#include "v_cpu.h"
#include "memory_avm.h"
#include "type.h"
#include <assert.h>
#include <stdio.h>

#define TLB_SYSONLY 1
#define TLB_READONLY 2
#define TLB_BAN_HOSTPTR 0x800
#define TLB_GLOBALPAGE 0x80000000
#define AVM_INVALID_PAGE_ENTRY 0XFFFFFFFF
#define AVM_PRIV_CHECK_SIZE 32
#define AVM_MEM(x) (&avm_mem)
extern avm_memory avm_mem;

/*
 * |4 |3 |2 |1 |0 |
 * |wp|us|us|rw|rw|
 *  |  |  |  |  |
 *  |  |  |  |  +--- rw of current access
 *  |  |  +--+------ rw & us combined of PDE and PTE
 *  |  +------------ us of current access
 *  +--------------- Current value of cr0.WP
 * */
static unsigned priv_check[AVM_PRIV_CHECK_SIZE];

/*
 * Build the 32-entry page-privilege lookup table (see the bit layout
 * comment above priv_check) and then empty the TLB.  Each entry answers:
 * "is this access allowed?" for one combination of cr0.WP, the current
 * access's U/S and R/W, and the combined U/S & R/W of the PDE and PTE.
 */
void v_cpu::TLB_init() {
	for (unsigned idx = 0; idx < AVM_PRIV_CHECK_SIZE; idx++) {
		const unsigned wp          = (idx >> 4) & 1; // cr0.WP
		const unsigned cur_us      = (idx >> 3) & 1; // current access is user-mode
		const unsigned combined_us = (idx >> 2) & 1; // U/S of PDE & PTE combined
		const unsigned combined_rw = (idx >> 1) & 1; // R/W of PDE & PTE combined
		const unsigned cur_rw      = (idx >> 0) & 1; // current access is a write

		unsigned allowed;
		if (wp) {
			// WP=1: even supervisor writes must honor read-only pages;
			// user access to a supervisor page is always denied.
			allowed = (cur_us <= combined_us) && (cur_rw <= combined_rw);
		}
		else {
			// WP=0: supervisor may do anything; user access is still
			// denied when writing a read-only page.
			allowed = (cur_us == 0) || (cur_rw <= combined_rw);
		}
		priv_check[idx] = allowed;
	}

	TLB_flush();
}

/*
 * Invalidate every TLB slot by poisoning its linear page-frame tag so no
 * lookup can match until the slot is refilled.
 */
void v_cpu::TLB_flush() {
	for (int slot = TLB_SIZE; slot-- > 0; ) {
		TLB.entry[slot].lpf = AVM_INVALID_PAGE_ENTRY;
	}
}

/*
 * Read `len` bytes at linear address `laddr` into `data`.
 *
 * With paging enabled (cr0.PG) each touched page is translated through
 * translate_linear(); a read that straddles a 4K page boundary is split
 * into two physical accesses.  Without paging, linear == physical, and a
 * missing TLB entry for the page is primed with a read-only identity
 * mapping when host memory backs it.  The translation is recorded in
 * address_xlate so a following write (RMW) can reuse it.
 */
void v_cpu::access_read_linear(bit32u laddr, unsigned len, unsigned cpl,
		unsigned xlate_rw, void *data) {
	assert((xlate_rw == AVM_READ) || (xlate_rw == AVM_RW));
	bit32u page_offset = PAGE_OFFSET(laddr);

	if (cr0.get_PG()) {
		if ((page_offset + len) <= 4096) {
			// Access within a single page
			address_xlate.paddr1 = translate_linear(laddr, cpl, xlate_rw);
			address_xlate.len1 = len; // keep len1 valid, matching the write path
			address_xlate.pages = 1;
			access_read_physical(address_xlate.paddr1, len, data);
		}
		else {
			// Access spans two pages; translate each page separately.
			address_xlate.paddr1 = translate_linear(laddr, cpl, xlate_rw);
			address_xlate.len1 = 4096 - page_offset;
			address_xlate.len2 = len - address_xlate.len1;
			address_xlate.paddr2 = translate_linear((laddr + address_xlate.len1), cpl, xlate_rw);
			// BUGFIX: pages was never set here, leaving a stale count from
			// a previous translation (all sibling branches set it).
			address_xlate.pages = 2;
			access_read_physical(address_xlate.paddr1, address_xlate.len1, data);
			access_read_physical(address_xlate.paddr2, address_xlate.len2, ((bit8u *)data + address_xlate.len1));
		} // End of if ((offset + len) < 4096)
	}
	else {
		if ((page_offset + len) <= 4096) {
			address_xlate.paddr1 = laddr;
			address_xlate.len1 = len; // keep len1 valid, matching the write path
			address_xlate.pages = 1;
			unsigned tlb_index = TLB_INDEX_OF(laddr, 0);
			avm_tlb_entry *entry = &TLB.entry[tlb_index];
			avm_addr lpf = LPFOF(laddr);

			// TLB miss: prime the slot with an identity mapping if the
			// page is backed by host memory.
			if (TLB_LPFOF(entry->lpf) != lpf) {
				avm_hostpage_addr hostPageAddr= (avm_hostpage_addr)AVM_MEM(0)->get_vector(laddr);
				if (hostPageAddr) {
					entry->lpf = lpf;
					entry->ppf = (avm_phy_addr)lpf;
					entry->host_page_addr = hostPageAddr;
					// Read translation: ban cached writes so a write
					// re-primes the entry.
					entry->access_bits = TLB_READONLY;
				}
			}
			access_read_physical(address_xlate.paddr1, len, data);
		}
		else {
			address_xlate.paddr1 = laddr;
			address_xlate.len1 = 4096 - page_offset;
			address_xlate.len2 = len - address_xlate.len1;
			address_xlate.paddr2 = (avm_phy_addr)(laddr + address_xlate.len1);
			address_xlate.pages = 2;

			access_read_physical(address_xlate.paddr1, address_xlate.len1, data);
			access_read_physical(address_xlate.paddr2, address_xlate.len2, 
					((bit8u *)data + address_xlate.len1));
		} 
	}

}

/*
 * Store `len` bytes from `data` at linear address `laddr`.
 *
 * With paging enabled (cr0.PG) each touched page is translated through
 * translate_linear(); a store straddling a 4K page boundary is split into
 * two physical accesses.  Without paging, linear == physical, and a
 * missing TLB entry for the page is primed with a writable identity
 * mapping when host memory backs it.
 */
void v_cpu::access_write_linear(bit32u laddr, unsigned len ,unsigned cpl, 
		unsigned xlate_rw, void *data) {
	assert(xlate_rw == AVM_RW);
	bit32u page_offset = PAGE_OFFSET(laddr);
	const bool crosses = (page_offset + len) > 4096;

	if (cr0.get_PG()) {
		if (!crosses) {
			address_xlate.pages = 1;
			address_xlate.paddr1 = translate_linear(laddr, cpl, xlate_rw);
			access_write_physical(address_xlate.paddr1, len, data);
		}
		else {
			// Spans two pages: translate each page on its own.
			bit32u first_len = 4096 - page_offset;
			address_xlate.paddr1 = translate_linear(laddr, cpl, xlate_rw);
			address_xlate.len1 = first_len;
			address_xlate.paddr2 = translate_linear(laddr + first_len, cpl, xlate_rw);
			address_xlate.len2 = len - first_len;
			address_xlate.pages = 2;
			access_write_physical(address_xlate.paddr1, first_len, data);
			access_write_physical(address_xlate.paddr2, address_xlate.len2,
					((bit8u *)data + first_len));
		}
	}
	else {
		if (!crosses) {
			address_xlate.paddr1 = laddr;
			address_xlate.len1 = len;
			address_xlate.pages = 1;

			// TLB miss: prime the slot with a fully-accessible identity
			// mapping if host memory backs the page.
			unsigned slot = TLB_INDEX_OF(laddr, 0);
			avm_tlb_entry *pentry = &TLB.entry[slot];
			unsigned page_frame = LPFOF(laddr);
			if (TLB_LPFOF(pentry->lpf) != page_frame) {
				avm_hostpage_addr host_page = (avm_hostpage_addr)AVM_MEM(0)->get_vector(laddr);
				if (host_page) {
					pentry->lpf = page_frame;
					pentry->ppf = (avm_phy_addr)page_frame;
					pentry->host_page_addr = host_page;
					pentry->access_bits = 0; // no restrictions: writes allowed
				}
			}
			access_write_physical(address_xlate.paddr1, address_xlate.len1, data);
		}
		else {
			bit32u first_len = 4096 - page_offset;
			address_xlate.paddr1 = laddr;
			address_xlate.len1 = first_len;
			address_xlate.paddr2 = laddr + first_len;
			address_xlate.len2 = len - first_len;
			address_xlate.pages = 2;
			access_write_physical(address_xlate.paddr1, first_len, data);
			access_write_physical(address_xlate.paddr2, address_xlate.len2,
					((bit8u *)data + first_len));
		}
	}
}

/*
 * Translate a linear address to a physical address (cr0.PG must be set).
 *
 * Fast path: a TLB hit whose access_bits do not ban the requested access
 * returns immediately.  Slow path: walk the two-level page tables, enforce
 * the privilege table built by TLB_init() (hangs on a violation -- there is
 * no #PF delivery here), update the Accessed/Dirty bits, and refill the
 * TLB slot with the translation and its cached access restrictions.
 */
avm_phy_addr v_cpu::translate_linear(bit32u laddr, unsigned cpl, 
		unsigned xlate_rw) {
	avm_phy_addr paddress, poffset = PAGE_OFFSET(laddr); 
	avm_bool isWrite = xlate_rw & 1;
	unsigned pl = (cpl == 3); // 1 = user-mode access

	assert(cr0.get_PG());
	avm_addr lpf = LPFOF(laddr);
	unsigned index = TLB_INDEX_OF(lpf, 0);
	avm_tlb_entry *tlb_index = &TLB.entry[index];

	// TLB hit: usable only if no right needed by this access is banned.
	if (TLB_LPFOF(tlb_index->lpf) == lpf) {
		paddress = tlb_index->ppf | poffset;	
		if (!(tlb_index->access_bits & ((xlate_rw << 1) | pl)))
			return paddress;
	}

	// Page-table walk: fetch the PDE first.
	bit32u pde, pte;
	avm_phy_addr pde_addr = cr3_masked | ((laddr & 0xFFC00000) >> 20);
	access_read_physical(pde_addr, 4, &pde);
	assert(pde & 0x1); // PDE must be present in memory.
	
	// Get pte
	avm_phy_addr pte_addr = (pde & 0xFFFFF000) | ((laddr & 0x3FF000) >> 10);
	access_read_physical(pte_addr, 4, &pte);
	assert(pte & 0x1);
	
	// Combined U/S (bit 2) and R/W (bit 1) of PDE and PTE.
	unsigned combined = (pde & pte) & 0x6;
	unsigned priv_index = (cr0.get_WP() << 4) | (pl << 3) | combined | isWrite;

	if (!priv_check[priv_index]) {
		printf("ERROR PROTECTION.\n");
		while(1); // hang here
	} 

	// Set the PDE Accessed bit (0x20) if it is still clear.
	if (!(pde & 0x20)) {
		pde |= 0x20;
		access_write_physical(pde_addr, 4, &pde);
	}

	// Set the PTE Accessed bit, and the Dirty bit (0x40) on writes.
	// BUGFIX: the A bit must be written when it is *clear*; the old test
	// `(pte & 0x20)` never set A on first access and rewrote the PTE on
	// every access once A was set.
	if (!(pte & 0x20) || (isWrite && ((pte & 0x40) == 0))) {
		pte |= 0x20 | (isWrite << 6);
		access_write_physical(pte_addr, 4, &pte);
	}

	bit32u ppf = pte & 0xFFFFF000;
	paddress = ppf | poffset;

	// Refill the TLB slot; TLB_BAN_HOSTPTR (0x800) is provisionally set in
	// the tag and cleared below once a backing host page is found.
	tlb_index->lpf = lpf | 0x800;
	tlb_index->ppf = ppf;
	tlb_index->access_bits = 0;

	// BUGFIX: `combined & 0x04 == 0` parsed as `combined & (0x04 == 0)`,
	// which is always 0, so the supervisor-only branch was dead code.
	if ((combined & 0x04) == 0) {
		// Supervisor-only page: user accesses must retranslate (and fail).
		tlb_index->access_bits |= TLB_SYSONLY;
		if (!isWrite) {
			// Read translation: force a write to walk again so D is set.
			tlb_index->access_bits |= TLB_READONLY;
		}
	}
	else {
		// User-accessible page: ban cached writes unless this walk was a
		// write to a writable page.  (Same precedence bug fixed here:
		// `combined & 0x02 == 0` was always 0.)
		if (!isWrite || ((combined & 0x02) == 0)) {
			tlb_index->access_bits |= TLB_READONLY;
		}
	}

	tlb_index->host_page_addr = (avm_phy_addr)AVM_MEM(0)->get_vector(ppf);
	
	// A backing host page exists: clear the ban bit so the fast host
	// pointer can be used.
	if (tlb_index->host_page_addr)
		tlb_index->lpf = lpf; 
	
	return paddress;
}

// Read `len` bytes of guest physical memory at `paddr` into `data`,
// delegating to the global memory object.
void v_cpu::access_read_physical(avm_phy_addr paddr, unsigned len, void *data) {
	avm_memory *mem = AVM_MEM(0);
	mem->read_physical_page(this, paddr, len, data);
}

// Write `len` bytes from `data` to guest physical memory at `paddr`,
// delegating to the global memory object.
void v_cpu::access_write_physical(avm_phy_addr paddr, unsigned len ,void *data) {
	avm_memory *mem = AVM_MEM(0);
	mem->write_physical_page(this, paddr, len, data);
}
