﻿#include "ntifs.h"
#include "ntddk.h"
#include "AsmCall.h"
#include "Util.h"
#include "vm.h"
#include "ept.h"
#include "common.h"
#include "VMM.h"
#include "ia32_types.h"
#include "HooksFunction.h"

extern PVOID pFunctionOpenProcess;
extern PVOID pFunctionIofCallDriver;

extern PDEVICE_OBJECT pRealDeviceObj[26];
extern PIRP pHookIrp;
extern ULONG nCurrentHookDeviceObj;

void VmmpAdjustGuestInstructionPointer(GuestContext *guest_context);

// Queues an event so that it is delivered to the guest on the next VM-entry.
// When deliver_error_code is true, error_code is pushed on the guest stack as
// part of the event delivery.
void VmmpInjectInterruption(
	InterruptionType interruption_type, InterruptionVector vector,
	bool deliver_error_code, ULONG32 error_code) {
	VmEntryInterruptionInformationField interruption_info = {};
	interruption_info.fields.interruption_type = (ULONG32)interruption_type;
	interruption_info.fields.vector = (ULONG32)vector;
	interruption_info.fields.deliver_error_code = deliver_error_code;
	interruption_info.fields.valid = true;
	Asm_vmxWrite((ULONG32)VmcsField::kVmEntryIntrInfoField, interruption_info.all);

	if (deliver_error_code) {
		Asm_vmxWrite((ULONG32)VmcsField::kVmEntryExceptionErrorCode, error_code);
	}
}

// Advances guest RIP past the instruction that caused the VM-exit. If the
// guest was single-stepping (RFLAGS.TF set), also injects a #DB so the trap
// the guest expects after this instruction is not lost.
void VmmpAdjustGuestInstructionPointer(GuestContext *guest_context) {
	const ULONG_PTR exit_inst_length = Asm_VmRead((ULONG_PTR)VmcsField::kVmExitInstructionLen);
	Asm_vmxWrite((ULONG32)VmcsField::kGuestRip, guest_context->ip + exit_inst_length);

	if (!guest_context->flag_reg.fields.tf) {
		return;
	}
	VmmpInjectInterruption(InterruptionType::kHardwareException, InterruptionVector::kDebugException, false, 0);
	Asm_vmxWrite((ULONG32)VmcsField::kVmEntryInstructionLen, exit_inst_length);
}

// Hook callback for NtOpenProcess: denies any open request whose target PID is
// on the per-processor protected-PID list. Returns 0x808 to make the
// dispatcher skip the original API (with RAX already set to the failure
// status), or 0 to let the call proceed unmodified.
ULONG_PTR NtOpenProcessDealFunction(PDEAL_FUNCTION_PARAM pDealFunctionParam, ProcessorData* pParamPointer) {
	const PCLIENT_ID pClientId = (PCLIENT_ID)(pDealFunctionParam->r9);
	const ULONG_PTR nTargetPid = (ULONG_PTR)(pClientId->UniqueProcess);

	struct HookPid *pEntry = pParamPointer->pFirstHookPid;
	while (pEntry != 0) {
		if (pEntry->nPid == nTargetPid) {
			KdPrint(("[Whisper]Keep out!\n"));
			// Fail the call from the guest's perspective without ever running
			// the real NtOpenProcess.
			*(NTSTATUS*)(&(pDealFunctionParam->rax)) = STATUS_ACCESS_DENIED;
			return 0x808;
		}
		pEntry = pEntry->pNext;
	}
	return 0;
}

ULONG_PTR nPrintCount = 0;

// Hook callback for IofCallDriver: when the marker IRP issued by this driver
// is observed, records the device object that received it so the real device
// stack can be located later.
ULONG_PTR IofCallDriverDealFunction(PDEAL_FUNCTION_PARAM pDealFunctionParam, ProcessorData* pParamPointer) {
	PDEVICE_OBJECT pDevice = (PDEVICE_OBJECT)(pDealFunctionParam->rcx);
	if (pHookIrp == (PIRP)(pDealFunctionParam->rdx)) {
		pRealDeviceObj[nCurrentHookDeviceObj] = pDevice;
		KdPrint(("[Whisper]Find Device_Object: %p\n", pDevice));
	}
	return 0;
}

// Sets up an EPT "shadow page" hook for the physical page containing dtPhy.
// The page's contents are copied into a freshly allocated non-paged buffer,
// and a HOOK_EPT_STRUCT describing both the original page and the shadow copy
// is queued on the per-processor ring-0 hook list. The EPT PT entry for the
// original page then has execute access cleared (read/write kept), so a later
// execute access raises an EPT violation — presumably handled elsewhere to
// swap in the shadow translation (TODO confirm against the violation handler).
// Returns the VA of the shadow copy, or 0 on any failure.
unsigned char* HookPageByEpt(GuestContext* guest_context, PHYSICAL_ADDRESS dtPhy) {
	unsigned char* pReturnMemory = 0;
	unsigned char* pMemory = (unsigned char*)ExAllocatePoolWithTag(NonPagedPool, 4096, kHyperPlatformCommonPoolTag);
	if (pMemory)
	{
		PHYSICAL_ADDRESS dtPhyAddress = dtPhy;
		PHYSICAL_ADDRESS dtCopyMemory;
		// Round down to the 4KB page base, then copy the page byte by byte via
		// its virtual mapping.
		dtCopyMemory.QuadPart = dtPhyAddress.QuadPart & ~(0xFFFu);
		for (int i = 0; i < 4096; i++, dtCopyMemory.QuadPart++)
		{
			unsigned char* pPointer = (unsigned char*)MmGetVirtualForPhysical(dtCopyMemory);
			__try {
				pMemory[i] = *pPointer;
			}
			__except (EXCEPTION_EXECUTE_HANDLER) {
				// Unreadable byte: skip it (the destination byte stays
				// uninitialized) and continue with the rest of the page.
				continue;
			}
		}

		PHYSICAL_ADDRESS dtPhysicalAddress = MmGetPhysicalAddress(pMemory);
		EptCommonEntry* pEptCommonEntry = EptGetEptPtEntry(guest_context->stack->processor_data->ept_data, dtPhyAddress.QuadPart);
		if (pEptCommonEntry) {

			// Approach: redirect the execute translation while keeping the
			// read/write translation pointing at the original page.
			PHOOK_EPT_STRUCT pHookStructBuffer = (PHOOK_EPT_STRUCT)ExAllocatePoolWithTag(NonPagedPool, sizeof(HOOK_EPT_STRUCT), kHyperPlatformCommonPoolTag);

			if (pHookStructBuffer != NULL)
			{
				// Record both EPT permission sets: "Hook" = execute-only view of
				// the shadow page, "Original" = read/write view of the real page.
				pHookStructBuffer->nHookRingValue = 0;
				pHookStructBuffer->UserProcCr3 = 0;
				pHookStructBuffer->nProcessPid = 0;
				pHookStructBuffer->bMTFing = false;
				pHookStructBuffer->bUsingNow = true;
				pHookStructBuffer->Hook_execute_access = 1;
				pHookStructBuffer->Hook_memory_type = pEptCommonEntry->fields.memory_type;
				pHookStructBuffer->Hook_read_access = 0;
				pHookStructBuffer->Hook_write_access = 0;
				pHookStructBuffer->Original_execute_access = 0;
				pHookStructBuffer->Original_memory_type = pEptCommonEntry->fields.memory_type;
				pHookStructBuffer->Original_read_access = 1;
				pHookStructBuffer->Original_write_access = 1;
				pHookStructBuffer->pOriginalPA = pEptCommonEntry->fields.physial_address;
				pHookStructBuffer->pHookPA = UtilPfnFromPa(dtPhysicalAddress.QuadPart);
				//pHookStructBuffer->CallBackFunction = (ULONG_PTR)pDealFunctionProc;
				pHookStructBuffer->pHookVA = (ULONG_PTR)pMemory;

				if (InsertHookStructIntoList(&(guest_context->stack->processor_data->pRing0HookData), pHookStructBuffer))
				{
					// Arm the hook: execution of the original page now faults
					// with an EPT violation; reads/writes still succeed.
					pEptCommonEntry->fields.execute_access = 0;
					pEptCommonEntry->fields.read_access = 1;

					pEptCommonEntry->fields.write_access = 1;
					KdPrint(("[Whisper][Successfully]OriginalPhysical: 0x%I64X ---->  0x%I64X  VirtualAddress:0x%p\n", dtPhyAddress.QuadPart, dtPhysicalAddress.QuadPart, pMemory));
					// Flush cached EPT translations so the new permissions apply.
					UtilInveptGlobal();
					pReturnMemory = pMemory;
				}
				else {
					ExFreePoolWithTag(pHookStructBuffer, kHyperPlatformCommonPoolTag);
					ExFreePoolWithTag(pMemory, kHyperPlatformCommonPoolTag);
					KdPrint(("[Whisper][Successfully]Faile: 0x%I64X ---->  0x%I64X  VirtualAddress:0x%p\n", dtPhyAddress.QuadPart, dtPhysicalAddress.QuadPart, pMemory));
				}
			}
			else {
				ExFreePoolWithTag(pMemory, kHyperPlatformCommonPoolTag);
			}
		}
		else {
			ExFreePoolWithTag(pMemory, kHyperPlatformCommonPoolTag);
		}
	}

	return pReturnMemory;
}

// Installs an EPT-based inline hook on pHookFunctionAddress, routing calls to
// pDealFunctionProc. The containing page is shadow-copied via HookPageByEpt
// (or an existing shadow for the same page frame is reused), then the function
// prologue inside the shadow copy is patched by HookProcFunction. Returns the
// shadow page VA, or 0 on failure.
unsigned char* HookApiNameByEpt(GuestContext *guest_context,PVOID pHookFunctionAddress, ULONG_PTR pDealFunctionProc) {
	unsigned char* pReturnMemory = 0;
	if (pHookFunctionAddress) {
		PHYSICAL_ADDRESS dtPhyAddress = MmGetPhysicalAddress(pHookFunctionAddress);

		unsigned char* pMemory = 0;
		PHOOK_EPT_STRUCT pStruct = 0;
		// Reuse the existing shadow page if this page frame is already hooked;
		// otherwise build a new one.
		if ((pStruct = (GetFunctionHookedStruct(&(guest_context->stack->processor_data->pRing0HookData), UtilPfnFromPa(dtPhyAddress.QuadPart)))) != 0)
		{
			pMemory = (unsigned char *)(pStruct->pHookVA);
			KdPrint(("[Whisper]GetMemoryFromOld: 0x%p\n", pMemory));
		}
		else {
			pMemory = HookPageByEpt(guest_context, dtPhyAddress);
			KdPrint(("[Whisper]Build Meory: 0x%p\n", pMemory));
		}
		if (pMemory != NULL)
		{
			dtPhyAddress = MmGetPhysicalAddress(pHookFunctionAddress);
			PHYSICAL_ADDRESS dtPhysicalAddress = MmGetPhysicalAddress(pMemory);
			// The hook point inside the shadow copy: same page offset as the
			// target function within its own page.
			unsigned char* pHookAddress = pMemory + ((ULONG_PTR)pHookFunctionAddress & 0xFFF);
			unsigned int nCodeLen = 0;
			bool bDecodeOk = true;
			unsigned char* pHookAddresspTemp = pHookAddress;
			KdPrint(("[Whisper]Pointer: %p\n", pHookAddress));
			// Walk whole instructions until enough bytes are covered for the jump
			// stub; an inline hook must never split an instruction.
			while (nCodeLen < Asm_GetJmpCodeLen())
			{
				unsigned int nCurrentLen = LDE(pHookAddresspTemp);
				if (nCurrentLen == 0) {
					// The length disassembler failed; without a valid length this
					// loop would never terminate, so abort the hook instead.
					bDecodeOk = false;
					break;
				}
				for (ULONG i = 0; i < nCurrentLen; i++) {
					KdPrint(("%x ", pHookAddresspTemp[i]));
				}
				KdPrint(("\n"));
				pHookAddresspTemp += nCurrentLen;
				nCodeLen += nCurrentLen;
			}
			if (bDecodeOk && HookProcFunction((ULONG_PTR)pHookAddress, (ULONG_PTR)pHookFunctionAddress, (ULONG_PTR)pDealFunctionProc, nCodeLen, (ULONG_PTR)(guest_context->stack->processor_data)))
			{
				pReturnMemory = pMemory;
				KdPrint(("[Whisper][Successfully]: 0x%I64X ---->  0x%I64X  VirtualAddress:0x%p Len:%d\n", dtPhyAddress.QuadPart, dtPhysicalAddress.QuadPart, pMemory, nCodeLen));
			}
			else {
				KdPrint(("[Whisper][Faile]: 0x%I64X ---->  0x%I64X  VirtualAddress:0x%p Len:%d\n", dtPhyAddress.QuadPart, dtPhysicalAddress.QuadPart, pMemory, nCodeLen));
			}

		}
	}

	return pReturnMemory;
}

// Reverts an EPT shadow-page hook for the physical page containing dtPhy:
// restores the original translation and full RWX access on the EPT PT entry,
// unlinks the hook record, flushes EPT translations, and frees the shadow
// page and its bookkeeping. Returns true if a hook was found and removed.
bool UnHookPageByEpt(GuestContext* guest_context, PHYSICAL_ADDRESS dtPhy) {
	EptCommonEntry* pPtEntry = EptGetEptPtEntry(guest_context->stack->processor_data->ept_data, dtPhy.QuadPart);
	if (pPtEntry == 0) {
		return false;
	}

	const auto nPfn = UtilPfnFromPa(dtPhy.QuadPart);
	PHOOK_EPT_STRUCT pEptStruct = GetFunctionHookedStruct(&(guest_context->stack->processor_data->pRing0HookData), nPfn);
	if (pEptStruct == 0) {
		return false;
	}

	// Put the original page frame back and re-enable all accesses.
	pPtEntry->fields.physial_address = pEptStruct->pOriginalPA;
	pPtEntry->fields.execute_access = 1;
	pPtEntry->fields.read_access = 1;
	pPtEntry->fields.write_access = 1;
	PHOOK_EPT_STRUCT pRemoved = RemoveHookStructByPA(&(guest_context->stack->processor_data->pRing0HookData), nPfn);
	UtilInveptGlobal();
	if (pRemoved) {
		ExFreePoolWithTag((void*)(pRemoved->pHookVA), kHyperPlatformCommonPoolTag);
		ExFreePoolWithTag(pRemoved, kHyperPlatformCommonPoolTag);
	}
	return true;
}

// Handles CPUID exits: runs CPUID on the guest's behalf, then patches the
// results — a private backdoor leaf ('LLLL' returns 'GDoG' in EAX) and the
// HypervisorPresent bit on leaf 1.
void VmmpHandleCpuid(GuestContext *guest_context) {
	ULONG32 cpu_info[4];
	const ULONG32 leaf = (ULONG32)guest_context->gp_regs->ax;
	Asm_cpuid(guest_context->gp_regs->ax, cpu_info);

	switch (leaf) {
	case 'LLLL':
		// Backdoor leaf used to detect this hypervisor.
		cpu_info[0] = 'GDoG';
		break;
	case 1: {
		// Report a hypervisor as present via the HypervisorPresent bit in ECX.
		CpuFeaturesEcx cpu_features = { static_cast<ULONG32>(cpu_info[2]) };
		cpu_features.fields.not_used = true;
		cpu_info[2] = static_cast<int>(cpu_features.all);
		break;
	}
	default:
		break;
	}

	guest_context->gp_regs->ax = cpu_info[0];
	guest_context->gp_regs->bx = cpu_info[1];
	guest_context->gp_regs->cx = cpu_info[2];
	guest_context->gp_regs->dx = cpu_info[3];

	VmmpAdjustGuestInstructionPointer(guest_context);
}

UCHAR VmmpGetGuestCpl();

// Handles exception-or-NMI exits. Hardware #PF and #GP are re-injected into
// the guest with their error codes; #DB either drives the single-step
// user-mode hook machinery (for hooked CPL3 processes) or is re-injected;
// software #BP (INT3) is re-injected as-is. Other events fall through.
void VmmpHandleException(
	GuestContext *guest_context) {
	const VmExitInterruptionInformationField exception = {
		(ULONG32)(Asm_VmRead((ULONG_PTR)VmcsField::kVmExitIntrInfo)) };
	const InterruptionType interruption_type = (InterruptionType)(exception.fields.interruption_type);
	const InterruptionVector vector = (InterruptionVector)(exception.fields.vector);

	if (interruption_type == InterruptionType::kHardwareException) {
		// Hardware exception
		if (vector == InterruptionVector::kPageFaultException) {
			// #PF: re-inject with the original error code, and restore CR2 with
			// the faulting address since the guest's handler will read it.
			const PageFaultErrorCode fault_code = {(ULONG32)(Asm_VmRead((ULONG_PTR)VmcsField::kVmExitIntrErrorCode)) };
			const ULONG_PTR fault_address = Asm_VmRead((ULONG_PTR)VmcsField::kExitQualification);

			VmmpInjectInterruption(interruption_type, vector, true, fault_code.all);
			Asm_WriteCr2(fault_address);

		}
		else if (vector == InterruptionVector::kGeneralProtectionException) {
			// #GP: re-inject with the original error code.
			const ULONG32 error_code = (ULONG32)(Asm_VmRead((ULONG_PTR)VmcsField::kVmExitIntrErrorCode));

			VmmpInjectInterruption(interruption_type, vector, true, error_code);

		}
		else if (vector == InterruptionVector::kDebugException)
		{
			ULONG_PTR nGuestRIP = (ULONG_PTR)(Asm_VmRead((ULONG_PTR)VmcsField::kGuestRip));

			ULONG_PTR nProcessID = (ULONG_PTR)PsGetCurrentProcessId();
			if (VmmpGetGuestCpl() == 3 && FindRing3UserProcIsHooked(&(guest_context->stack->processor_data->pRing3HookData), nProcessID))
			{
				// A single-step trap inside a hooked user-mode process: divert
				// RIP to the registered detour when this address is hooked, and
				// keep RFLAGS.TF set so stepping continues.
				ULONG_PTR nJmpVa = GetRing3HookByPIDAndOriginalVA(&(guest_context->stack->processor_data->pRing3HookData), nGuestRIP, nProcessID);
				if (nJmpVa != 0)
				{
					Asm_vmxWrite((ULONG32)VmcsField::kGuestRip, nJmpVa);
				}
				ULONG_PTR nCurrentFlags = Asm_VmRead((ULONG32)VmcsField::kGuestRflags);

				if ((nCurrentFlags & 0x100) == 0)  // 0x100 = RFLAGS.TF
				{
					nCurrentFlags |= 0x100;
					Asm_vmxWrite((ULONG32)VmcsField::kGuestRflags, nCurrentFlags);
				}
			}
			else {
				// Not a process we care about: hand the #DB back to the guest.
				// (%Iu: nProcessID is pointer-sized; %d truncated it on x64.)
				KdPrint(("[Whisper]Insert HardWard kDebugException PID: %Iu not care!\n", nProcessID));
				VmmpInjectInterruption(InterruptionType::kHardwareException, InterruptionVector::kDebugException, false, 0);
				ULONG_PTR guest_inst_length = Asm_VmRead((ULONG_PTR)VmcsField::kVmExitInstructionLen);

				Asm_vmxWrite((ULONG32)VmcsField::kVmEntryInstructionLen, guest_inst_length);
			}
		}
		else {
		}

	}
	else if (interruption_type == InterruptionType::kSoftwareException) {
		// Software exception
		if (vector == InterruptionVector::kBreakpointException) {
			// #BP: re-inject; software exceptions require the instruction length
			// to be written for VM-entry event delivery.
			VmmpInjectInterruption(interruption_type, vector, false, 0);
			const ULONG_PTR exit_inst_length = Asm_VmRead((ULONG_PTR)VmcsField::kVmExitInstructionLen);
			Asm_vmxWrite((ULONG32)VmcsField::kVmEntryInstructionLen, exit_inst_length);

		}
		else {
		}
	}
	else {
	}
}

// Handles INVD exits by executing the invalidation on the guest's behalf,
// then advancing the guest past the instruction.
void VmmpHandleInvalidateInternalCaches(GuestContext *guest_context) {

	AsmInvalidateInternalCaches();
	VmmpAdjustGuestInstructionPointer(guest_context);
}

// Handles INVLPG exits: invalidates the VPID-tagged translation for the
// address the guest attempted to flush, then advances the guest.
void VmmpHandleInvalidateTlbEntry(
	GuestContext *guest_context) {
	void *invalidate_address = (void *)(Asm_VmRead((ULONG_PTR)VmcsField::kExitQualification));
	// VPID = processor number + 1 (VPID 0 is reserved for the VMM itself).
	const USHORT vpid = (USHORT)(KeGetCurrentProcessorNumberEx(nullptr) + 1);
	UtilInvvpidIndividualAddress(vpid, invalidate_address);
	VmmpAdjustGuestInstructionPointer(guest_context);
}

// Handles RDTSC exits: reads the TSC in the VMM and hands it back to the
// guest split across EDX:EAX, exactly as the instruction would.
void VmmpHandleRdtsc(
	GuestContext *guest_context) {
	const unsigned __int64 tsc = __rdtsc();
	guest_context->gp_regs->dx = (ULONG32)(tsc >> 32);
	guest_context->gp_regs->ax = (ULONG32)tsc;

	VmmpAdjustGuestInstructionPointer(guest_context);
}

// Maps a VMX instruction-information register index (0-15) to the saved
// guest general-purpose register it denotes. Returns nullptr for an index
// outside the supported range.
ULONG_PTR *VmmpSelectRegister(
	ULONG index, GuestContext *guest_context) {
	const auto regs = guest_context->gp_regs;
	// clang-format off
	switch (index) {
	case 0: return &regs->ax;
	case 1: return &regs->cx;
	case 2: return &regs->dx;
	case 3: return &regs->bx;
	case 4: return &regs->sp;
	case 5: return &regs->bp;
	case 6: return &regs->si;
	case 7: return &regs->di;
#if defined(_AMD64_)
	case 8: return &regs->r8;
	case 9: return &regs->r9;
	case 10: return &regs->r10;
	case 11: return &regs->r11;
	case 12: return &regs->r12;
	case 13: return &regs->r13;
	case 14: return &regs->r14;
	case 15: return &regs->r15;
#endif
	default: return nullptr;
	}
	// clang-format on
}

// Handles MOV to/from control register exits. Writes to CR0/CR4 are masked
// with the VMX fixed-0/fixed-1 MSRs before reaching the VMCS; CR3 writes
// flush VPID-tagged TLB entries; CR8 is emulated via the guest context.
// CLTS/LMSW are not emulated here.
void VmmpHandleCrAccess(
	GuestContext *guest_context) {

	const MovCrQualification exit_qualification = {
		Asm_VmRead((ULONG_PTR)VmcsField::kExitQualification) };

	// The GP register that is the source (MOV to CR) or destination (MOV from CR).
	const auto register_used =
		VmmpSelectRegister(exit_qualification.fields.gp_register, guest_context);

	switch (static_cast<MovCrAccessType>(exit_qualification.fields.access_type)) {
	case MovCrAccessType::kMoveToCr:
		switch (exit_qualification.fields.control_register) {
			// CR0 <- Reg
		case 0: {

			if (UtilIsX86Pae()) {
				//UtilLoadPdptes(UtilVmRead(VmcsField::kGuestCr3));
			}
			// Force the bits VMX requires to be fixed in CR0, then expose the
			// guest's intended value through the read shadow.
			const Cr0 cr0_fixed0 = { Asm_VmRead((ULONG_PTR)Msr::kIa32VmxCr0Fixed0) };
			const Cr0 cr0_fixed1 = { Asm_VmRead((ULONG_PTR)Msr::kIa32VmxCr0Fixed1) };
			Cr0 cr0 = { *register_used };
			cr0.all &= cr0_fixed1.all;
			cr0.all |= cr0_fixed0.all;
			Asm_vmxWrite((ULONG32)VmcsField::kGuestCr0, cr0.all);
			Asm_vmxWrite((ULONG32)VmcsField::kCr0ReadShadow, cr0.all);
			break;
		}

			// CR3 <- Reg
		case 3: {

			if (UtilIsX86Pae()) {
				//UtilLoadPdptes(VmmpGetKernelCr3());
			}
			// Under some circumstances MOV to CR3 is not *required* to flush TLB
			// entries, but also NOT prohibited to do so. Therefore, we flush it
			// all time.
			// See: Operations that Invalidate TLBs and Paging-Structure Caches
			UtilInvvpidSingleContextExceptGlobal((USHORT)(KeGetCurrentProcessorNumberEx(nullptr) + 1));

			// The MOV to CR3 does not modify the bit63 of CR3. Emulate this
			// behavior.
			// See: MOV - Move to/from Control Registers
			Asm_vmxWrite((ULONG32)VmcsField::kGuestCr3, (*register_used & ~(1ULL << 63)));
			break;
		}

			// CR4 <- Reg
		case 4: {

			if (UtilIsX86Pae()) {
				//UtilLoadPdptes(UtilVmRead(VmcsField::kGuestCr3));
			}
			UtilInvvpidAllContext();
			// Same fixed-bit treatment as CR0 above.
			const Cr4 cr4_fixed0 = { Asm_VmRead((ULONG_PTR)Msr::kIa32VmxCr4Fixed0) };
			const Cr4 cr4_fixed1 = { Asm_VmRead((ULONG_PTR)Msr::kIa32VmxCr4Fixed1) };
			Cr4 cr4 = { *register_used };
			cr4.all &= cr4_fixed1.all;
			cr4.all |= cr4_fixed0.all;
			Asm_vmxWrite((ULONG32)VmcsField::kGuestCr4, cr4.all);
			Asm_vmxWrite((ULONG32)VmcsField::kCr4ReadShadow, cr4.all);
			break;
		}

			// CR8 <- Reg
		case 8: {

			// CR8 (TPR) is not a VMCS guest-state field; keep it in the context.
			guest_context->cr8 = *register_used;
			break;
		}

		default:
			/* UNREACHABLE */
			break;
		}
		break;

	case MovCrAccessType::kMoveFromCr:
		switch (exit_qualification.fields.control_register) {
			// Reg <- CR3
		case 3: {

			*register_used = Asm_VmRead((ULONG_PTR)VmcsField::kGuestCr3);
			break;
		}

			// Reg <- CR8
		case 8: {

			*register_used = guest_context->cr8;
			break;
		}

		default:
			/* UNREACHABLE */
			break;
		}
		break;

		// Unimplemented
	case MovCrAccessType::kClts:
	case MovCrAccessType::kLmsw:
		break;
	}

	VmmpAdjustGuestInstructionPointer(guest_context);
}

// Returns the guest's current privilege level, taken from the DPL field of
// the SS segment's access rights in the VMCS.
UCHAR VmmpGetGuestCpl() {
	const VmxRegmentDescriptorAccessRight ss_access_right = {
		static_cast<unsigned int>(Asm_VmRead((ULONG32)VmcsField::kGuestSsArBytes)) };
	return ss_access_right.fields.dpl;
}

// Handles MOV to/from DRx exits: injects #GP(0) for CPL>0, emulates DR4/DR5
// aliasing versus CR4.DE (#UD), the DR7.GD general-detect #DB, the
// reserved-bit behavior of DR6/DR7, and the upper-32-bit #GP check in 64-bit
// mode, then performs the actual register transfer.
void VmmpHandleDrAccess(
	GuestContext *guest_context) {
	// Normally, when the privileged instruction is executed at CPL3, #GP(0)
	// occurs instead of VM-exit. However, access to the debug registers is
	// exception. Inject #GP(0) in such case to emulate what the processor
	// normally does. See: Instructions That Cause VM Exits Conditionally
	if (VmmpGetGuestCpl() != 0) {
		VmmpInjectInterruption(InterruptionType::kHardwareException,
			InterruptionVector::kGeneralProtectionException,
			true, 0);
		return;
	}

	const MovDrQualification exit_qualification = { Asm_VmRead((ULONG_PTR)VmcsField::kExitQualification) };
	auto debugl_register = exit_qualification.fields.debugl_register;

	// Access to DR4 and 5 causes #UD when CR4.DE (Debugging Extensions) is set.
	// Otherwise, these registers are aliased to DR6 and 7 respectively.
	// See: Debug Registers DR4 and DR5
	if (debugl_register == 4 || debugl_register == 5) {
		const Cr4 guest_cr4 = { Asm_VmRead((ULONG_PTR)VmcsField::kGuestCr4) };
		if (guest_cr4.fields.de) {
			VmmpInjectInterruption(InterruptionType::kHardwareException,
				InterruptionVector::kInvalidOpcodeException, false,
				0);
			return;
		}
		else if (debugl_register == 4) {
			debugl_register = 6;
		}
		else {
			debugl_register = 7;
		}
	}

	// Access to any of DRs causes #DB when DR7.GD (General Detect Enable) is set.
	// See: Debug Control Register (DR7)
	Dr7 guest_dr7 = { Asm_VmRead((ULONG_PTR)VmcsField::kGuestDr7) };
	if (guest_dr7.fields.gd) {
		Dr6 guest_dr6 = { Asm_readDr6() };
		// Clear DR6.B0-3 since the #DB being injected is not due to match of a
		// condition specified in DR6. The processor is allowed to clear those bits
		// as "Certain debug exceptions may clear bits 0-3."
		guest_dr6.fields.b0 = false;
		guest_dr6.fields.b1 = false;
		guest_dr6.fields.b2 = false;
		guest_dr6.fields.b3 = false;
		// "When such a condition is detected, the BD flag in debug status register
		// DR6 is set prior to generating the exception."
		guest_dr6.fields.bd = true;
		Asm_writeDr6(guest_dr6.all);

		VmmpInjectInterruption(InterruptionType::kHardwareException,
			InterruptionVector::kDebugException, false, 0);

		// While the processor clears the DR7.GD bit on #DB ("The processor clears
		// the GD flag upon entering to the debug exception handler"), it does not
		// change that in the VMCS. Emulate that behavior here. Note that this bit
		// should actually be cleared by intercepting #DB and in the handler instead
		// of here, since the processor clears it on any #DB. We do not do that as
		// we do not intercept #DB as-is.
		guest_dr7.fields.gd = false;
		Asm_vmxWrite((ULONG32)VmcsField::kGuestDr7, guest_dr7.all);
		return;
	}

	const auto register_used =
		VmmpSelectRegister(exit_qualification.fields.gp_register, guest_context);
	const auto direction =
		static_cast<MovDrDirection>(exit_qualification.fields.direction);

	// In 64-bit mode, the upper 32 bits of DR6 and DR7 are reserved and must be
	// written with zeros. Writing 1 to any of the upper 32 bits results in a
	// #GP(0) exception. See: Debug Registers and Intel® 64 Processors
	if (IsX64() && direction == MovDrDirection::kMoveToDr) {
		const auto value64 = static_cast<ULONG64>(*register_used);
		if ((debugl_register == 6 || debugl_register == 7) && (value64 >> 32)) {
			VmmpInjectInterruption(InterruptionType::kHardwareException,
				InterruptionVector::kGeneralProtectionException,
				true, 0);
			return;
		}
	}

	switch (direction) {
	case MovDrDirection::kMoveToDr:
		switch (debugl_register) {
			// clang-format off
		case 0: Asm_writeDr0(*register_used); break;
		case 1: Asm_writeDr1(*register_used); break;
		case 2: Asm_writeDr2(*register_used); break;
		case 3: Asm_writeDr3(*register_used); break;
			// clang-format on
		case 6: {
			// Make sure that we write 0 and 1 into the bits that are stated to be
			// so. The Intel SDM does not appear to state what happens when the
			// processor attempts to write 1 to the always 0 bits, and vice versa,
			// however, observation is that writes to those bits are ignored
			// *as long as it is done on the non-root mode*, and other hypervisors
			// emulate in that way as well.
			Dr6 write_value = { *register_used };
			write_value.fields.reserved1 |= ~write_value.fields.reserved1;
			write_value.fields.reserved2 = 0;
			write_value.fields.reserved3 |= ~write_value.fields.reserved3;
			Asm_writeDr6(write_value.all);
			break;
		}
		case 7: {
			// Similar to the case of CR6, enforce always 1 and 0 behavior.
			Dr7 write_value = { *register_used };
			write_value.fields.reserved1 |= ~write_value.fields.reserved1;
			write_value.fields.reserved2 = 0;
			write_value.fields.reserved3 = 0;
			// DR7 lives in the VMCS guest-state area, not the hardware register.
			Asm_vmxWrite((ULONG32)VmcsField::kGuestDr7, write_value.all);
			break;
		}
		default:
			break;
		}
		break;
	case MovDrDirection::kMoveFromDr:
		// clang-format off
		switch (debugl_register) {
		case 0: *register_used = Asm_readDr0(); break;
		case 1: *register_used = Asm_readDr1(); break;
		case 2: *register_used = Asm_readDr2(); break;
		case 3: *register_used = Asm_readDr3(); break;
		case 6: *register_used = Asm_readDr6(); break;
		case 7: *register_used = Asm_VmRead((ULONG_PTR)VmcsField::kGuestDr7); break;
		default: break;
		}
		// clang-format on
		break;
	}

	VmmpAdjustGuestInstructionPointer(guest_context);
}

// Returns a kernel-mode CR3 for the current process. On x64, the CR3 held in
// the VMCS may be a user-mode one (lowest bit set); in that case — and always
// on x86 — the value is taken from _KPROCESS::DirectoryTableBase instead.
ULONG_PTR VmmpGetKernelCr3() {
	// NOTE(review): 0x28/0x18 are hardcoded _KPROCESS::DirectoryTableBase
	// offsets — confirm they match every targeted OS build.
	static const long kDirectoryTableBaseOffset = IsX64() ? 0x28 : 0x18;

	if (IsX64()) {
		const ULONG_PTR vmcs_cr3 = Asm_VmRead((ULONG_PTR)VmcsField::kGuestCr3);
		// Bit 0 clear: already a kernel-mode CR3, use it directly.
		if ((vmcs_cr3 & 1) == 0) {
			return vmcs_cr3;
		}
	}

	// x86, or a user-mode CR3 on x64: read _KPROCESS::DirectoryTableBase.
	const auto process = reinterpret_cast<PUCHAR>(PsGetCurrentProcess());
	return *reinterpret_cast<PULONG_PTR>(process + kDirectoryTableBaseOffset);
}

// Performs a port I/O operation on behalf of the guest: IN/INS when to_memory
// is true, OUT/OUTS otherwise, with the element width given by size_of_access
// (1, 2 or 4). The guest's kernel CR3 is installed for the duration so that
// `address` (a guest virtual address for string forms) is dereferenceable,
// and the VMM's CR3 is restored afterwards.
void VmmpIoWrapper(bool to_memory, bool is_string,
	SIZE_T size_of_access,
	unsigned short port,
	void *address,
	unsigned long count) {
	NT_ASSERT(size_of_access == 1 || size_of_access == 2 || size_of_access == 4);

	// Update CR3 with that of the guest since below code is going to access
	// memory.
	const auto guest_cr3 = VmmpGetKernelCr3();
	const auto vmm_cr3 = Asm_readCr3();
	Asm_WriteCr3(guest_cr3);

	// clang-format off
	if (to_memory) {
		if (is_string) {
			// INS
			switch (size_of_access) {
			case 1: __inbytestring(port, static_cast<UCHAR*>(address), count); break;
			case 2: __inwordstring(port, static_cast<USHORT*>(address), count); break;
			case 4: __indwordstring(port, static_cast<ULONG*>(address), count); break;
			}
		}
		else {
			// IN
			switch (size_of_access) {
			case 1: *static_cast<UCHAR*>(address) = __inbyte(port); break;
			case 2: *static_cast<USHORT*>(address) = __inword(port); break;
			case 4: *static_cast<ULONG*>(address) = __indword(port); break;
			}
		}
	}
	else {
		if (is_string) {
			// OUTS
			switch (size_of_access) {
			case 1: __outbytestring(port, static_cast<UCHAR*>(address), count); break;
			case 2: __outwordstring(port, static_cast<USHORT*>(address), count); break;
			case 4: __outdwordstring(port, static_cast<ULONG*>(address), count); break;
			}
		}
		else {
			// OUT
			switch (size_of_access) {
			case 1: __outbyte(port, *static_cast<UCHAR*>(address)); break;
			case 2: __outword(port, *static_cast<USHORT*>(address)); break;
			case 4: __outdword(port, *static_cast<ULONG*>(address)); break;
			}
		}
	}
	// clang-format on

	// Back to the VMM's own address space.
	Asm_WriteCr3(vmm_cr3);
}

// Handles I/O instruction exits (IN/OUT/INS/OUTS, optionally REP-prefixed):
// performs the access on the guest's behalf via VmmpIoWrapper, then emulates
// the architectural register side effects (RDI/RSI advance by the transfer
// size, RCX zeroed after REP).
void VmmpHandleIoPort(GuestContext *guest_context) {
	const IoInstQualification exit_qualification = {Asm_VmRead((ULONG_PTR)VmcsField::kExitQualification) };

	const auto is_in = exit_qualification.fields.direction == 1;  // to memory?
	const auto is_string = exit_qualification.fields.string_instruction == 1;
	const auto is_rep = exit_qualification.fields.rep_prefixed == 1;
	const auto port = static_cast<USHORT>(exit_qualification.fields.port_number);
	// String forms use RDI (INS) / RSI (OUTS) as the memory operand;
	// non-string forms transfer through RAX.
	const auto string_address = reinterpret_cast<void *>(
		(is_in) ? guest_context->gp_regs->di : guest_context->gp_regs->si);
	const auto count =
		static_cast<unsigned long>((is_rep) ? guest_context->gp_regs->cx : 1);
	const auto address =
		(is_string) ? string_address : &guest_context->gp_regs->ax;

	// (The unused "suffix" string — a leftover of a removed log line — was
	// dropped from this switch.)
	SIZE_T size_of_access = 0;
	switch (static_cast<IoInstSizeOfAccess>(
		exit_qualification.fields.size_of_access)) {
	case IoInstSizeOfAccess::k1Byte:
		size_of_access = 1;
		break;
	case IoInstSizeOfAccess::k2Byte:
		size_of_access = 2;
		break;
	case IoInstSizeOfAccess::k4Byte:
		size_of_access = 4;
		break;
	}

	VmmpIoWrapper(is_in, is_string, size_of_access, port, address, count);

	// Update RCX, RDI and RSI accordingly. Note that this code can handle only
	// the REP prefix.
	if (is_string) {
		const auto update_count = (is_rep) ? guest_context->gp_regs->cx : 1;
		const auto update_size = update_count * size_of_access;
		const auto update_register =
			(is_in) ? &guest_context->gp_regs->di : &guest_context->gp_regs->si;

		// RFLAGS.DF selects whether string pointers move down or up.
		if (guest_context->flag_reg.fields.df) {
			*update_register = *update_register - update_size;
		}
		else {
			*update_register = *update_register + update_size;
		}

		if (is_rep) {
			guest_context->gp_regs->cx = 0;
		}
	}

	VmmpAdjustGuestInstructionPointer(guest_context);
}

// Handles RDMSR/WRMSR exits (read_access selects which). MSRs that are
// shadowed by VMCS guest-state fields (SYSENTER_CS/ESP/EIP, DEBUGCTL, FS/GS
// base) are serviced from the VMCS; every other MSR is passed straight
// through to the hardware.
void VmmpHandleMsrAccess(
	GuestContext *guest_context, bool read_access) {
	// Apply it for VMCS instead of a real MSR if a specified MSR is either of
	// them.
	const auto msr = static_cast<Msr>(guest_context->gp_regs->cx);

	bool transfer_to_vmcs = false;
	VmcsField vmcs_field = {};
	switch (msr) {
	case Msr::kIa32SysenterCs:
		vmcs_field = VmcsField::kGuestSysenterCs;
		transfer_to_vmcs = true;
		break;
	case Msr::kIa32SysenterEsp:
		vmcs_field = VmcsField::kGuestSysenterEsp;
		transfer_to_vmcs = true;
		break;
	case Msr::kIa32SysenterEip:
		vmcs_field = VmcsField::kGuestSysenterEip;
		transfer_to_vmcs = true;
		break;
	case Msr::kIa32Debugctl:
		vmcs_field = VmcsField::kGuestIa32Debugctl;
		transfer_to_vmcs = true;
		break;
	case Msr::kIa32GsBase:
		vmcs_field = VmcsField::kGuestGsBase;
		transfer_to_vmcs = true;
		break;
	case Msr::kIa32FsBase:
		vmcs_field = VmcsField::kGuestFsBase;
		transfer_to_vmcs = true;
		break;
	default:
		break;
	}

	// Note: the previous 64-bit-vs-natural-width VMCS field distinction was
	// removed because both branches performed the identical operation.
	LARGE_INTEGER msr_value = {};
	if (read_access) {
		if (transfer_to_vmcs) {
			msr_value.QuadPart = Asm_VmRead((ULONG32)vmcs_field);
		}
		else {
			Asm_rdmsr((ULONG32)msr, (ULONG64 *)&msr_value.QuadPart);
		}
		// RDMSR returns the value split across EDX:EAX.
		guest_context->gp_regs->ax = msr_value.LowPart;
		guest_context->gp_regs->dx = msr_value.HighPart;
	}
	else {
		// WRMSR takes the value from EDX:EAX.
		msr_value.LowPart = static_cast<ULONG>(guest_context->gp_regs->ax);
		msr_value.HighPart = static_cast<ULONG>(guest_context->gp_regs->dx);
		if (transfer_to_vmcs) {
			Asm_vmxWrite((ULONG32)vmcs_field, static_cast<ULONG_PTR>(msr_value.QuadPart));
		}
		else {
			Asm_wrmsr((ULONG32)msr, static_cast<ULONG_PTR>(msr_value.QuadPart));
		}
	}

	VmmpAdjustGuestInstructionPointer(guest_context);
}

// Handles RDMSR exits; thin wrapper over VmmpHandleMsrAccess.
void VmmpHandleMsrReadAccess(GuestContext *guest_context) {

	VmmpHandleMsrAccess(guest_context, true);
}

// Handles WRMSR exits; thin wrapper over VmmpHandleMsrAccess.
void VmmpHandleMsrWriteAccess(
	GuestContext *guest_context) {

	VmmpHandleMsrAccess(guest_context, false);
}

// Handles Monitor Trap Flag exits by simply turning MTF back off; any
// per-process single-step bookkeeping is done elsewhere.
void VmmpHandleMonitorTrap(GuestContext* guest_context) {
	UNREFERENCED_PARAMETER(guest_context);
	DisableMTF();
}

// Handles SGDT/SIDT/LGDT/LIDT exits: decodes the memory operand from the
// VMX instruction information (base + scaled index + displacement + segment
// base), switches to the guest's kernel CR3 to touch that memory, and
// emulates the instruction against the VMCS GDTR/IDTR fields.
void VmmpHandleGdtrOrIdtrAccess(
	GuestContext *guest_context) {

	const GdtrOrIdtrInstInformation instruction_info = {
		static_cast<ULONG32>(Asm_VmRead((ULONG_PTR)VmcsField::kVmxInstructionInfo)) };

	// Calculate an address to be used for the instruction
	const auto displacement = Asm_VmRead((ULONG_PTR)VmcsField::kExitQualification);

	// Base
	ULONG_PTR base_value = 0;
	if (!instruction_info.fields.base_register_invalid) {
		const auto register_used = VmmpSelectRegister(
			instruction_info.fields.base_register, guest_context);
		base_value = *register_used;
	}

	// Index
	ULONG_PTR index_value = 0;
	if (!instruction_info.fields.index_register_invalid) {
		const auto register_used = VmmpSelectRegister(
			instruction_info.fields.index_register, guest_context);
		index_value = *register_used;
		switch (static_cast<Scaling>(instruction_info.fields.scalling)) {
		case Scaling::kScaleBy2:
			index_value = index_value * 2;
			break;
		case Scaling::kScaleBy4:
			index_value = index_value * 4;
			break;
		case Scaling::kScaleBy8:
			index_value = index_value * 8;
			break;
		default:
			break;
		}
	}

	// Segment base of the segment override (or default segment) in effect.
	// clang-format off
	ULONG_PTR segment_base = 0;
	switch (instruction_info.fields.segment_register) {
	case 0: segment_base = Asm_VmRead((ULONG_PTR)VmcsField::kGuestEsBase); break;
	case 1: segment_base = Asm_VmRead((ULONG_PTR)VmcsField::kGuestCsBase); break;
	case 2: segment_base = Asm_VmRead((ULONG_PTR)VmcsField::kGuestSsBase); break;
	case 3: segment_base = Asm_VmRead((ULONG_PTR)VmcsField::kGuestDsBase); break;
	case 4: segment_base = Asm_VmRead((ULONG_PTR)VmcsField::kGuestFsBase); break;
	case 5: segment_base = Asm_VmRead((ULONG_PTR)VmcsField::kGuestGsBase); break;
	//default: HYPERPLATFORM_COMMON_DBG_BREAK(); break;
	}
	// clang-format on

	auto operation_address =
		segment_base + base_value + index_value + displacement;
	if (static_cast<AddressSize>(instruction_info.fields.address_size) ==
		AddressSize::k32bit) {
		operation_address &= MAXULONG;
	}

	// Update CR3 with that of the guest since below code is going to access
	// memory.
	const auto guest_cr3 = VmmpGetKernelCr3();
	const auto vmm_cr3 = Asm_readCr3();
	Asm_WriteCr3(guest_cr3);

	// Emulate the instruction
	auto descriptor_table_reg = reinterpret_cast<Idtr *>(operation_address);
	switch (static_cast<GdtrOrIdtrInstructionIdentity>(
		instruction_info.fields.instruction_identity)) {
	case GdtrOrIdtrInstructionIdentity::kSgdt: {
		// On 64bit system, SIDT and SGDT can be executed from a 32bit process
		// where runs with the 32bit operand size. The following checks the
		// current guest's operand size and writes either full 10 bytes (for the
		// 64bit more) or 6 bytes or IDTR or GDTR as the processor does. See:
		// Operand Size and Address Size in 64-Bit Mode See: SGDT-Store Global
		// Descriptor Table Register See: SIDT-Store Interrupt Descriptor Table
		// Register
		const auto gdt_base = Asm_VmRead((ULONG_PTR)VmcsField::kGuestGdtrBase);
		const auto gdt_limit =
			static_cast<unsigned short>(Asm_VmRead((ULONG_PTR)VmcsField::kGuestGdtrLimit));

		// Determine 64- vs 32-bit mode from the L bit of the guest's CS
		// descriptor.
		const SegmentSelector ss = {
			static_cast<USHORT>(Asm_VmRead((ULONG_PTR)VmcsField::kGuestCsSelector)) };
		const auto segment_descriptor = reinterpret_cast<SegmentDescriptor *>(
			gdt_base + ss.fields.index * sizeof(SegmentDescriptor));
		if (segment_descriptor->fields.l) {
			// 64bit
			descriptor_table_reg->base = gdt_base;
			descriptor_table_reg->limit = gdt_limit;
		}
		else {
			// 32bit
			const auto descriptor_table_reg32 =
				reinterpret_cast<Idtr32 *>(descriptor_table_reg);
			descriptor_table_reg32->base = static_cast<ULONG32>(gdt_base);
			descriptor_table_reg32->limit = gdt_limit;
		}
		break;
	}
	case GdtrOrIdtrInstructionIdentity::kSidt: {
		const auto idt_base = Asm_VmRead((ULONG_PTR)VmcsField::kGuestIdtrBase);
		const auto idt_limit =
			static_cast<unsigned short>(Asm_VmRead((ULONG_PTR)VmcsField::kGuestIdtrLimit));

		// Same CS.L-based operand size handling as the SGDT case above.
		const auto gdt_base = Asm_VmRead((ULONG_PTR)VmcsField::kGuestGdtrBase);
		const SegmentSelector ss = {
			static_cast<USHORT>(Asm_VmRead((ULONG_PTR)VmcsField::kGuestCsSelector)) };
		const auto segment_descriptor = reinterpret_cast<SegmentDescriptor *>(
			gdt_base + ss.fields.index * sizeof(SegmentDescriptor));
		if (segment_descriptor->fields.l) {
			// 64bit
			descriptor_table_reg->base = idt_base;
			descriptor_table_reg->limit = idt_limit;
		}
		else {
			// 32bit
			const auto descriptor_table_reg32 =
				reinterpret_cast<Idtr32 *>(descriptor_table_reg);
			descriptor_table_reg32->base = static_cast<ULONG32>(idt_base);
			descriptor_table_reg32->limit = idt_limit;
		}
		break;
	}
	case GdtrOrIdtrInstructionIdentity::kLgdt:
		Asm_vmxWrite((ULONG32)VmcsField::kGuestGdtrBase, descriptor_table_reg->base);
		Asm_vmxWrite((ULONG32)VmcsField::kGuestGdtrLimit, descriptor_table_reg->limit);
		break;
	case GdtrOrIdtrInstructionIdentity::kLidt:
		Asm_vmxWrite((ULONG32)VmcsField::kGuestIdtrBase, descriptor_table_reg->base);
		Asm_vmxWrite((ULONG32)VmcsField::kGuestIdtrLimit, descriptor_table_reg->limit);
		break;
	}

	Asm_WriteCr3(vmm_cr3);
	VmmpAdjustGuestInstructionPointer(guest_context);
}

// Handles VM-exit caused by SLDT, STR, LLDT and LTR. Decodes the instruction
// from the VM-exit instruction information field, computes the effective
// operand address (or register), then emulates the access against the VMCS
// guest-state fields and advances the guest RIP.
void VmmpHandleLdtrOrTrAccess(
	GuestContext *guest_context) {

	const LdtrOrTrInstInformation instruction_info = {
		static_cast<ULONG32>(Asm_VmRead((ULONG_PTR)VmcsField::kVmxInstructionInfo)) };

	// Calculate an address or a register to be used for the instruction
	// For a memory operand, the exit qualification holds the displacement.
	const auto displacement = Asm_VmRead((ULONG_PTR)VmcsField::kExitQualification);

	ULONG_PTR operation_address = 0;
	if (instruction_info.fields.register_access) {
		// Register operand: operation_address points at the saved GP register.
		const auto register_used =
			VmmpSelectRegister(instruction_info.fields.register1, guest_context);
		operation_address = reinterpret_cast<ULONG_PTR>(register_used);
	}
	else {
		// Memory operand: effective address = segment base + base + scaled
		// index + displacement.
		// Base
		ULONG_PTR base_value = 0;
		if (!instruction_info.fields.base_register_invalid) {
			const auto register_used = VmmpSelectRegister(
				instruction_info.fields.base_register, guest_context);
			base_value = *register_used;
		}

		// Index
		ULONG_PTR index_value = 0;
		if (!instruction_info.fields.index_register_invalid) {
			const auto register_used = VmmpSelectRegister(
				instruction_info.fields.index_register, guest_context);
			index_value = *register_used;
			switch (static_cast<Scaling>(instruction_info.fields.scalling)) {
			case Scaling::kScaleBy2:
				index_value = index_value * 2;
				break;
			case Scaling::kScaleBy4:
				index_value = index_value * 4;
				break;
			case Scaling::kScaleBy8:
				index_value = index_value * 8;
				break;
			default:
				break;
			}
		}

		// clang-format off
		ULONG_PTR segment_base = 0;
		switch (instruction_info.fields.segment_register) {
		case 0: segment_base = Asm_VmRead((ULONG_PTR)VmcsField::kGuestEsBase); break;
		case 1: segment_base = Asm_VmRead((ULONG_PTR)VmcsField::kGuestCsBase); break;
		case 2: segment_base = Asm_VmRead((ULONG_PTR)VmcsField::kGuestSsBase); break;
		case 3: segment_base = Asm_VmRead((ULONG_PTR)VmcsField::kGuestDsBase); break;
		case 4: segment_base = Asm_VmRead((ULONG_PTR)VmcsField::kGuestFsBase); break;
		case 5: segment_base = Asm_VmRead((ULONG_PTR)VmcsField::kGuestGsBase); break;
		default: /*HYPERPLATFORM_COMMON_DBG_BREAK();*/ break;
		}
		// clang-format on

		operation_address = segment_base + base_value + index_value + displacement;
		if (static_cast<AddressSize>(instruction_info.fields.address_size) ==
			AddressSize::k32bit) {
			// Truncate to 32 bits for a 32-bit address-size operand.
			operation_address &= MAXULONG;
		}
	}

	// Update CR3 with that of the guest since below code is going to access
	// memory.
	const auto guest_cr3 = VmmpGetKernelCr3();
	const auto vmm_cr3 = Asm_readCr3();
	Asm_WriteCr3(guest_cr3);

	// Emulate the instruction
	auto selector = reinterpret_cast<USHORT *>(operation_address);
	switch (static_cast<LdtrOrTrInstructionIdentity>(
		instruction_info.fields.instruction_identity)) {
	case LdtrOrTrInstructionIdentity::kSldt:
		// SLDT: store the guest's LDTR selector to the operand.
		*selector =
			static_cast<USHORT>(Asm_VmRead((ULONG_PTR)VmcsField::kGuestLdtrSelector));
		break;
	case LdtrOrTrInstructionIdentity::kStr:
		// STR: store the guest's TR selector to the operand.
		*selector = static_cast<USHORT>(Asm_VmRead((ULONG_PTR)VmcsField::kGuestTrSelector));
		break;
	case LdtrOrTrInstructionIdentity::kLldt:
		// LLDT: load the operand into the guest's LDTR selector.
		Asm_vmxWrite((ULONG32)VmcsField::kGuestLdtrSelector, *selector);
		break;
	case LdtrOrTrInstructionIdentity::kLtr: {
		Asm_vmxWrite((ULONG32)VmcsField::kGuestTrSelector, *selector);
		// Set the Busy bit in TSS.
		// See: LTR - Load Task Register
		const SegmentSelector ss = { *selector };
		const auto sd = reinterpret_cast<SegmentDescriptor *>(
			Asm_VmRead((ULONG_PTR)VmcsField::kGuestGdtrBase) +
			ss.fields.index * sizeof(SegmentDescriptor));
		sd->fields.type |= 2;  // Set the Busy bit
		break;
	}
	}

	// Restore the VMM's CR3 before returning to VMM code paths.
	Asm_WriteCr3(vmm_cr3);
	VmmpAdjustGuestInstructionPointer(guest_context);
}

// Indicates a failed VMCALL to the guest by injecting #UD. The guest RIP is
// deliberately NOT advanced so the exception appears to originate from the
// VMCALL instruction itself.
void VmmpIndicateUnsuccessfulVmcall(
	GuestContext *guest_context) {
	UNREFERENCED_PARAMETER(guest_context);

	VmmpInjectInterruption(InterruptionType::kHardwareException,
		InterruptionVector::kInvalidOpcodeException, false, 0);
	// Read the exit instruction length with a ULONG_PTR field encoding, as
	// every other Asm_VmRead call site in this file does (was ULONG32 here).
	const auto exit_inst_length = Asm_VmRead((ULONG_PTR)VmcsField::kVmExitInstructionLen);
	Asm_vmxWrite((ULONG32)VmcsField::kVmEntryInstructionLen, exit_inst_length);
}

// Handles the kTerminateVmm hypercall: fixes up descriptor-table registers,
// hands the per-processor data back to the caller through |context|, and
// prepares registers/flags so the guest resumes cleanly after VMXOFF.
void VmmpHandleVmCallTermination(
	GuestContext *guest_context, void *context) {
	// The processor sets ffff to limits of IDT and GDT when VM-exit occurred.
	// It is not correct value but fine to ignore since vmresume loads correct
	// values from VMCS. But here, we are going to skip vmresume and simply
	// return to where VMCALL is executed. It results in keeping those broken
	// values and ends up with bug check 109, so we should fix them manually.
	// (Casts normalized to ULONG_PTR to match the file's other Asm_VmRead uses.)
	const auto gdt_limit = Asm_VmRead((ULONG_PTR)VmcsField::kGuestGdtrLimit);
	const auto gdt_base = Asm_VmRead((ULONG_PTR)VmcsField::kGuestGdtrBase);
	const auto idt_limit = Asm_VmRead((ULONG_PTR)VmcsField::kGuestIdtrLimit);
	const auto idt_base = Asm_VmRead((ULONG_PTR)VmcsField::kGuestIdtrBase);
	Gdtr gdtr = { static_cast<USHORT>(gdt_limit), gdt_base };
	Idtr idtr = { static_cast<USHORT>(idt_limit), idt_base };
	Asm_LGDT(&gdtr);
	Asm_LIDT(&idtr);

	// Store an address of the management structure to the context parameter
	// so the unloading code can free it after VMXOFF.
	const auto result_ptr = static_cast<ProcessorData **>(context);
	*result_ptr = guest_context->stack->processor_data;
	// (Removed an orphaned no-op expression "(\"Context at %p %p\", ...)" that
	// was left behind when a logging macro was stripped.)

	// Set rip to the next instruction of VMCALL
	const auto exit_instruction_length =
		Asm_VmRead((ULONG_PTR)VmcsField::kVmExitInstructionLen);
	const auto return_address = guest_context->ip + exit_instruction_length;

	// Since the flag register is overwritten after VMXOFF, we should manually
	// indicates that VMCALL was successful by clearing those flags.
	// See: CONVENTIONS
	guest_context->flag_reg.fields.cf = false;
	guest_context->flag_reg.fields.pf = false;
	guest_context->flag_reg.fields.af = false;
	guest_context->flag_reg.fields.zf = false;
	guest_context->flag_reg.fields.sf = false;
	guest_context->flag_reg.fields.of = false;

	// Set registers used after VMXOFF to recover the context. Volatile
	// registers must be used because those changes are reflected to the
	// guest's context after VMXOFF.
	guest_context->gp_regs->cx = return_address;
	guest_context->gp_regs->dx = guest_context->gp_regs->sp;
	guest_context->gp_regs->ax = guest_context->flag_reg.all;
	guest_context->vm_continue = false;
}

// Indicates a successful VMCALL to the guest by clearing all arithmetic
// flags (VMsucceed convention) and advancing RIP past the VMCALL.
void VmmpIndicateSuccessfulVmcall(
	GuestContext *guest_context) {
	// See: CONVENTIONS
	// (Removed duplicated cf/zf assignments that repeated the lines above.)
	guest_context->flag_reg.fields.cf = false;
	guest_context->flag_reg.fields.pf = false;
	guest_context->flag_reg.fields.af = false;
	guest_context->flag_reg.fields.zf = false;
	guest_context->flag_reg.fields.sf = false;
	guest_context->flag_reg.fields.of = false;
	Asm_vmxWrite((ULONG32)VmcsField::kGuestRflags, guest_context->flag_reg.all);
	VmmpAdjustGuestInstructionPointer(guest_context);
}

// Handles VM-exit caused by VMCALL and dispatches by hypercall number.
//
// VMCALL convention for HyperPlatform:
//  ecx: hyper-call number (always 32bit)
//  edx: arbitrary context parameter (pointer size)
// Any unsuccessful VMCALL will inject #UD into a guest. Privileged hypercalls
// are rejected unless the guest executes VMCALL at CPL 0.
void VmmpHandleVmCall(
	GuestContext *guest_context) {
	const auto hypercall_number =
		static_cast<HypercallNumber>(guest_context->gp_regs->cx);
	const auto context = reinterpret_cast<void *>(guest_context->gp_regs->dx);

	if (!UtilIsInBounds(hypercall_number,
		HypercallNumber::kMinimumHypercallNumber,
		HypercallNumber::kMaximumHypercallNumber)) {
		// Unsupported hypercall. Return immediately; previously execution fell
		// through into the switch and the default case injected #UD a second
		// time (double-writing the VM-entry interruption fields).
		VmmpIndicateUnsuccessfulVmcall(guest_context);
		return;
	}

	switch (hypercall_number) {
	case HypercallNumber::kTerminateVmm:
		// Unloading requested. This VMCALL is allowed to execute only from CPL=0
		if (VmmpGetGuestCpl() == 0) {
			VmmpHandleVmCallTermination(guest_context, context);
		}
		else {
			VmmpIndicateUnsuccessfulVmcall(guest_context);
		}
		break;
	case HypercallNumber::kPingVmm:
		// Sample VMCALL handler. (Removed an orphaned no-op expression that
		// was left behind when a logging macro was stripped.)
		VmmpIndicateSuccessfulVmcall(guest_context);
		break;
	case HypercallNumber::kGetSharedProcessorData:
		// Return the shared per-processor data pointer through |context|.
		*static_cast<void **>(context) =
			guest_context->stack->processor_data->shared_data;
		VmmpIndicateSuccessfulVmcall(guest_context);
		break;
	case HypercallNumber::kHookNtOpenProcess:
		// Install the EPT hook on NtOpenProcess (CPL 0 only).
		if (VmmpGetGuestCpl() == 0) {
			if (HookApiNameByEpt(guest_context, pFunctionOpenProcess, (ULONG_PTR)NtOpenProcessDealFunction))
			{
				KdPrint(("[Whisper]Hook NtOpenProcess Successfully!\n"));
			}
			else {
				KdPrint(("[Whisper]Hook NtOpenProcess Failed!\n"));
			}

			VmmpIndicateSuccessfulVmcall(guest_context);
		}
		else {
			VmmpIndicateUnsuccessfulVmcall(guest_context);
		}
		break;
	case HypercallNumber::kUnHookNtOpenProcess:
		// Remove the EPT hook on NtOpenProcess (CPL 0 only).
		// (Removed an unused UNICODE_STRING local that was initialized but
		// never referenced.)
		if (VmmpGetGuestCpl() == 0) {
			if (pFunctionOpenProcess) {
				PHYSICAL_ADDRESS dtPhyAddress = MmGetPhysicalAddress(pFunctionOpenProcess);
				if (UnHookPageByEpt(guest_context, dtPhyAddress))
				{
					KdPrint(("[Whisper]UnHook Successfully!\n"));
				}
				else {
					KdPrint(("[Whisper]UnHook Failed!\n"));
				}
			}
			else {
				KdPrint(("[Whisper]UnHook Failed!\n"));
			}

			VmmpIndicateSuccessfulVmcall(guest_context);
		}
		else {
			VmmpIndicateUnsuccessfulVmcall(guest_context);
		}
		break;
	case HypercallNumber::kHookNtIofCallDriver:
		// Install the EPT hook on IofCallDriver (CPL 0 only).
		if (VmmpGetGuestCpl() == 0) {
			if (HookApiNameByEpt(guest_context, pFunctionIofCallDriver, (ULONG_PTR)IofCallDriverDealFunction))
			{
				KdPrint(("[Whisper]Hook IofCallDriver Successfully!\n"));
			}
			else {
				KdPrint(("[Whisper]Hook IofCallDriver Failed!\n"));
			}
			VmmpIndicateSuccessfulVmcall(guest_context);
		}
		else {
			VmmpIndicateUnsuccessfulVmcall(guest_context);
		}
		break;
	case HypercallNumber::kUnHookIofCallDriver:
		// Remove the EPT hook on IofCallDriver (CPL 0 only).
		if (VmmpGetGuestCpl() == 0) {
			if (pFunctionIofCallDriver) {
				PHYSICAL_ADDRESS dtPhyAddress = MmGetPhysicalAddress(pFunctionIofCallDriver);
				if (UnHookPageByEpt(guest_context, dtPhyAddress))
				{
					KdPrint(("[Whisper]UnHook IofCallDriver Successfully!\n"));
				}
				else {
					KdPrint(("[Whisper]UnHook IofCallDriver Failed!\n"));
				}
			}
			else {
				KdPrint(("[Whisper]UnHook IofCallDriver Failed!\n"));
			}
			VmmpIndicateSuccessfulVmcall(guest_context);
		}
		else {
			VmmpIndicateUnsuccessfulVmcall(guest_context);
		}
		break;
	case HypercallNumber::kAddAntiOpenProcID:
		// Add a PID to this processor's anti-OpenProcess list (CPL 0 only).
		// Duplicates are skipped; new entries are pushed at the list head.
		if (VmmpGetGuestCpl() == 0) {
			ULONG_PTR nPID = ((ULONG_PTR*)(context))[0];
			bool bFind = false;
			KdPrint(("[Whisper]ADD PID: 0x%I64X Entry\n", nPID));
			if (guest_context->stack->processor_data->pFirstHookPid != 0)
			{
				for (struct HookPid* i = guest_context->stack->processor_data->pFirstHookPid; i != 0; i = i->pNext)
				{
					if (nPID == i->nPid)
					{
						KdPrint(("[Whisper]Has Find PID: 0x%I64X\n", nPID));
						bFind = true;
						break;
					}
				}
			}
			if (!bFind)
			{
				struct HookPid* pCurrent = (struct HookPid*)ExAllocatePoolWithTag(NonPagedPool, sizeof(struct HookPid), kHyperPlatformCommonPoolTag);
				if (pCurrent != NULL)
				{
					pCurrent->nPid = nPID;
					pCurrent->pNext = guest_context->stack->processor_data->pFirstHookPid;
					if (guest_context->stack->processor_data->pFirstHookPid != 0) {
						guest_context->stack->processor_data->pFirstHookPid->pLast = pCurrent;
					}
					guest_context->stack->processor_data->pFirstHookPid = pCurrent;
					KdPrint(("[Whisper]Has added PID: 0x%I64X\n", nPID));
				}
			}
			KdPrint(("[Whisper]ADD PID: 0x%I64X End\n", nPID));
			VmmpIndicateSuccessfulVmcall(guest_context);
		}
		else {
			VmmpIndicateUnsuccessfulVmcall(guest_context);
		}
		break;

	case HypercallNumber::kDeleteAntiOpenProcID:
		// Remove a PID from the doubly-linked anti-OpenProcess list and free
		// its node (CPL 0 only).
		if (VmmpGetGuestCpl() == 0) {
			ULONG_PTR nPID = ((ULONG_PTR*)(context))[0];
			KdPrint(("[Whisper]Delete PID: 0x%I64X Entry\n", nPID));
			if (guest_context->stack->processor_data->pFirstHookPid != 0)
			{
				for (struct HookPid* i = guest_context->stack->processor_data->pFirstHookPid; i != 0; i = i->pNext)
				{
					if (nPID == i->nPid)
					{
						if (i->pLast == 0)
						{
							guest_context->stack->processor_data->pFirstHookPid = i->pNext;
						}
						else {
							i->pLast->pNext = i->pNext;
						}

						if (i->pNext != 0)
						{
							i->pNext->pLast = i->pLast;
						}

						ExFreePoolWithTag(i, kHyperPlatformCommonPoolTag);

						KdPrint(("[Whisper]Has Delete PID: 0x%I64X\n", nPID));
						break;
					}
				}
			}

			KdPrint(("[Whisper]Delete PID: 0x%I64X End\n", nPID));
			VmmpIndicateSuccessfulVmcall(guest_context);
		}
		else {
			VmmpIndicateUnsuccessfulVmcall(guest_context);
		}
		break;
	case HypercallNumber::kAddHookUserProcID:
		// Register a ring-3 EPT hook descriptor for a user process (CPL 0
		// only). context layout (ULONG_PTR[]): [1]=original VA, [2]=hook VA,
		// [3]=PID -- inferred from the assignments below; TODO confirm against
		// the user-mode caller.
		if (VmmpGetGuestCpl() == 0) {
			//PsLookupProcessByProcessId()
			//PsLookupThreadByThreadId()
			PETHREAD pThread = 0;
			PEPROCESS pEprocess = 0;
			if (!FindRing3FunctionIsHooked(&(guest_context->stack->processor_data->pRing3HookData), (((ULONG_PTR*)(context))[1]), (((ULONG_PTR*)(context))[3])))
			{
				PHOOK_EPT_STRUCT pHookEptStruct = (PHOOK_EPT_STRUCT)ExAllocatePoolWithTag(NonPagedPool, sizeof(HOOK_EPT_STRUCT), kHyperPlatformCommonPoolTag);
				if (pHookEptStruct)
				{
					pHookEptStruct->bMTFing = false;
					pHookEptStruct->bUsingNow = true;
					pHookEptStruct->nHookRingValue = 3;
					pHookEptStruct->nProcessPid = (((ULONG_PTR*)(context))[3]);
					pHookEptStruct->pHookVA = (((ULONG_PTR*)(context))[2]);
					pHookEptStruct->pOriginalVA = (((ULONG_PTR*)(context))[1]);
					KdPrint(("[whipser]HOOK %p %p", pHookEptStruct->pOriginalVA, pHookEptStruct->pHookVA));
					InsertHookStructIntoList(&(guest_context->stack->processor_data->pRing3HookData), pHookEptStruct);
				}
			}

			//KTRAP_FRAME
			//guest_context->flag_reg.fields.tf
			//pThread.
			/*if (HookUserProcessByEpt(guest_context, (HANDLE)(((ULONG_PTR*)(context))[0]), (ULONG_PTR)(((ULONG_PTR*)(context))[1]), (ULONG_PTR)(((ULONG_PTR*)(context))[2]), (ULONG_PTR)(((ULONG_PTR*)(context))[3])))
			{
				KdPrint(("[Whisper]Hook User Proc Successfully! 0x%p \n", guest_context->stack->processor_data->pRing3HookData));
			}
			else {
				KdPrint(("[Whisper]Hook User Proc Failed!\n"));
			}*/
			VmmpIndicateSuccessfulVmcall(guest_context);
		}
		else {
			VmmpIndicateUnsuccessfulVmcall(guest_context);
		}
		break;
	case HypercallNumber::kDeleteHookUserProcID:
		// Not implemented yet; reported as successful.
		VmmpIndicateSuccessfulVmcall(guest_context);
		break;
	default:
		VmmpIndicateUnsuccessfulVmcall(guest_context);
	}
}

// Handles VM-exits caused by VMX instructions executed by the guest.
// Reports VMfailInvalid (CF=1, all other arithmetic flags cleared) and
// advances the guest RIP past the instruction.
// See: CONVENTIONS
void VmmpHandleVmx(GuestContext *guest_context) {

	auto &flags = guest_context->flag_reg;
	flags.fields.cf = true;  // Error without status
	flags.fields.zf = false;  // Error without status
	flags.fields.pf = false;
	flags.fields.af = false;
	flags.fields.sf = false;
	flags.fields.of = false;
	Asm_vmxWrite((ULONG32)VmcsField::kGuestRflags, flags.all);
	VmmpAdjustGuestInstructionPointer(guest_context);
}

// Handles VM-exit caused by RDTSCP: executes RDTSCP on the host and returns
// the 64-bit TSC in EDX:EAX and IA32_TSC_AUX in ECX, as the guest expects.
void VmmpHandleRdtscp(
	GuestContext *guest_context) {
	unsigned int aux = 0;
	const ULONG64 tsc_value = Asm_Rdtscp(&aux);
	guest_context->gp_regs->ax = static_cast<ULONG32>(tsc_value);        // low 32 bits
	guest_context->gp_regs->dx = static_cast<ULONG32>(tsc_value >> 32);  // high 32 bits
	guest_context->gp_regs->cx = aux;

	VmmpAdjustGuestInstructionPointer(guest_context);
}

// Handles VM-exit caused by XSETBV: ECX selects the extended control
// register and EDX:EAX supplies the 64-bit value to load.
void VmmpHandleXsetbv(
	GuestContext *guest_context) {
	const auto low = static_cast<ULONG>(guest_context->gp_regs->ax);
	const auto high = static_cast<ULONG>(guest_context->gp_regs->dx);
	const ULONG64 new_value = (static_cast<ULONG64>(high) << 32) | low;
	Asm_Xsetbv(static_cast<ULONG>(guest_context->gp_regs->cx), new_value);

	VmmpAdjustGuestInstructionPointer(guest_context);
}

// Handles an EPT violation. If an EPT entry already exists for the faulting
// physical address, restores the missing read/write/execute permission via
// the hook bookkeeping ("Full*" helpers); otherwise builds new EPT tables
// for the address. Invalidates EPT-derived TLB entries in both paths.
void EptHandleEptViolation(ProcessorData *PprocessorData) {
	EptData* ept_data = PprocessorData->ept_data;
	const EptViolationQualification exit_qualification = { Asm_VmRead((ULONG_PTR)VmcsField::kExitQualification) };

	const auto fault_pa = Asm_VmRead((ULONG_PTR)VmcsField::kGuestPhysicalAddress);
	// The guest linear address is only valid when the qualification says so.
	const auto fault_va = reinterpret_cast<void *>(
		exit_qualification.fields.valid_guest_linear_address ? Asm_VmRead((ULONG_PTR)VmcsField::kGuestLinearAddress)
		: 0);

	const auto ept_entry = EptGetEptPtEntry(ept_data, fault_pa);
	if (ept_entry && ept_entry->all) {
		// An entry exists: the violation was caused by a permission mismatch
		// introduced by EPT hooking, not by a missing mapping.
		//HYPERPLATFORM_COMMON_DBG_BREAK();
		/*if (exit_qualification.fields.read_access && exit_qualification.fields.ept_readable == 0)
		{
			ept_entry->fields.read_access = 1;
			KdPrint(("[Wispher]Cpu:%d fix read access for PA: 0x%I64X \n", KeGetCurrentProcessorIndex(), fault_pa));
		} else if (exit_qualification.fields.write_access && exit_qualification.fields.ept_writeable == 0)
		{
			ept_entry->fields.write_access = 1;
			KdPrint(("[Wispher]Cpu:%d fix write access for PA: 0x%I64X \n", KeGetCurrentProcessorIndex(), fault_pa));
		} else if (exit_qualification.fields.execute_access &&exit_qualification.fields.ept_executable == 0)
		{
			ept_entry->fields.execute_access = 1;
			KdPrint(("[Wispher]Cpu:%d fix execute access for PA: 0x%I64X \n", KeGetCurrentProcessorIndex(), fault_pa));
		}*/

		// Note: each denied access kind is checked independently (not
		// else-if), so a single violation can fix several permissions.
		if ((exit_qualification.fields.read_access && exit_qualification.fields.ept_readable == 0) || \
			(exit_qualification.fields.write_access && exit_qualification.fields.ept_writeable == 0) || \
			(exit_qualification.fields.execute_access && exit_qualification.fields.ept_executable == 0))
		{			
			if (exit_qualification.fields.read_access && exit_qualification.fields.ept_readable == 0)
			{
				//The Patch Guard will check this.
				if (FullEptStructAccess(&(PprocessorData->pRing0HookData), UtilPfnFromPa(fault_pa), ept_entry))
				{
					KdPrint(("[Whispher][ReadAccess]Full Ring0 Success!\n"));
				}
				else {
					KdPrint(("[Whispher][ReadAccess]Full Failed !\n"));
				}
				KdPrint(("[Wispher]Cpu:%d fix read access for PA: 0x%I64X \n", KeGetCurrentProcessorIndex(), fault_pa));
			}

			if (exit_qualification.fields.write_access && exit_qualification.fields.ept_writeable == 0)
			{
				//Ept need special deal.
				if (FullEptStructWriteAccess(&(PprocessorData->pRing0HookData), UtilPfnFromPa(fault_pa), ept_entry, fault_pa))
				{
					KdPrint(("[Whispher]Full Ring0 Write Access Success!\n"));
				}
				/*else if (FullEptStructForDirTableWriteAccess(&(PprocessorData->pRing3HookData), UtilPfnFromPa(fault_pa), ept_entry, fault_pa)) {
					KdPrint(("[Whispher]Full Ring3 Write Access Faile!\n"));
				}*/
				else {
					KdPrint(("[Whispher]Full Write Access Faile!\n"));
				}
				//KdPrint(("[Wispher]Cpu:%d fix write access for PA: 0x%I64X \n", KeGetCurrentProcessorIndex(), fault_pa));
			}

			if (exit_qualification.fields.execute_access && exit_qualification.fields.ept_executable == 0)
			{
				if (FullExecuteEptStructAccess(&(PprocessorData->pRing0HookData), UtilPfnFromPa(fault_pa), ept_entry, fault_pa))
				{
					KdPrint(("[Whispher]Full EPT Execute Ring0 Success!\n"));
				}
				else {
					KdPrint(("[Whispher]Full Faile!\n"));
				}
				//KdPrint(("[Wispher]Cpu:%d fix execute access for PA: 0x%I64X \n", KeGetCurrentProcessorIndex(), fault_pa));
			}
		}

		/*if (exit_qualification.fields.read_access && exit_qualification.fields.ept_readable == 0)
		{
			ept_entry->fields.read_access = 1;
			KdPrint(("[Wispher]Cpu:%d fix read access for PA: 0x%I64X \n", KeGetCurrentProcessorIndex(), fault_pa));
		} else if (exit_qualification.fields.write_access && exit_qualification.fields.ept_writeable == 0)
		{
			ept_entry->fields.write_access = 1;
			KdPrint(("[Wispher]Cpu:%d fix write access for PA: 0x%I64X \n", KeGetCurrentProcessorIndex(), fault_pa));
		} else if (exit_qualification.fields.execute_access &&exit_qualification.fields.ept_executable == 0)
		{
			ept_entry->fields.execute_access = 1;
			KdPrint(("[Wispher]Cpu:%d fix execute access for PA: 0x%I64X \n", KeGetCurrentProcessorIndex(), fault_pa));
		}*/

		// Flush EPT mappings so the updated permissions take effect.
		UtilInveptGlobal();
		return;
	}

	// No EPT entry yet: lazily construct the tables for this address.
	//NT_ASSERT(EptpIsDeviceMemory(fault_pa));
	EptpConstructTables(ept_data->ept_pml4, 4, fault_pa, ept_data);

	UtilInveptGlobal();
}

// Handles VM-exit caused by an EPT violation by delegating to the EPT
// handler with this processor's management data.
void VmmpHandleEptViolation(GuestContext *guest_context) {
	EptHandleEptViolation(guest_context->stack->processor_data);
}

// Dispatches a VM-exit to the handler matching its basic exit reason.
// Unhandled or fatal reasons bug-check with a driver-specific code.
void VmmpHandleVmExit(GuestContext *guest_context) {
	const VmExitInformation exit_reason = {
		static_cast<ULONG32>(Asm_VmRead((ULONG_PTR)VmcsField::kVmExitReason)) };

	switch (exit_reason.fields.reason) {
	case VmxExitReason::kExceptionOrNmi:
		VmmpHandleException(guest_context);
		break;
	case VmxExitReason::kTripleFault:
		// Unrecoverable; KeBugCheck does not return.
		KeBugCheck(0x7000002);
		//VmmpHandleTripleFault(guest_context);
		/* UNREACHABLE */
	case VmxExitReason::kCpuid:
		VmmpHandleCpuid(guest_context);
		break;
	case VmxExitReason::kInvd:
		VmmpHandleInvalidateInternalCaches(guest_context);
		break;
	case VmxExitReason::kInvlpg:
		VmmpHandleInvalidateTlbEntry(guest_context);
		break;
	case VmxExitReason::kRdtsc:
		VmmpHandleRdtsc(guest_context);
		break;
	case VmxExitReason::kCrAccess:
		VmmpHandleCrAccess(guest_context);
		break;
	case VmxExitReason::kDrAccess:
		VmmpHandleDrAccess(guest_context);
		//KeBugCheck(0x7000003);
		break;
	case VmxExitReason::kIoInstruction:
		VmmpHandleIoPort(guest_context);
		break;
	case VmxExitReason::kMsrRead:
		VmmpHandleMsrReadAccess(guest_context);
		break;
	case VmxExitReason::kMsrWrite:
		VmmpHandleMsrWriteAccess(guest_context);
		break;
	case VmxExitReason::kMonitorTrapFlag:
		VmmpHandleMonitorTrap(guest_context);
		break;
		//KeBugCheck(0x70000007);
		/* UNREACHABLE */
	case VmxExitReason::kGdtrOrIdtrAccess:
		VmmpHandleGdtrOrIdtrAccess(guest_context);
		break;
	case VmxExitReason::kLdtrOrTrAccess:
		//KeBugCheck(0x7000004);
		VmmpHandleLdtrOrTrAccess(guest_context);
		break;
	case VmxExitReason::kEptViolation:
		//KeBugCheck(0x7000005);
		VmmpHandleEptViolation(guest_context);
		break;
	case VmxExitReason::kEptMisconfig:
		// Unrecoverable; KeBugCheck does not return.
		KeBugCheck(0x7000006);
		//VmmpHandleEptMisconfig(guest_context);
		/* UNREACHABLE */
	case VmxExitReason::kVmcall:
		VmmpHandleVmCall(guest_context);
		break;
	// All VMX instructions executed by the guest get VMfailInvalid.
	case VmxExitReason::kVmclear:
	case VmxExitReason::kVmlaunch:
	case VmxExitReason::kVmptrld:
	case VmxExitReason::kVmptrst:
	case VmxExitReason::kVmread:
	case VmxExitReason::kVmresume:
	case VmxExitReason::kVmwrite:
	case VmxExitReason::kVmoff:
	case VmxExitReason::kVmon:
	case VmxExitReason::kInvept:
	case VmxExitReason::kInvvpid:
		VmmpHandleVmx(guest_context);
		break;
	case VmxExitReason::kRdtscp:
		VmmpHandleRdtscp(guest_context);
		break;
	case VmxExitReason::kXsetbv:
		VmmpHandleXsetbv(guest_context);
		break;
	default:
		// Unexpected exit reason: bug-check with guest RIP/RSP for diagnosis.
		KeBugCheckEx(0x7000010, (ULONG_PTR)(guest_context->ip), (ULONG_PTR)(guest_context->gp_regs->sp), (ULONG_PTR)(exit_reason.fields.reason), 0);
		//VmmpHandleUnexpectedExit(guest_context);
		/* UNREACHABLE */
	}
}

// C-level VM-exit entry point, called from the assembly exit stub with the
// saved guest register stack. Returns true to VMRESUME, or false to tell the
// stub to execute VMXOFF and resume the guest outside VMX operation.
extern "C" bool __stdcall VmmExitHandler(VmmInitialStack *stack) {
	
	KIRQL guest_irql = KeGetCurrentIrql();

	// Save the guest's CR8 (TPR) and run the handler at DISPATCH_LEVEL to
	// avoid being preempted; both are restored before returning.
	ULONG_PTR guest_cr8 = Asm_readCr8();
	if (guest_irql  < DISPATCH_LEVEL) {
		KeRaiseIrqlToDpcLevel();
	}
	// Capture guest RFLAGS/RIP from the VMCS into the context handlers use.
	GuestContext guest_context = { stack,
		Asm_VmRead((ULONG_PTR)VmcsField::kGuestRflags),
		Asm_VmRead((ULONG_PTR)VmcsField::kGuestRip),
		guest_cr8,
		guest_irql,
		true };
	guest_context.gp_regs->sp = (ULONG_PTR)Asm_VmRead((ULONG_PTR)VmcsField::kGuestRsp);

	// Fill the trap frame so debuggers can walk the guest stack.
	stack->trap_frame.sp = guest_context.gp_regs->sp;
	stack->trap_frame.ip = guest_context.ip + Asm_VmRead((ULONG_PTR)VmcsField::kVmExitInstructionLen);

	VmmpHandleVmExit(&guest_context);

	if (!guest_context.vm_continue)
	{
		// About to VMXOFF: flush all EPT- and VPID-derived TLB entries.
		UtilInveptGlobal();
		//UtilInvvipAllContext();
		UtilInvvpidAllContext();
		/*pStack->gp_regs.rdx = pStack->gp_regs.rsp;
		pStack->gp_regs.rcx = Asm_VmRead((ULONG_PTR)VmcsField::kGuestRip);*/
	}

	// Restore the IRQL the guest was running at, if we raised it above.
	if (guest_context.irql < DISPATCH_LEVEL)
	{
		KeLowerIrql(guest_context.irql);
	}

	//KdPrint(("[Whisper]I'm Here!\n"));

	// Restore the guest's CR8 last, after all handler work is done.
	Asm_WriteCr8(guest_context.cr8);

	return guest_context.vm_continue;
}