// processor.h

/*
   Copyright 2006 Corey Tabaka

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

#ifndef __KERNEL_PROCESSOR_H__
#define __KERNEL_PROCESSOR_H__

#include <ktypes.h>
#include <platform/cpufeature.h>

/*
 * Intel CPU features in CR0
 */
#define X86_CR0_PE		0x00000001	/* protected mode enable */
#define X86_CR0_MP		0x00000002	/* monitor coprocessor (WAIT/FWAIT) */
#define X86_CR0_EM		0x00000004	/* emulate FPU (no x87 present) */
#define X86_CR0_TS		0x00000008	/* task switched (lazy FPU save) */
#define X86_CR0_WP		0x00010000	/* write protect in supervisor mode */
#define X86_CR0_NW		0x20000000	/* not write through */
#define X86_CR0_CD		0x40000000	/* cache disable */
#define X86_CR0_PG		0x80000000	/* paging enable */

/*
 * Set the bits given in `mask` in CR0 (read-modify-write via EAX).
 *
 * Marked __volatile__ with a "memory" clobber: CR0 writes can change
 * paging/caching behavior (PG, CD, NW, WP), so the compiler must not
 * elide the asm or carry cached memory values across it.
 */
static inline void setInCR0 (uint32 mask) {
	__asm__ __volatile__("movl %%cr0,%%eax\n\t"
		"orl %0,%%eax\n\t"
		"movl %%eax,%%cr0\n"
		: : "irg" (mask)
		: "ax", "memory");
}

/*
 * Clear the bits given in `mask` in CR0 (read-modify-write via EAX).
 *
 * Marked __volatile__ with a "memory" clobber: CR0 writes can change
 * paging/caching behavior (PG, CD, NW, WP), so the compiler must not
 * elide the asm or carry cached memory values across it.
 */
static inline void clearInCR0 (uint32 mask) {
	__asm__ __volatile__("movl %%cr0,%%eax\n\t"
		"andl %0,%%eax\n\t"
		"movl %%eax,%%cr0\n"
		: : "irg" (~mask)
		: "ax", "memory");
}

/*
 * Intel CPU features in CR4
 */
#define X86_CR4_VME		0x0001	/* enable vm86 extensions */
#define X86_CR4_PVI		0x0002	/* virtual interrupts flag enable */
#define X86_CR4_TSD		0x0004	/* disable time stamp at ipl 3 */
#define X86_CR4_DE		0x0008	/* enable debugging extensions */
#define X86_CR4_PSE		0x0010	/* enable page size extensions */
#define X86_CR4_PAE		0x0020	/* enable physical address extensions */
#define X86_CR4_MCE		0x0040	/* Machine check enable */
#define X86_CR4_PGE		0x0080	/* enable global pages */
#define X86_CR4_PCE		0x0100	/* enable performance counters at ipl 3 */
#define X86_CR4_OSFXSR		0x0200	/* enable fast FPU save and restore */
#define X86_CR4_OSXMMEXCPT	0x0400	/* enable unmasked SSE exceptions */

/*
 * Save the cr4 feature set we're using (ie
 * Pentium 4MB enable and PPro Global page
 * enable), so that any CPU's that boot up
 * after us can get the correct flags.
 */
// NOTE(review): nothing in this header updates mmu_cr4_features (the
// setInCR4/clearInCR4 helpers touch only the register itself) — confirm
// the defining .c file keeps it in sync with the live CR4 value.
extern unsigned long mmu_cr4_features;

/*
 * Set the bits given in `mask` in CR4 (read-modify-write via EAX).
 *
 * NOTE(review): mmu_cr4_features (declared above) is not updated here —
 * confirm callers keep it in sync for secondary-CPU bring-up.
 *
 * Marked __volatile__ with a "memory" clobber: CR4 changes (PAE, PSE,
 * PGE) affect address translation, so the compiler must not elide the
 * asm or reorder memory accesses around it.
 */
static inline void setInCR4 (uint32 mask) {
	__asm__ __volatile__("movl %%cr4,%%eax\n\t"
		"orl %0,%%eax\n\t"
		"movl %%eax,%%cr4\n"
		: : "irg" (mask)
		: "ax", "memory");
}

/*
 * Clear the bits given in `mask` in CR4 (read-modify-write via EAX).
 *
 * NOTE(review): mmu_cr4_features (declared above) is not updated here —
 * confirm callers keep it in sync for secondary-CPU bring-up.
 *
 * Marked __volatile__ with a "memory" clobber: CR4 changes (PAE, PSE,
 * PGE) affect address translation, so the compiler must not elide the
 * asm or reorder memory accesses around it.
 */
static inline void clearInCR4 (uint32 mask) {
	__asm__ __volatile__("movl %%cr4,%%eax\n\t"
		"andl %0,%%eax\n\t"
		"movl %%eax,%%cr4\n"
		: : "irg" (~mask)
		: "ax", "memory");
}

/*
 * EFLAGS register bit masks
 */
#define EFLAGS_CF	0x00000001 // Carry Flag
#define EFLAGS_PF	0x00000004 // Parity Flag
#define EFLAGS_AF	0x00000010 // Auxiliary carry Flag
#define EFLAGS_ZF	0x00000040 // Zero Flag
#define EFLAGS_SF	0x00000080 // Sign Flag
#define EFLAGS_TF	0x00000100 // Trap Flag
#define EFLAGS_IF	0x00000200 // Interrupt Flag
#define EFLAGS_DF	0x00000400 // Direction Flag
#define EFLAGS_OF	0x00000800 // Overflow Flag
#define EFLAGS_IOPL	0x00003000 // IOPL mask (2-bit I/O privilege level)
#define EFLAGS_NT	0x00004000 // Nested Task
#define EFLAGS_RF	0x00010000 // Resume Flag
#define EFLAGS_VM	0x00020000 // Virtual Mode
#define EFLAGS_AC	0x00040000 // Alignment Check
#define EFLAGS_VIF	0x00080000 // Virtual Interrupt Flag
#define EFLAGS_VIP	0x00100000 // Virtual Interrupt Pending
#define EFLAGS_ID	0x00200000 // CPUID detection flag

/*
 * Intel CPU features in CR4
 * NOTE(review): these short-name constants duplicate the X86_CR4_*
 * values defined earlier in this header — consider consolidating on
 * one set of names.
 */
#define CR4_VME		0x0001	// enable vm86 extensions
#define CR4_PVI		0x0002	// virtual interrupts flag enable
#define CR4_TSD		0x0004	// disable time stamp at ipl 3
#define CR4_DE		0x0008	// enable debugging extensions
#define CR4_PSE		0x0010	// enable page size extensions
#define CR4_PAE		0x0020	// enable physical address extensions
#define CR4_MCE		0x0040	// Machine check enable
#define CR4_PGE		0x0080	// enable global pages
#define CR4_PCE		0x0100	// enable performance counters at ipl 3
#define CR4_OSFXSR		0x0200	// enable fast FPU save and restore
#define CR4_OSXMMEXCPT	0x0400	// enable unmasked SSE exceptions

/*
 * Generic CPUID function.
 *
 * Executes CPUID for leaf `op` and stores the resulting registers in
 * *eax/*ebx/*ecx/*edx. ECX is zeroed before the instruction so that
 * leaves with sub-leaves (e.g. 4, 7, 0xB) deterministically return
 * sub-leaf 0 instead of data selected by whatever happened to be in ECX.
 */
static inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx)
{
	__asm__("cpuid"
		: "=a" (*eax),
		  "=b" (*ebx),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (op), "2" (0));
}

/*
 * CPUID functions returning a single datum
 */
/* Return EAX from CPUID leaf `op` (sub-leaf 0). */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ecx;

	/* ECX is an input (zeroed, to pin sub-leaf 0 on leaves that use
	 * it) and an output (CPUID always overwrites it). */
	__asm__("cpuid"
		: "=a" (eax), "=c" (ecx)
		: "0" (op), "1" (0)
		: "ebx", "edx");
	return eax;
}
/* Return EBX from CPUID leaf `op` (sub-leaf 0). */
static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx;

	/* Zero ECX on input so sub-leaf-dependent leaves return sub-leaf 0. */
	__asm__("cpuid"
		: "=a" (eax), "=b" (ebx), "=c" (ecx)
		: "0" (op), "2" (0)
		: "edx");
	return ebx;
}
/* Return ECX from CPUID leaf `op` (sub-leaf 0). */
static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ecx;

	/* ECX is the sub-leaf selector on some leaves; zero it on input so
	 * the result is deterministic rather than stale-register-dependent. */
	__asm__("cpuid"
		: "=a" (eax), "=c" (ecx)
		: "0" (op), "1" (0)
		: "ebx", "edx");
	return ecx;
}
/* Return EDX from CPUID leaf `op` (sub-leaf 0). */
static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ecx, edx;

	/* Zero ECX on input so sub-leaf-dependent leaves return sub-leaf 0. */
	__asm__("cpuid"
		: "=a" (eax), "=c" (ecx), "=d" (edx)
		: "0" (op), "1" (0)
		: "ebx");
	return edx;
}

/* Per-CPU identification and capability data gathered from CPUID (x86). */
typedef struct {
	uint8 family;		// CPU family number
	uint8 vendor;		// presumably an X86_VENDOR_* value below — verify against the code that fills this
	uint8 model;		// CPU model number
	uint8 mask;		// NOTE(review): likely the stepping/revision — confirm
	
	bool wpWorksOk;		// it doesn't on 386
	bool hltWorksOk;	// problems on some 486Dx4 and old 386
	bool hardMath;		// hardware FPU present
	bool rfu;		// reserved for future use
    
    int cpuidLevel;	// max supported CPUID level, -1=no CPUID
	
	uint32 capability[NUM_CAPS];	// feature flag words (NUM_CAPS from platform/cpufeature.h)
	
	char vendorId[16];	// vendor identification string
	char modelId[64];	// model/brand string
	int cacheSize;		// Kb, valid for CPUS with support
	int cacheAlignment;	// bytes
	
	bool fdivBug;		// Pentium FDIV erratum present
	bool f00fBug;		// Pentium F00F lockup erratum present
	bool comaBug;		// Cyrix "coma" erratum present
	
	uint8 numCores;		// number of cores on this package
} cpu_info_x86_t;

/* Maps a (vendor, family) pair to its table of model names. */
typedef struct {
	int vendor;		// presumably an X86_VENDOR_* value — verify against users
	int family;		// CPU family this name table applies to
	char *modelNames[16];	// one name slot per model number
} cpu_model_info_t;

/* Describes one CPU vendor: display name, CPUID ident string(s), models. */
typedef struct {
	char *vendor;		// human-readable vendor name

	// some have two possibilities for cpuid string
	char *ident[2];	

	cpu_model_info_t models[4];	// per-family model name tables (up to 4)
} cpu_device_t;

/*
 * CPU vendor identifiers.
 */
#define X86_VENDOR_INTEL 0
#define X86_VENDOR_CYRIX 1
#define X86_VENDOR_AMD 2
#define X86_VENDOR_UMC 3
#define X86_VENDOR_NEXGEN 4
#define X86_VENDOR_CENTAUR 5
#define X86_VENDOR_RISE 6
#define X86_VENDOR_TRANSMETA 7
#define X86_VENDOR_NSC 8
#define X86_VENDOR_NUM 9	/* count of known vendors above */
#define X86_VENDOR_UNKNOWN 0xff

#endif
