// Stall result of an MMU access: either the access proceeds
// (MMU_CONTINUE) or it is stalled at a specific level of the
// memory hierarchy.
typedef sim_error	mmu_stall;
#define MMU_CONTINUE		0	// no stall, access proceeds
#define MMU_STALL_L1		1	// stalled on an L1 cache
#define MMU_STALL_L2		2	// stalled on the L2 cache
#define MMU_STALL_L3		3	// stalled on the L3 cache
#define MMU_STALL_MEM		4	// stalled on main memory
#define MMU_STALL_BUSY		5	// stalled: resource busy
#define MMU_STALL_DEVICE	6	// stalled on a memory-mapped device

// Exception context recorded when an MMU access faults.
// NOTE(review): field names mirror the MIPS CP0 ExcCode/BadVAddr/Context
// registers -- presumed from naming; confirm against the exception
// delivery code.
typedef struct mmu_exception	mmu_except;
struct mmu_exception {
	sim_size	ExcCode;	// exception cause code
	sim_size	BadVAddr;	// faulting virtual address
	sim_size	Context;	// context value at the time of the fault
};

// Originator of a memory operation (kept for statistical purposes,
// see mmu_operation.origin).
// NOTE(review): the MMU_OP_ prefix is shared by mmu_src, mmu_dest and
// mmu_opcode constants even though they are three distinct namespaces
// with overlapping values -- easy to mix up at call sites.
typedef int mmu_src;
#define MMU_OP_INST		1	// instruction fetch
#define MMU_OP_DATA		2	// data access
#define MMU_OP_DMA		3	// DMA transfer

// Destination of a memory operation (see mmu_operation.dest).
typedef int mmu_dest;
#define MMU_OP_ROM		1	// Read ROM
#define MMU_OP_RAM		2	// Read/Write RAM
#define MMU_OP_DEV		3	// Read/Write Device
#define MMU_OP_L1I		4	// Read/Write/Modify cache
#define MMU_OP_L1D		5	// Read/Write/Modify cache
#define MMU_OP_L2		6	// Read/Write/Modify cache
#define MMU_OP_L3		7	// Read/Write/Modify cache

// Operation code carried by an mmu_operation (see mmu_operation.code).
// The INDEX_*/HIT_*/FILL/FETCH entries mirror the MIPS CACHE
// instruction sub-operations -- NOTE(review): presumed from naming;
// confirm against the cache-op dispatch code.
typedef int mmu_opcode;
#define MMU_OP_READ					1
#define MMU_OP_WRITE				2
#define MMU_OP_INDEX_INVALIDATE		3
#define MMU_OP_INDEX_LOAD_TAG		4
#define MMU_OP_INDEX_STORE_TAG		5
#define MMU_OP_HIT_INVALIDATE		6
#define MMU_OP_HIT_WB_INVALIDATE	7
#define MMU_OP_ADDR_FILL			8
#define MMU_OP_HIT_WB				9
#define MMU_OP_FETCH_LOCK			10

// Bit flags modifying how an mmu_operation is processed
// (see mmu_operation.flags). Values are single bits and may be OR'd.
typedef int mmu_op_flags;
#define F_MMU_NOWAIT		0x01	// do not block on completion
#define F_MMU_NOPRIVS		0x02	// skip privilege checks
#define F_MMU_DMA			0x04	// operation issued by a DMA client
#define F_MMU_CPU			0x08	// operation issued by the CPU
#define F_MMU_INTERNAL		0x10	// MMU-internal operation
#define F_MMU_NOCOHERENT	0x20	// force non-coherent handling
#define F_MMU_COHERENT		0x40	// force coherent handling
#define F_MMU_NOCACHE		0x80	// bypass the caches

// Short aliases / forward declarations for the structs defined below.
typedef struct mmu_operation	mmu_op;
typedef struct mmu_device 		mmu_dev;
typedef struct mmu_queue		mmu_q;
typedef struct mmu_cache		mmu_cache;
typedef struct cache_statistics	cache_stats;
typedef struct mmu_statistics	mmu_stats;

// A single in-flight memory operation. Operations are linked into
// per-resource queues (mmu_queue) via the `next` pointer and may be
// chained on a predecessor via `ref`.
struct mmu_operation {
	sim_size		unit;		// Each co-processor and device has its own unit number
								// the CPU is always 0, internal messages are ((sim_cycle)-1)
								// NOTE(review): `unit` is sim_size, so the internal-message
								// sentinel is presumably (sim_size)-1 -- confirm.
	sim_cycle		inst;		// ID of operation (caller assigned, must be unique)
								// this is generally a cycle number
	sim_cycle		delay;		// Cycles until completion
	sim_addr		address;	// Address, virtual or physical depending on queue
	sim_size		*data;		// Data read in/ write out
	sim_size		length;		// Size of data

	sim_cycle		start;		// The cycle the original operation started at
								// even if this is the second or third incarnation
								// of the same request
	
	uint8_t			done;		// OP is ready to be advanced to next stage

	mmu_src			origin;		// INST/DATA/DMA (statistical purposes)
	mmu_dest		dest;		// Destination of the action
	mmu_dev			*dev;		// If dest is device set it here
	mmu_opcode		code;		// information about the operation
	mmu_op_flags	flags;		// flags (F_MMU_* bits)
	mmu_op			*ref;		// OP to block on
	mmu_op			*next;		// Next operation in queue
};

// Singly-linked FIFO of mmu_operations, manipulated by
// mmu_pushq()/mmu_popq() below.
// NOTE(review): `count` is not updated by mmu_pushq/mmu_popq;
// presumably maintained by callers -- confirm.
struct mmu_queue {
	sim_size	count;	// number of queued operations
	mmu_op		*head;	// oldest operation (next to pop), NULL if empty
	mmu_op		*tail;	// newest operation, NULL if empty
};

// Append `op` to the tail of queue `q`.
//
// FIX: the original only linked `op` behind the old tail but never
// advanced q->tail on a non-empty queue, so every later push clobbered
// the same stale node's `next` pointer and the queue silently dropped
// operations. The tail must always end up pointing at `op`.
// NOTE(review): q->count is deliberately left untouched, matching the
// original behavior -- confirm it is maintained by callers.
static inline void mmu_pushq(mmu_q *q, mmu_op *op) {
	op->next = NULL;
	if(q->tail)
		q->tail->next = op;
	else
		q->head = op;
	q->tail = op;
}

// Remove and return the head operation of queue `q`,
// or NULL if the queue is empty.
//
// FIX: the original reset q->tail only when the queue was ALREADY
// empty (head == NULL), which is a no-op; after popping the last
// element it left q->tail dangling at the removed node, so the next
// mmu_pushq would write through a stale pointer. The tail must be
// cleared exactly when the pop empties the queue.
static inline mmu_op *mmu_popq(mmu_q *q) {
	mmu_op *op = q->head;
	if(op) {
		q->head = op->next;
		if(q->head == NULL)
			q->tail = NULL;
	}
	return op;
}

// Callbacks implementing a memory-mapped device's data transfer.
// Both take a physical address, a transfer length and a data buffer
// (in for writes, out for reads) and return a sim_error status.
typedef sim_error (*device_read)(sim_addr paddr, uint8_t len, sim_size *data);
typedef sim_error (*device_write)(sim_addr paddr, uint8_t len, sim_size *data);

// A memory-mapped device: the physical address window it occupies,
// its access latencies, its I/O callbacks and its pending-operation
// queue. Devices form a singly-linked list via `next`
// (see mmu_context.devs).
struct mmu_device {
	sim_addr		start;			// first physical address of the window
	sim_addr		end;			// last physical address of the window

	sim_cycle		read_delay;		// cycles per read access
	sim_cycle		write_delay;	// cycles per write access
	
	device_read		read;			// read callback
	device_write	write;			// write callback

	mmu_q			q;				// operations pending on this device

	mmu_dev			*next;			// next device in the MMU's list
};

// Types of address mapping (see mmu_context.mtype)
typedef int map_type;
#define MMU_MAP_DEFAULT	0	// use the simulator's default mapping
#define MMU_MAP_NONE	1	// no translation, virtual == physical
#define MMU_MAP_FIXED	2	// fixed segment-based mapping only
#define MMU_MAP_TLB		3	// TLB-based translation

// Types of caching (see mmu_context.ctype)
typedef int cache_type;
#define MMU_CACHE_DEFAULT	0	// use the simulator's default caching
#define MMU_CACHE_BARE		1	// no caches
#define MMU_CACHE_COHERENT	2	// caches kept coherent
#define MMU_CACHE_OPAQUE	3	// caches not kept coherent

// Fixed address mappings (MIPS virtual address space segments)
#ifdef MIPS64
	// During an ECC exception addresses in this region
	// become an unmapped uncached window on phys memory
	#define MMU_RSTART_ECC		0x0000000000000000ULL
	#define MMU_REND_ECC		0x000000007FFFFFFFULL

	// KUSEG is cached, mapped, user accessable memory
	#define MMU_RSTART_KUSEG	0x0000000000000000ULL
	#define MMU_REND_KUSEG		0x3FFFFFFFFFFFFFFFULL

	// XSSEG is cached, mapped, supervisor accessable memory
	// available only in 64-bit mode
	#define MMU_RSTART_XSSEG	0x4000000000000000ULL
	#define MMU_REND_XSSEG		0x8FFFFFFFFFFFFFFFULL

	// CWIN is a cached window on physical memory available
	// only in 64-bit mode
	#define MMU_RSTART_CWIN		0x9000000000000000ULL
	#define MMU_REND_CWIN		0x97FFFFFFFFFFFFFFULL

	// UWIN in an uncached window on physical memory available
	// only in 64-bit mode
	#define MMU_RSTART_UWIN		0x9800000000000000ULL
	#define MMU_REND_UWIN		0xBFFFFFFFFFFFFFFFULL

	// XKSEG is cached, mapped, kernel accessable memory
	// available only in 64-bit mode
	#define MMU_RSTART_XKSEG	0xC000000000000000ULL
	#define MMU_REND_XKSEG		0xFFFFFFFF7FFFFFFFULL

	// KSEG0 is a cached window on physical memory
	#define MMU_RSTART_KSEG0	0xFFFFFFFF80000000ULL
	#define MMU_REND_KSEG0		0xFFFFFFFF9FFFFFFFULL

	// KSEG1 is an uncached window on physical memory
	#define MMU_RSTART_KSEG1	0xFFFFFFFFA0000000ULL
	#define MMU_REND_KSEG1		0xFFFFFFFFBFFFFFFFULL

	// SSEG0 is cached, mapped, supervisor accessable memory
	#define MMU_RSTART_SSEG0	0xFFFFFFFFC0000000ULL
	#define MMU_REND_SSEG0		0xFFFFFFFFDFFFFFFFULL

	// KSEG2 is cached, mapped, kernel accessable memory
	#define MMU_RSTART_KSEG2	0xFFFFFFFFE0000000ULL
	#define MMU_REND_KSEG2		0xFFFFFFFFFFFFFFFFULL
#else /* !MIPS64 */
	// These are defined the same above but for a 32 bit
	// machine
	#define MMU_RSTART_ECC		0x00000000UL
	#define MMU_REND_ECC		0x7FFFFFFFUL
	#define MMU_RSTART_KUSEG	0x00000000UL
	#define MMU_REND_KUSEG		0x7FFFFFFFUL
	#define MMU_RSTART_KSEG0	0x80000000UL
	#define MMU_REND_KSEG0		0x9FFFFFFFUL
	#define MMU_RSTART_KSEG1	0xA0000000UL
	#define MMU_REND_KSEG1		0xBFFFFFFFUL
	// FIX: SSEG0 previously started at 0xA0000000, overlapping all of
	// KSEG1; the MIPS32 supervisor segment starts at 0xC0000000, which
	// also matches the MIPS64 SSEG0 window above.
	#define MMU_RSTART_SSEG0	0xC0000000UL
	#define MMU_REND_SSEG0		0xDFFFFFFFUL
	// FIX: this macro was misspelled MMU_RSTAER_KSEG2; the correct
	// spelling is defined here and the old one kept as a deprecated
	// alias so any existing references keep compiling.
	#define MMU_RSTART_KSEG2	0xE0000000UL
	#define MMU_RSTAER_KSEG2	MMU_RSTART_KSEG2
	#define MMU_REND_KSEG2		0xFFFFFFFFUL
#endif /* !MIPS64 */

// Per-region attribute bits: caching behaviour, mapping, the privilege
// level required to access the region, and read/write permissions.
// FIX: the typedef was missing its base type ("typedef mmu_region_flags;"),
// relying on implicit int -- invalid since C99 and rejected by modern
// compilers. Declared as int, consistent with the file's other flag
// typedefs (mmu_op_flags etc.).
typedef int mmu_region_flags;
#define MMU_RCACHE		0x01	// region is cached
#define MMU_RNOCOHERENT	0x02	// region is not kept coherent
#define MMU_RMAPPED		0x04	// region goes through address translation
#define MMU_RUSER		0x08	// accessible from user mode
#define MMU_RSUPER		0x10	// accessible from supervisor mode
#define MMU_RKERN		0x20	// accessible from kernel mode
#define MMU_PROT_READ	0x40	// readable
#define MMU_PROT_WRITE	0x80	// writable

// TLB coherency attribute values.
// NOTE(review): NOCOHERENT (0x18) has NOCACHE's bit (0x10) set --
// confirm whether these are bit masks or distinct encoded field values.
#define MMU_TLB_NOCACHE		0x10
#define MMU_TLB_NOCOHERENT	0x18

// One cache level as seen by the MMU: the underlying cache model,
// its hit latency, and the queue of operations pending on it.
struct mmu_cache {
	sim_cycle		hit_cost;	// cycles charged on a hit
	CACHE			*cache;		// underlying cache model (declared elsewhere)
	mmu_q			q;			// operations pending on this cache
};

// Per-cache-level counters (one instance per cache level in
// mmu_statistics below).
struct cache_statistics {
	sim_size		hits;				// total cache hits
	sim_size		misses;				// total cache misses
	sim_size		writebacks;			// total cache lines written back
	sim_size		evictions;			// total cache lines evicted from cache
};

// Aggregate MMU counters; one instance each for CPU and DMA traffic
// (see mmu_context.cpu_stats / dma_stats).
struct mmu_statistics {
	sim_size		stalls;				// Structural stalls triggered by the MMU
	sim_size		exceptions;			// Exceptions originating in the MMU
	sim_size		real_exceptions;	// Exceptions originating in the MMU that aren't clobbered

	sim_size		unmapped_reads;		// reads from unmapped virtual addresses
	sim_size		unmapped_writes;	// writes to unmapped virtual addresses

	sim_size		uncached_reads;		// reads from uncached memory regions
	sim_size		uncached_writes;	// writes to uncached memory regions

	sim_size		instruction_reads;	// total instruction reads
	sim_size		data_reads;			// total data reads (memory/device)
	sim_size		data_writes;		// total data writes (memory/device)

	sim_size		tlb_hits;			// Addresses found in TLB
	sim_size		tlb_misses;			// Addresses not found in TLB
	
	cache_stats		L1I;				// L1 instruction cache statistics
	cache_stats		L1D;				// L1 data cache statistics
	cache_stats		L2;					// L2 cache statistics
	cache_stats		L3;					// L3 cache statistics

// The following include cache hits/misses:
	sim_size		ram_reads;			// Reads from RAM memory
	sim_size		ram_writes;			// Writes to RAM memory

	sim_cycle		ram_read_cycles;	// Cycles spent reading RAM memory
	sim_cycle		ram_write_cycles;	// Cycles spent writing RAM memory

	// NOTE: You can't write ROM so there are no statistics for that
	sim_size		rom_reads;			// Reads from ROM memory
	// FIX: was sim_size; every other *_cycles counter in this struct
	// is sim_cycle, so declare it consistently.
	sim_cycle		rom_read_cycles;	// Cycles spent reading ROM memory

	sim_size		dev_reads;			// Reads from device memory
	sim_size		dev_writes;			// Writes to device memory

	sim_cycle		dev_read_cycles;	// Cycles spent reading device memory
	sim_cycle		dev_write_cycles;	// Cycles spent writing device memory
};

// Cache level indices (MMU_CACHE_NONE = access bypasses the caches)
#define MMU_CACHE_L1I	0
#define MMU_CACHE_L1D	1
#define MMU_CACHE_L2	2
#define MMU_CACHE_L3	3
#define MMU_CACHE_NONE	4

// Accessors for the cache levels of a handle `h` -- NOTE(review):
// `h` is presumably the simulator handle carrying an `mmu` pointer
// to a struct mmu_context (defined below); confirm at call sites.
#define L1_ICACHE(h)	((h)->mmu->L1I)
#define L1_DCACHE(h)	((h)->mmu->L1D)
#define L2_CACHE(h)		((h)->mmu->L2)
#define L3_CACHE(h)		((h)->mmu->L3)

// Pending-operation queue of each cache level
#define L1_ICACHE_Q(h)	(&(L1_ICACHE(h).q))
#define L1_DCACHE_Q(h)	(&(L1_DCACHE(h).q))
#define L2_CACHE_Q(h)	(&(L2_CACHE(h).q))
#define L3_CACHE_Q(h)	(&(L3_CACHE(h).q))

// RAM and ROM deliberately resolve to the same combined phys_mem
// structure (see mmu_context.memory)
#define RAM(h)			((h)->mmu->memory)
#define ROM(h)			((h)->mmu->memory)

// Top-level MMU state: translation and caching policy, last stall and
// exception info, statistics, the TLB, the cache hierarchy, the device
// list and the RAM/ROM layout.
struct mmu_context {
	map_type		mtype;				// type of address translation to use
	cache_type		ctype;				// type of caching to use

	mmu_stall		s_cause;			// if a stall occurs the reason is set here
	mmu_except		*ex_context;		// if an exception occurs the context is saved here

	mmu_stats		*cpu_stats;			// statistics about CPU access to MMU
	mmu_stats		*dma_stats;			// statistics about DMA access to MMU

	TLB				*tlb;				// TLB, if required for address translation

	mmu_cache		L1I;				// L1 instruction cache and queue
	mmu_cache		L1D;				// L1 data cache and queue
	mmu_cache		L2;					// L2 cache and queue
	mmu_cache		L3;					// L3 cache and queue

	sim_size		dma_clients;		// number of distinct DMA clients

	sim_size		ndev;				// number of memory mapped devices
	mmu_dev			*devs;				// memory mapped devices

	mmu_op			ready_ops;			// completed operations
										// NOTE(review): embedded by value, presumably a
										// list-head sentinel chained via .next -- confirm

	sim_size		ram_low;			// beginning of RAM
	sim_size		ram_high;			// ending of RAM
	sim_cycle		ram_speed;			// cycles needed to read RAM

	sim_size		rom_low;			// beginning of ROM
	sim_size		rom_high;			// ending of ROM
	// FIX: was sim_size; ram_speed above is sim_cycle and both hold a
	// cycle count, so declare them consistently.
	sim_cycle		rom_speed;			// cycles needed to read ROM

	phys_mem		memory;				// combined RAM/ROM data structure
};


