/* arch/x86/kernel/arch_mm.c  
 * 
 * Copyright (C) 2021, 2022 intirain. 
 *
 * This file is part of evx. 
 * 
 * evx is free software: you can redistribute it and/or modify 
 * it under the terms of the GNU General Public License as published by 
 * the Free Software Foundation, either version 3 of the License, or 
 * (at your option) any later version. 
 * 
 * evx is distributed in the hope that it will be useful, 
 * but WITHOUT ANY WARRANTY; without even the implied warranty of 
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 
 * GNU General Public License for more details. 
 * 
 * You should have received a copy of the GNU General Public License 
 * along with evx. If not, see <https://www.gnu.org/licenses/>
 */ 




#include <kernel/init.h>
#include <kernel/module.h>
#include <kernel/mm.h>
#include <kernel/kernel.h>
#include <kernel/kmlog.h>

#include <arch/archinfo.h>
#include <arch/mm.h>
#include <arch/string.h>

#include <sys/types.h>
#include <config.h>


/* early boot helpers, defined below */
static int __init init_mapping_32m(void);
static int __init init_kmlog_buf(void);

int setpageattr(void *p, int attr);

/* bump pointer for the early boot allocator (see early_kmalloc());
 * set past the fixed boot page tables by init_mapping_32m() */
__archptr_t freeptr;

/* first address past the kernel image, in the kernel virtual window */
void *kernel_end;


/* arch_mm_init - discover physical memory and initialize the page
 * structures.
 *
 * Walks the BIOS e820 table at E820_ADDR, turning each entry into a
 * struct zone in kmem_zones, then lays the struct page array for every
 * zone directly after the zone array and links the pages of each zone
 * into a free list.  Finally records where usable RAM begins (past the
 * bookkeeping structures themselves).
 *
 * Returns 0.  Registered via arch_init() at the bottom of this file. */

int arch_mm_init(void) {
	struct e820_entry 	*e;
	struct zone 		*z;
	struct page 		*p, *last;

	int 			i;

	__archptr_t		addr, total_pages;

	total_pages = 0;

	/* boot page tables first, then the kmlog buffer — both carved
	 * out of low memory through freeptr */
	init_mapping_32m();
	init_kmlog_buf();

	/* _end is the link-time end of the image; OR in the kernel
	 * virtual offset to get its address in the kernel window */
	kernel_end = (void *) (KERNEL_OFFSET | (__archptr_t) &_end);

	/* put them there, after that, are the page structures. */
	z = kmem_zones;

	/* NOTE(review): only 16 zones are zeroed here but the e820 scan
	 * below can populate up to 32 entries — confirm kmem_zones size */
	memset(z, 0, sizeof(struct zone) * 16);


	for (e = E820_ADDR; e < E820_ADDR + 32; e++) {
		long npages;

		/* a zero or out-of-range type terminates the table */
		if (e->type > E820_TYPE_MAX || !e->type) 
			break;
		
		/* collapse BIOS types down to RAM vs everything else */
		e->type = e->type == E820_TYPE_RAM ? ZONETYPE_RAM : ZONETYPE_OTHER;

		/* e820 stores 64-bit values as two 32-bit halves */
		addr = PAGE_ALIGN(HIGH_AND_LOW(e->addrh, e->addrl, 32));

		npages = PAGE_NR(HIGH_AND_LOW(e->lenh, e->lenl, 32));
		
		z->start_addr = (void *) addr;
		z->type = e->type;
		z->total_pages += npages;

		/* any page */
		total_pages += npages;

		if (z->type == ZONETYPE_RAM) 
			nr_ram_pages += npages;

		++z;
	}

	/* the struct page array starts right after the last zone */
	p = kmem_pages = (struct page *) z;
	z = kmem_zones;

	while (z < (struct zone *) kmem_pages) {
		if (z->type != ZONETYPE_RAM && z->type != ZONETYPE_OTHER) 
			break;

		z->pages = p;

		/* chain the zone's pages into a singly linked list; the
		 * top byte of info records the owning zone's index */
		for (i = 0; i < z->total_pages; i++) {
			last = p;

			p->info = (z - kmem_zones) << 24;
			p->next = p + 1;

			p++;
		}

		/* NOTE(review): if a zone has zero pages, `last` is stale
		 * here (or uninitialized for the first zone) — confirm
		 * the e820 table never reports empty ranges */
		last->next = NULL;

		z++;
	}
	
	/* everything below p is consumed by zone/page bookkeeping; the
	 * first usable RAM page starts at the next page boundary */
	first_usable_ram = page_descr(PAGE_ALIGN((__archptr_t) p));
	usable_ram_pages = nr_ram_pages - (first_usable_ram - kmem_pages);

	return 0;
}



/* for a 4 GiB memory space, 12 MiB is used to create struct pages.
 *
 * Build the fixed boot-time page tables at hard-coded low physical
 * addresses and position freeptr just past them.
 *
 * NOTE(review): the name says 32m, but the 32-bit path below fills
 * 6 tables * 4 MiB = 24 MiB — confirm which is intended. */

static int __init init_mapping_32m(void) {
#if CONFIG_64BIT
	/* the boot stub's tables already cover the needed range, so only
	 * park the early allocator after them */
	freeptr = 0x3000;

	/* already mapped to 1g */

	return 0;
#else

	/* page directory at phys 0, page tables starting at phys 4096 */
	__archptr_t *lvl1 = (void *) 0, 
		    *lvl2 = (void *) 4096;

	/* low bits of each entry are the attributes; TODO confirm what
	 * attribute value 7 encodes (presumably present|rw|user) */
	__archptr_t addr = 4096 + 7;
	int i, j;

	/* point 6 directory slots at the tables at 0x1000..0x6000 —
	 * both identity (slots 0..5) and in the kernel window
	 * (slot 0x300 = virtual 3 GiB) */
	for (i = 0; i < 6; i++, addr += 4096) {
		lvl1[i] = addr;

		lvl1[0x300 + i] = addr;
	}

	addr = 7;
	
	/* fill each table with an identity mapping of the next 4 MiB;
	 * masking with ~0xff recovers the table address here because the
	 * tables are 4 KiB aligned and attr 7 fits in the low byte */
	for (i = 0; i < 6; i++) {
		lvl2 = (void *) (lvl1[i] & ~0xff);

		for (j = 0; j < 1024; j++, addr += 4096) 
			lvl2[j] = addr;
	}

	/* 16 kib, oh dear, my dear 16 kib :-( */

	/* early allocations start right after the 7 pages used above */
	freeptr = 0x7000;

	/* no need to invalidate() */

	return 0;
#endif
}

/* Carve the kernel message-log buffer out of the early allocation
 * region and start it off as an empty string. */
static int __init init_kmlog_buf(void) {
	kmlog_buf = (char *) freeptr;
	freeptr += KMLOG_SIZE;

	/* empty log until the first message arrives */
	*kmlog_buf = '\0';

	return 0;
}


/* stack - 8192 */
#define END 	0x8e000


void *early_kmalloc(int nbytes, int align) {
	void *p;

	freeptr = (freeptr + align - 1) & ~(align - 1);

	if (freeptr + nbytes > END) 
		return NULL;

	p = (void *) freeptr;
	freeptr += nbytes;

	return p;
}


/* early_free - roll back the most recent early_kmalloc() allocation.
 *
 * Only the topmost allocation can be returned: [p, p + nbytes) must end
 * exactly at the current freeptr.  Returns 0 on success, 1 otherwise.
 *
 * Fix: arithmetic on void * is a GNU extension, not standard C —
 * compare through char * instead. */
int early_free(void *p, int nbytes) {
	if ((char *) p + nbytes != (char *) freeptr) 
		/* cannot free */
		return 1;

	freeptr -= nbytes;

	return 0;
}


/* map page s (virtual) to d... okay, what am i doing? */

void *__mapto(void *s, void *d, int attr) {
#if CONFIG_64BIT
	__archptr_t *lvl1, *lvl2, *lvl3, *p;
	__archptr_t dest_addr;

	dest_addr = (__archptr_t) d;
	
	lvl1 = (__archptr_t *) LVL1_PT[(dest_addr >> 39) & 4095];
	
	if (!lvl1) {
		if (!(lvl1 = early_kmalloc(4096, 4096))) 
			panic("mapto: Out of memory\n");

		LVL1_PT[(dest_addr >> 39) & 4095] = (__archptr_t) lvl1 | DEFAULT_PAGE_ATTR;
		memset(lvl1, 0, 4096);
	}

	p = (__archptr_t *) ((__archptr_t) lvl1 & ~0xfffu);
	lvl2 = (__archptr_t *) p[(dest_addr >> 30) & 4095];

	if (!lvl2) {
		if (!(lvl2 = early_kmalloc(4096, 4096))) 
			panic("mapto: Out of memory\n");

		p[(dest_addr >> 30) & 4095] = (__archptr_t) lvl2 | DEFAULT_PAGE_ATTR;
		memset(lvl2, 0, 4096);
	}

	lvl3 = (__archptr_t *) ((__archptr_t) lvl2 & ~0xfffu);

	if (s) 
		lvl3[(dest_addr >> 21) & 4095] = (__archptr_t) s | attr;

	else 
		lvl3[(dest_addr >> 21) & 4095] &= ~0xfffu | attr;

	return d;
#else

	__archptr_t dest_addr = (__archptr_t) d;
	__archptr_t *lvl1;
	__archptr_t *lvl2;

	dest_addr &= ~PAGE_MASK;
	
	/* 10 lvl1    10 lvl2    12 offset */
	lvl1 = (__archptr_t *) LVL1_PT[dest_addr >> 22];

	if (!lvl1) {
		if (!(lvl1 = early_kmalloc(4096, 4096))) 
			panic("mapto: Out of memory\n");

		LVL1_PT[dest_addr >> 22] = (__archptr_t) lvl1 | DEFAULT_PAGE_ATTR;
		memset(lvl1, 0, 4096);
	}

	lvl2 = (__archptr_t *) ((__archptr_t) lvl1 & ~0xffu);

	if (!s) 
		/* clear attributes and then set */

		lvl2[(dest_addr >> 12) & 1023] &= ~0xff | attr;

	else 
		lvl2[(dest_addr >> 12) & 1023] = (__archptr_t) s | attr;


	return (void *) dest_addr;
#endif
}


/* Map page s to virtual address d with the default page attributes. */
void *mapto(void *s, void *d) {
	void *mapped = __mapto(s, d, DEFAULT_PAGE_ATTR);

	return mapped;
}


/* Rewrite the page attributes of the existing mapping for p, leaving
 * the mapped frame untouched (NULL source keeps the frame).  Always
 * returns 0. */
int setpageattr(void *p, int attr) {
	(void) __mapto(NULL, p, attr);

	return 0;
}


/* register arch_mm_init to run during the arch-init phase of boot */
arch_init(arch_mm_init);


