/* Copyright (C) 2004,2005  The DESIROS Team
    desiros.dev@gmail.com

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License
   as published by the Free Software Foundation; either version 2
   of the License, or (at your option) any later version.
   
   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.
   
   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
   USA.
 */


#include <klibc.h>
#include <types.h>
#include <kmalloc.h>
#include <physmem.h>

#define _INITPD_ 
#include <mm.h>
#include <debug.h>
#include <process.h>
#include <list.h>
#include <kerrno.h>

/** Structure of the x86 CR3 register: the Page Directory Base
    Register. See Intel x86 doc Vol 3 section 2.5 */
struct x86_pdbr
{
  __u32 zero1          :3; /* Intel reserved */
  __u32 write_through  :1; /* 0=write-back, 1=write-through */
  __u32 cache_disabled :1; /* 1=cache disabled */
  __u32 zero2          :7; /* Intel reserved */
  __u32 pd_paddr       :20; /* physical PAGE NUMBER of the PD (paddr >> 12) */
} __attribute__ ((packed));



/** Set up the initial kernel page directory: identity-maps the whole
    address space with 4 kB pages (PDEs 0..1022), installs the
    self-referencing "mirror" entry in PDE 1023, then enables paging.

    NOTE(review): the identity mapping is installed with P_USER and
    without an explicit P_PRESENT -- presumably P_READ is bit 0
    (present); confirm against the flag definitions in mm.h. */
void paging_init()
{

         /* The PD itself lives in a freshly allocated physical page */
         page_directory = (__u32*) physmem_ref_physpage_new(false);
         physmem_ref_physpage_at((__u32) page_directory);
         memset(page_directory, 0, sizeof(__u32) * 1024);
         
           __u32 address = 0;   

         /* Fill PDEs 0..1022; entry 1023 is reserved for the mirror */
         __u32 dir_i;
        for( dir_i = 0; dir_i < 1023; dir_i++){
            __u32* table =  (__u32*) physmem_ref_physpage_new(false);
            page_directory[dir_i] = (__u32)table  | P_READ | P_WRITE| P_USER ;

              /* Identity-map the 4 MB region covered by this table */
              __u32 tbl_i ;
              for(tbl_i = 0; tbl_i < 1024; tbl_i++){
                 table[tbl_i] = address | P_READ | P_WRITE ;
                 address += 4096;
              }
        }


                 /* Page table mirroring magic trick !... */
	page_directory[1023] = ((__u32) page_directory | (P_PRESENT | P_WRITE));

        /* Load CR3, set PSE_FLAG in CR4 and PAGING_FLAG in CR0, then
           jump to flush the prefetch queue now that paging is on.
           NOTE(review): PSE (4 MB pages) is enabled in CR4 although
           the tables above use 4 kB pages -- verify this is intended. */
        asm("   mov %0, %%eax    \n \
                mov %%eax, %%cr3 \n \
                mov %%cr4, %%eax \n \
		or %2, %%eax \n \
		mov %%eax, %%cr4 \n \
                mov %%cr0, %%eax \n \
                or %1, %%eax     \n \
                mov %%eax, %%cr0 \n \
                jmp 1f\n  \
                1: \n \
                movl $2f, %%eax\n \
                jmp *%%eax\n \
                2:\n" :: "m"(page_directory), "i" (PAGING_FLAG) , "i"(PSE_FLAG));


             
}

 /* Associate a physical address to a virtual address , 
    puts the physical address in the PTE */
/** Associate a physical page with a virtual address: installs the
    physical address (plus P_PRESENT and the given flags) into the PTE
    covering 'virtual', and takes a reference on the physical page.

    @param virtual  page-aligned virtual address to map
    @param physical page-aligned physical address of the backing page
    @param user     true to make the page user-accessible (PTE bit 2)
    @param flags    additional PTE flags (P_READ, P_WRITE, ...)
    @return 0 on success, -ENOMEM if no page table covers 'virtual'.

    The PDE/PTE are reached through the mirror entry (PDE 1023) of the
    CURRENT address space. */
__u32 paging_map(__u32 virtual, __u32 physical, bool user, __u32 flags)
{
        __u32 *pde;
        __u32 *pte;

        if (virtual & 0xfff)
                kprintf("Virtual address not page-aligned\n");

        /* BUGFIX: the mask was 0xff, which let misaligned physical
           addresses in the 0x100..0xfff range slip through */
        if (physical & 0xfff)
                kprintf("Physical address not page-aligned\n");

        /* Locate the PDE through the mirror mapping */
        pde = (__u32 *) (0xFFFFF000 | (((__u32) virtual & 0xFFC00000) >> 20));

        if ((*pde & P_PRESENT) == 0) {
                debug("PANIC: paging_map(): kernel page table not found !\n");
                return -ENOMEM;
        }

        /* BUGFIX: take the reference only once we know the mapping can
           actually be installed (no refcount leak on the error path) */
        physmem_ref_physpage_at(physical);

        /* Install the entry in the page table */
        pte = (__u32 *) (0xFFC00000 | (((__u32) virtual & 0xFFFFF000) >> 10));
        *pte = ((__u32) physical) | (P_PRESENT | (user ? 4 : 0) | flags);

        return 0;
}

/** Remove the mapping installed at 'virtual': clears the P_PRESENT
    bit of its PTE, flushes the corresponding TLB entry and drops the
    reference on the underlying physical page.

    @param virtual  page-aligned virtual address to unmap
    @return 0 on success, 1 if 'virtual' is not page-aligned. */
__u32 paging_unmap(__u32 virtual)
{
        __u32 *pte;
        __u32 phys;

        /* BUGFIX: validate the alignment BEFORE walking the page
           tables, so a bogus address is never translated */
        if (virtual & 0xfff) {
                debug("Virtual address not page-aligned\n");
                return 1;
        }

        phys = paging_virtual_to_physical(page_directory, virtual);

        /* Clear the PTE through the mirror mapping */
        pte = (__u32 *) (0xFFC00000 | (((__u32) virtual & 0xFFFFF000) >> 10));
        *pte = (*pte & (~P_PRESENT));
        flush_tlb_single(virtual);

        /* BUGFIX: only drop the reference when the page was actually
           mapped (a translation of 0 means "not mapped") */
        if (phys != 0)
                physmem_unref_physpage(phys);

        return 0;
}

/* Unmap every page of the interval [vaddr, vaddr + size).
   Both 'vaddr' and 'size' must be page-aligned, otherwise -1 is
   returned.  On success, returns the number of bytes that were
   actually unmapped (pages for which paging_unmap() succeeded). */
int paging_unmap_interval(__u32 vaddr, __u32 size)
{
  int nb_bytes_unmapped = 0;

  if (! IS_PAGE_ALIGNED(vaddr) || ! IS_PAGE_ALIGNED(size))
    return -1;

  while (size >= PAGE_SIZE)
    {
      if (paging_unmap(vaddr) == 0)
	nb_bytes_unmapped += PAGE_SIZE;

      vaddr += PAGE_SIZE;
      size  -= PAGE_SIZE;
    }

  return nb_bytes_unmapped;
}


/** Translate 'virtual' into a physical address by walking the page
    tables through the mirror mapping (PDE 1023) of the CURRENT
    address space.  Returns 0 when the address is not mapped.

    NOTE(review): the 'page_directory' parameter is never used -- the
    walk always goes through the mirror of the ACTIVE PD; confirm that
    callers only ever pass the current PD.  The parameter is kept for
    interface compatibility. */
__u32 paging_virtual_to_physical(__u32* page_directory, __u32 virtual){

/* Offset of an address inside its page.
   BUGFIX: fully parenthesized so the macro expands safely inside any
   enclosing expression (the old body '(addr) & 0x00000FFF' could bind
   incorrectly when used without extra parentheses). */
#define	VADDR_PG_OFFSET(addr)	((addr) & 0x00000FFF)

	__u32 *pde;		/* PDE covering 'virtual' */
	__u32 *pte;		/* PTE covering 'virtual' */

	(void)page_directory;	/* unused -- see NOTE above */

	pde = (__u32 *) (0xFFFFF000 | (((__u32) virtual & 0xFFC00000) >> 20));
	if ((*pde & P_PRESENT)) {
		pte = (__u32 *) (0xFFC00000 | (((__u32) virtual & 0xFFFFF000) >> 10));
		if ((*pte & P_PRESENT))
			return (__u32) ((*pte & 0xFFFFF000) + (VADDR_PG_OFFSET((__u32) virtual)));
	}

	return 0;
}




/** Return the physical address of the page directory currently loaded
    in the MMU: reads CR3 and strips the low 12 control/reserved bits
    by going through the x86_pdbr bitfield layout. */
__u32  paging_get_current_PD()
{
   struct x86_pdbr pdbr;
  /* NOTE(review): CR3 is read into a struct via an "=r" output; this
     relies on the packed 32-bit struct fitting in one register. */
  asm volatile("movl %%cr3, %0\n": "=r"(pdbr));
  return (pdbr.pd_paddr << 12);
}

/** Load the page directory located at physical address 'paddr_PD'
    into the MMU (CR3), i.e. switch to that address space.
    Always returns OK. */
__u32 paging_load_PD(__u32  paddr_PD){

  struct x86_pdbr pdbr;

  /* A NULL PD is a caller bug -- trace it (execution continues) */
  if(paddr_PD == 0)
    debug();


  /* Setup the value of the PDBR */
  memset(& pdbr, 0x0, sizeof(struct x86_pdbr)); /* Reset the PDBR */
  /* CR3 holds the PD's physical PAGE NUMBER, not the full address */
  pdbr.pd_paddr = (paddr_PD >> 12);

  /* Configure the MMU according to the PDBR */
  asm volatile ("mov %0, %%eax    \n \
                 movl %%eax,%%cr3\n" ::"r"(pdbr));

  return OK;
}



__u32* paging_pd_create()
{
	__u32 *pd = NULL;
        __u32  *tmp_dest_pd ,* tmp_src_pd;
	int i;
    

	pd = (__u32*)physmem_ref_physpage_new(false);

       if (NULL == (void*)pd)
	{  
	  return -ENOMEM;
	}

        physmem_ref_physpage_at((__u32) pd);

  tmp_dest_pd  = (__u32 *)kvmm_alloc(1, 0);
  if (! tmp_dest_pd)
    return -ENOMEM;

   paging_map((__u32)tmp_dest_pd, (__u32)pd, false,
					 P_READ
					 | P_WRITE);

  tmp_src_pd  = (__u32 *)kvmm_alloc(1, 0);
  if (! tmp_src_pd)
    return -ENOMEM;

   paging_map((__u32)tmp_src_pd, (__u32)page_directory, false,
					 P_READ
					 | P_WRITE);

       for (i = 0 ; i < 256; i++)
	tmp_dest_pd[i] = tmp_src_pd[i]  ;

        tmp_dest_pd[1023] = (__u32) pd | (P_PRESENT | P_WRITE ) ;

    paging_unmap((__u32)tmp_src_pd);
    paging_unmap((__u32)tmp_dest_pd);
 
	return pd;
}

/** Ensure a page table covering 'vaddr' exists in the CURRENT address
    space: if the corresponding PDE is absent, allocate a new zeroed
    page table and install it (present, read/write, user).

    NOTE(review): the 'pd' parameter is never used -- the PDE is
    resolved through the mirror mapping of the ACTIVE address space;
    confirm callers only pass the current PD.

    @return 0 on success (or if the PT already existed), -3 on
            allocation/translation failure. */
int paging_pd_add_pt(char *vaddr, __u32 *pd)
{
	__u32 *pde;
	__u32 *pt;
	__u32 pg_paddr, pg_vaddr;
	int i;

	(void)pd;		/* unused -- see NOTE above */

	/* Locate the PDE through the mirror mapping */
	pde = (__u32 *) (0xFFFFF000 | (((__u32) vaddr & 0xFFC00000) >> 20));

	if ((*pde & P_PRESENT) == 0) {

		/* Allocate a kernel virtual page to back the new PT */
		pg_vaddr = kvmm_alloc(1, 0);
		if (pg_vaddr == (__u32)NULL) {
			debug();
			return -3;
		}
		memset((void*)pg_vaddr, 0x0, PAGE_SIZE);

		/* Keep a reference to the underlying physical page */
		pg_paddr = paging_virtual_to_physical(page_directory, pg_vaddr);
		if (NULL == (void*)pg_paddr) {
			debug();
			/* BUGFIX: this path used to 'return 0' (i.e.
			   report success) while also leaking pg_vaddr */
			kvmm_free(pg_vaddr);
			return -3;
		}

		physmem_ref_physpage_at(pg_paddr);

		/* Initialize the new page table */
		pt = (__u32 *) pg_vaddr;
		for (i = 0; i < 1024; i++)
			pt[i] = 0;

		/* Install the corresponding entry in the directory */
		*pde = ((__u32) pg_paddr) | (P_PRESENT | P_READ | P_WRITE | P_USER );
	}

	return 0;
}

/** Copy the user-space mappings (PDEs 256..1022) from the PD at
    physical address 'src_paddr_PD' into the PD at 'dest_paddr_PD',
    setting up copy-on-write: every copied PTE is made read-only in
    the destination and the underlying physical pages are shared
    (reference-counted), not duplicated.

    @return OK on success, -ENOMEM on allocation failure.

    NOTE(review): the SOURCE PTEs are left writable here, so COW
    faults only ever trigger in the destination space -- confirm this
    matches the intended fork semantics. */
int paging_copy_user_space(__u32  dest_paddr_PD,
				     __u32  src_paddr_PD){
  __u32 *tmp_src_pt, *tmp_dest_pt, *tmp_dest_pd, *tmp_src_pd;
  int index_in_pd;

  /* Map the destination PD into kernel space */
  tmp_dest_pd = (__u32 *)kvmm_alloc(1, 0);
  if (! tmp_dest_pd)
    return -ENOMEM;

  paging_map((__u32)tmp_dest_pd, (__u32)dest_paddr_PD, true,
	     P_READ | P_WRITE);

  /* Map the source PD into kernel space */
  tmp_src_pd = (__u32 *)kvmm_alloc(1, 0);
  if (! tmp_src_pd)
    {
      /* BUGFIX: release the destination window on failure */
      paging_unmap((__u32)tmp_dest_pd);
      kvmm_free((__u32)tmp_dest_pd);
      return -ENOMEM;
    }

  paging_map((__u32)tmp_src_pd, (__u32)src_paddr_PD, true,
	     P_READ | P_WRITE);

  /* Allocate 2 pages in kernel space to map each source/destination
     PT pair while performing the copy */
  tmp_src_pt = (__u32 *)kvmm_alloc(1, 0);
  if (! tmp_src_pt)
    {
      paging_unmap((__u32)tmp_src_pd);
      paging_unmap((__u32)tmp_dest_pd);
      kvmm_free((__u32)tmp_src_pd);
      kvmm_free((__u32)tmp_dest_pd);
      return -ENOMEM;
    }

  tmp_dest_pt = (__u32 *)kvmm_alloc(1, 0);
  if (! tmp_dest_pt)
    {
      /* BUGFIX: this path used to free tmp_dest_pt (which is NULL
	 here) and leak tmp_src_pt and both PD windows */
      kvmm_free((__u32)tmp_src_pt);
      paging_unmap((__u32)tmp_src_pd);
      paging_unmap((__u32)tmp_dest_pd);
      kvmm_free((__u32)tmp_src_pd);
      kvmm_free((__u32)tmp_dest_pd);
      return -ENOMEM;
    }

  /* Copy each present PT from source to destination.  Entry 1023 is
     the mirror PDE and must NOT be copied. */
  for (index_in_pd = 256 ; index_in_pd < 1023 ; index_in_pd ++)
    {
      __u32 paddr_dest_pt;
      int index_in_pt;

      if ((tmp_src_pd[index_in_pd] & P_PRESENT) == 0)
	continue;

      /* Allocate the destination PT (zeroed) */
      paddr_dest_pt = physmem_ref_physpage_new(true);
      if (NULL == (void*)paddr_dest_pt)
	{
	  /* Unallocate temporary kernel space used for the copy */
	  kvmm_free((__u32)tmp_src_pt);
	  kvmm_free((__u32)tmp_dest_pt);
	  paging_unmap((__u32)tmp_src_pd);
	  paging_unmap((__u32)tmp_dest_pd);
	  kvmm_free((__u32)tmp_src_pd);
	  kvmm_free((__u32)tmp_dest_pd);
	  return -ENOMEM;
	}

      /* Map source and destination PT.
	 BUGFIX: a PDE stores the PT's physical address in its top 20
	 bits; it must be MASKED (& 0xFFFFF000), not shifted left by
	 12 (cf. how the PDE is built below). */
      paging_map((__u32)tmp_src_pt,
		 tmp_src_pd[index_in_pd] & 0xFFFFF000, true,
		 P_READ);
      paging_map((__u32)tmp_dest_pt, paddr_dest_pt, true,
		 P_READ | P_WRITE);

      /* Copy the source PT into the destination PT, dropping the
	 write permission (copy-on-write) and taking a reference on
	 every shared page.
	 BUGFIX: the loops used to stop at 1023 and silently skipped
	 the last PTE of every table -- a PT has 1024 entries. */
      for (index_in_pt = 0 ; index_in_pt < 1024 ; index_in_pt ++)
	{
	  /* Copy the source PTE, read-only in the destination */
	  tmp_dest_pt[index_in_pt] = tmp_src_pt[index_in_pt] & ~P_WRITE;

	  /* Ignore non-present pages */
	  if ((tmp_dest_pt[index_in_pt] & P_PRESENT) == 0)
	    continue;

	  /* Increase the reference count of the shared page.
	     BUGFIX: a PTE stores the page's physical address in its
	     top 20 bits -- mask it, don't shift it left. */
	  physmem_ref_physpage_at(tmp_src_pt[index_in_pt] & 0xFFFFF000);
	}

      /* Unmap the temporary PTs */
      paging_unmap((__u32)tmp_src_pt);
      paging_unmap((__u32)tmp_dest_pt);

      /* Update the destination PDE */
      tmp_dest_pd[index_in_pd] = ((__u32)paddr_dest_pt)
	| (P_PRESENT | P_READ | P_WRITE | P_USER);
    }

  paging_unmap((__u32)tmp_src_pd);
  paging_unmap((__u32)tmp_dest_pd);
  /* BUGFIX: return all temporary virtual ranges to the allocator */
  kvmm_free((__u32)tmp_src_pt);
  kvmm_free((__u32)tmp_dest_pt);
  kvmm_free((__u32)tmp_src_pd);
  kvmm_free((__u32)tmp_dest_pd);

  return OK;
}


 

/** Try to resolve a copy-on-write fault at user address 'uaddr':
    allocate a fresh physical page, copy the contents of the faulting
    (read-only) page into it, and remap uaddr read/write onto the
    copy.

    @return OK on success, -EPERM when uaddr has no mapping at all,
            -ENOMEM on allocation failure.

    NOTE(review): the reference count of the ORIGINAL shared page is
    not dropped here, and the temporary kernel mapping of the copy
    (tmp_dest) is kept alive after the transfer -- confirm whether the
    callers compensate for both. */
int paging_try_resolve_COW(__u32 uaddr, __u32 *pd)
{
  __u32 *pde;
  __u32 *pte;
  __u32 new_ppage;
  __u32 vpage_src, tmp_dest;

  /* Walk to the PDE/PTE of the faulting address via the mirror */
  pde = (__u32 *) (0xFFFFF000 | (((__u32) uaddr & 0xFFC00000) >> 20));
  if ((*pde & P_PRESENT) == 0) {
    debug();
    return -EPERM;
  }

  pte = (__u32 *) (0xFFC00000 | (((__u32) uaddr & 0xFFFFF000) >> 10));
  if ((*pte & P_PRESENT) == 0)
    return -EPERM;

  /* Allocate the destination page inside the kernel space to perform
     the copy; it is transferred to its final user-space address
     below */
  tmp_dest = kvmm_alloc(1, KVMM_MAP);
  if (! tmp_dest)
    return -ENOMEM;

  /* Copy the contents of the faulting page */
  vpage_src = PAGE_ALIGN_INF(uaddr);
  memcpy((void*)tmp_dest, (void*)vpage_src, PAGE_SIZE);

  /* Retrieve the physical address of the freshly-filled copy */
  new_ppage = paging_virtual_to_physical(pd, tmp_dest);
  if (new_ppage == (__u32)NULL) {
    debug();
    /* BUGFIX: don't leak the temporary page on failure */
    kvmm_free(tmp_dest);
    return -ENOMEM;
  }

  /* Replace the original (read-only) mapping with a read/write
     mapping onto the copy */
  *pte = ((__u32) new_ppage) | (P_PRESENT | P_READ | P_WRITE | P_USER);

  /* BUGFIX: evict the stale read-only translation so the faulting
     access can be retried with write permission instead of
     re-faulting forever on the cached TLB entry */
  flush_tlb_single(uaddr);

  return OK;
}

