/*
 *  mmap() - POSIX 1003.1b 6.3.1 - map pages of memory
 *
 *  COPYRIGHT (c) 2010.
 *  Chris Johns (chrisj@rtems.org)
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.com/license/LICENSE.
 *
 *  $Id$
 */

#if HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems.h>
#include <errno.h>
#include <stdlib.h>
#include <rtems/libio_.h>
#include <sys/stat.h>
#include <unistd.h>

#include <rtems/libmmu.h>
#include <rtems/posix/mman.h>
#include <rtems/posix/mmap-internal.h>

/*
 * Attribute set for the MMAP lock semaphore: a local, priority-ordered,
 * simple binary semaphore without priority inheritance or ceiling.
 */
#define RTEMS_MUTEX_ATTRIBS \
  (RTEMS_PRIORITY | RTEMS_SIMPLE_BINARY_SEMAPHORE | \
   RTEMS_NO_INHERIT_PRIORITY | RTEMS_NO_PRIORITY_CEILING | RTEMS_LOCAL)

/**
 * POSIX mmap_mappings protection domain. Holds the chains of active and
 * idle memory protection entries used to record mappings.
 */
rtems_memory_protection_domain mmap_mappings;

/**
 * The id of the MMAP lock. Zero until the lock is created on first use by
 * mmap_mappings_lock_create().
 */
static rtems_id mmap_mappings_lock;

/**
 * Create the lock.
 */
static
bool mmap_mappings_lock_create(
  void
)
{
  /*
   * Create the mapping lock on first use. First test if the mapping lock is
   * present. If not, take the libio lock, test the mapping lock again (we
   * may have lost a race with another thread) and, still unset, initialize
   * the protection support and create the mapping lock while holding the
   * libio lock. This closes the race the previous code had, where the
   * protection domain was initialized before the libio lock was taken.
   *
   * Sets errno to EINVAL and returns false on any failure.
   */
  if ( mmap_mappings_lock == 0 ) {
    rtems_status_code sc = RTEMS_SUCCESSFUL;

    rtems_semaphore_obtain( rtems_libio_semaphore,
                            RTEMS_WAIT, RTEMS_NO_TIMEOUT );
    if ( mmap_mappings_lock == 0 ) {
      /*
       * NOTE(review): initializing the memory protection support here is a
       * workaround; it should be performed once early during system
       * initialization, not lazily from mmap().
       */
      sc = rtems_memory_protection_initialize( );
      if ( sc == RTEMS_SUCCESSFUL )
        sc = rtems_memory_protection_initialize_domain(
               &mmap_mappings, /* FIXME: domain should be set up at boot */
               64              /* number of protection entries in the pool */
             );
      if ( sc == RTEMS_SUCCESSFUL )
        sc = rtems_semaphore_create( rtems_build_name( 'M', 'M', 'A', 'P' ),
                                     1,
                                     RTEMS_MUTEX_ATTRIBS,
                                     RTEMS_NO_PRIORITY,
                                     &mmap_mappings_lock );
    }
    rtems_semaphore_release( rtems_libio_semaphore );
    if ( sc != RTEMS_SUCCESSFUL ) {
      errno = EINVAL;
      return false;
    }
  }
  return true;
}

/**
 * Obtain the MMAP lock, creating it on first use.
 *
 * Returns true with the lock held, or false with errno set to EINVAL.
 * The previous version returned true when the lock could not be created,
 * reporting success with no lock held; that failure is now propagated.
 */
bool mmap_mappings_lock_obtain(
  void
)
{
  rtems_status_code sc;

  /* Lock creation failure already set errno. */
  if ( !mmap_mappings_lock_create( ) )
    return false;

  sc = rtems_semaphore_obtain( mmap_mappings_lock,
                               RTEMS_WAIT, RTEMS_NO_TIMEOUT );
  if ( sc != RTEMS_SUCCESSFUL ) {
    errno = EINVAL;
    return false;
  }
  return true;
}

/**
 * Release the MMAP lock.
 *
 * A release failure is only reported (errno = EINVAL, return false) when no
 * earlier error is pending; an errno set while the lock was held is
 * deliberately left untouched so callers do not lose it.
 */
bool mmap_mappings_lock_release(
  void
)
{
  rtems_status_code sc = rtems_semaphore_release( mmap_mappings_lock );

  if ( sc != RTEMS_SUCCESSFUL && errno == 0 ) {
    errno = EINVAL;
    return false;
  }
  return true;
}

/**
 * mmap() - POSIX 1003.1b 6.3.1 - map pages of memory.
 *
 * Emulates a private file mapping by allocating (or, for MAP_FIXED, using
 * the caller-supplied) memory and reading the file contents into it, then
 * recording the region in the mmap_mappings protection domain.
 *
 * Only MAP_PRIVATE is supported; MAP_SHARED is rejected with EINVAL.
 * Returns the mapped address, or MAP_FAILED with errno set.
 */
void *mmap(
  void *addr, size_t len, int prot, int flags, int fildes, off_t off
)
{
  struct stat                         sb;
  rtems_memory_protection_permission  attr;
  rtems_memory_protection_entry      *p_entry;
  rtems_memory_protection_domain     *domain_p;
  long                                page_size;
  long                                page_mask;
  void                               *base = NULL;
  ssize_t                             r;

  errno = 0;

  /* POSIX requires a non-zero length. */
  if ( len == 0 ) {
    errno = EINVAL;
    return MAP_FAILED;
  }

  /* MAP_SHARED is not supported for now, so MAP_PRIVATE is required. */
  if ( 0 != ( flags & MAP_SHARED ) || 0 == ( flags & MAP_PRIVATE ) ) {
    errno = EINVAL;
    return MAP_FAILED;
  }

  /* The requested address and the file offset must be page aligned. */
  page_size = sysconf( _SC_PAGESIZE );
  page_mask = page_size - 1;
  if ( 0 != ( (long) addr & page_mask ) || 0 != ( (long) off & page_mask ) ) {
    errno = EINVAL;
    return MAP_FAILED;
  }

  /*
   * Get a stat of the file to get the dev + inode number and to make sure
   * the fd is ok. The normal libio calls cannot be used because we need to
   * return MAP_FAILED on error and they return -1 directly without coming
   * back to here.
   */
  if ( fstat( fildes, &sb ) < 0 ) {
    errno = EBADF;
    return MAP_FAILED;
  }

  /* Directories and symbolic links cannot be mapped. */
  if ( S_ISDIR( sb.st_mode ) || S_ISLNK( sb.st_mode ) ) {
    errno = ENODEV;
    return MAP_FAILED;
  }

  /* For regular files the requested range must lie within the file. */
  if ( S_ISREG( sb.st_mode )
       && (( off >= sb.st_size ) || (( off + len ) > sb.st_size )) ) {
    errno = EOVERFLOW;
    return MAP_FAILED;
  }

  /*
   * Obtain the mmap lock. Sets errno on failure.
   */
  if ( !mmap_mappings_lock_obtain( ) )
    return MAP_FAILED;

  /*
   * Map the POSIX protection flags onto the protection attributes. The
   * previous code read 'attr' before ever assigning it (undefined
   * behavior) and OR-ed the raw PROT_* bits into the RTEMS permission
   * word; build the permission set explicitly instead.
   *
   * NOTE(review): no-execute is not implemented — PROT_EXEC is ignored —
   * and per the original comments the permissions only take effect while
   * the CPU runs in user (not supervisor) mode.
   */
  attr = 0;
  if ( 0 != ( prot & PROT_READ ) )
    attr |= RTEMS_MEMORY_PROTECTION_READ_PERMISSION;
  if ( 0 != ( prot & PROT_WRITE ) )
    attr |= RTEMS_MEMORY_PROTECTION_WRITE_PERMISSION;

  /* Reject permission combinations the target CPU cannot express. */
  if ( rtems_memory_protection_verify_permission( attr ) != RTEMS_SUCCESSFUL ) {
    errno = ENOTSUP;
    mmap_mappings_lock_release( );
    return MAP_FAILED;
  }

  domain_p = &mmap_mappings;

  if (( flags & MAP_FIXED ) == MAP_FIXED ) {
    attr |= RTEMS_MEMORY_PROTECTION_MAP_FIXED;
    /*
     * If the map is fixed see if this address is already mapped. At this
     * point in time if there is an overlap in the mappings we return an
     * error.
     */
    p_entry = (rtems_memory_protection_entry *)
      rtems_chain_first( &domain_p->active_mpe_chain );
    while ( !rtems_chain_is_tail( &domain_p->active_mpe_chain,
                                  &p_entry->node ) ) {
      if (( p_entry->region.base <= addr ) &&
          (( p_entry->region.base + p_entry->region.bounds ) > addr )) {
        mmap_mappings_lock_release( );
        errno = ENXIO;
        return MAP_FAILED;
      }
      p_entry = (rtems_memory_protection_entry *)
        rtems_chain_next( &p_entry->node );
    }
  }

  /* Take a mapping control block from the idle pool. */
  if ( rtems_chain_is_empty( &domain_p->idle_mpe_chain ) ) {
    mmap_mappings_lock_release( );
    errno = ENOMEM;
    return MAP_FAILED; /* was NULL, which violates the mmap() contract */
  }

  p_entry = (rtems_memory_protection_entry *)
    rtems_chain_get_unprotected( &domain_p->idle_mpe_chain );

  /*
   * Round the region size up to a multiple of the page size. The previous
   * expression always added one extra page when len was already aligned.
   */
  p_entry->region.bounds = (( len + page_mask ) / page_size ) * page_size;
  p_entry->permissions = attr | RTEMS_MEMORY_PROTECTION_MAP_PRIVATE;

  if (( flags & MAP_FIXED ) != MAP_FIXED ) {
    base = malloc( len );
    if ( NULL == base ) {
      /* Return the unused control block to the idle pool (was leaked). */
      rtems_chain_append( &domain_p->idle_mpe_chain, &p_entry->node );
      mmap_mappings_lock_release( );
      errno = ENOMEM;
      return MAP_FAILED;
    }

    /*
     * Do not seek on character devices, pipes or sockets.
     */
    if ( S_ISREG( sb.st_mode ) || S_ISBLK( sb.st_mode ) ) {
      if ( lseek( fildes, off, SEEK_SET ) < 0 ) {
        /* errno has been set by lseek(). */
        rtems_chain_append( &domain_p->idle_mpe_chain, &p_entry->node );
        mmap_mappings_lock_release( );
        free( base );
        return MAP_FAILED;
      }
    }
    p_entry->region.base = base;
  } else {
    p_entry->region.base = addr;
  }

  /*
   * Emulate the mapping by copying the file contents into the region.
   * NOTE(review): a short read is accepted as-is — confirm this is the
   * intended behavior for files shorter than expected.
   */
  r = read( fildes, p_entry->region.base, len );
  if ( r < 0 ) {
    rtems_chain_append( &domain_p->idle_mpe_chain, &p_entry->node );
    mmap_mappings_lock_release( );
    free( base ); /* base is NULL on the MAP_FIXED path; free(NULL) is ok */
    errno = ENXIO;
    return MAP_FAILED;
  }

  rtems_chain_append( &domain_p->active_mpe_chain, &p_entry->node );

  mmap_mappings_lock_release( );

  return p_entry->region.base;
}
