#include "fs.h"
#include "tools.h"
#include "alloc.h"

/* Per-page metadata array placed at the start of the ioremapped region
 * (initialised in init_adpm_allocator()). */
struct adpm_page *adpm_map;
/* NOTE(review): not referenced in this file -- presumably a count of
 * mapped pages; confirm against its users. */
atomic_t adpm_nr_mp;

DEFINE_SPINLOCK(adpm_ftab_lock);            /* guards adpm_ftab */
DEFINE_HASHTABLE(adpm_ftab, ADPM_FTAB_BIT); /* 2^ADPM_FTAB_BIT buckets */


/* File operations for regular adpm files (implementations below). */
static int adpm_open(struct inode *inode, struct file *file);
static int adpm_release(struct inode *inode, struct file *file);
static ssize_t adpm_read(struct file *file, char __user *buf, size_t len, loff_t *ppos);
static ssize_t adpm_write(struct file *file, const char __user *buf, size_t len, loff_t *ppos);
static int adpm_fsync(struct file *file, loff_t start, loff_t end, int datasync);
static int adpm_mmap(struct file *file, struct vm_area_struct *vma);

/* Inode operations for directories, plus the common inode constructor. */
static int adpm_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev);
static struct inode * adpm_get_inode(struct super_block *sb, const struct inode *dir,
                umode_t mode, dev_t dev);
static int adpm_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl);
static int adpm_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);

/* Superblock ops: all callbacks deferred to VFS defaults for now. */
static const struct super_operations adpm_sb_ops = {
  .alloc_inode  = NULL,
  .destroy_inode  = NULL,
  .write_inode  = NULL,
  .put_super = NULL, /* same as kill sb */
  .show_options = NULL, /* TODO: report physaddr/size mount options */
};

/* Address-space ops: generic libfs simple_* page-cache implementations. */
static const struct address_space_operations adpm_add_ops = {
  .readpage = simple_readpage,
  .write_begin  = simple_write_begin,
  .write_end  = simple_write_end,
};

/* Backing device: RAM-like, directly mappable, no writeback accounting. */
static struct backing_dev_info adpm_bdi = {
  .name   = "adpm",
  .ra_pages = 0,  /* No readahead */
  .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_MAP_DIRECT |
        BDI_CAP_MAP_COPY | BDI_CAP_READ_MAP |
        BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP,
};

/* Directory file ops: standard dcache-backed directory handling. */
static const struct file_operations adpm_dir_ops = {
  .open   = dcache_dir_open,
  .release  = dcache_dir_close,
  .llseek   = dcache_dir_lseek,
  .read   = generic_read_dir,
  .readdir  = dcache_readdir,
  .fsync    = noop_fsync,
};

/* Regular-file inode ops: simple attribute get/set only. */
static const struct inode_operations adpm_file_inode_ops = {
  .setattr  = simple_setattr,
  .getattr  = simple_getattr,
};


/* Regular-file ops: custom open/release/mmap; read/write/fsync below
 * reject the call (files are accessed through mmap only). */
const struct file_operations adpm_file_ops = {
  .open   = adpm_open,
  .release  = adpm_release,
  .read   = adpm_read,
  .write    = adpm_write,
  .mmap   = adpm_mmap,
  .fsync    = adpm_fsync,
};

/* Directory inode ops: libfs simple_* helpers plus local create/mkdir/mknod. */
static const struct inode_operations adpm_dir_inode_ops = {
  .create   = adpm_create,
  .lookup   = simple_lookup,
  .link   = simple_link,
  .unlink   = simple_unlink,
  .mkdir    = adpm_mkdir,
  .rmdir    = simple_rmdir,
  .mknod    = adpm_mknod,
  .rename   = simple_rename,
};


/* Superblock fill callback, invoked from the mount path. */
int adpm_fill_super(struct super_block *sb, void *data, int silent);


/* Mount-option tokens: physaddr=<addr>, size=<bytes>. */
typedef enum {
  Opt_addr, Opt_size, Opt_err,
} TOKEN;
static const match_table_t tokens = {
  { Opt_addr,   "physaddr=%s"   },
  { Opt_size,   "size=%s"   },
  { Opt_err,    NULL      },  /* table terminator / unmatched option */
};
/*
 * adpm_parse_options - parse the comma-separated mount option string.
 * Recognises "physaddr=<addr>" and "size=<bytes>" (memparse syntax, so
 * k/m/g suffixes work); both values are stored into @sbi.
 * Returns 0 on success, -EINVAL on a missing string or any bad option.
 */
static int adpm_parse_options(char *options, struct adpm_sb_info *sbi)
{
  char *p, *rest;
  substring_t args[MAX_OPT_ARGS];

  if(!options)
    return -EINVAL;
  while((p = strsep(&options, ",")) != NULL){
    TOKEN token;
    if(!*p) /* skip empty fields, e.g. "a=1,,b=2" */
      continue;
    token = match_token(p, tokens, args);
    switch(token){
      case Opt_addr:
        /* values must start with a digit (memparse would accept junk) */
        if(!isdigit(*args[0].from))
          goto out1;
        sbi->phys_addr = memparse(args[0].from, &rest);
        adpm_pr("%s phys addr is 0x%lx\n",__func__, sbi->phys_addr);
        break;
      case Opt_size:
        if(!isdigit(*args[0].from))
          goto out1;
        sbi->size = memparse(args[0].from, &rest);
        adpm_pr("%s size is 0x%lx\n",__func__, sbi->size);
        break;
      default: /* Opt_err or unknown option (dead break removed) */
        goto out1;
    }
  }
  return 0;
out1:
  pr_err("ERROR: %s mount options parse wrong\n", __FUNCTION__);
  return -EINVAL;
}
/*
 * init_adpm_allocator - carve the per-page metadata array out of the
 * mapped region and hand the remaining pages to the buddy allocator.
 * The struct adpm_page array occupies the first used_pfn pages of the
 * region; those pages are reported to adpm_init_buddy() as reserved.
 * Returns 0 (no failure paths at present).
 */
int init_adpm_allocator(struct adpm_sb_info *sbi){
  unsigned nr_pages, used_pfn, i; /* unsigned i: matches nr_pages */
  int ret = 0;

  #if ADPM_DEBUG
  /* Debug poison: fill the whole region so untouched memory is visible. */
  memset((void *)sbi->virt_addr, 100, sbi->size);
  #endif

  /* The metadata array lives at the very start of the region. */
  adpm_map = (struct adpm_page *)sbi->virt_addr;
  nr_pages = sbi->nr_pages;
  for(i = 0; i < nr_pages; i++){
    memset(adpm_map + i, 0, sizeof(struct adpm_page));
    INIT_LIST_HEAD(&((adpm_map + i)->lru));
    spin_lock_init(&((adpm_map + i)->lru_lock));
  }
  /* Pages consumed by the metadata array itself are not allocatable. */
  used_pfn = DIV_ROUND_UP(sizeof(struct adpm_page) * nr_pages, PAGE_SIZE);
  adpm_pr("adpm metadata used pfn %u\n", used_pfn);
  adpm_init_buddy(adpm_map, used_pfn, nr_pages, sbi);

  return ret;
}

/*
 * adpm_mknod - allocate an inode for @dentry and wire it into the dcache.
 * Returns 0 on success or the negative errno from adpm_get_inode()
 * (previously collapsed every failure into -ENOSPC).
 */
static int adpm_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
  struct inode *inode;

  inode = adpm_get_inode(dir->i_sb, dir, mode, dev);
  if(IS_ERR(inode))
    return PTR_ERR(inode); /* propagate the real error */
  d_instantiate(dentry, inode);
  dget(dentry); /* extra pin so the dentry persists until unlink */
  dir->i_mtime = dir->i_ctime = CURRENT_TIME;
  return 0;
}

/* Create a regular file: mknod with S_IFREG or'd into the mode.
 * static to match the forward declaration above. */
static int adpm_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
{
  return adpm_mknod(dir, dentry, mode | S_IFREG, 0);
}
/* Create a directory; on success bump the parent's link count for the
 * new child's ".." entry.  static to match the forward declaration. */
static int adpm_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode){
  int ret = adpm_mknod(dir, dentry, mode | S_IFDIR, 0);
  if (!ret)
    inc_nlink(dir);
  return ret;
}

/*
 * adpm_get_inode - allocate and initialise an inode for a file or dir.
 * Only S_IFREG and S_IFDIR are supported; anything else releases the
 * fresh inode and returns ERR_PTR(-EINVAL) (the inode used to leak here).
 * Returns the inode or an ERR_PTR.
 */
static struct inode *
  adpm_get_inode(struct super_block *sb, const struct inode *dir,
                umode_t mode, dev_t dev)
{
  struct inode *inode;

  inode = new_inode(sb);
  if(!inode)
    return ERR_PTR(-ENOMEM);
  inode->i_ino = get_next_ino();
  inode_init_owner(inode, dir, mode);
  inode->i_mapping->a_ops = &adpm_add_ops;
  inode->i_mapping->backing_dev_info = &adpm_bdi;
  inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
  switch (mode & S_IFMT){
    case S_IFREG:
      inode->i_op = &adpm_file_inode_ops;
      inode->i_fop = &adpm_file_ops;
      break;
    case S_IFDIR:
      inode->i_op = &adpm_dir_inode_ops;
      inode->i_fop = &adpm_dir_ops;
      inc_nlink(inode);/* dir inodes start with i_nlink == 2 (for "." entry) */
      break;
    default:
      pr_err("ERROR: adpm get inode wrong\n");
      iput(inode); /* was leaked: drop the freshly allocated inode */
      return ERR_PTR(-EINVAL);
  }
  return inode;
}

int adpm_fill_super(struct super_block *sb, void *data, int silent)
{
  struct adpm_sb_info *sbi;
  struct inode *root;
  int ret;

  adpm_pr("%s\n", __func__); 

  sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
  if(!sbi)
    return -ENOMEM;
  sb->s_fs_info = sbi;

  ret = adpm_parse_options(data, sbi);
  if(ret || !sbi->phys_addr || !sbi->size){
    pr_err("ERROR: parse options wrong \n");
    goto out1;
  }

  //init dram memory pool
  if(sbi->phys_addr % PAGE_SIZE || sbi->size % PAGE_SIZE){
    pr_err("ERROR: physaddr and size must be page aligned\n");
    goto out1;
  }
  sbi->nr_pages = sbi->size >> PAGE_SHIFT;
  sbi->virt_addr = (unsigned long)ioremap_cache(sbi->phys_addr, sbi->size);
  if(!sbi->virt_addr){
    pr_err("ERROR: ioremap fail\n");
    goto out1;
  }
  adpm_pr("%s viraddr %lx ~ %lx\n", __func__, sbi->virt_addr, sbi->virt_addr + sbi->size);

  ret = init_adpm_allocator(sbi);
  if(ret){
    pr_err("ERROR: init adpm allocator wrong\n");
    goto out2;
  }
  
  sb->s_op = &adpm_sb_ops;
  sb->s_xattr = NULL;
  sb->s_blocksize = PAGE_SIZE;
  sb->s_blocksize_bits = PAGE_SHIFT;
  sb->s_time_gran = 1;

  root = adpm_get_inode(sb, NULL, S_IFDIR | 0755, 0);
  if(IS_ERR(root)){
    pr_err("ERROR: adpm get inode wrong\n");
    ret = PTR_ERR(root);
    goto out3;
  }
  sb->s_root = d_make_root(root);
  if(!sb->s_root){
    pr_err("ERROR: make root wrong\n");
    ret = -ENOMEM;
    goto out3;
  }

  return 0;

out3:
  adpm_destroy_buddy();
out2:
  iounmap((void __iomem *)sbi->virt_addr);
  sbi->virt_addr = 0;
out1:
  if(sbi)
    kfree(sbi);
  return -EINVAL; 
  return ret;
}

/*
 * adpm_hash_str - hash a NUL-terminated name into an unsigned long.
 * Folds the string one word (BITS_PER_LONG/8 bytes) at a time through
 * hash_long().  When the terminator is reached mid-word, the NUL byte is
 * replaced by the length so far and len is set to -1 so the loop exits
 * after mixing in that final partial word (len++ makes it 0).
 * NOTE(review): appears modeled on the dcache full-name hash -- do not
 * change the scheme without rehashing existing adpm_ftab keys.
 */
static inline unsigned long adpm_hash_str(char *name)
{
  unsigned long hash = 0;
  unsigned long l = 0;
  int len = 0;
  unsigned char c;
  do {
    if (unlikely(!(c = *name++))) {
      c = (char)len; len = -1;  /* fold length into last word, then stop */
    }
    l = (l << 8) | c;
    len++;
    if ((len & (BITS_PER_LONG/8-1))==0)
      hash = hash_long(hash^l, BITS_PER_LONG);
  } while (len);
  return hash;
}

/*
 * adpm_find_file - look up a client file by name in the global table.
 * Walks the bucket selected by adpm_hash_str() and compares full names.
 * Returns the entry or NULL.
 * NOTE(review): the pointer is returned after adpm_ftab_lock is dropped,
 * so a concurrent delete could free it -- confirm callers serialize
 * (adpm_open holds adpm_open_lock; other paths are less clear).
 */
struct adpm_client_file *adpm_find_file(char *name){
  struct adpm_client_file *f;
  spin_lock(&adpm_ftab_lock);
  hash_for_each_possible(adpm_ftab, f, hash_head, adpm_hash_str(name)){
    if(!strcmp(name, f->name)){
      spin_unlock(&adpm_ftab_lock);
      return f;
    }
  }
  spin_unlock(&adpm_ftab_lock);
  return NULL;
}
/* Add @f to the global file table, keyed by a hash of its name.
 * Always succeeds; returns 0 for symmetry with other table helpers. */
int adpm_insert_file_to_table(struct adpm_client_file *f)
{
  unsigned long key = adpm_hash_str(f->name);

  spin_lock(&adpm_ftab_lock);
  hash_add(adpm_ftab, &f->hash_head, key);
  spin_unlock(&adpm_ftab_lock);
  return 0;
}
/*
 * adpm_del_file_from_table - unhash @name if it exists and is not open.
 * Claims the file by bumping open_cnt 0 -> 1; if the bump shows someone
 * already holds it open, the bump is undone and NULL is returned.  On
 * success the entry is removed from adpm_ftab and returned for the
 * caller to tear down and free (caller owns the returned pointer).
 * NOTE(review): between adpm_find_file() dropping the table lock and the
 * open_cnt bump here, another thread could open or delete the same file
 * -- confirm callers provide external serialization.
 */
struct adpm_client_file *adpm_del_file_from_table(char *name)
{
  struct adpm_client_file *f;
  f= adpm_find_file(name);
  if(!f)
    return NULL;
  if(atomic_add_return(1, &f->open_cnt) != 1){
    atomic_dec(&f->open_cnt);
    adpm_pr("file is opened ,cant del\n");
    return NULL;
  }
  spin_lock(&adpm_ftab_lock);
  hash_del(&f->hash_head);
  spin_unlock(&adpm_ftab_lock);
  return f;
}

/*
 * adpm_alloc_file - allocate and initialise a client-file descriptor.
 * Returns NULL on NULL/over-long name or allocation failure; otherwise a
 * zeroed descriptor with empty page/dirty trees and counters at 0.
 * NOTE(review): called with adpm_open_lock (a spinlock) held, but uses
 * GFP_KERNEL -- may sleep in atomic context; confirm.
 */
static struct adpm_client_file *adpm_alloc_file(char *name, struct super_block *sb){
  struct adpm_client_file *f;

  if(!name)
    return NULL;
  /* Reject over-long names before allocating (was checked after kzalloc,
   * causing a pointless alloc/free round trip). */
  if(strlen(name) >= ADPM_STR_LEN)
    return NULL;
  f = kzalloc(sizeof(*f), GFP_KERNEL);
  if(!f)
    return NULL;
  f->magic = ADPM_FILE_MAGIC;
  f->pages.rb_node = NULL;
  f->dirtys.rb_node = NULL;
  f->sbi = sb->s_fs_info;
  atomic_set(&f->open_cnt, 0);
  atomic_set(&f->mmap_cnt, 0);
  spin_lock_init(&f->pages_lock);
  spin_lock_init(&f->dirtys_lock);
  strcpy(f->name, name); /* safe: length verified above */
  return f;
}
static char *adpm_get_file_name(struct path *p){
  struct dentry *d;
  char *ret;
  int l;
  if(!p)return NULL;
  if(!p->dentry)return NULL;
  d = p->dentry;
  spin_lock(&d->d_lock);
  l = strlen(d->d_name.name);
  ret = kzalloc(l, GFP_KERNEL);
  if(!ret){
    spin_unlock(&d->d_lock);
    return NULL;
  }
  strcpy(ret, d->d_name.name);
  spin_unlock(&d->d_lock);
  return ret;
}

/* adpm_open_lock serializes concurrent opens of the same file. */
static DEFINE_SPINLOCK(adpm_open_lock);
/*
 * adpm_open - open an adpm file, enforcing single-opener semantics.
 * Looks the file up by dentry name; reuses an existing descriptor if the
 * file is not already open, otherwise allocates and registers a new one.
 * Returns 0, -ENOMEM, -E2BIG (name too long) or -EEXIST (already open).
 * Fixes: ret was left uninitialized on the "existing file successfully
 * opened" path, returning stack garbage.
 * NOTE(review): adpm_get_file_name()/adpm_alloc_file() allocate while
 * this spinlock is held -- possible sleep in atomic context; consider a
 * mutex here.
 */
static int adpm_open(struct inode *inode, struct file *file)
{
  struct adpm_client_file *f;
  char *name;
  int ret = 0; /* success unless an error path below overwrites it */

  spin_lock(&adpm_open_lock);
  adpm_pr("begin open file\n");
  name = adpm_get_file_name(&file->f_path);
  if(!name){
    spin_unlock(&adpm_open_lock);
    return -ENOMEM;
  }
  if(strlen(name) >= ADPM_STR_LEN){
    ret = -E2BIG;
    pr_err("ERROR: %s name too long\n", __FUNCTION__);
    goto out1;
  }
  f = adpm_find_file(name);
  if(f){
    /* atomically claim the single open slot: 0 -> 1 means we got it */
    if(atomic_add_return(1, &f->open_cnt) != 1){
      atomic_dec(&f->open_cnt);
      ret = -EEXIST;
      pr_err("ERROR: %s file already opened\n", __FUNCTION__);
      goto out1;
    }else{/* success open */
      adpm_pr("success open file %s\n", name);
    }
  }else{
    adpm_pr("now we need alloc a new client file\n");
    f = adpm_alloc_file(name, inode->i_sb); 
    if(!f){
      ret = -ENOMEM;
      goto out1;
    }
    ret = adpm_insert_file_to_table(f);
    BUG_ON(ret);
    atomic_inc(&f->open_cnt);
  }

  file->private_data = f; 
out1:
  kfree(name);
  spin_unlock(&adpm_open_lock);
  return ret;
}

/*
 * adpm_release - last-close cleanup for an adpm file.
 * Drops the open count taken in adpm_open().  If mmap_cnt is still
 * non-zero (the pr_err marks this as unexpected at release time), the
 * cached reverse-mapping pte pointers of all resident pages are cleared
 * and the counter is reset to 0.  Always returns 0.
 */
static int adpm_release(struct inode *inode, struct file *file)
{
  struct adpm_client_file *f = file->private_data;
  if(!f){
    pr_err("ERROR: file metadata missing\n");
    return 0;
  }
  adpm_pr("adpm release %s\n",f->name);
  atomic_dec(&f->open_cnt);
  if(atomic_read(&f->mmap_cnt)){
    struct rb_node *p;
    struct adpm_page *pg;
    pr_err("ERROR: mmap value should be 0\n");
    spin_lock(&f->pages_lock);
    p = rb_first(&f->pages);
    while(p){
      pg = container_of(p, struct adpm_page, vm_node);
      pg->rmap.pt = 0; /* invalidate the cached pte pointer */
      p = rb_next(p);
    }
    spin_unlock(&f->pages_lock);
    atomic_set(&f->mmap_cnt, 0);
  }
  file->private_data = NULL;
  return 0;
}

/* read(2) is rejected: adpm files are accessed through mmap only. */
ssize_t adpm_read(struct file *file, char __user *buf, size_t len, loff_t *ppos)
{
  pr_err("ERROR: read is not allowed\n");
  return -EINVAL;
}
/* write(2) is rejected: adpm files are accessed through mmap only. */
ssize_t adpm_write(struct file *file, const char __user *buf, size_t len, loff_t *ppos)
{
  pr_err("ERROR: write is not allowed\n");
  return -EINVAL;
}
/* fsync(2) is rejected: persistence is handled through the server path. */
int adpm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
  pr_err("ERROR: fsync is not allowed\n");
  return -EINVAL;
}

/* page_mkwrite is unsupported: pages are mapped writable up front in the
 * fault handler, so write-notify upgrades are not expected. */
int adpm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
  pr_err("ERROR: not support page mkwrite\n");
  return -EINVAL;
}

/*
 * adpm_get_locked_pte - walk (allocating as needed) the page tables for
 * @addr and return the pte mapped and locked; *ptl receives the page-
 * table lock for the caller to pass to pte_unmap_unlock().
 * Returns NULL if an intermediate table allocation fails.
 * (Three-level walk pgd->pud->pmd->pte; no p4d in this kernel's API.)
 */
static __always_inline pte_t *
adpm_get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl)
{
  pgd_t *pgd = pgd_offset(mm, addr);
  pud_t *pud = pud_alloc(mm, pgd, addr);
  if (pud) {
    pmd_t *pmd = pmd_alloc(mm, pud, addr);
    if (pmd) {
      return pte_alloc_map_lock(mm, pmd, addr, ptl);
    }
  }
  return NULL;
}

/*
 * adpm_get_pte - like adpm_get_locked_pte() but returns the pte without
 * taking the page-table lock.  BUGs on a huge pmd or on a failed walk
 * (callers use this only after the fault path has built the tables).
 */
static __always_inline pte_t *
adpm_get_pte(struct mm_struct *mm, struct vm_area_struct *vma,
        unsigned long addr)
{
  pgd_t *pgd = pgd_offset(mm, addr);
  pud_t *pud = pud_alloc(mm, pgd, addr);
  if (pud) {
    pmd_t *pmd = pmd_alloc(mm, pud, addr);
    if (pmd) {
      BUG_ON(pmd_trans_huge(*pmd)); /* transparent huge pages unsupported */
      return pte_alloc_map(mm, vma, pmd, addr);
    }
  }
  BUG();
  return NULL;
}

/*
 * adpm_vm_insert_pfn - install a special (no struct page) pte mapping
 * @pfn at @addr in @vma, forced writable via _PAGE_RW.
 * Returns 0 on success, -ENOMEM if the page-table walk/allocation fails
 * (previously a NULL pte was passed straight to set_pte_at()).
 * NOTE(review): @is_write is currently unused -- the mapping is always
 * made writable; confirm that is intended.
 */
static __always_inline int adpm_vm_insert_pfn(struct vm_area_struct *vma,
                 unsigned long addr,
                 unsigned long pfn,
                 bool is_write)
{
  pgprot_t pgprot;
  pte_t *pte, entry;
  spinlock_t *ptl = NULL;
  int ret = 0;

  pte = adpm_get_locked_pte(vma->vm_mm, addr, &ptl);
  if (!pte)
    return -ENOMEM; /* page-table allocation failed */
  pgprot = vma->vm_page_prot;
  pgprot_val(pgprot) |= _PAGE_RW;
  entry = pte_mkspecial(pfn_pte(pfn, pgprot));
  set_pte_at(vma->vm_mm, addr, pte, entry);
  pte_unmap_unlock(pte, ptl);
  return ret;
}
/* Insert @di into f->dirtys, ordered by (start_vir, end_vir).
 * Caller must hold the tree lock (f->dirtys_lock). */
void adpm_file_insert_dirty(struct adpm_client_file *f, struct adpm_dirty *di)
{
  struct rb_node **link = &f->dirtys.rb_node;
  struct rb_node *parent = NULL;

  while (*link) {
    struct adpm_dirty *cur = container_of(*link, struct adpm_dirty, node);
    int left = cur->start_vir > di->start_vir ||
        (cur->start_vir == di->start_vir && cur->end_vir > di->end_vir);

    parent = *link;
    link = left ? &parent->rb_left : &parent->rb_right;
  }
  rb_link_node(&di->node, parent, link);
  rb_insert_color(&di->node, &f->dirtys);
}
// enter the function with rbtree locked
struct adpm_page *adpm_file_find_page(struct adpm_client_file *f, long vmpgoff)
{
  struct rb_node *node = f->pages.rb_node;
  long tmp_off;
  while(node){
    tmp_off = container_of(node, struct adpm_page, vm_node)->vm_pgoff;
    if(tmp_off == vmpgoff){
      return container_of(node, struct adpm_page, vm_node);
    }
    if(tmp_off > vmpgoff) node = node->rb_left;
    else node = node->rb_right;
  }
  return NULL;
}
/* Insert @pg into f->pages keyed by vm_pgoff; duplicates are a bug.
 * Caller must hold the tree lock (f->pages_lock). */
void adpm_file_insert_page(struct adpm_client_file *f, struct adpm_page *pg)
{
  struct rb_node **link = &f->pages.rb_node;
  struct rb_node *parent = NULL;

  while (*link) {
    struct adpm_page *cur = container_of(*link, struct adpm_page, vm_node);

    BUG_ON(cur->vm_pgoff == pg->vm_pgoff);
    parent = *link;
    if (cur->vm_pgoff > pg->vm_pgoff)
      link = &parent->rb_left;
    else
      link = &parent->rb_right;
  }
  rb_link_node(&pg->vm_node, parent, link);
  rb_insert_color(&pg->vm_node, &f->pages);
}
/*
 * adpm_insert_one_data_page - allocate a local page for @pgoff, fetch its
 * contents from the server, and insert it into f->pages.
 * Caller must hold the rbtree lock and the lru list lock.
 * Returns the page or ERR_PTR(-EIO) if the server fetch fails.
 * Fix: pg was dereferenced (pg->f = f) before the BUG_ON(!pg) check.
 */
struct adpm_page *adpm_insert_one_data_page(struct adpm_client_file *f, long pgoff, bool no_swap)
{
  struct adpm_page *pg;
  int ret;

  BUG_ON(!f);
  pg = adpm_buddy_get_one_page(no_swap);
  BUG_ON(!pg); /* validate before first use */
  pg->f = f;
  pg->vm_pgoff = pgoff;
  ret = adpm_server_get_page(f, pg, pgoff);
  if(ret){
    pr_err("ERROR: %s get page error\n", __FUNCTION__);
    adpm_buddy_free_one_page(pg); /* return the page on fetch failure */
    return ERR_PTR(-EIO);
  }
  adpm_pr("get page data done\n");
  adpm_file_insert_page(f, pg);
  adpm_pr("insert page done\n");
  return pg;
}
/*
 * adpm_check_insert_one_data_page - return the page at @pgoff, faulting
 * it in from the server if it is not yet resident.
 * Caller must hold adpm_buddy->lru_list_lock.
 * Returns the page or the ERR_PTR propagated from the insert path.
 * Fix: the error path called spin_unlock_irqrestore(&f->pages_lock,
 * flags) with an undeclared 'flags' (compile error) and mismatched the
 * plain spin_lock() above -- now a plain spin_unlock().
 */
struct adpm_page *adpm_check_insert_one_data_page(struct adpm_client_file *f, long pgoff)
{
  struct adpm_page *pg;

  spin_lock(&f->pages_lock);
  pg = adpm_file_find_page(f, pgoff);
  if(!pg){/* no page found, establish a new page map */
    adpm_pr("no page found, generate new map\n");
    pg = adpm_insert_one_data_page(f, pgoff, false);
    if(IS_ERR(pg)){
      spin_unlock(&f->pages_lock);
      return pg;
    }
  }
  spin_unlock(&f->pages_lock);
  return pg;
}
/*
 * adpm_get_fault_page - fault-path helper: ensure the page backing @addr
 * is resident, map its pfn into @vma, and record a reverse mapping.
 * Returns VM_FAULT_NOPAGE on success or VM_FAULT_SIGBUS on failure.
 * Fix: previously returned PTR_ERR(pg) (a negative errno) from a fault
 * handler, which must return VM_FAULT_* codes.
 */
static int adpm_get_fault_page(struct vm_area_struct *vma, unsigned long addr, int is_write)
{
  struct adpm_client_file *f = vma->vm_file->private_data;
  unsigned long pgoff;
  struct adpm_page *pg;
  unsigned long flags;

  if(!f){
    pr_err("ERROR fault file metadata miss\n");
    return VM_FAULT_SIGBUS;
  }
  /* translate the faulting address to a file page offset */
  pgoff = f->vminfo.vm_pgoff + ((addr - f->vminfo.vm_start)>>PAGE_SHIFT);

  spin_lock_irqsave(&adpm_buddy->lru_list_lock, flags);
  pg = adpm_check_insert_one_data_page(f, pgoff);
  if(IS_ERR(pg)){
    spin_unlock_irqrestore(&adpm_buddy->lru_list_lock, flags);
    return VM_FAULT_SIGBUS; /* fault handlers speak VM_FAULT_*, not errno */
  }
  adpm_vm_insert_pfn(vma, addr, adpm_page_to_real_pfn(pg, f->sbi), is_write);
  /* record where this page is mapped so it can be unmapped later */
  pg->rmap.pt = adpm_get_pte(vma->vm_mm, vma, addr);
  pg->rmap.addr = addr & (~(PAGE_SIZE - 1));
  pg->rmap.vma = vma;
  spin_unlock_irqrestore(&adpm_buddy->lru_list_lock, flags);

  adpm_pr("map page done \n");
  return VM_FAULT_NOPAGE;
}
/* VMA fault callback: round the faulting address down to a page boundary
 * and delegate to adpm_get_fault_page(). */
static int adpm_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
  unsigned long page_addr = (unsigned long)vmf->virtual_address & PAGE_MASK;
  int writing = vmf->flags & FAULT_FLAG_WRITE;

  adpm_pr("!!generate page fault %lx  %s\n", page_addr, writing ? "write":"read");
  return adpm_get_fault_page(vma, page_addr, writing);
}
/* Free every dirty-range record tracked for @f, emptying f->dirtys. */
void adpm_file_drop_dirtys(struct adpm_client_file *f)
{
  unsigned long irqflags;
  struct rb_node *node;

  spin_lock_irqsave(&f->dirtys_lock, irqflags);
  for (node = f->dirtys.rb_node; node; node = f->dirtys.rb_node) {
    rb_erase(node, &f->dirtys);
    kfree(container_of(node, struct adpm_dirty, node));
  }
  spin_unlock_irqrestore(&f->dirtys_lock, irqflags);
}
/*
 * adpm_file_drop_pages - release every resident page of @f back to the
 * buddy allocator, emptying f->pages.
 * Takes both adpm_buddy->lru_list_lock and f->pages_lock itself -- the
 * old "enter with lru locked" comment was wrong (callers such as
 * adpm_del_file and adpm_mmap hold neither).
 */
void adpm_file_drop_pages(struct adpm_client_file *f)
{
  unsigned long flags;
  struct rb_node *p;

  spin_lock_irqsave(&adpm_buddy->lru_list_lock, flags);
  spin_lock(&f->pages_lock);
  while((p = f->pages.rb_node)){
    rb_erase(p, &f->pages);
    adpm_buddy_free_one_page(container_of(p, struct adpm_page, vm_node));
  }
  /* loop exits only when rb_node is already NULL, so no reset needed */
  spin_unlock(&f->pages_lock);
  spin_unlock_irqrestore(&adpm_buddy->lru_list_lock, flags);
}
/*
 * adpm_del_file - remove @name from the table and free all of its state.
 * Silently bails (with a debug message) if the file is absent or open.
 * (Removed an unused 'flags' local.)
 */
void adpm_del_file(char *name)
{
  struct adpm_client_file *f;

  f = adpm_del_file_from_table(name);
  if(!f){
    adpm_pr("can't find file\n");
    return;
  }
  adpm_file_drop_pages(f);
  adpm_file_drop_dirtys(f);
  f->magic = 0; /* poison the magic so stale pointers are detectable */
  kfree(f);
}

/* VMA callbacks installed by adpm_mmap(); page_mkwrite always rejects. */
const struct vm_operations_struct adpm_vm_ops = {
  .fault    = adpm_vma_fault,
  .page_mkwrite = adpm_page_mkwrite,
};

int adpm_mmap(struct file *file, struct vm_area_struct *vma){
  struct adpm_client_file *f = file->private_data;
  long old_id, old_version, tot_size;
  unsigned long flags;
  
  if(!f){
    pr_err("ERROR: %s private file missing\n", __FUNCTION__);
    return -ENOENT;
  }
  if(atomic_add_return(1, &f->mmap_cnt) != 1){
    atomic_dec(&f->mmap_cnt);
    return -EEXIST;
  }

  old_id = f->base_info.id;
  old_version = f->base_info.version;
  tot_size = vma->vm_end - vma->vm_start + vma->vm_pgoff * PAGE_SIZE;
  
  if(adpm_server_get_base_info(adpm_hash_str(f->name), &f->base_info)){
    pr_err("ERROR: %s get base info error\n", __FUNCTION__);
    atomic_dec(&f->mmap_cnt);
    return -EIO;
  }
  if(old_id != f->base_info.id){
    // a new file with new id ,drop all the old pages;
    adpm_pr("old id != id\n");
    adpm_file_drop_pages(f);
    adpm_file_drop_dirtys(f);
  }
  if(!f->base_info.id){
    //create a new file
    if(adpm_server_create(f->name, &f->base_info, tot_size)){
      pr_err("ERROR: %s create file failed \n", __FUNCTION__);
      atomic_dec(&f->mmap_cnt);
      return -EIO;
    }
  }
  adpm_pr("file size: %lu\n", f->base_info.size);
  if(tot_size > f->base_info.size){
    pr_err("ERROR %s size too large\n", __FUNCTION__);
    atomic_dec(&f->mmap_cnt);
    return -E2BIG;
  }
  
  if(adpm_server_update(old_version, f->base_info.version, f)){
    pr_err("ERROR: %s update wrong\n", __FUNCTION__);
    atomic_dec(&f->mmap_cnt);
    return -EIO;
  }
  
  adpm_pr("vm_pgoff = %lu\n", vma->vm_pgoff);
  vma->vm_flags |= VM_SHARED;
  vma->vm_flags |= VM_WRITE|VM_READ;
  f->vminfo.vma = vma;
  f->vminfo.vm_end = vma->vm_end;
  f->vminfo.vm_start = vma->vm_start;
  f->vminfo.vm_pgoff = vma->vm_pgoff;
  f->vminfo.vm_flags = vma->vm_flags;

  file_accessed(file);
  vma->vm_ops = &adpm_vm_ops;
  adpm_pr("mmap done\n");
  return 0;
}

/*
 * adpm_clear_ftab - tear down every entry in the global file table
 * (module-unload style cleanup).  Removed an unused 'flags' local.
 * NOTE(review): adpm_file_drop_pages() takes the buddy lru lock while
 * adpm_ftab_lock is held here -- verify this nesting order against all
 * other users of those locks.
 */
void adpm_clear_ftab(void){
  int bkt;
  struct hlist_node *ht;
  struct adpm_client_file *f;

  spin_lock(&adpm_ftab_lock);
  for(bkt = 0; bkt < HASH_SIZE(adpm_ftab); ++bkt){
    /* drain each bucket from its head until empty */
    while((ht=adpm_ftab[bkt].first)){
      hash_del(ht);
      f = container_of(ht, struct adpm_client_file, hash_head);
      adpm_file_drop_pages(f);
      adpm_file_drop_dirtys(f);
      f->magic = 0; /* poison before freeing */
      kfree(f);
    }
  }
  spin_unlock(&adpm_ftab_lock);
}
