#include "alloc.h"

#define ADPM_SWAP_LINE_FACT 0.3
#define ADPM_SWAP_LOW_FACT  0.15

struct task_struct *adpm_kswapd_task = NULL;
struct adpm_allocator *adpm_buddy = NULL;
static wait_queue_head_t adpm_kswapd_wqh;

/*
 * Decide whether the reclaimer should run: true when the free-page
 * count has fallen below the swap_line watermark.
 *
 * nr_free is sampled under free_lock; swap_line is written once at
 * init time (adpm_init_buddy), so it is read without the lock.
 */
bool adpm_should_swap(void)
{
  unsigned long irq_flags;
  long free_pages;

  spin_lock_irqsave(&adpm_buddy->free_lock, irq_flags);
  free_pages = adpm_buddy->nr_free;
  spin_unlock_irqrestore(&adpm_buddy->free_lock, irq_flags);

  return free_pages < adpm_buddy->swap_line;
}
int adpm_page_accessed(struct *adpm_page pg)
{
  int accessed;
  pte_t *p = pg->rmap.pt;
  if(p){
    accessed = test_and_clear_bit(_PAGE_BIT_ACCESSED, (unsigned long)&p->pte);
    if(accessed)flush_tlb_page(pg->rmap.vma, pg->rmap.addr);
    return accessed;
  }
  return 0;
}
/*
 * Scan up to @nr_scan pages from the cold end (tail) of the active
 * list.  No-swap or recently-accessed pages are rotated back to the
 * list head; idle pages are demoted to the inactive list.
 *
 * Fixes: spin_lock_irq() does not take a flags argument (use
 * spin_lock_irqsave); container_of() was passed &list.prev (address
 * of the field) instead of list.prev (the node pointer); list_move()
 * arguments were swapped — the signature is list_move(entry, head).
 */
void adpm_shrink_act(long nr_scan)
{
  unsigned long flags;
  struct adpm_page *pg;

  spin_lock_irqsave(&adpm_buddy->lru_list_lock, flags);
  nr_scan = min(nr_scan, adpm_buddy->nr_act_list);
  while(nr_scan){
    /* coldest page sits at the tail of the active list */
    pg = container_of(adpm_buddy->act_list.prev, struct adpm_page, lru);
    spin_lock(&pg->lru_lock);
    BUG_ON(adpm_page_get_stat(pg) != ADPM_PAGE_STAT_ACTV);
    if(adpm_get_swap_flag(pg)){
      /* cannot swap: rotate back to the head */
      list_move(&pg->lru, &adpm_buddy->act_list);
    }else if(adpm_page_accessed(pg)){
      /* referenced since last scan: keep it active */
      list_move(&pg->lru, &adpm_buddy->act_list);
    }else{
      /* idle: demote to the inactive list */
      list_move(&pg->lru, &adpm_buddy->inact_list);
      adpm_page_set_stat(pg, ADPM_PAGE_STAT_INAC);
      --adpm_buddy->nr_act_list;
      ++adpm_buddy->nr_inact_list;
    }
    --nr_scan;
    spin_unlock(&pg->lru_lock);
  }
  spin_unlock_irqrestore(&adpm_buddy->lru_list_lock, flags);
}
/*
 * Scan up to @nr_scan pages from the tail of the inactive list.
 * No-swap or referenced pages are promoted back to the active list;
 * truly idle pages have their pte cleared, are removed from the
 * owning file's page tree, and are returned to the free list.
 *
 * Returns the number of pages actually freed (the original fell off
 * the end of a `long` function without returning anything).
 *
 * Fixes: spin_lock_irq()+flags misuse; container_of() was applied to
 * the list HEAD itself instead of inact_list.prev; list_move() args
 * were swapped; pte clear is now guarded against a NULL pte.
 */
long adpm_shrink_inact(long nr_scan)
{
  unsigned long flags;
  struct adpm_page *pg;
  struct adpm_client_file *f;
  bool is_free;
  long nr_freed = 0;

  spin_lock_irqsave(&adpm_buddy->lru_list_lock, flags);
  nr_scan = min(nr_scan, adpm_buddy->nr_inact_list);
  while(nr_scan){
    is_free = false;
    /* coldest page sits at the tail of the inactive list */
    pg = container_of(adpm_buddy->inact_list.prev, struct adpm_page, lru);
    spin_lock(&pg->lru_lock);
    BUG_ON(adpm_page_get_stat(pg) != ADPM_PAGE_STAT_INAC);
    if(adpm_get_swap_flag(pg) || adpm_page_accessed(pg)){
      /* pinned or referenced: promote back to the active list */
      list_move(&pg->lru, &adpm_buddy->act_list);
      adpm_page_set_stat(pg, ADPM_PAGE_STAT_ACTV);
      --adpm_buddy->nr_inact_list;
      ++adpm_buddy->nr_act_list;
    }else{
      /* idle: reclaim it below, after dropping pg->lru_lock */
      is_free = true;
    }
    spin_unlock(&pg->lru_lock);
    if(is_free){
      if(pg->rmap.pt)
        native_ptep_get_and_clear(pg->rmap.pt);
      pg->rmap.pt = NULL;
      f = pg->f;
      spin_lock(&f->pages_lock);
      /* NOTE(review): rb_erase() expects a struct rb_node *; confirm
       * the rb_node member of struct adpm_page is what should be
       * passed here rather than the page pointer itself. */
      rb_erase(pg, &f->pages);
      spin_unlock(&f->pages_lock);
      adpm_buddy_free_one_page(pg);
      ++nr_freed;
    }
    --nr_scan;
  }
  spin_unlock_irqrestore(&adpm_buddy->lru_list_lock, flags);
  return nr_freed;
}
/*
 * One reclaim pass: age the active list, then shrink the inactive
 * list.  Returns the value of adpm_shrink_inact() for @nr_scan.
 */
long adpm_do_balance(long nr_scan)
{
  long reclaimed;

  adpm_pr("adpm do balance\n");
  adpm_shrink_act(nr_scan);
  reclaimed = adpm_shrink_inact(nr_scan);
  return reclaimed;
}
/*
 * Reclaimer kernel thread: wakes every 0.1 s (or on an explicit wake
 * of adpm_kswapd_wqh) and runs a balance pass whenever free pages
 * drop below the swap line.
 *
 * Fixes: wait_event_interruptible_timeout() returns the REMAINING
 * jiffies (>= 1) when the condition is true, so the old `ret == 1`
 * test missed almost every wakeup — use `ret > 0`.  The routine
 * reclaim count is informational, not an error, so it is no longer
 * logged at pr_err severity.
 */
static int adpm_kswapd(void *data)
{
  int ret;
  unsigned long timeo;
  long nr_reclaimed;

  adpm_pr("adpm_kswapd started\n");
  current->flags |= PF_MEMALLOC | PF_KSWAPD;
  timeo = HZ / 10; //per 0.1s

  for(;;){
    ret = wait_event_interruptible_timeout(adpm_kswapd_wqh, adpm_should_swap(), timeo);
    if(kthread_should_stop())break;
    if(ret == -ERESTARTSYS){
      pr_err("ERROR: %s wait event wake up by signal\n", __FUNCTION__);
      break;
    }
    if(ret > 0){ /* condition true: memory is below the swap line */
      nr_reclaimed = adpm_do_balance(100);
      adpm_pr("reclaimed %ld pages\n", nr_reclaimed);
    }
  }
  adpm_pr("adpm_kswapd stoped\n");
  return 0;
}

int adpm_init_buddy(struct adpm_page *base_vir, long start_pfn ,long nr_pages, struct adpm_sb_info *sbi)
{
  int i;
  unsigned long flags;
  adpm_buddy = kzalloc(sizeof(*adpm_buddy),GFP_KERNEL);
  if(!adpm_buddy)
    return -ENOMEM;
  
  spin_lock_init(&adpm_buddy->free_lock);
  spin_lock_init(&adpm_buddy->lru_list_lock);
  spin_lock_init(&adpm_buddy->dirty_list_lock);

  init_waitqueue_head(&adpm_kswapd_wqh);
  
  INIT_LIST_HEAD(&adpm_buddy->free);
  INIT_LIST_HEAD(&adpm_buddy->inact_list);
  INIT_LIST_HEAD(&adpm_buddy->act_list);
  INIT_LIST_HEAD(&adpm_buddy->dirty_list);
  
  adpm_buddy->nr_free = 0;
  adpm_buddy->start_pfn = start_pfn;
  adpm_buddy->virt_base = (unsigned long)base_vir;
  adpm_buddy->nr_act_list = 0;
  adpm_buddy->nr_inact_list = 0;
  adpm_buddy->nr_dirty_list = 0;
  adpm_buddy->sbi = sbi;
  
  spin_lock_irqsave(&adpm_buddy->free_lock, flags);
  for(i = start_pfn; i < nr_pages; i++){
    spin_lock(&(base_vir + i)->lru_lock);
    adpm_page_set_stat(base_vir + i, ADPM_PAGE_STAT_FREE);
    list_add(&((base_vir+i)->lru), &adpm_buddy->free);
    (base_vir + i)->dirty_cnt = 0;
    spin_unlock(&(base_vir + i)->lru_lock);
    ++adpm_buddy->nr_free;
  }
  adpm_buddy->swap_line = adpm_buddy->nr_free * ADPM_SWAP_LINE_FACT;
  adpm_buddy->swap_low =  adpm_buddy->nr_free * ADPM_SWAP_LOW_FACT;
  spin_unlock_irqrestore(&adpm_buddy->free_lock, flags);

  adpm_buddy->dirty_struct_pool = kmem_cache_create("adpm dirty struct pool",
                         sizeof(struct adpm_dirty), 0, SLAB_RECLAIM_ACCOUNT, NULL);
  if(!adpm_buddy->dirty_struct_pool)
    return -ENOMEM;
  
  adpm_kswapd_task = kthread_run(adpm_kswapd, NULL, "adpm-kswapd");
  if(IS_ERR(adpm_kswapd_task)){
    int ret = PTR_ERR(adpm_kswapd_task);
    adpm_kswapd_task = NULL;
    kmem_cache_destroy(adpm_buddy->dirty_struct_pool);
    adpm_buddy->dirty_struct_pool = NULL;
    pr_err("ERROR: %s swapd create wrong\n", __FUNCTION__);
    return ret;
  }
  return 0;
}

/*
 * Tear down the allocator: stop the reclaimer thread, destroy the
 * dirty-struct slab cache, and free the allocator itself.
 *
 * Fix: the original read adpm_buddy->dirty_struct_pool BEFORE the
 * NULL check on adpm_buddy, crashing if called when init never ran
 * (or already tore down).  Safe to call multiple times.
 */
void adpm_destroy_buddy(void)
{
  if(adpm_kswapd_task){
    kthread_stop(adpm_kswapd_task);
    adpm_kswapd_task = NULL;
  }
  if(!adpm_buddy)
    return;
  if(adpm_buddy->dirty_struct_pool){
    kmem_cache_destroy(adpm_buddy->dirty_struct_pool);
    adpm_buddy->dirty_struct_pool = NULL;
  }
  kfree(adpm_buddy);
  adpm_buddy = NULL;
}
// enter with lru_list_lock locked
/*
 * Pop one page from the free list and place it on the inactive list.
 *
 * @no_swap: when true, mark the page unswappable (adpm_set_swap_flag)
 *           so the reclaimer will not evict it.
 *
 * Returns the page, or NULL when the free list is empty (forced
 * reclaim is still a TODO below).
 *
 * Locking: caller holds lru_list_lock (per the comment above), which
 * is why nr_inact_list is updated here without taking it again.
 * free_lock is taken with plain spin_lock — NOTE(review): this
 * presumes the caller's lru_list_lock acquisition already disabled
 * irqs (other free_lock users use irqsave); confirm at call sites.
 */
struct adpm_page *adpm_buddy_get_one_page(bool no_swap)
{
  struct adpm_page *p = NULL;
  spin_lock(&adpm_buddy->free_lock);
  if(adpm_buddy->nr_free){ // just return the free page;
    p = container_of(adpm_buddy->free.next, struct adpm_page, lru);
    list_del(&p->lru);
    --adpm_buddy->nr_free;
    BUG_ON(adpm_page_get_stat(p) != ADPM_PAGE_STAT_FREE);
  }else{
    pr_err("ERROR: %s on free page\n", __FUNCTION__);
    // force to reclaim the pages
    // TODO
  }
  spin_unlock(&adpm_buddy->free_lock);
  if(p){// put the page into the inactive list
    spin_lock(&p->lru_lock);
    adpm_page_set_stat(p, ADPM_PAGE_STAT_INAC);
    if(no_swap)
      adpm_set_swap_flag(p);
    list_add(&p->lru, &adpm_buddy->inact_list);
    spin_unlock(&p->lru_lock);
    ++adpm_buddy->nr_inact_list;
  }
  return p;
}
//enter with lru list locked
/*
 * Return @p to the free list.
 *
 * Phase 1 (under p->lru_lock + dirty_list_lock): unlink the page from
 * whichever LRU list it is on, fix the matching counter, and clear
 * its flags / file back-pointer.  dirty_list_lock is taken
 * unconditionally even for non-dirty pages, which keeps the dirty
 * counter update inside it for the DIRT case.
 *
 * Phase 2 (under free_lock, irqs saved): mark the page FREE and push
 * it onto the free list.  p->lru_lock is re-taken inside free_lock,
 * matching the free_lock -> lru_lock order used elsewhere.
 *
 * Caller must hold lru_list_lock (per the comment above); that is
 * what protects the nr_*_list counters touched here.
 */
void adpm_buddy_free_one_page(struct adpm_page *p)
{
  unsigned long flags;
  //del from lru list
  BUG_ON(adpm_page_get_stat(p) == ADPM_PAGE_STAT_FREE);
  spin_lock(&p->lru_lock);
  spin_lock(&adpm_buddy->dirty_list_lock);
  list_del(&p->lru);
  if(adpm_page_get_stat(p) == ADPM_PAGE_STAT_INAC){
    --adpm_buddy->nr_inact_list;
  }else if(adpm_page_get_stat(p) == ADPM_PAGE_STAT_ACTV){
    --adpm_buddy->nr_act_list;
  }else{
    // only the four known states exist; anything else is a bug
    BUG_ON(adpm_page_get_stat(p) != ADPM_PAGE_STAT_DIRT);
    --adpm_buddy->nr_dirty_list;
    p->dirty_cnt = 0;
  }
  p->flag = 0;
  p->f = NULL;
  spin_unlock(&adpm_buddy->dirty_list_lock);
  spin_unlock(&p->lru_lock);

  //add to free list
  spin_lock_irqsave(&adpm_buddy->free_lock, flags);
  spin_lock(&p->lru_lock);
  adpm_page_set_stat(p, ADPM_PAGE_STAT_FREE);
  list_add(&p->lru, &adpm_buddy->free);
  ++adpm_buddy->nr_free;
  spin_unlock(&p->lru_lock);
  spin_unlock_irqrestore(&adpm_buddy->free_lock, flags);
}

//enter with lru list locked
/*
 * For each page offset in [start_pgoff, end_pgoff): look the page up
 * in @f's page tree; insert it if absent, otherwise pin it against
 * swap (adpm_set_swap_flag).  Successfully handled pages are
 * collected into @pgs.
 *
 * On failure all already-pinned pages that have no dirty references
 * are unpinned again, and the insert error is returned; 0 on success.
 *
 * Fix: the error path unlocked f->pages_lock and then fell through
 * to the unlock after the loop — a double unlock.  The error path now
 * just breaks and lets the single unlock below run.
 */
static int adpm_chk_ins_isol_pages(struct adpm_client_file *f, struct adpm_page **pgs, long start_pgoff, long end_pgoff)
{
  unsigned long flags;
  struct adpm_page *pg = NULL;
  long n, i, j;
  n = 0;
  spin_lock_irqsave(&f->pages_lock, flags);
  for(i = start_pgoff; i < end_pgoff; i++){
    pg = adpm_file_find_page(f, i);
    if(!pg){
      pg = adpm_insert_one_data_page(f, i, true);
    }else{
      spin_lock(&pg->lru_lock);
      adpm_set_swap_flag(pg);
      spin_unlock(&pg->lru_lock);
    }
    if(IS_ERR(pg)){
      /* roll back: unpin pages isolated so far (unless dirty) */
      for(j = 0; j < n; j++){
        spin_lock(&pgs[j]->lru_lock);
        if(!pgs[j]->dirty_cnt)
          adpm_clr_swap_flag(pgs[j]);
        spin_unlock(&pgs[j]->lru_lock);
      }
      break; /* single unlock below handles the lock */
    }
    pgs[n++] = pg;
  }
  spin_unlock_irqrestore(&f->pages_lock, flags);
  if(IS_ERR(pg))
    return PTR_ERR(pg);
  return 0;
}
/*
 * Mark the pages backing [vir_start, vir_start+len) in @f dirty:
 * isolate/insert them, bump each page's dirty_cnt, move ACTV/INAC
 * pages onto the dirty list, and record the dirty range on @f.
 *
 * Returns 0 on success or a negative errno.
 *
 * Fixes (all leaks): @pgs was never freed on the success path, nor
 * on the kmem_cache_zalloc failure path; @dir was leaked when page
 * isolation failed.  Also kcalloc with sizeof(*pgs) replaces the
 * mistyped `tot_pgs * sizeof(struct adpm_page **)` (overflow-checked
 * and correctly typed), and the duplicated ACTV/INAC branches are
 * merged.
 */
int adpm_make_dirty(struct adpm_client_file *f, unsigned long vir_start, unsigned long len)
{
  struct adpm_dirty *dir;
  long start_pgoff, end_pgoff, i, tot_pgs;
  unsigned long flags;
  struct adpm_page *pg, **pgs;
  int ret;

  start_pgoff = (vir_start - f->vminfo.vm_start) >> PAGE_SHIFT;
  end_pgoff = (vir_start - f->vminfo.vm_start + len + PAGE_SIZE - 1)>>PAGE_SHIFT;
  tot_pgs = end_pgoff - start_pgoff;

  pgs = kcalloc(tot_pgs, sizeof(*pgs), GFP_KERNEL);
  if(!pgs)
    return -ENOMEM;

  dir = kmem_cache_zalloc(adpm_buddy->dirty_struct_pool, GFP_KERNEL);
  if(!dir){
    ret = -ENOMEM;
    goto out_free_pgs;
  }

  spin_lock_irqsave(&adpm_buddy->lru_list_lock, flags);
  ret = adpm_chk_ins_isol_pages(f, pgs, start_pgoff, end_pgoff);
  if(ret){
    spin_unlock_irqrestore(&adpm_buddy->lru_list_lock, flags);
    goto out_free_dir;
  }
  for(i = 0; i < tot_pgs; i++){
    int stat;
    pg = pgs[i];
    spin_lock(&pg->lru_lock);
    ++pg->dirty_cnt;
    stat = adpm_page_get_stat(pg);
    BUG_ON(stat == ADPM_PAGE_STAT_FREE);
    if(stat == ADPM_PAGE_STAT_ACTV || stat == ADPM_PAGE_STAT_INAC){
      /* first dirtying: migrate from its LRU list to the dirty list */
      list_del(&pg->lru);
      if(stat == ADPM_PAGE_STAT_ACTV)
        --adpm_buddy->nr_act_list;
      else
        --adpm_buddy->nr_inact_list;
      ++adpm_buddy->nr_dirty_list;
      spin_lock(&adpm_buddy->dirty_list_lock);
      list_add(&pg->lru, &adpm_buddy->dirty_list);
      spin_unlock(&adpm_buddy->dirty_list_lock);
    }
    adpm_page_set_stat(pg, ADPM_PAGE_STAT_DIRT);
    spin_unlock(&pg->lru_lock);
  }
  spin_unlock_irqrestore(&adpm_buddy->lru_list_lock, flags);

  dir->start_vir = vir_start;
  dir->end_vir = vir_start + len;
  adpm_file_insert_dirty(f, dir); /* ownership of dir passes to f */
  kfree(pgs);                     /* only needed during the passes above */
  return 0;

out_free_dir:
  kmem_cache_free(adpm_buddy->dirty_struct_pool, dir);
out_free_pgs:
  kfree(pgs);
  return ret;
}


