#include "slave.h"
#include "simd.h"
#include "aceMesh_stack.h"
#include "aceMesh_utils.h"
#include <stddef.h>
#include <stdlib.h>
#ifdef TARG_SW5
#include "share.h"
#endif
#include "am_assert.h"
#if defined(TARG_SW5) || defined(TARG_SW9)
#include <stdbool.h>
#endif
//#include "compare_and_swap.h"

#ifdef MLOCK
extern inline void MS_lock(uint32_t lock_id);
extern inline void MS_unlock(uint32_t lock_id);
#endif


#ifdef _CIRCULAR_QUEUE
#include "ldm_privatequeue.h"
extern __thread_local_fix int mqueue_top;
extern __thread_local_fix int mqueue_bottom;
extern __thread_local_fix int mqueue_empty; 
extern __thread_local_fix unsigned short top;  //available
extern __thread_local_fix unsigned short bottom;  //available
extern __thread_local_fix struct task* ldmqueue[LDM_QUEUE_SIZE];
extern __thread_local_fix unsigned short ldmq_full;
#endif

/*
void slave_my_mutex_lock(volatile int* my_lock)
{
#ifdef TARG_SW5
#ifdef MUTEX_BASIC
    unsigned int __addr,__tmpi;
    __asm__ __volatile__(
        "        memb\n"
        "        ldi      %1,%2\n"
        "1:      faaw     %0,0(%1)\n"
        "        bne      %0,1b\n"
        "        memb\n"
      : "=&r"  (__tmpi),
        "=&r"  (__addr)
      :  "m" (*my_lock)
      : "memory");
    return;

#else
    unsigned int __tmp = 0;
    unsigned int __cnt;
    __asm__ __volatile__ (
    "0:     ldw     %[__tmp], %[my_lock]\n"
    "       beq     %[__tmp], 2f\n"
    "       ldi     %[__cnt], 50\n"
    "       sll     %[__cnt], 4, %[__cnt]\n"
    "1:     subw    %[__cnt], 1, %[__cnt]\n"
    "       bne     %[__cnt], 1b\n"
    "       br      0b\n"
    "2:     faaw    %[__tmp], %[my_lock]\n"
    "       bne     %[__tmp], 0b\n"
    "       memb    \n"
    "       br      3f\n"
    "3:     unop    \n"
    : [__tmp] "=&r"(__tmp), [__cnt] "=&r"(__cnt)
    : [my_lock] "m"(*(my_lock))
    : "memory");
    return;
#endif
#else
    assert(0);
#endif
}
int slave_my_mutex_unlock(volatile int* my_lock)
{
#ifdef TARG_SW5 
#ifdef MUTEX_BASIC
    unsigned int __addr ;
    __asm__ __volatile__(
        "        memb\n"
        "        ldi      %0,%1\n"
        "        stw      $31,0(%0)\n"
        "        memb\n"
      : "=&r"  (__addr)
      : "m" (*my_lock)
      : "memory");
    return;
#else
    unsigned int __tmp = 0;
    __asm__ __volatile__ (
    "       memb    \n"
    "       mov     0, %[__tmp]\n"
    "       stw     %[__tmp], %[my_lock]\n"
    : [__tmp] "=&r"(__tmp)
    : [my_lock] "m"(*(my_lock))
    : "memory"
    );
    return;
#endif
#else
    assert(0);
#endif
}
*/

/* Spinlock-guarded compare-and-swap on a task-pointer cell (SW5 only).
 *
 * Acquires *my_lock by spinning on a fetch-and-add word (faaw), then:
 * if *mem == old, stores new into *mem.  The lock is released by
 * storing zero ($31) back, and the value *mem held BEFORE the exchange
 * is returned -- i.e. the classic "val" CAS contract: the caller
 * succeeded iff the return value equals `old`.
 *
 * NOTE(review): this deliberately shadows the GCC builtin of the same
 * name with a different, lock-based signature (extra lock-word
 * parameter); confirm no translation unit relies on the real builtin.
 */
struct task*  __sync_val_compare_and_swap(volatile int* my_lock
                       ,struct task  **mem
                       , struct task*  old
                       , struct task*  new)
{
#ifdef TARG_SW5
  unsigned int __addr,__cmp;
  unsigned int  __tmpi;
  struct task* __prev, *__tmpp;
  __asm__ __volatile__(
      "        memb\n"
      "        ldi      %3,%6\n"            /* __addr = &lock word          */
      "1:      faaw     %2,0(%3)\n"         /* spin until faaw reads 0       */
      "        bne      %2,1b\n"
      "        memb\n"
      "        ldl      %0,%5\n"            /* __prev = *mem                 */
      "        cmpeq    %0,%7,%1\n"         /* __cmp = (__prev == old)       */
      "        beq      %1,2f\n"            /* mismatch: skip the store      */
      "        mov      %8,%4\n"
      "        stl      %4,%5\n"            /* *mem = new                    */
      "        mov      1,%1\n"
      "2:      memb\n"
      "        mov      %0,%4\n"            /* return value = old *mem       */
      "        stw      $31,0(%3)\n"        /* release the lock              */
      "        memb\n"
      : "=&r"  (__prev),
        "=&r"  (__cmp),
        "=&r"  (__tmpi),
        "=&r"  (__addr),
        "=&r"  (__tmpp)
      : "m" (*(mem)),
        "m" (*my_lock),
        "Ir" (old),
        "Ir" (new)
      : "memory");
    return __tmpp;
#else
    assert(0);
#endif
}

#ifdef _CIRCULAR_QUEUE
  #ifdef MASTER
/* Lock-free push of task t onto the master's shared LIFO pool:
 * snapshot the head, link t in front of it, and retry the CAS until
 * the head has not moved underneath us. */
void slave_push_to_master(struct concurrent_aceMesh_stack* self,struct task* t)
{
    struct task* snapshot;
    for (;;) {
        snapshot = self->task_pool;
        t->next = snapshot;
        if (__sync_val_compare_and_swap(&(self->my_lock),
                                        &(self->task_pool),
                                        snapshot, t) == snapshot)
            break;
    }
}
  #endif
/* Push task t onto this core's private circular queue.
 * `base` is the ring array (CIRCULAR_ARRAY_SIZE slots);
 * mqueue_top/mqueue_bottom are the thread-local ring indices.
 */
void slave_push(struct task** base, struct  task* t)
{
    int newtop;
    base[mqueue_top]=t;   /* store at current top, then advance */
    newtop=(mqueue_top+1)%CIRCULAR_ARRAY_SIZE;
    /* NOTE(review): AM_assert appears to flag the queue-full case
     * (top catching up to bottom) -- confirm the macro's polarity
     * in am_assert.h before relying on this. */
    AM_assert(newtop==mqueue_bottom,sorry: one seqqueue is full!\n)
    AM_output(push to %d,mqueue_top)
    mqueue_top=newtop;
    return;
}
/* LIFO pop from this core's private circular queue.
 * Returns 1 with *t set to the most recently pushed task, or 0 with
 * *t = NULL when the ring is empty (top == bottom). */
int slave_try_pop(struct task** base,struct task** t)
{
   int prev;
   if (mqueue_top == mqueue_bottom) {   /* empty ring */
      *t = NULL;
      return 0;
   }
   prev = (mqueue_top - 1 + CIRCULAR_ARRAY_SIZE) % CIRCULAR_ARRAY_SIZE;
   *t = base[prev];
   mqueue_top = prev;
   AM_output(pop %d,prev)
   return 1;
}

  #ifdef _BULKTRANS

#define MQ_CURSIZE ((mqueue_top -mqueue_bottom+CIRCULAR_ARRAY_SIZE)%CIRCULAR_ARRAY_SIZE)
#define mymin(a,b) ((a<b)?(a):(b))
  
//called when ldmqueue spills
//two conditions:
//I bottom is vector aligned
//II mqueue_top is vector aligned
/* Spill a VECTOR_DBL-aligned chunk of task pointers from the LDM
 * private queue (ldmqueue, indices bottom..top) into the shared
 * circular queue `base` using SIMD vector stores, then advance both
 * queues' indices and clear the ldmq_full flag.
 * Preconditions (asserted): both `bottom` and `mqueue_top` are
 * VECTOR_DBL-aligned, so every copy moves a whole vector.
 */
void slave_bulkspill(struct task** base)
{
   int i,j,total,modd,extra,from,to;
   double* destp,*srcp;

   AM_output1(bulkspill.);

   //TWO condtions
   //condition1: ldmqueue's bottom is aligned
   //since each spill discard multple of vectors
   //exch pop get in multple of vectors
   //I. 
   AM_assert1(bottom%VECTOR_DBL==0);

   //condition II: mqueue_top is aligned, 
   //since sequence is: pop,....each pop result an aligned top
   //and each spill still results an aligned top
   //no stealing in mqueue
   //II.
   AM_assert1(mqueue_top%VECTOR_DBL==0)
   
   //ldmque: VECl1,VECl2,..VEC,ODD1(old_top)(old_bottom), [ODD,VEC1,VEC2,...,]]
   //mqueue: .....(old_top) VEC1,VEC2,ODD,  <-new_top
   //now ODD1,ODD, are all vectors
   
   //II: adjust total to be multiple of VECTOR_DBL
   total=mymin(TASKCHUNK_DN,(LDM_QUEUE_SIZE-2));;
   modd=mqueue_top%VECTOR_DBL;
   extra=modd?(VECTOR_DBL-modd):0;
   total=total+extra;
   AM_assert1(total%VECTOR_DBL==0);

   //IV: now we have the schedule
   from=bottom;;
   to=mqueue_top;
   for(i=total;i>0;i-=VECTOR_DBL)
   {
     //move from to ...
     //copy one vector (VECTOR_DBL pointers) per iteration
     destp=(double*)&base[to];
     srcp=(double*)&ldmqueue[from];
     simd_store(*((doublev4*)srcp),destp);
     to=(to+VECTOR_DBL)%CIRCULAR_ARRAY_SIZE;
     from=from+VECTOR_DBL;
   }
   mqueue_top=(mqueue_top+total)%CIRCULAR_ARRAY_SIZE;
   bottom=(bottom+total)%LDM_QUEUE_SIZE;
   //V: ldmq_full=0;
   ldmq_full=0; 
   mqueue_empty=false;
   return;

}

//condition1:  called when ldmq empty!
//condition2: mqueue_empty=false
//condition3:no stealing
//allert: ldmq will not turn full during this process
/* Bulk-refill the slave's LDM private queue (ldmqueue) from the shared
 * circular queue `base`: copy `total` task pointers with SIMD loads,
 * rounding the transfer so both source and destination stay
 * VECTOR_DBL-aligned, then rewind mqueue_top.  Returns 1 on success.
 *
 * Fix: `(mqueue_top - total) % CIRCULAR_ARRAY_SIZE` is negative in C
 * whenever the ring index wraps (C99 truncating division keeps the
 * dividend's sign), which produced a negative array index into `base`
 * and a corrupt mqueue_top.  Bias by CIRCULAR_ARRAY_SIZE before the
 * modulus, matching the idiom already used at slave_try_pop and in
 * MQ_CURSIZE.  Also drops the unused locals i,j.
 */
int slave_try_bulkpop(struct task** base)
{
   int total,odd,odd2,extra,from,to,dist;
   double* destp,*srcp;

   AM_output1(bulkpop..);
   total=mymin(TASKCHUNK_UP,MQ_CURSIZE);
   total=mymin(total,LDM_QUEUE_SIZE);
   AM_assert1(total);// return 0;

   //NOTE: the order!
   //in mqueue: .....VEC1,VEC2,ODD  <-old_top
   //fill in ldmque: (old_top=any) 
   //after bulkpop,ldmque: [VEC1,VEC2,ODD]  <-new_top
   //BUT, you cannot popm, push, popm, push, ...
   
   //I: adjust ldmqueue's top-bottom and mqueue's top for alignment
   //two kinds of alignments
   //src:mqueue[mtop-4], use simd as much as possible
   //dest: ldmqueue[top], 
   if(top==bottom  && !ldmq_full){
      top=0;
      bottom=0;
   }
   
   //II: find the size of the only odd chunk 
   odd=mqueue_top%VECTOR_DBL;
   
   //III:(new_total-odd) should be mulple of VECTOR_DBL
   odd2=(total-odd)%VECTOR_DBL;
   extra=odd2?(VECTOR_DBL-odd2):0;
   total=total+extra;
   
   //IV: now we have the schedule
   //bias by CIRCULAR_ARRAY_SIZE so the modulus of a wrapped (negative)
   //difference stays in [0, CIRCULAR_ARRAY_SIZE)
   //NOTE(review): assumes total <= CIRCULAR_ARRAY_SIZE so one bias is
   //enough -- TODO confirm TASKCHUNK_UP/LDM_QUEUE_SIZE bounds
   from=(mqueue_top-total+CIRCULAR_ARRAY_SIZE)%CIRCULAR_ARRAY_SIZE;
   to=top;
   for(dist=total;dist>0;dist-=VECTOR_DBL)
   {
      //move one vector of task pointers per iteration
      srcp=(double*)&base[from];
      destp=(double*)&ldmqueue[to];
      simd_load(*((doublev4*)destp),srcp);
      from=(from+VECTOR_DBL)%CIRCULAR_ARRAY_SIZE;
      to=to+VECTOR_DBL;
   }
   mqueue_top=(mqueue_top-total+CIRCULAR_ARRAY_SIZE)%CIRCULAR_ARRAY_SIZE;
   top=total; //ldmqueue[0:total-1]

   mqueue_empty=true;
   return 1;
}
  #endif


#elif defined ( _SERIAL_QUEUE)
#ifdef MASTER
/* Push task t onto the master's shared LIFO pool.
 * mutex_lock build: take the per-stack spinlock around a plain
 * linked-list push.  Otherwise: lock-free CAS retry loop using the
 * file-local __sync_val_compare_and_swap.
 */
void slave_push_to_master(struct concurrent_aceMesh_stack* self,struct task* t)
{            
#ifdef mutex_lock
    struct task* old_top=NULL;
    slave_my_mutex_lock(&(self->my_lock));
    old_top = self->task_pool;
    t->next=old_top;
    self->task_pool=t;
    slave_my_mutex_unlock(&(self->my_lock));
#else
    struct task* old_top;
    do{
      /* re-snapshot the head each attempt; CAS succeeds only if the
       * head is still `old_top` when we swap in `t` */
      old_top = self->task_pool;
      t->next = old_top;
    } while(__sync_val_compare_and_swap(&(self->my_lock),
                     &(self->task_pool),
                      old_top,t)!=old_top);
#endif
}
#endif
//void slave_push(struct aceMesh_stack* self,struct task* t)
/* Unsynchronized LIFO push: link t in front of the current pool head.
 * Caller must guarantee exclusive access to self. */
void slave_push(struct concurrent_aceMesh_stack* self,struct task* t)
{
    struct task* head = self->task_pool;
    t->next = head;
    self->task_pool = t;
}

//int slave_try_pop(struct aceMesh_stack* self,struct task** t)
/* Unsynchronized LIFO pop: detach the pool head into *t.
 * Returns true on success, false when the pool is empty.
 * Caller must guarantee exclusive access to self. */
int slave_try_pop(struct concurrent_aceMesh_stack* self,struct task** t)
{
    struct task* head = self->task_pool;
    if (head == NULL)
        return false;
    *t = head;
    self->task_pool = head->next;
    return true;
}


#if defined( ASYN_INSERT_Q ) 
/* Concurrent pop from the shared pool (ASYN_INSERT_Q build).
 * mutex_lock build: spinlock-protected pop; always succeeds if the
 * pool is non-empty.  Otherwise: single CAS attempt -- on contention
 * it does NOT retry, returning false with *t = NULL so the caller can
 * back off and try again.
 */
int slave_concur_try_pop(struct concurrent_aceMesh_stack* self,struct task** t)
{
    #ifdef mutex_lock

    slave_my_mutex_lock(&(self->my_lock));
    struct task* old_top = self->task_pool;
    if( old_top == NULL ) {
        slave_my_mutex_unlock(&(self->my_lock));
        return  false;
    }
    else{
       *t = old_top; //3r1w
       self->task_pool = old_top->next;
       slave_my_mutex_unlock(&(self->my_lock));
       return true;
    }  


    #else

    struct task* old_top = self->task_pool;
    if( old_top == NULL )
    {
       return  false;
    }else{
       struct task* new_top = old_top->next;
       /* single-shot CAS: success means we own old_top exclusively */
       if(__sync_val_compare_and_swap(&(self->my_lock), &(self->task_pool),old_top,new_top)==old_top)
       {
          *t = old_top; 
          return true;
       }else{
          *t=NULL;
          return false;
       }
    }
    #endif
}

#endif



#else
#ifdef TARG_SW5

    #ifdef CENTRAL_FIFO
/* Nonzero when the central FIFO has no head element. */
inline int slave_queue_empty(struct concurrent_aceMesh_stack* self)
{
    return (NULL == self->head_ptr) ? 1 : 0;
}
    #endif


/* Push task t onto the shared pool under the configured strategy:
 *  - CENTRAL_FIFO: spinlock-protected tail-append FIFO (head/tail ptrs);
 *  - mutex_lock:   spinlock-protected LIFO linked push;
 *  - MLOCK (non-distributed): hardware lock MS_lock/MS_unlock around
 *    the LIFO push, with an explicit memb barrier before release;
 *  - default:      lock-free CAS retry loop.
 */
void slave_push(struct concurrent_aceMesh_stack* self,struct task* t)
{     
#ifdef CENTRAL_FIFO

    slave_my_mutex_lock(&(self->my_lock));
    if (slave_queue_empty(self)) {
        /* first element: head and tail both point at t */
        self->head_ptr = t;
        self->tail_ptr = t;
    }
    else {
        self->tail_ptr->next = t;
        self->tail_ptr = t; //4r2w
    }
    slave_my_mutex_unlock(&(self->my_lock));

#else
    #ifdef mutex_lock

    struct task* old_top=NULL;
    slave_my_mutex_lock(&(self->my_lock));
    old_top = self->task_pool; //3r2w
    t->next=old_top;
    self->task_pool=t;
    slave_my_mutex_unlock(&(self->my_lock));

    #elif defined(MLOCK) && !defined(DISTRIBUTED_SCHEDULER)

    struct task* old_top=NULL;
    MS_lock(SL_SCHED);
    old_top = self->task_pool; //3r2w
    t->next=old_top;
    self->task_pool=t;
    /* flush the store before releasing the hardware lock */
    asm volatile ("memb\n");
    MS_unlock(SL_SCHED);

    #else

    struct task* old_top;
    do{
      old_top = self->task_pool;
      t->next = old_top;
    } while(__sync_val_compare_and_swap(&(self->my_lock),
                     &(self->task_pool),
                      old_top,t)!=old_top);
    
    #endif
#endif
//#ifdef ACEMESH_SCHEDULER_PROFILING
//    all_task_nums[_MYID]++;
//#endif

}


//int slave_try_push(struct concurrent_aceMesh_stack* self,struct task* t)
//{
//    struct task* old_top = self->task_pool;
//    t->next = old_top;
///*#ifdef ACEMESH_SCHEDULER_PROFILING
//		all_task_nums[_MYID]++;
//#endif*/
//    if(__sync_val_compare_and_swap(&(self->my_lock), &(self->task_pool),old_top,t)==old_top){
//       return true;
//    }
//      return false;
//}
//
//void slave_pop(struct task** t)
//{
//}
//

/* Pop a task from the shared pool into *t under the configured
 * strategy (mirrors slave_push above):
 *  - CENTRAL_FIFO: spinlock-protected pop from the FIFO head;
 *  - mutex_lock:   spinlock-protected LIFO pop;
 *  - MLOCK (non-distributed): hardware-lock-protected LIFO pop with a
 *    memb barrier before release;
 *  - default:      single CAS attempt -- on contention it returns
 *    false with *t = NULL rather than retrying.
 * Returns true with *t set on success, false when empty (or, in the
 * CAS build, when the attempt lost a race).
 */
int slave_try_pop(struct concurrent_aceMesh_stack* self,struct task** t)
{
#ifdef CENTRAL_FIFO

    slave_my_mutex_lock(&(self->my_lock));
    struct task * temp = self->head_ptr;
    //if (slave_queue_empty(self)) {
    if (temp == NULL) {
        slave_my_mutex_unlock(&(self->my_lock));
        return false;
    }
    else {
          //3r1w
        *t = temp;
        self->head_ptr = temp->next;
        slave_my_mutex_unlock(&(self->my_lock));
        return true;
    }

#else

    #ifdef mutex_lock

    slave_my_mutex_lock(&(self->my_lock));
    struct task* old_top = self->task_pool;
    if( old_top == NULL ) {
        slave_my_mutex_unlock(&(self->my_lock));
        return  false;
    }
    else{
       *t = old_top; //3r1w
       self->task_pool = old_top->next;
       slave_my_mutex_unlock(&(self->my_lock));
       return true;
    }  


    #elif defined(MLOCK) && !defined(DISTRIBUTED_SCHEDULER)

    MS_lock(SL_SCHED);
    struct task* old_top = self->task_pool;
    if( old_top == NULL ) {
        MS_unlock(SL_SCHED);
        return  false;
    }
    else{
       *t = old_top; //3r1w
       self->task_pool = old_top->next;
       /* flush the store before releasing the hardware lock */
       asm volatile ("memb\n");
       MS_unlock(SL_SCHED);
       return true;
    }  

    #else

    struct task* old_top = self->task_pool;
    if( old_top == NULL )
    {
       return  false;
    }else{
       struct task* new_top = old_top->next;
       /* single-shot CAS: success means we own old_top exclusively */
       if(__sync_val_compare_and_swap(&(self->my_lock), &(self->task_pool),old_top,new_top)==old_top)
       {
          *t = old_top; 
          return true;
       }else{
          *t=NULL;
          return false;
       }
   }

    #endif

#endif
}
/* Always reports "not empty" (false) to the caller.
 * NOTE(review): looks like a deliberate stub -- confirm call sites. */
int slave_empty()
{
    int is_empty = false;
    return is_empty;
}
/*#ifdef MPI_SURPPORT
void slave_q_push(struct aceMesh_queue* self ,struct task* t)
{
    t->next=NULL;
    self->tail->next=t;
    self->tail=t;
}
int slave_q_try_pop(struct aceMesh_queue* self, struct task* t)
{
    if(self->head==NULL)
        return false;
    t=self->head;
    self->head=t->next;
    if(t==NULL)
        return false;
    else
        return true;
}
#endif*/

#elif TARG_SW9
/* Per-thread scratch buffer for the SW9 `casl` 512-bit atomic:
 * [0] = result slot, [1] = expected old value, [2] = new value.
 * 64-byte aligned because casl operates on a full uint512. */
__thread_local unsigned long slave_temp_cas[8] __attribute__((aligned(64))) = {1111,2222,3333,4444,0,0,0,0};
/* Issue the casl instruction: Va is the 512-bit operand block,
 * _addr_ is the target memory word. */
#define _CAS(Va,_addr_) \
{    asm volatile( "casl %0, 0(%1)\n\t"\
                   : "+r"(Va)\
                   :  "r"(_addr_)\
                   : "memory");\
}
/* Hardware compare-and-swap of *my_value from old_val to new_val on
 * SW9, implemented via casl over the slave_temp_cas buffer.  Returns
 * slave_temp_cas[0] after the instruction executes.
 * NOTE(review): shadows the GCC builtin name; the exact value casl
 * leaves in slot [0] (success flag vs. previous value) follows the
 * SW9 ISA -- confirm against the architecture manual. */
//unsigned long __sync_bool_compare_and_swap(unsigned long* my_value, unsigned long old_val,unsigned long new_val)
unsigned long __sync_bool_compare_and_swap(struct task ** my_value, struct task* old_val,struct task* new_val)
{
    uint512 *p=NULL;
    p=(uint512*)(&slave_temp_cas[0]);
//    printf("slave_temp_cas addr=%p, %p,%p\n", p,&slave_temp_cas[0],&p);
//   printf("my_addr=%p,%p, old_val=%d,%p,  new_val=%d,%p\n", my_addr, &my_addr, old_val, &old_val, new_val, &new_val);
    slave_temp_cas[0]=0;
    slave_temp_cas[1]=(unsigned long)old_val;
    slave_temp_cas[2]=(unsigned long )new_val;
    #ifdef DEBUG
//    simd_fprint_uint512(stderr,*p);
    #endif
    _CAS(*p,my_value);
    #ifdef DEBUG
//    simd_fprint_uint512(stderr,*p);
    #endif
    return slave_temp_cas[0];
}

//__thread_local_fix unsigned long push_time=0,pop_time=0,temp_=0;
//__thread_local_fix int push_i=0,pop_i=0;
/* SW9 push of task t onto the shared pool.
 * mutex_lock build: spinlock-protected LIFO push (t becomes the new
 * head, then its next is wired to the previous head -- safe because
 * the lock is held throughout).  Otherwise: CAS retry loop using the
 * SW9 __sync_bool_compare_and_swap above.
 */
void slave_push(struct concurrent_aceMesh_stack* self,struct task* t)
{
//printf("start_push");
struct task* old_top;
#ifdef mutex_lock
//push_i++;
//temp_=rtc();
slave_my_mutex_lock(&(self->my_lock));
 //if(push_i==10)
 //push_time+=rtc()-temp_;
 //if(push_i==10)
 //printf("10 times push lock time:%ld\n",push_time);
 old_top = self->task_pool;
 self->task_pool=t;
  self->task_pool->next = old_top;
slave_my_mutex_unlock(&(self->my_lock));
//printf(" end_push\n");
#else   
    do
    {
        /* re-snapshot the head each attempt */
        old_top = self->task_pool;
        t->next = old_top;
    }while(!__sync_bool_compare_and_swap(&(self->task_pool),old_top,t));
//    __sync_bool_compare_and_swap(t);
#endif
}

/* SW9 pop from the shared pool into *t.
 * mutex_lock build: spinlock-protected LIFO pop.  Otherwise: single
 * CAS attempt -- on contention it returns false with *t = NULL rather
 * than retrying.  Returns true with *t set on success.
 */
int slave_try_pop(struct concurrent_aceMesh_stack* self,struct task** t)
{
    struct task* old_top;
//#ifdef DEBUG
////    printf("try_pop,self_addr=%p,t=%p,old_top=%p\n",self->task_pool,*t,old_top);
//#endif
//printf("try_pop\n");
#ifdef mutex_lock
//pop_i++;
//temp_=rtc();
slave_my_mutex_lock(&(self->my_lock));
//pop_time+=rtc()-temp_;
//if(pop_i==10)
//printf("10 times pop lock time:%ld\n",pop_time);
old_top = self->task_pool;
if( old_top == NULL ) {
        slave_my_mutex_unlock(&(self->my_lock));
        return  false;
    }
    else{
       *t = old_top; //3r1w
       self->task_pool = old_top->next;
       slave_my_mutex_unlock(&(self->my_lock));
       return true;
    }  
#else
    old_top = self->task_pool;
    if(old_top == NULL)
    {
        return false;
    }
    else
    {
        struct task* new_top = old_top->next;
        /* single-shot CAS: success means we own old_top exclusively */
        if(__sync_bool_compare_and_swap(&(self->task_pool),old_top,new_top))
        {
            *t = old_top;
            return true;
        }
        else
        {
            *t = NULL;
            return false;
        }
    }
#endif
}

#endif

#endif

#ifdef SEG_BUFF
/* Serial push of task t onto the pending buffer.
 * ARRAY_STACK build: store into the fixed-size array stack and grow
 * self->top.  Otherwise: link t as the new head of the task_pool list.
 *
 * Fix: the original checked for overflow only AFTER storing and
 * incrementing, and bailed out when top reached PENDING_BUFF_SIZE-1,
 * so the buffer's last slot could never be used and the overflowing
 * element was written before the process died.  Check capacity before
 * touching the array instead, allowing all PENDING_BUFF_SIZE slots.
 */
void slave_ser_push(struct aceMesh_stack* self,struct task* t)
{ 
#ifdef ARRAY_STACK
    struct task** base=self->base;
    int cur_top=self->top;
    if(cur_top>=PENDING_BUFF_SIZE)
    {
        printf("mqueue is full\n");
        exit(1);
    }
    base[cur_top]=t;
    self->top=cur_top+1;
    //printf("task_queu_size=%d\n",self->top);
#else
    t->next=self->task_pool;
    self->task_pool=t;
#endif
}

/* Serial pop from the pending buffer into *t.
 * ARRAY_STACK build: pop the top array slot.  Otherwise: detach the
 * head of the task_pool linked list.
 * Returns true on success, false when the buffer is empty. */
int slave_ser_try_pop(struct aceMesh_stack* self,struct task** t)
{
#ifdef ARRAY_STACK
    int n = self->top;
    if (n == 0)
        return false;
    n = n - 1;
    *t = self->base[n];
    self->top = n;
    return true;
#else
    struct task* head = self->task_pool;
    if (head == NULL)
        return false;
    *t = head;
    self->task_pool = head->next;
    return true;
#endif
}

#endif
