#include "types.h"
#include "defs.h"
#include "param.h"
#include "memlayout.h"
#include "mmu.h"
#include "x86.h"
#include "proc.h"
#include "kthread.h"
#include "spinlock.h"

// Process table: every process/thread slot in the system, guarded by
// a single spinlock.
struct ptable_t {
  struct spinlock lock;
  struct proc proc[NPROC];
};

// Global table of kernel-thread mutexes.  mutex_table.lock guards
// every field of every entry, including each FIFO wait queue.
struct {
  struct spinlock lock;
  struct mutex {
    struct proc* owner;         // thread currently holding the mutex; 0 when free
    struct myQueue q;           // FIFO of threads blocked waiting for the mutex
    int created;		// Indicates that the lock is in use.
  } mutex[MAX_MUTEXES];
} mutex_table;

// Global table of condition variables.  CV_table.lock guards every
// entry and its waiting array.
struct {
  struct spinlock lock;
  struct {
    struct proc* waiting[NPROC];	// All proc waiting on this CV.
    int created;			// Indicates that the lock is in use.
  } cv[MAX_CONDS];
} CV_table;

struct ptable_t ptable;

// The first user process (init); orphaned children are re-parented to it.
static struct proc *initproc;

int nextpid = 1;  // next pid to hand out; incremented under ptable.lock in allocproc
extern void forkret(void);
extern void trapret(void);

static void wakeup1(void *chan);

// Initialize the process, mutex, and condition-variable tables.
// Called once at boot, before any process or thread exists, so no
// locking is needed for the table contents themselves.
void
pinit(void)
{
  int i, j;
  initlock(&ptable.lock, "ptable");

  // Give each lock its own name (the original named all three
  // "ptable"), so spinlock diagnostics identify the right lock.
  initlock(&mutex_table.lock, "mutex_table");
  for(i=0; i<MAX_MUTEXES; i++){
    mutex_table.mutex[i].owner = 0;
    mutex_table.mutex[i].created = 0;
    mutex_table.mutex[i].q.first = 0;
    mutex_table.mutex[i].q.next = 0;
    for(j=0; j<NPROC; j++)
      mutex_table.mutex[i].q.procs[j] = 0;
  }
  initlock(&CV_table.lock, "CV_table");
  for(i=0; i<MAX_CONDS; i++){
    CV_table.cv[i].created = 0;
    for(j=0; j<NPROC; j++){
      CV_table.cv[i].waiting[j] = 0;
    }
  }
}

int uptime()
{
    uint xticks;
    
    acquire(&tickslock);
    xticks = ticks;
    release(&tickslock);
    return xticks;
}
//PAGEBREAK: 32
// Look in the process table for an UNUSED proc.
// If found, change state to EMBRYO and initialize
// state required to run in the kernel.
// Otherwise return 0.
static struct proc*
allocproc(void)
{
  struct proc *p;
  char *sp;

  // Find a free slot under the table lock.
  acquire(&ptable.lock);
  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++)
    if(p->state == UNUSED)
      goto found;
  release(&ptable.lock);
  return 0;

found:
  // Claim the slot and assign a pid before dropping the lock so no
  // other CPU can hand out the same slot or pid.
  p->state = EMBRYO;
  p->pid = nextpid++;
  release(&ptable.lock);

  // Allocate kernel stack.
  if((p->kstack = kalloc()) == 0){
    // NOTE(review): state is reset without ptable.lock; matches stock
    // xv6, but worth confirming it is still benign in this variant.
    p->state = UNUSED;
    return 0;
  }
  sp = p->kstack + KSTACKSIZE;
  
  // Leave room for trap frame.
  sp -= sizeof *p->tf;
  p->tf = (struct trapframe*)sp;
  
  // Fake return address: when forkret() returns it falls into
  // trapret, which restores the trap frame and enters user mode.
  sp -= 4;
  *(uint*)sp = (uint)trapret;

  // Context for the first swtch() into this process: start at forkret.
  sp -= sizeof *p->context;
  p->context = (struct context*)sp;
  memset(p->context, 0, sizeof *p->context);
  p->context->eip = (uint)forkret;

  return p;
}

//PAGEBREAK: 32
// Set up first user process.
// Set up the first user process (init), which runs initcode.S.
// Panics on any failure: there is no caller to report errors to.
void
userinit(void)
{
  struct proc *p;
  extern char _binary_initcode_start[], _binary_initcode_size[];

  // The original never checked this and would have dereferenced a
  // null pointer if the process table were somehow exhausted at boot.
  p = allocproc();
  if(p == 0)
    panic("userinit: out of processes");
  initproc = p;
  if((p->pgdir = setupkvm(kalloc)) == 0)
    panic("userinit: out of memory?");
  // Map initcode's binary at user virtual address 0.
  inituvm(p->pgdir, _binary_initcode_start, (int)_binary_initcode_size);
  p->sz = PGSIZE;
  // Build a trap frame that "returns" to user mode at eip 0 with user
  // segments, a stack at the top of the page, and interrupts enabled.
  memset(p->tf, 0, sizeof(*p->tf));
  p->tf->cs = (SEG_UCODE << 3) | DPL_USER;
  p->tf->ds = (SEG_UDATA << 3) | DPL_USER;
  p->tf->es = p->tf->ds;
  p->tf->ss = p->tf->ds;
  p->tf->eflags = FL_IF;
  p->tf->esp = PGSIZE;
  p->tf->eip = 0;  // beginning of initcode.S

  safestrcpy(p->name, "initcode", sizeof(p->name));
  p->cwd = namei("/");

  // NOTE(review): unlike fork(), no threadCounter page is allocated
  // for init; confirm exit()/wait() never touch it for this process.
  p->state = RUNNABLE;
}

// Grow current process's memory by n bytes.
// Return 0 on success, -1 on failure.
// Grow (n > 0) or shrink (n < 0) the current process's address space
// by n bytes.  Returns 0 on success, -1 if the (de)allocation fails.
int
growproc(int n)
{
  uint newsz;

  newsz = proc->sz;
  if(n > 0){
    newsz = allocuvm(proc->pgdir, newsz, newsz + n);
    if(newsz == 0)
      return -1;
  } else if(n < 0){
    newsz = deallocuvm(proc->pgdir, newsz, newsz + n);
    if(newsz == 0)
      return -1;
  }
  proc->sz = newsz;
  // Reload the hardware page table so the size change takes effect.
  switchuvm(proc);
  return 0;
}

// Create a new process copying p as the parent.
// Sets up stack to return as if from system call.
// Caller must set state of returned proc to RUNNABLE.
// Create a new process copying the current one as the parent.
// Sets up the child's stack to return as if from the system call.
// Returns the child's pid in the parent and 0 in the child, or -1
// on failure.
int
fork(void)
{
  int i, pid;
  struct proc *np;

  // Allocate process.
  if((np = allocproc()) == 0)
    return -1;

  // Copy process state from the parent.
  if((np->pgdir = copyuvm(proc->pgdir, proc->sz)) == 0){
    kfree(np->kstack);
    np->kstack = 0;
    np->state = UNUSED;
    return -1;
  }
  np->sz = proc->sz;
  np->parent = proc;
  *np->tf = *proc->tf;

  // Per-process page shared by all future threads of the child: it
  // holds the live-thread count and the next tid to hand out.  The
  // original did not check this allocation and dereferenced 0 below
  // when out of memory.
  np->threadCounter = (int*)kalloc();
  if(np->threadCounter == 0){
    freevm(np->pgdir);
    kfree(np->kstack);
    np->kstack = 0;
    np->state = UNUSED;
    return -1;
  }
  // nextTID lives further into the same page (int-pointer arithmetic:
  // sizeof(int)*3 ints past threadCounter, well within the page).
  np->nextTID = np->threadCounter + sizeof(int)*3;
  // No lock needed: the child is not yet visible to anyone else.
  *np->threadCounter = 1;
  *np->nextTID = 1;
  np->tid = 0;

  // Scheduling statistics.
  np->ctime = uptime();
  np->rtime = 0;
  np->etime = 0;

  // Clear %eax so that fork returns 0 in the child.
  np->tf->eax = 0;

  // Share open files and the working directory by reference counting.
  for(i = 0; i < NOFILE; i++)
    if(proc->ofile[i])
      np->ofile[i] = filedup(proc->ofile[i]);
  np->cwd = idup(proc->cwd);

  pid = np->pid;
  // Finish initializing (name included) before publishing RUNNABLE:
  // the scheduler may run the child the instant the state is set.
  safestrcpy(np->name, proc->name, sizeof(proc->name));
  np->state = RUNNABLE;
  return pid;
}

// Exit the current process.  Does not return.
// An exited process remains in the zombie state
// until its parent calls wait() to find out it exited.
void
exit(void)
{
  struct proc *p;
  int fd;

  if(proc == initproc)
    panic("init exiting");

  // Close all open files.
  for(fd = 0; fd < NOFILE; fd++){
    if(proc->ofile[fd]){
      fileclose(proc->ofile[fd]);
      proc->ofile[fd] = 0;
    }
  }

  // Drop our reference to the working directory.
  iput(proc->cwd);
  proc->cwd = 0;

  acquire(&ptable.lock);

  // Parent might be sleeping in wait().
  wakeup1(proc->parent);

  // Pass abandoned children to init.
  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
    if(p->parent == proc){
      p->parent = initproc;
      if(p->state == ZOMBIE)
        wakeup1(initproc);
    }
  }

  // Become a zombie and record statistics; the slot is reclaimed
  // later by wait().  The shared thread count is decremented so
  // wait() can tell when every thread of this process is gone.
  // Jump into the scheduler, never to return.
  proc->state = ZOMBIE;
  *proc->threadCounter = *proc->threadCounter - 1;
  proc->etime = uptime();
  sched();
  panic("zombie exit");
}

// Wait for a child process to exit and return its pid.
// Return -1 if this process has no children.
int
wait(void)
{
  struct proc *p;
  struct proc *p1;
  int havekids, pid;

  acquire(&ptable.lock);
  for(;;){
    // Scan through table looking for zombie children.
    havekids = 0;
    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
      if(p->parent != proc)
        continue;
      havekids = 1;
      // Threads share threadCounter: a positive count means some
      // thread of this child is still alive, so keep waiting.
      if(*p->threadCounter > 0)
        continue;

        pid = p->pid;
            for(p1 = ptable.proc; p1 < &ptable.proc[NPROC]; p1++){
                if(p1->pid == pid){
                    // Reclaim every slot (the process and all of its
                    // threads) that carries this pid.
        kfree(p1->kstack);
        p1->kstack = 0;
        p1->state = UNUSED;
        p1->pid = 0;
        p1->parent = 0;
        p1->name[0] = 0;
        p1->killed = 0;
                }
            }
        if(*p->threadCounter == 0){
            // All threads gone: free the shared address space.
            // NOTE(review): the kalloc'd threadCounter page itself is
            // never kfree'd here - looks like a leak; confirm.
            freevm(p->pgdir);
            release(&ptable.lock);
        return pid;
      }
    }

    // No point waiting if we don't have any children.
    if(!havekids || proc->killed){
      release(&ptable.lock);
      return -1;
    }

    // Wait for children to exit.  (See wakeup1 call in proc_exit.)
    sleep(proc, &ptable.lock);  //DOC: wait-sleep
  }
}

// Arrange for the current process to run sighandler on its next
// return to user space: push the interrupted eip on the user stack
// as a return address, then point eip at the handler.
void
register_handler(sighandler_t sighandler)
{
  // Kernel-mapped address of the page containing the user stack pointer.
  char* addr = uva2ka(proc->pgdir, (char*)proc->tf->esp);
  // esp exactly on a page boundary would make the push below cross
  // into an unmapped page of our kernel-side view.
  if ((proc->tf->esp & 0xFFF) == 0)
    panic("esp_offset == 0");

    /* open a new frame: store the old eip just below esp */
  *(int*)(addr + ((proc->tf->esp - 4) & 0xFFF))
          = proc->tf->eip;
  proc->tf->esp -= 4;

    /* update eip so the trap return enters the handler */
  proc->tf->eip = (uint)sighandler;
}


//PAGEBREAK: 42
// Per-CPU process scheduler.
// Each CPU calls scheduler() after setting itself up.
// Scheduler never returns.  It loops, doing:
//  - choose a process to run
//  - swtch to start running that process
//  - eventually that process transfers control
//      via swtch back to the scheduler.
void
scheduler(void)
{
  struct proc *p;  // candidate process; the global `proc` is always the current one

  for(;;){
    // Enable interrupts on this processor.
    sti();

    // Loop over process table looking for process to run.
    acquire(&ptable.lock);
    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
      if(p->state != RUNNABLE)
        continue;

      // Switch to chosen process.  It is the process's job
      // to release ptable.lock and then reacquire it
      // before jumping back to us.
      proc = p;
      switchuvm(p);
      p->state = RUNNING;
      swtch(&cpu->scheduler, proc->context);
      switchkvm();

      // Process is done running for now.
      // It should have changed its p->state before coming back.
      proc = 0;
    }
    release(&ptable.lock);

  }
}

// Enter scheduler.  Must hold only ptable.lock
// and have changed proc->state.
void
sched(void)
{
  int intena;

  // Sanity checks: caller must hold exactly ptable.lock, interrupts
  // must be off, and the caller must already have left RUNNING.
  if(!holding(&ptable.lock))
    panic("sched ptable.lock");
  if(cpu->ncli != 1)
    panic("sched locks");
  if(proc->state == RUNNING)
    panic("sched running");
  if(readeflags()&FL_IF)
    panic("sched interruptible");
  // Preserve this kernel thread's saved interrupt-enable flag across
  // the switch; the scheduler thread has its own cpu->intena.
  intena = cpu->intena;
  swtch(&proc->context, cpu->scheduler);
  cpu->intena = intena;
}

// Give up the CPU for one scheduling round.
void
yield(void)
{
  acquire(&ptable.lock);  //DOC: yieldlock
  // Mark ourselves runnable again and enter the scheduler; sched()
  // returns here once we are rescheduled, still holding the lock.
  proc->state = RUNNABLE;
  sched();
  release(&ptable.lock);
}

// A fork child's very first scheduling by scheduler()
// will swtch here.  "Return" to user space.
void
forkret(void)
{
  // NOTE(review): `first` is read/written without a lock; presumably
  // safe because only the very first process reaches here before any
  // concurrency exists - confirm.
  static int first = 1;
  // Still holding ptable.lock from scheduler.
  release(&ptable.lock);

  if (first) {
    // Some initialization functions must be run in the context
    // of a regular process (e.g., they call sleep), and thus cannot 
    // be run from main().
    first = 0;
    initlog();
  }
  
  // Return to "caller", actually trapret (see allocproc).
}

// Atomically release lock and sleep on chan.
// Reacquires lock when awakened.
void
sleep(void *chan, struct spinlock *lk)
{
  if(proc == 0)
    panic("sleep");

  if(lk == 0)
    panic("sleep without lk");

  // Must acquire ptable.lock in order to
  // change p->state and then call sched.
  // Once we hold ptable.lock, we can be
  // guaranteed that we won't miss any wakeup
  // (wakeup runs with ptable.lock locked),
  // so it's okay to release lk.
  if(lk != &ptable.lock){  //DOC: sleeplock0
    acquire(&ptable.lock);  //DOC: sleeplock1
    release(lk);
  }

  // Go to sleep.  sched() returns when wakeup() has marked us
  // RUNNABLE and the scheduler has picked us again.
  proc->chan = chan;
  proc->state = SLEEPING;
  sched();

  // Tidy up.
  proc->chan = 0;

  // Reacquire original lock.
  if(lk != &ptable.lock){  //DOC: sleeplock2
    release(&ptable.lock);
    acquire(lk);
  }
}

//PAGEBREAK!
// Wake up all processes sleeping on chan.
// The ptable lock must be held.
// Mark every process sleeping on chan runnable.
// Caller must hold ptable.lock.
static void
wakeup1(void *chan)
{
  struct proc *pp;

  for(pp = &ptable.proc[0]; pp != &ptable.proc[NPROC]; pp++){
    if(pp->state == SLEEPING && pp->chan == chan)
      pp->state = RUNNABLE;
  }
}

// Wake up all processes sleeping on chan.
void
wakeup(void *chan)
{
  acquire(&ptable.lock);
  wakeup1(chan);
  release(&ptable.lock);
}

// Kill the process with the given pid.
// Process won't exit until it returns
// to user space (see trap in trap.c).
// Mark the process with the given pid as killed; it terminates the
// next time it returns toward user space (see trap in trap.c).
// Returns 0 if a matching process was found, -1 otherwise.
int
kill(int pid)
{
  struct proc *pp;
  int found = 0;

  acquire(&ptable.lock);
  for(pp = ptable.proc; pp < &ptable.proc[NPROC] && !found; pp++){
    if(pp->pid != pid)
      continue;
    found = 1;
    pp->killed = 1;
    // A sleeping victim must be woken so it can notice the flag.
    if(pp->state == SLEEPING)
      pp->state = RUNNABLE;
  }
  release(&ptable.lock);
  return found ? 0 : -1;
}

//PAGEBREAK: 36
// Print a process listing to console.  For debugging.
// Runs when user types ^P on console.
// No lock to avoid wedging a stuck machine further.
void
procdump(void)
{
  static char *states[] = {
  [UNUSED]    "unused  ",
  [EMBRYO]    "embryo  ",
  [SLEEPING]  "sleep   ",
  [RUNNABLE]  "runble  ",
  [RUNNING]   "run     ",
  [ZOMBIE]    "zombie  ",
  [BLOCKING]    "blocking"
  };
  int i;
  struct proc *p;
  char *state;
  uint pc[10];
  
  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
    if(p->state == UNUSED)
      continue;
    if(p->state >= 0 && p->state < NELEM(states) && states[p->state])
      state = states[p->state];
    else
      state = "???";
      // NOTE(review): indentation is misleading - these two prints run
      // for every non-UNUSED slot, not just the else branch above.
      cprintf("(%d, %d) %s %s", p->pid, p->tid, state, p->name);
      cprintf("**totalElapsedTime %d, totalRunTime %d, totalWaitTime %d**\n",uptime() - p->ctime,p->rtime, uptime() - p->ctime - p->rtime);

    if(p->state == SLEEPING){
      // Show the sleeping thread's kernel call stack.
      getcallerpcs((uint*)p->context->ebp+2, pc);
      for(i=0; i<10 && pc[i] != 0; i++)
        cprintf(" %p", pc[i]);
    }
    cprintf("\n");
  }
}


//kthread implementation

// Create a new kernel thread in the calling process, running
// start_func on the caller-supplied stack of stack_size bytes.
// Returns the new thread's tid, or -1 if no proc slot is free.
int kthread_create( void*(*start_func)(), void* stack, unsigned int stack_size ){
  int i;
  int tid;
  struct proc *np;

  // Allocate a proc slot (threads share the table with processes).
  // allocproc() already leaves the slot in EMBRYO state.
  if((np = allocproc()) == 0)
    return -1;

  // Threads share the address space and identity of the creator.
  np->pgdir = proc->pgdir;
  np->sz = proc->sz;
  np->parent = proc->parent;
  np->pid = proc->pid;          // same pid for every thread of the process
  np->nextTID = proc->nextTID;
  np->threadCounter = proc->threadCounter;
  *np->tf = *proc->tf;

  // Start executing start_func at the top of the supplied stack.
  np->tf->esp = (int)stack + stack_size;
  np->tf->eip = (int)start_func;

  // Clear %eax so a "return from trap" yields 0 in the new thread.
  np->tf->eax = 0;

  // Threads share file descriptors and the working directory.
  for(i = 0; i < NOFILE; i++)
    if(proc->ofile[i])
      np->ofile[i] = filedup(proc->ofile[i]);
  np->cwd = idup(proc->cwd);

  // Assign the tid and bump the shared counters atomically.  The
  // original read *proc->nextTID before taking the lock, so two
  // concurrent creators could be handed the same tid.
  acquire(&ptable.lock);
  np->tid = *np->nextTID;
  *np->nextTID = *np->nextTID + 1;
  *np->threadCounter = *np->threadCounter + 1;
  release(&ptable.lock);

  tid = np->tid;
  safestrcpy(np->name, proc->name, sizeof(proc->name));
  np->ctime = uptime();
  np->rtime = 0;
  np->etime = 0;
  // Publish last: the scheduler may run the thread immediately, so
  // every field above must already be in place.
  np->state = RUNNABLE;
  return tid;
}

// Return the calling thread's tid (0 for the process's main thread).
int kthread_id(){
  return proc->tid;
}

// Terminate the calling kernel thread.  If it is the last live thread
// of its process, the whole process is torn down as in exit();
// otherwise only this thread becomes a zombie and any joiner is woken.
// Does not return.
void kthread_exit(){
  struct proc *p;
  int fd;

  if(*proc->threadCounter == 1){
    // Last live thread: close the process down, mirroring exit().
    for(fd = 0; fd < NOFILE; fd++){
      if(proc->ofile[fd]){
        fileclose(proc->ofile[fd]);
        proc->ofile[fd] = 0;
      }
    }

    iput(proc->cwd);
    proc->cwd = 0;

    acquire(&ptable.lock);

    // Parent might be sleeping in wait().
    wakeup1(proc->parent);

    // Pass abandoned children to init.  Guard against slots whose
    // parent pointer is 0 (e.g. UNUSED entries cleaned up by wait());
    // the original dereferenced p->parent unconditionally.
    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
      if(p->parent != 0 && p->parent->pid == proc->pid){
        p->parent = initproc;
        if(p->state == ZOMBIE){
          wakeup1(initproc);
        }
      }
    }

    proc->state = ZOMBIE;
    *proc->threadCounter = *proc->threadCounter - 1;
    proc->etime = uptime();
    // Jump into the scheduler, never to return.
    sched();
  }else{
    // Other threads remain: retire just this one and wake any thread
    // blocked in kthread_join on us.
    acquire(&ptable.lock);
    proc->state = ZOMBIE;
    *proc->threadCounter = *proc->threadCounter - 1;
    wakeup1(proc);
    proc->etime = uptime();
    // Jump into the scheduler, never to return.
    sched();
  }
  panic("zombie kthread_exit");
}

// Block until the thread with tid thread_id (in the caller's process)
// has exited.  Returns 0 once it is a zombie, -1 if no such thread
// exists or its slot was recycled.
int kthread_join( int thread_id ){
  struct proc *p;
  struct proc *sleepOn = 0;

  acquire(&ptable.lock);
  // Find the target under the lock so the table cannot change while
  // we scan.  The original searched without the lock and, worse, left
  // sleepOn uninitialized (undefined behavior) when nothing matched.
  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
    if(p->pid == proc->pid && p->tid == thread_id){
      sleepOn = p;
      break;
    }
  }
  if(sleepOn == 0){
    release(&ptable.lock);
    return -1;
  }

  for(;;){
    if(sleepOn->state == ZOMBIE){
      release(&ptable.lock);
      return 0;
    }
    if(sleepOn->state == UNUSED){
      release(&ptable.lock);
      return -1;
    }
    // Wait for the target to exit (see wakeup1 in kthread_exit).
    sleep(sleepOn, &ptable.lock);
  }
}



// Allocate a free mutex slot and return its id, or -1 when every
// slot is already in use.  The new mutex starts unowned.
int kthread_mutex_alloc(){
  int id = -1;
  int i;

  acquire(&mutex_table.lock);
  for(i = 0; i < MAX_MUTEXES && id < 0; i++){
    if(!mutex_table.mutex[i].created)
      id = i;
  }
  if(id >= 0){
    mutex_table.mutex[id].owner = 0;
    mutex_table.mutex[id].created = 1;
  }
  release(&mutex_table.lock);
  return id;
}

// Free mutex slot mutex_id.  Fails (-1) when the id is out of range,
// the mutex was never allocated, or it is still held by some thread.
int kthread_mutex_dealloc( int mutex_id ){
  int ok;

  acquire(&mutex_table.lock);
  ok = mutex_id >= 0 && mutex_id < MAX_MUTEXES
      && mutex_table.mutex[mutex_id].created != 0
      && mutex_table.mutex[mutex_id].owner == 0;
  if(ok){
    mutex_table.mutex[mutex_id].owner = 0;
    mutex_table.mutex[mutex_id].created = 0;
  }
  release(&mutex_table.lock);
  return ok ? 0 : -1;
}

// Acquire mutex mutex_id, blocking in FIFO order if it is held.
// Returns 0 on success (including when the caller already holds it),
// -1 for an invalid or unallocated id.
int kthread_mutex_lock( int mutex_id ){
  acquire(&mutex_table.lock);
  if(mutex_id >= MAX_MUTEXES || mutex_id < 0 || mutex_table.mutex[mutex_id].created == 0){
    release(&mutex_table.lock);
    return -1;
  }
  if(mutex_table.mutex[mutex_id].owner == proc){
    // Relocking by the current owner is treated as a no-op success.
    // NOTE(review): returning -1 here may be the intended semantics
    // instead - confirm.
    release(&mutex_table.lock);
    return 0;
  }
  if(mutex_table.mutex[mutex_id].owner != 0){
    // Mutex is held: block on its FIFO queue.  enqueue() releases
    // mutex_table.lock, sleeps via sched(), and re-acquires the lock
    // before returning here.
    enqueue(proc, &mutex_table.mutex[mutex_id].q);		//BLOCKING CALL
  }
  // Here we hold mutex_table.lock and the mutex is free: either it
  // was free above, or the previous owner dequeued us in unlock.
  mutex_table.mutex[mutex_id].owner = proc;
  release(&mutex_table.lock);
  return 0;
}

int kthread_mutex_unlock( int mutex_id ){
//   cprintf("(%d, %d) unlocking\n", proc->pid, proc->tid);
  acquire(&mutex_table.lock);
  if(mutex_id >= MAX_MUTEXES || mutex_id < 0 || 	// Index out of range
      mutex_table.mutex[mutex_id].owner != proc || 		// I'm not the mutex's owner
      mutex_table.mutex[mutex_id].created == 0){		// The mutex was never allocated
      release(&mutex_table.lock);
    return -1;
  }
//   cprintf("(%d, %d) is releasing mutex %d\n", proc->pid, proc->tid, mutex_id);
  mutex_table.mutex[mutex_id].owner = 0;
//   cprintf("(%d, %d) queue state is: %d - %d ; %d\n", proc->pid, proc->tid, mutex_table.q.first, 
// 	  mutex_table.q.next, !queueIsEmpty(&mutex_table.q));
  if(!queueIsEmpty(&mutex_table.mutex[mutex_id].q)){
    dequeue(&mutex_table.mutex[mutex_id].q);
  }
  release(&mutex_table.lock);
  return 0;
}



// Allocate a free condition-variable slot and return its id, or -1
// when all slots are taken.
int kthread_cond_alloc(){
  int id = -1;
  int i;

  acquire(&CV_table.lock);
  for(i = 0; i < MAX_CONDS; i++){
    if(CV_table.cv[i].created == 0){
      id = i;
      break;
    }
  }
  if(id != -1)
    CV_table.cv[id].created = 1;
  release(&CV_table.lock);
  return id;
}

// Free condition variable cond_id.  Fails (-1) when the id is
// invalid, the CV was never allocated, or a thread is still waiting.
int kthread_cond_dealloc( int cond_id ){
  int i;

  acquire(&CV_table.lock);	// don't dealloc while alloc is running
  if( cond_id < 0 || cond_id >= MAX_CONDS || CV_table.cv[cond_id].created == 0){
    release(&CV_table.lock);
    return -1;
  }
  // Check that no one is waiting on this CV.  The original tested
  // `waiting != 0` - the array itself, which is always non-null - so
  // dealloc could never succeed; test each slot instead.
  for(i=0; i<NPROC; i++){
    if(CV_table.cv[cond_id].waiting[i] != 0)
      break;
  }
  if(i != NPROC){
    // Somebody is waiting on this CV - can't dealloc it.
    release(&CV_table.lock);
    return -1;
  }
  CV_table.cv[cond_id].created = 0;
  release(&CV_table.lock);
  return 0;
}

// Atomically release mutex mutex_id, block on condition variable
// cond_id, and re-acquire the mutex when signaled.  Returns 0 on
// success, -1 for invalid ids or if the caller does not own the mutex.
int kthread_cond_wait( int cond_id, int mutex_id ){
  int i;

  acquire(&CV_table.lock);
  // Check that the input is valid: both ids in range, both objects
  // allocated, and the caller actually holding the mutex.
  if(cond_id < 0 || cond_id >= MAX_CONDS || mutex_id < 0 || mutex_id >= MAX_MUTEXES
      || CV_table.cv[cond_id].created == 0 || mutex_table.mutex[mutex_id].created == 0 
      || mutex_table.mutex[mutex_id].owner != proc){
      release(&CV_table.lock);
      return -1;
  }
  
  // Register ourselves in the first free waiting slot.
  for(i=0; i<NPROC; i++){
    if(CV_table.cv[cond_id].waiting[i] == 0)
      break;
  }
  // No need to check that i<NPROC because there is room for all possible process
  CV_table.cv[cond_id].waiting[i] = proc;
  release(&CV_table.lock);
  acquire(&ptable.lock);
  // Drop the mutex and block until kthread_cond_signal marks us
  // RUNNABLE; then re-take the mutex before returning.
  // NOTE(review): kthread_mutex_unlock/lock acquire mutex_table.lock
  // while ptable.lock is held here - verify this cannot deadlock
  // against enqueue(), which takes the locks in the opposite order.
  kthread_mutex_unlock(mutex_id);
  proc->state = BLOCKING;
  sched();
  kthread_mutex_lock(mutex_id);
  release(&ptable.lock);
  acquire(&CV_table.lock);
  
  release(&CV_table.lock);
  return 0;
}

int kthread_cond_signal( int cond_id ){
  struct proc* p;
  int i;
  
  // The whole table stays locked untill all process are notified, to avoid a situation
  // where one process is notifing and meanwhile new process' add them selves to the table
  // possibly in an index that was already passed in the for-loop (e.g. i is 10, those process'
  // where notified, and a new process adds its self to the 0 index in the table -> could result
  // in a dead lock)
  acquire(&CV_table.lock);
//   cprintf("(%d, %d) signaling - cond_id: %d\n", proc->pid, proc->tid, cond_id);
  if(CV_table.cv[cond_id].created == 0){
    // CV was never created
    release(&CV_table.lock);
    return -1;
  }
  for(i=0; i<NPROC; i++){
    if(CV_table.cv[cond_id].waiting[i] != 0){
      p = CV_table.cv[cond_id].waiting[i];
//       cprintf("(%d, %d): waking: (%d, %d), cond_id: %d\n", proc->pid, proc->tid, p->pid, p->tid, cond_id);
      p->state = RUNNABLE;
      CV_table.cv[cond_id].waiting[i] = 0;
    }
  }
//   cprintf("here4 (%d, %d)\n", proc->pid, proc->tid);
  
  release(&CV_table.lock);
  return 0;
}

// Return the number of live threads in the calling process.
int kthread_getCount(){
    return *proc->threadCounter;
}
// Run entry() and then terminate the thread cleanly, so a thread
// whose entry function simply returns still exits properly.
void wrap_function(void (*entry)()){
  entry();
  kthread_exit();
}

//set the corresponding statistics where the arguments pointers point
// Fill in elapsed/run/wait tick counts for the first RUNNING process
// found in the table.  Returns 0 on success, -1 if none is running.
int retrieve_process_statistics( int* totalElapsedTime, int* totalRunTime, int* totalWaitTime){
    struct proc *p;
    struct proc *found = 0;

    acquire(&ptable.lock);
    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
        if (p->state == RUNNING) {
            found = p;
            break;
        }
    }
    release(&ptable.lock);

    // The original tested `if (p)` after the loop: when nothing
    // matched, p was the past-the-end pointer (always non-null) and
    // the reads below went out of bounds.
    if (found == 0)
        return -1;

    // NOTE(review): fields are read after dropping ptable.lock, as in
    // the original - values may be slightly stale; confirm acceptable.
    *totalElapsedTime = uptime() - found->ctime;
    *totalRunTime = found->rtime;
    *totalWaitTime = uptime() - found->ctime - found->rtime;
    return 0;
}

// fifo struct manager
/*******
 * General note: when (q->first == q->next) is true, one of the following two is happening:
 * 1. The queue is empty
 * 2. The queue is full
 * The way to distinguish between the two is to check whether the slot holds a null process
 *******/

// Append p to q and block it until dequeue() makes it runnable again.
// Caller must hold mutex_table.lock; it is released while p sleeps
// and re-acquired before returning.  Returns -1 if the queue is full.
int enqueue(struct proc* p, struct myQueue *q){
  int hasAcquiredPtableLock;
  
  if(queueIsFull(q))
    return -1;
  q->procs[q->next] = p;
  q->next = (q->next+1)%NPROC;
  // It is important not to reorder the steps below: we must reach
  // sched() while still holding a lock, or another thread could
  // bypass us and violate the FIFO order.
  hasAcquiredPtableLock = !holding(&ptable.lock);
  if(hasAcquiredPtableLock ){
    // ptable.lock may already be held by our caller (e.g. via
    // kthread_cond_wait); only acquire it when we don't hold it,
    // and remember whether to release it afterwards.
    acquire(&ptable.lock);
  }
  
  release(&mutex_table.lock);
  p->state = BLOCKING;
  sched();
  if(hasAcquiredPtableLock ){
    release(&ptable.lock);
  }
  acquire(&mutex_table.lock);
  return 0;
}

// Remove the head of q and make that thread runnable again.
// Caller must hold mutex_table.lock.  Returns -1 if q is empty.
int dequeue(struct myQueue *q){
  struct proc *head;

  if(queueIsEmpty(q))
    return -1;
  head = q->procs[q->first];
  q->procs[q->first] = 0;
  q->first = (q->first + 1) % NPROC;
  head->state = RUNNABLE;
  return 0;
}

// A queue is empty when its indices coincide AND the head slot holds
// no process (coinciding indices alone could also mean "full").
int queueIsEmpty(struct myQueue *q){
  if(q->first != q->next)
    return 0;
  return q->procs[q->first] == 0;
}

// A queue is full when its indices coincide AND the head slot is
// occupied (coinciding indices alone could also mean "empty").
int queueIsFull(struct myQueue *q){
  if(q->first != q->next)
    return 0;
  return q->procs[q->first] != 0;
}