// RPC stubs for clients to talk to lock_server, and cache the locks
// see lock_client.cache.h for protocol details.

#include "lock_client_cache.h"
#include "rpc.h"
#include <sstream>
#include <iostream>
#include <stdio.h>

#define REVOKED 1
#define NOT_REVOKED 0
#define CAN_RETRY 1
#define DO_NOT_RETRY 0
#define JUST_REVOKED 1
#define NOT_JUST_REVOKED 0

static void *
releasethread(void *x)
{
  lock_client_cache *cc = (lock_client_cache *) x;
  cc->releaser();
  return 0;
}

// Port chosen by the most recently constructed client in this process;
// XORed into the srand() seed so two clients created in the same
// second pick different rlock ports.
int lock_client_cache::last_port = 0;

// Construct a caching lock client talking to the server at xdst.
// Starts a local RPC server (so the lock server can call revoke/retry
// back on us) and spawns the background releaser thread.
lock_client_cache::lock_client_cache(std::string xdst, 
				     class lock_release_user *_lu)
  : lock_client(xdst), lu(_lu)
{
  // Pick a pseudo-random listening port; mixing in last_port reduces
  // collisions between clients created in the same second.
  srand(time(NULL)^last_port);
  rlock_port = ((rand()%32000) | (0x1 << 10));
  const char *hname;
  // assert(gethostname(hname, 100) == 0);
  hname = "127.0.0.1";
  std::ostringstream host;
  host << hname << ":" << rlock_port;
  // id ("host:port") is sent with acquire RPCs so the server knows
  // where to deliver revoke/retry callbacks.
  id = host.str();
  last_port = rlock_port;
  rpcs *rlsrpc = new rpcs(htons(rlock_port));
  /* register RPC handlers with rlsrpc */
  (*rlsrpc).reg(rlock_protocol::revoke, this, &lock_client_cache::revoke);
  (*rlsrpc).reg(rlock_protocol::retry, this, &lock_client_cache::retry);
  
  pthread_mutex_init(&lock_cache_mutex, NULL);
  pthread_cond_init(&revoke_cond_mutex, NULL);
  
  // Background thread that sends release RPCs for revoked locks.
  pthread_t th;
  int r = pthread_create(&th, NULL, &releasethread, (void *) this);
  assert (r == 0);
}


void
lock_client_cache::releaser()
{
  // Background loop: blocks until release()/revoke() signal
  // revoke_cond_mutex, then scans the cache and sends a release RPC
  // for every lock that is both revoked and FREE.  Runs forever;
  // lock_cache_mutex is held except while blocked in cond_wait.
  pthread_mutex_lock(&lock_cache_mutex);
  while (1){

    // Wait until some lock may be ready to hand back to the server.
    pthread_cond_wait(&revoke_cond_mutex, &lock_cache_mutex);

    std::map<lock_protocol::lockid_t, lock>::iterator it;
    for (it = lock_cache.begin(); it != lock_cache.end(); it++){

      // Skip locks the server has not asked for.
      if ((it->second).revoked_bool != REVOKED)
	continue;

      // Only locks no local thread currently holds can be released.
      if ((it->second).lstat != lock_protocol::FREE)
	continue;

      int r;
      int ret = cl->call(lock_protocol::release, it->first, r);
      if (ret == lock_protocol::OK){
	(it->second).lstat = lock_protocol::NONE;
	(it->second).revoked_bool = NOT_REVOKED;
	(it->second).retry_bool = DO_NOT_RETRY;
	// Tell waiting acquirers they must re-acquire from the server.
	(it->second).just_revoked = JUST_REVOKED;
      }

      // Wake any thread blocked on this lock so it re-evaluates.
      pthread_cond_broadcast(&((it->second).lock_cond_mutex));
    }
  }
  // Not reached: the loop above never exits.
  pthread_mutex_unlock(&lock_cache_mutex);
}


//make a new lock struct
//build a fresh lock record for insertion into the cache;
//just_revoked always starts cleared
lock lock_client_cache::make_lock(lock_protocol::lock_status l,
				   int retry_bool,
				   int revoked_bool){
  lock nl;
  pthread_cond_init(&(nl.lock_cond_mutex), NULL);
  nl.lstat = l;
  nl.retry_bool = retry_bool;
  nl.revoked_bool = revoked_bool;
  nl.just_revoked = NOT_JUST_REVOKED;
  return nl;
}

//print lock to stdout
//dump a lock's fields to stdout for debugging
void lock_client_cache::print_lock(lock l){
  printf("lock in %s\n", id.c_str());

  //map the known states to their names; anything else prints numerically
  const char *status_name = NULL;
  if (l.lstat == lock_protocol::FREE)
    status_name = "FREE";
  else if (l.lstat == lock_protocol::NONE)
    status_name = "NONE";
  else if (l.lstat == lock_protocol::LOCKED)
    status_name = "LOCKED";

  if (status_name != NULL)
    printf("\tlock_status: %s\n", status_name);
  else
    printf("\tlock_status: %d\n", l.lstat);

  printf("\tretry_bool: %d\n", l.retry_bool);
  printf("\trevoked_bool: %d\n", l.revoked_bool);
  printf("\tjust_revoked: %d\n", l.just_revoked);
}



rlock_protocol::status
lock_client_cache::acquire(lock_protocol::lockid_t lid)
{
  // Acquire lock lid for the calling thread.  If the lock is cached
  // FREE it is taken locally; if it is LOCKED we wait for the local
  // holder; if it is NONE we ask the server, waiting on retry/revoke
  // notifications as needed.  Returns OK once the lock is held.
  int k;
  pthread_mutex_lock(&lock_cache_mutex);
  printf("RECEIVED ACQUIRE and mutex IN CLIENT FOR LOCK %llu \n", lid);

  std::map<lock_protocol::lockid_t, lock>::iterator it;
  it = lock_cache.find(lid);

  // First time we see this lock: cache it with status NONE.
  if (it == lock_cache.end()){
    lock l = make_lock(lock_protocol::NONE, 0, 0);
    lock_cache[lid] = l;
    print_lock(lock_cache.find(lid)->second);
  }

  while (1) {
    // Re-find every pass: cond_wait releases the mutex, so the state
    // may have changed.  Bind a reference -- copying the lock struct
    // would copy its pthread_cond_t, which POSIX leaves undefined.
    it = lock_cache.find(lid);
    lock &lid_lock = it->second;

    if (lid_lock.lstat == lock_protocol::LOCKED){
      // Held by another local thread: wait for release().
      pthread_cond_wait(&(lid_lock.lock_cond_mutex), &lock_cache_mutex);

    } else if (((lid_lock.retry_bool == CAN_RETRY) ||
		(lid_lock.just_revoked == JUST_REVOKED))
	       && (lid_lock.lstat == lock_protocol::NONE)){
      // BUG FIX: original compared retry_bool (a flag) against
      // lock_protocol::NONE (a lock status) and used bitwise `|`;
      // the status check was clearly meant to be on lstat.
      // Server signalled retry, or the lock was just handed back:
      // clear the flags and loop so the NONE branch re-sends acquire.
      lid_lock.retry_bool = DO_NOT_RETRY;
      lid_lock.just_revoked = NOT_JUST_REVOKED;
      printf("found a retry method or just a revoke\n");

    } else if (lid_lock.lstat == lock_protocol::NONE){
      // Not cached here: ask the server for the lock.
      int ret = cl->call(lock_protocol::acquire, lid, id, k);

      if (ret == lock_protocol::OK){
	// Server granted the lock; mark it held and return.
	printf("returned OK from server\n");
	lid_lock.lstat = lock_protocol::LOCKED;
	pthread_mutex_unlock(&lock_cache_mutex);
	return rlock_protocol::OK;
      } else if (ret == lock_protocol::RETRY){
	// Server will call retry() when the lock frees up; wait here.
	pthread_cond_wait(&(lid_lock.lock_cond_mutex), &lock_cache_mutex);
      }
      // Any other status: just loop and try again.
      // (The original tracked a send_acquire flag here, but it was
      // reset at the top of every iteration, so its guard was always
      // true and its else branch was dead code -- removed.)

    } else if (lid_lock.lstat == lock_protocol::FREE){
      // Cached and free: take it locally, no server round trip.
      lid_lock.lstat = lock_protocol::LOCKED;
      pthread_mutex_unlock(&lock_cache_mutex);
      return rlock_protocol::OK;
    } else {
      // Unexpected state; dump it for debugging and keep looping.
      print_lock(lid_lock);
    }
  }
  // Not reached: the loop only exits via the returns above.
  pthread_mutex_unlock(&lock_cache_mutex);
  return rlock_protocol::RPCERR;
}

rlock_protocol::status
lock_client_cache::release(lock_protocol::lockid_t lid)
{
  // Return a held lock to the local cache (status FREE) and wake both
  // the releaser thread and any local waiters.  RPCERR if the lock
  // was never cached.
  pthread_mutex_lock(&lock_cache_mutex);

  std::map<lock_protocol::lockid_t, lock>::iterator entry = lock_cache.find(lid);

  if (entry == lock_cache.end()){
    //unknown lock id: nothing to release
    pthread_mutex_unlock(&lock_cache_mutex);
    return rlock_protocol::RPCERR;
  }

  entry->second.lstat = lock_protocol::FREE;
  //the releaser may now be able to hand a revoked lock back
  pthread_cond_broadcast(&revoke_cond_mutex);
  //local threads blocked in acquire() can take the FREE lock
  pthread_cond_broadcast(&(entry->second.lock_cond_mutex));
  pthread_mutex_unlock(&lock_cache_mutex);
  return rlock_protocol::OK;
}

// RPC handler: the server wants lock lid back.  Mark it revoked so
// the releaser thread sends a release RPC once no local thread holds
// it.  `random` and `r` are unused RPC plumbing.
int lock_client_cache::revoke(lock_protocol::lockid_t lid, int random, int &r){

  pthread_mutex_lock(&lock_cache_mutex);

  std::map<lock_protocol::lockid_t, lock>::iterator it = lock_cache.find(lid);

  if (it == lock_cache.end()){
    //we never cached this lock: nothing to revoke
    pthread_mutex_unlock(&lock_cache_mutex);
    return rlock_protocol::RPCERR;
  }

  //NONE means we no longer hold it, so there is nothing to give back
  if ((it->second).lstat != lock_protocol::NONE){
    (it->second).revoked_bool = REVOKED;
    //wake the releaser so it can send release once the lock is FREE
    pthread_cond_broadcast(&revoke_cond_mutex);
  }
  pthread_mutex_unlock(&lock_cache_mutex);
  return rlock_protocol::OK;
}

// RPC handler: the server says a previously-refused acquire for lid
// may now succeed.  Set the retry flag and wake the threads blocked
// in acquire().  `random` and `r` are unused RPC plumbing.
int lock_client_cache::retry(lock_protocol::lockid_t lid, int random, int &r){

  pthread_mutex_lock(&lock_cache_mutex);

  std::map<lock_protocol::lockid_t, lock>::iterator it = lock_cache.find(lid);

  if (it == lock_cache.end()){
    //unknown lock id
    pthread_mutex_unlock(&lock_cache_mutex);
    return rlock_protocol::RPCERR;
  }

  //only meaningful while we do not hold the lock (status NONE)
  if ((it->second).lstat == lock_protocol::NONE){
    (it->second).retry_bool = CAN_RETRY;
    //wake acquirers waiting on this lock's condition variable
    pthread_cond_broadcast(&((it->second).lock_cond_mutex));
  }
  pthread_mutex_unlock(&lock_cache_mutex);
  return rlock_protocol::OK;
}


