package main

// Interface for the webserver to call and find out if it's all right to perform an operation
// Also contains the function that starts the failure detector in a separate thread

import (
       "fmt"
       "strings"
       "rpc"
       "net"	
       "http"
       "time"
       "sync"
)

const (
      READ_OP = 1
      WRITE_OP = 2

      MAX_SERVERS = 100

      RPC_PORT = "15000"
      RPC_LOG_PORT = "20000"
      LOG_LATENCY = 5e8
)

// Mapping from the partition number to its current primary node
// Partitions numbered from 1 to n
// Primary Servers numbered from 1 to n
var partition_primary_map = make(map[int] int)

// Mapping from the IP address to its server ID
// Server Ids numbered from 1 to n
var ip_serverid_map = make(map[string] int)
var serverid_ip_map = make(map[int] string)

// Channel of objects to handle failure data communicated by the failure detector
// Allows buffering of 1000 data elements communicated
var failure_data_chan = make(chan status_update, 1000)

// List of servers in "ip:port" format
var peer_servers [100]string
var all_servers [100]string
var n_servers int

// Number of hashtables being maintained
var n_hashtables int
var n_partitions int 

//My IP and port
var my_ip string
var my_port string

// Index into the log that each node has already seen from this node.
// The outer index - partition number - 1 to n
// The inner index - server id - 1 to n
var logs_seen_index [100][100] int

// Current server status
var server_status = make(map[string] int)

// Various locks. All are reader-writer locks
var partition_primary_mutex sync.RWMutex
var logs_seen_mutex sync.RWMutex
var server_status_mutex sync.RWMutex
var rpc_clients_mutex sync.RWMutex

// Map for IP - client for RPC Remote write
var ip_rpcclient_map = make(map[string] *rpc.Client)

/*
   initPartitions - On server startup, record this node as the primary for
   every partition.
   n - Number of partitions in the system at startup time
*/
func initPartitions(n int) {
	partition_primary_mutex.Lock()
	defer partition_primary_mutex.Unlock()

	for partition := 1; partition <= n; partition++ {
		partition_primary_map[partition] = serverID
	}
}

// printPartitionMapping dumps the partition->primary map and the id->ip map,
// framed by a dotted divider, for debugging.
func printPartitionMapping() {
	const divider = "...................................\n"
	fmt.Printf(divider)
	fmt.Printf("%v\n", partition_primary_map)
	fmt.Printf("%v\n", serverid_ip_map)
	fmt.Printf(divider)
}

// initRPC registers the RPCRequest service with the rpc package and starts
// serving RPC-over-HTTP on RPC_PORT in a background goroutine.
func initRPC() {
	rpc_request := new(RPCRequest)
	rpc.Register(rpc_request)
	rpc.HandleHTTP()
	l, e := net.Listen("tcp", ":"+RPC_PORT)
	if e != nil {
		// Bug fix: the original format string had no verb (so the error was
		// never printed) and execution fell through to http.Serve with a nil
		// listener, which panics inside the goroutine. Report and bail out.
		fmt.Printf("Listen error : %v\n", e)
		return
	}
	go http.Serve(l, nil)
}

// initLogRPC registers the RPCLogRequest service and starts serving
// RPC-over-HTTP on RPC_LOG_PORT in a background goroutine.
// NOTE(review): both initRPC and initLogRPC call rpc.HandleHTTP(), which
// registers handlers on the default mux twice - confirm this does not panic
// under the rpc package version in use.
func initLogRPC() {
	rpc_log_request := new(RPCLogRequest)
	rpc.Register(rpc_log_request)
	rpc.HandleHTTP()
	l, e := net.Listen("tcp", ":"+RPC_LOG_PORT)
	if e != nil {
		// Bug fix: the original format string had no verb (the error was
		// dropped) and execution continued into http.Serve with a nil
		// listener. Report the error and return instead.
		fmt.Printf("Listen error : %v\n", e)
		return
	}
	go http.Serve(l, nil)
}

// partitionUpdater loops forever: every LOG_LATENCY nanoseconds it pushes
// this node's log for every partition to every other live server, and then
// records (in logs_seen_index) how far each server has been sent.
// NOTE(review): the commented-out checks suggest this was meant to run only
// for partitions this node owns and only when there is fresh data; as
// written it sends for every partition unconditionally - confirm intended.
func partitionUpdater() {
     for {
         // Lock ordering here (primary map, status, logs) matches
         // handleFailureData, so the two cannot deadlock against each other.
         partition_primary_mutex.RLock();
	 server_status_mutex.RLock();
	 logs_seen_mutex.Lock();         

     	 for i := 0; i < n_partitions; i++ {
	     //if partition_primary_map[i+1] == serverID {
	     	// I own this partition. send my log to all the servers
	     	for j := 0; j < n_servers; j++ {
		     // If you do not have fresh data for this partition and the server, then do not send logs.
       		     //if (logs_seen_index[i+1][j+1] == getLogEnd(i+1)) {
		     //	 continue;
		     //}
	     	     dst_ip := serverid_ip_map[j+1];
	     	     if dst_ip != my_ip && server_status[dst_ip] == 1 {
		     	//fmt.Printf("=================\nPeriodic log update to %s\n=================\n", dst_ip);
		     	// The other guy is awake. send him the logs starting
		     	// from the last index we believe he has seen, then
		     	// advance that index to the current end of our log.
		    	go sendLog(dst_ip, i+1, logs_seen_index[i+1][j+1]);
		    	logs_seen_index[i+1][j+1] = getLogEnd(i+1);
		     }
		 }
	     //}
	 }

         partition_primary_mutex.RUnlock();
	 server_status_mutex.RUnlock();
	 logs_seen_mutex.Unlock();         

     	 time.Sleep(LOG_LATENCY);
     }
}

// RPCReceiveLogs asks the primary for the given partition to send its logs to
// me so that I can sync up with it.
// ip - The primary node for the partition I want synced
// partition_number - The partition I am concerned with
// If the primary cannot be dialed it is reported as "down" on
// failure_data_chan.
func RPCReceiveLogs(ip string, partition_number int) {
	client, err := rpc.DialHTTP("tcp", ip+":"+RPC_LOG_PORT)
	if err != nil {
		fmt.Printf("In remote receive logs Dialing error : %s\n", err)
		newstatus := new(status_update)
		newstatus.IP = ip
		newstatus.status = "down"
		failure_data_chan <- *newstatus
		return
	}
	// Bug fix: the client connection was never closed, leaking one TCP
	// connection per sync request.
	defer client.Close()

	var args LogArgs
	args.ip_addr = my_ip
	args.partitionNumber = partition_number
	// start_point of -1 appears to request the full log.
	// NOTE(review): inferred from the name - confirm against RPCSendLog.
	args.start_point = -1

	reply := new(Reply)

	err1 := client.Call("RPCLogRequest.RPCSendLog", &args, reply)
	if err1 != nil {
		fmt.Printf("RPCLogRequest error to remote send log : %s\n", err1)
	} else {
		// Bug fix: removed stray backtick typo from the log message.
		fmt.Printf("Remote log request returned to this node\n")
	}
}

// RPCCreateClient dials the RPC endpoint of the given peer and caches the
// resulting client in ip_rpcclient_map for later remote writes.
// Retries once per second until the dial succeeds.
func RPCCreateClient(ip string) {
	// Bug fix: the original retried by calling itself recursively, growing
	// the stack without bound while the peer stays unreachable. Retry in a
	// loop instead; behavior (print, sleep 1s, retry) is otherwise the same.
	var client *rpc.Client
	for {
		c, err := rpc.DialHTTP("tcp", ip+":"+RPC_PORT)
		if err == nil {
			client = c
			break
		}
		fmt.Printf("in remote write request Dialing error : %s\n", err)
		time.Sleep(1e9)
	}

	rpc_clients_mutex.Lock()
	ip_rpcclient_map[ip] = client
	rpc_clients_mutex.Unlock()
}

// RPCRemoteWriteRequest forwards a write for a partition this node does not
// own to the partition's primary at ip, then applies the returned value to
// the local table.
// ip - primary node for the partition
// key, value - the write being forwarded
// partition_number - the partition the key hashes to
func RPCRemoteWriteRequest(ip string, key string, value string, partition_number int) {
	fmt.Println("Not the primary, so going to do an RPC")
	var args Args
	args.key = key
	args.value = value
	args.ip_addr = my_ip
	args.partitionNumber = partition_number

	reply := new(Reply)

	server_status_mutex.RLock()
	status := server_status[ip]
	server_status_mutex.RUnlock()

	// Silently drop the write if the primary is known to be down.
	if status == 0 {
		return
	}

	rpc_clients_mutex.RLock()
	client := ip_rpcclient_map[ip]
	rpc_clients_mutex.RUnlock()

	// Bug fix: a missing map entry yields a nil client, and the Call below
	// would dereference it and crash. Drop the write with a message instead.
	if client == nil {
		fmt.Printf("No RPC client cached for %s, dropping remote write\n", ip)
		return
	}

	err1 := client.Call("RPCRequest.RPCWrite", &args, reply)
	if err1 != nil {
		fmt.Printf("RPCRequest error to remote write: %s\n", err1)
	} else {
		fmt.Printf("Remote write returned to this node\n")
		writeTable(key, reply.value)
	}
	//client.Close();
	fmt.Println("Got the reply")
}

/*
   validateOperation - Interface for the webserver
   opr - Operation requested (READ_OP / WRITE_OP)
   key - the key on which the operation is to be performed
   value - the value, used when the write must be forwarded to a remote primary
*/
// TODO : validateOperation needs a copy of the value also to perform the RPC to the remote primary
func validateOperation(opr int, key string, value string) bool {
	fmt.Printf("Operation : %d, Key : %s\n", opr, key)

	hashedkey := hash(key)
	partition := findPartition(hashedkey)
	partition_primary := getPrimaryNode(partition)

	fmt.Println("Primary for", partition, "is", partition_primary)
	fmt.Println("IP is", serverid_ip_map[partition_primary])

	switch {
	case opr == WRITE_OP && serverID == partition_primary:
		// We own this partition; the local write may proceed.
		return true
	case opr == WRITE_OP:
		// We are not the primary: forward the write via RPC to the node that
		// currently owns the partition.
		//fmt.Printf("Going to do remote log request\n")
		//RPCReceiveLogs(serverid_ip_map[partition_primary], partition_primary);
		//fmt.Printf("Received logs successfully from %s for partition %d\n", serverid_ip_map[partition_primary], partition_primary)
		fmt.Printf("Going to do remote write on server %s for partition %d key %s value %s\n", serverid_ip_map[partition_primary], partition_primary, key, value)
		RPCRemoteWriteRequest(serverid_ip_map[partition_primary], key, value, partition_primary)
		fmt.Printf("Remote write successfully done %s for key %s value %s\n", serverid_ip_map[partition_primary], key, value)
		return true
	default:
		// Reads (and anything else) are always served locally.
		return true
	}
}

// setupFailureDetector launches the failure-data consumer and the failure
// detector itself, each in its own goroutine.
func setupFailureDetector() {
	go handleFailureData()
	go failureDetector()
}

// setupPeriodicUpdater launches the periodic partition/log updater in its
// own goroutine. (The original comment said "election manager" - copy-paste.)
func setupPeriodicUpdater() {
	go partitionUpdater()
}

// setupElectionManager launches the election manager in its own goroutine,
// handing it this node's zero-based id, the server list, and the count.
func setupElectionManager() {
	go electionManager(serverID-1, all_servers, n_servers)
}

// findPartition maps a hashed key onto a partition number in [1, n_hashtables].
func findPartition(hashedkey int) int {
	return hashedkey%n_hashtables + 1
}

// getPrimaryNode returns the current primary server id for the given
// partition, under the partition map's read lock.
func getPrimaryNode(partition int) int {
	partition_primary_mutex.RLock()
	defer partition_primary_mutex.RUnlock()

	return partition_primary_map[partition]
}

// parseIP derives the cluster membership tables from the raw "ip:port" list:
// it counts the live entries, records this node's ip and port (servers[0] is
// always this node), sorts the list so every node numbers the servers
// identically, assigns this node's serverID, and fills the ip<->id maps plus
// the all_servers/peer_servers arrays (ports dropped).
func parseIP(servers []string) {
	// The cluster size is the number of non-empty entries.
	live := 0
	for _, entry := range servers {
		if len(entry) != 0 {
			live++
		}
	}
	n_servers = live
	n_hashtables = n_servers
	n_partitions = n_servers

	// Split our own "ip:port" entry.
	sep := strings.Index(servers[0], ":")
	my_ip = servers[0][0:sep]
	my_port = servers[0][sep+1:]

	// Bubble-sort the first n_servers entries so that ids come out in a
	// globally consistent order on every node.
	for pass := 0; pass < n_servers; pass++ {
		for j := 0; j < n_servers-1; j++ {
			if servers[j] > servers[j+1] {
				servers[j], servers[j+1] = servers[j+1], servers[j]
			}
		}
	}

	// Our id is our one-based position in the sorted order.
	for i := 0; i < n_servers; i++ {
		colon := strings.Index(servers[i], ":")
		if my_ip == servers[i][0:colon] {
			serverID = i + 1
		}
	}

	// Build the ip<->id maps and the server lists.
	next := 0
	for i := 0; i < n_servers; i++ {
		colon := strings.Index(servers[i], ":")
		ip := servers[i][0:colon]

		ip_serverid_map[ip] = i + 1
		serverid_ip_map[i+1] = ip

		all_servers[i] = ip
		if i != serverID-1 {
			peer_servers[next] = ip
			next++
		}
	}
}

// initPartitionManager wires up the partition layer after parseIP has filled
// the membership tables: it claims every partition, resets the per-server log
// indices, marks every peer's status as unknown (-1) and this node as up (1),
// then starts the RPC listeners, failure detector, election manager, and
// periodic updater.
func initPartitionManager(servers []string) {
	//parseIP(servers);
	initPartitions(n_hashtables)

	// Reset the one-based log indices.
	// Bug fix: the original looped `i <= n_partitions` starting at 0, which
	// both rewrote the unused row/column 0 and indexed out of range once the
	// count reached len(logs_seen_index) (MAX_SERVERS). Iterate the one-based
	// range actually used elsewhere, clamped to the array bounds.
	logs_seen_mutex.Lock()
	for i := 1; i <= n_partitions && i < len(logs_seen_index); i++ {
		for j := 1; j <= n_servers && j < len(logs_seen_index[i]); j++ {
			logs_seen_index[i][j] = 0
		}
	}
	logs_seen_mutex.Unlock()

	// Peers start as -1 (status unknown) until the failure detector reports.
	server_status_mutex.Lock()
	for i := 0; i < n_servers; i++ {
		server_status[all_servers[i]] = -1
	}
	server_status[my_ip] = 1
	server_status_mutex.Unlock()

	initRPC()
	initLogRPC()
	setupFailureDetector()
	setupElectionManager()
	setupPeriodicUpdater()
}

// handleFailureData is the single consumer of failure_data_chan: it
// serializes every "up"/"down" status change from the failure detector,
// updates server_status, obtains a new partition->primary mapping from the
// election code (restorePrimary / electNewPrimary), and reconciles
// partition_primary_map and logs_seen_index with that mapping.
func handleFailureData() {
     var new_mapping [100]primaryMapping;

     for {
	 fmt.Printf("Waiting for an input on the channel\n");
     	 failure_data := <-failure_data_chan

	 // Ignore updates that do not change what we already believe.
	 // NOTE(review): these reads of server_status happen without holding
	 // server_status_mutex - confirm this race is acceptable.
	 if (failure_data.status == "up" && server_status[failure_data.IP] == 1) {
	    continue;
	 }
	 if (failure_data.status == "down" && server_status[failure_data.IP] == 0) {
	    continue;
	 }
	 
	 // Some failure information has come on the channel. Lock the common data structures 
	 // before handling the failure. (Same lock order as partitionUpdater.)
         partition_primary_mutex.Lock();
	 server_status_mutex.Lock();
	 logs_seen_mutex.Lock();              	      	 

	 if (failure_data.status == "up") {
     	    fmt.Printf("Status received [up] : %s\n", failure_data.IP);
     	    // A node has come (back) up: mark it alive, re-establish an RPC
	    // client to it in the background, and let restorePrimary compute
	    // which partitions should be handed back to it.
	    // (The original comments for the up/down branches were swapped.)

	    server_status[failure_data.IP] = 1;
	    
	    go RPCCreateClient(failure_data.IP);

	    new_mapping = restorePrimary(failure_data.IP);

     	 } else if (failure_data.status == "down") {
     	    fmt.Printf("Status received [down] : %s\n", failure_data.IP);
       	    // A node has gone down: mark it dead and elect new primaries for
	    // the partitions it owned.

	    server_status[failure_data.IP] = 0;

	    new_mapping = electNewPrimary(failure_data.IP);

	    // Reset the logs seen index by that server to 0, so that when it
	    // returns it is sent the full state rather than an increment.
	    for i := 0; i < n_partitions; i++ {
	    	logs_seen_index[i+1][ip_serverid_map[failure_data.IP]] = 0
	    }
     	 }

	 fmt.Printf("Mapping obtained : %v\n", new_mapping);

	 // TODO : Lock the partition mapping in this server when I am updating the partition mapping
	 // based on the inputs from failure detector and election manager

	 // Reconcile each entry of the proposed mapping with our local state.
	 // If the prev and the current primary is the same, then do nothing
	 // If the prev primary is me and the current is someone else, then send my data for that partition to the new primary
	 for i := 0; i < n_partitions; i++ {

	     // Case 1: primary unchanged and it is me. If a node just came up,
	     // I am responsible to send my partition to the node that has just come up.
	     // Send the hashtable for my partition.	     
    	     if (len(new_mapping[i].previousPrimary) > 0 && len(new_mapping[i].currentPrimary) > 0 && 
	         new_mapping[i].previousPrimary == new_mapping[i].currentPrimary && 
		 new_mapping[i].currentPrimary == my_ip) {
		     if (failure_data.status == "up") {
		     	 fmt.Printf("In partition manager : going to call hashtable send for partition %d to %s\n", new_mapping[i].partitionNumber + 1, failure_data.IP);
		     	 go sendHashTable(failure_data.IP, new_mapping[i].partitionNumber + 1);
		     }
		     partition_primary_map[new_mapping[i].partitionNumber + 1] = ip_serverid_map[my_ip];
		     printPartitionMapping();
		     continue;
	     }
	     
	     // Case 2: no previous primary recorded and the new primary is me.
	     // I am responsible to send my partition to the node that has just come up.
	     // Send the hashtable for my partition.	     
       	     if (new_mapping[i].previousPrimary == "" && len(new_mapping[i].currentPrimary) > 0 && 
		 new_mapping[i].currentPrimary == my_ip) {
		     // I am responsible to send my partition to the node that has just come up.
		     // Send the hashtable for my partition.
		     if (failure_data.status == "up") {
		     	 fmt.Printf("In partition manager : going to call hashtable send for partition %d to %s\n", new_mapping[i].partitionNumber + 1, failure_data.IP);
		     	 go sendHashTable(failure_data.IP, new_mapping[i].partitionNumber + 1);
		     }    
		     partition_primary_map[new_mapping[i].partitionNumber + 1] = ip_serverid_map[my_ip];
		     printPartitionMapping();
		     continue;
	     }

	     // Case 3: previous and current primary are the same node but not
	     // me - just record the mapping.
	     if (len(new_mapping[i].previousPrimary) > 0 && len(new_mapping[i].currentPrimary) > 0 && 
	         new_mapping[i].previousPrimary == new_mapping[i].currentPrimary) {
 		     partition_primary_map[new_mapping[i].partitionNumber + 1] = ip_serverid_map[new_mapping[i].currentPrimary];
		     printPartitionMapping();
	     	     continue;
	     }

	     // Case 4: I was the primary and ownership moves to someone else -
	     // sync my log (or the whole hashtable) over to the new primary.
	     if (len(new_mapping[i].previousPrimary) > 0 && new_mapping[i].previousPrimary == my_ip) {
	        // Sync my log with the new guy
		dst_ip := new_mapping[i].currentPrimary;
		dst_serverid := ip_serverid_map[dst_ip];
		partition_number := new_mapping[i].partitionNumber;

		// If the logs seen by him is 0, then send the whole hashtable instead of the logs.
		// NOTE(review): the check indexes [partition_number + 1] but the
		// sendLog argument reads [partition_number] - likely off-by-one;
		// every other use of logs_seen_index in this file is +1. Confirm.
		if (logs_seen_index[partition_number + 1][dst_serverid] > 0) {
 		    go sendLog(dst_ip, partition_number + 1, logs_seen_index[partition_number][dst_serverid]);
		} else {
	     	 fmt.Printf("In partition manager : going to call hashtable send for partition %d to %s\n", new_mapping[i].partitionNumber + 1, failure_data.IP);
 		    go sendHashTable(dst_ip, partition_number + 1);
		}
		logs_seen_index[partition_number + 1][dst_serverid] = getLogEnd(partition_number + 1);

		partition_primary_map[partition_number + 1] = dst_serverid;
		     printPartitionMapping();

		continue;
	     }

	     // Case 5: no previous primary, new primary is someone else -
	     // just record the mapping.
	     if (new_mapping[i].previousPrimary == "" && len(new_mapping[i].currentPrimary) > 0) {
		dst_ip := new_mapping[i].currentPrimary;
		dst_serverid := ip_serverid_map[dst_ip];
		partition_number := new_mapping[i].partitionNumber;	        

		partition_primary_map[partition_number + 1] = dst_serverid;
		     printPartitionMapping();
		
		continue;
	     }

	     // Case 6: primary changed between two other nodes - record it.
	     // if prev != current, then modify my mapping
	     if (len(new_mapping[i].previousPrimary) > 0 && len(new_mapping[i].currentPrimary) > 0 && 
	         new_mapping[i].previousPrimary != new_mapping[i].currentPrimary) {
		     partition_primary_map[new_mapping[i].partitionNumber + 1] = ip_serverid_map[new_mapping[i].currentPrimary];
		     printPartitionMapping();
	     	     continue;
	     }
	     
	     // Fallthrough: an entry (e.g. an all-empty one) matched no case.
	     fmt.Printf("None of the code path\n");
	     printPartitionMapping();
	     
	 }
	 fmt.Printf("After processing all the inputs to me\n");
	 printPartitionMapping();
         partition_primary_mutex.Unlock();
	 server_status_mutex.Unlock();
	 logs_seen_mutex.Unlock();              	      	 
     }
}

