package mr

import (
	"log"
	"net"
	"net/http"
	"net/rpc"
	"os"
	"sync"
	"time"
)

// Coordinator hands out map and reduce tasks to workers over RPC and
// tracks their completion. All fields are guarded by the package-level mu.
type Coordinator struct {
	// Your definitions here.
	MapComplete    int // NOTE(review): never written in this file — possibly vestigial; confirm before removing.
	ReduceComplete int // NOTE(review): never written in this file — possibly vestigial; confirm before removing.
	MapTaskNum     int // total number of map tasks (== len of input files)
	ReduceTaskNum  int // total number of reduce tasks (nReduce)
	MapTasks       []WorkInfo // per-map-task state; Status: 0=idle, 1=in progress, 2=done
	ReduceTasks    []WorkInfo // per-reduce-task state; same Status encoding
}

// mu serializes all access to the Coordinator's task tables, since RPC
// handlers and Done() run on different goroutines.
var mu sync.Mutex

// Your code here -- RPC handlers for the worker to call.

// RequestWork is the RPC handler a worker calls to obtain its next task.
// Assignment policy: hand out idle map tasks first, then reassign map tasks
// whose worker appears dead (in progress for >= 10s). Only once every map
// task is finished are reduce tasks handed out, with the same retry rule.
// Reply encoding via w.Kind: 0 = map task, 1 = reduce task, 2 = wait and
// ask again, -1 = the whole job is done.
//
// Fix vs. the previous version: the retry scan used to give up and tell the
// worker to wait as soon as it saw ANY in-progress task that had not yet
// timed out, even if a later task HAD timed out — delaying crash recovery.
// We now scan every task and only reply "wait" when nothing is assignable.
func (c *Coordinator) RequestWork(n *NullArgs, w *WorkInfo) error {
	mu.Lock()
	defer mu.Unlock()
	w.MapTaskNum = c.MapTaskNum
	w.ReduceTaskNum = c.ReduceTaskNum

	// Map phase: assign an idle task, or reassign a timed-out one.
	mapPending := false
	for id := 0; id < c.MapTaskNum; id++ {
		task := &c.MapTasks[id]
		switch task.Status {
		case 0: // idle: hand it out
			task.Status = 1
			task.t = time.Now()
			w.Filename = task.Filename
			w.Kind = 0
			w.TaskId = id
			return nil
		case 1: // in progress: reassign only if its worker looks dead
			if time.Since(task.t) >= 10*time.Second {
				task.t = time.Now()
				w.Filename = task.Filename
				w.Kind = 0
				w.TaskId = id
				return nil
			}
			mapPending = true
		}
	}
	if mapPending {
		// Some map work is still running; reduces must not start yet.
		w.Kind = 2
		return nil
	}

	// Reduce phase: same policy as above.
	reducePending := false
	for id := 0; id < c.ReduceTaskNum; id++ {
		task := &c.ReduceTasks[id]
		switch task.Status {
		case 0:
			task.Status = 1
			task.t = time.Now()
			w.Kind = 1
			w.TaskId = id
			return nil
		case 1:
			if time.Since(task.t) >= 10*time.Second {
				task.t = time.Now()
				w.Kind = 1
				w.TaskId = id
				return nil
			}
			reducePending = true
		}
	}
	if reducePending {
		w.Kind = 2
		return nil
	}

	// Every map and reduce task is complete.
	w.Kind = -1
	return nil
}

// CompleteTaskNotify is the RPC handler a worker calls after finishing a
// task. It marks the corresponding task entry as done (Status 2). Replies
// carrying any other Kind value are ignored.
func (c *Coordinator) CompleteTaskNotify(info *WorkInfo, n *NullArgs) error {
	mu.Lock()
	defer mu.Unlock()
	switch info.Kind {
	case 0:
		c.MapTasks[info.TaskId].Status = 2
	case 1:
		c.ReduceTasks[info.TaskId].Status = 2
	}
	return nil
}

//
// an example RPC handler.
//
// the RPC argument and reply types are defined in rpc.go.
//
func (c *Coordinator) Example(args *ExampleArgs, reply *ExampleReply) error {
	// Echo the request value plus one, demonstrating the RPC round trip.
	x := args.X
	reply.Y = x + 1
	return nil
}

//
// start a thread that listens for RPCs from worker.go
//
func (c *Coordinator) server() {
	rpc.Register(c)
	rpc.HandleHTTP()
	// Listen on a unix-domain socket; remove any stale socket file first.
	sockname := coordinatorSock()
	os.Remove(sockname)
	listener, err := net.Listen("unix", sockname)
	if err != nil {
		log.Fatal("listen error:", err)
	}
	// Serve RPC requests in the background for the lifetime of the process.
	go http.Serve(listener, nil)
}

//
// main/mrcoordinator.go calls Done() periodically to find out
// if the entire job has finished.
//
// Checking only the reduce tasks is sufficient: RequestWork never hands out
// a reduce task until every map task has completed, so all reduces being
// done implies the whole job is done.
func (c *Coordinator) Done() bool {
	mu.Lock()
	defer mu.Unlock()
	for i := range c.ReduceTasks {
		if c.ReduceTasks[i].Status != 2 {
			return false
		}
	}
	return true
}

//
// create a Coordinator.
// main/mrcoordinator.go calls this function.
// nReduce is the number of reduce tasks to use.
//
// One map task is created per input file and nReduce reduce tasks are
// created, all starting idle (Status 0). The RPC server is started before
// returning so workers can connect immediately.
func MakeCoordinator(files []string, nReduce int) *Coordinator {
	c := Coordinator{}

	c.MapTaskNum = len(files)
	c.ReduceTaskNum = nReduce
	c.MapTasks = make([]WorkInfo, c.MapTaskNum)
	c.ReduceTasks = make([]WorkInfo, c.ReduceTaskNum)

	// Named fields instead of positional literals so the code survives
	// reordering of WorkInfo's fields.
	for id, filename := range files {
		c.MapTasks[id] = WorkInfo{
			Kind:          0,
			TaskId:        id,
			Status:        0,
			ReduceTaskNum: nReduce,
			MapTaskNum:    len(files),
			Filename:      filename,
			t:             time.Now(),
		}
	}

	for i := 0; i < c.ReduceTaskNum; i++ {
		c.ReduceTasks[i] = WorkInfo{
			Kind:          1,
			TaskId:        i,
			Status:        0,
			ReduceTaskNum: nReduce,
			MapTaskNum:    len(files),
			Filename:      "",
			t:             time.Now(),
		}
	}

	c.server()
	return &c
}

// func PrintTasksStatus(w []WorkInfo) {
// 	// fmt.Println("ID", "\t", "Kind", "\t", "TASK ID", " ", "STATUS")
// 	for id, info := range w {
// 		// fmt.Println(id, "\t", info.Kind, "\t", info.TaskId, "\t", info.Status)
// 	}
// }
