package mr

import "log"
import "net"
import "os"
import "sync"
import "time"
import "net/rpc"
import "net/http"

// Task lifecycle states shared by MapTask and ReduceTask.
// A task moves IDLE_TASK -> IN_PROCESSING -> COMPLETED; Monitor moves a
// timed-out IN_PROCESSING task back to IDLE_TASK so it can be reassigned.
const (
	IDLE_TASK = iota // not yet assigned to any worker (or re-queued after a timeout)
	IN_PROCESSING    // handed to a worker; completion report pending
	COMPLETED        // worker reported success; terminal state
)

// MapTask is the coordinator's bookkeeping record for one map task
// (one per input file).
type MapTask struct {
	TaskId int // index into Coordinator.MapTaskRecord; also sent to the worker
	TaskStatus int // IDLE_TASK, IN_PROCESSING, or COMPLETED
	TaskInputFile string // the input file this map task processes
	TaskLaunchTime int64 // unix seconds when last assigned; used by Monitor for timeout detection
}

// ReduceTask is the coordinator's bookkeeping record for one reduce task.
type ReduceTask struct {
	TaskId int // index into Coordinator.ReduceTaskRecord; also sent to the worker
	TaskStatus int // IDLE_TASK, IN_PROCESSING, or COMPLETED
	InputFilePath []string // one intermediate file per map task, filled in as map tasks complete
	ExpectedFileSize []int // sized like InputFilePath; never written in this file — TODO confirm it is still needed
	TaskLaunchTime int64 // unix seconds when last assigned; used by Monitor for timeout detection
}

// Coordinator tracks the progress of every map and reduce task and hands
// tasks to workers over RPC. All mutable fields (the two record slices
// and FinishFlag) must be accessed while holding mutex.
type Coordinator struct {
	// Your definitions here.
	MapTaskRecord []MapTask // one entry per input file, indexed by TaskId
	ReduceTaskRecord []ReduceTask // one entry per reduce partition, indexed by TaskId
	mutex sync.Mutex // guards both record slices and FinishFlag
	MapTaskNum int // len(MapTaskRecord), cached
	ReduceTaskNum int // len(ReduceTaskRecord), cached
	FinishFlag bool // set by Monitor once every reduce task is COMPLETED
}

// Your code here -- RPC handlers for the worker to call.

// Example is a sample RPC handler showing the calling convention: the
// argument and reply types are declared in rpc.go, the reply is filled
// in through the pointer, and nil is returned on success.
func (c *Coordinator) Example(args *ExampleArgs, reply *ExampleReply) error {
	reply.Y = 1 + args.X
	return nil
}

// AssignTask hands the next available task to a calling worker.
//
// Assignment order: idle map tasks first. While any map task is still
// IN_PROCESSING, workers receive TEMP_NO_TASK, because reduce tasks need
// the intermediate output of every map task. Once all map tasks are
// COMPLETED, idle reduce tasks are handed out; if none are idle the
// worker again gets TEMP_NO_TASK. After the whole job finishes the
// worker gets NO_TASK so it can exit.
//
// Fix: the original read c.FinishFlag before acquiring c.mutex, racing
// with Monitor's locked write of that flag; the check now happens under
// the lock, which is released via defer so no return path can leak it.
func (c *Coordinator) AssignTask(reply *ExampleReply, task *AssignedTask) error {
	c.mutex.Lock()
	defer c.mutex.Unlock()

	if c.FinishFlag {
		task.TaskType = NO_TASK
		return nil
	}

	mapStillRunning := false
	for i := 0; i < c.MapTaskNum; i++ {
		switch c.MapTaskRecord[i].TaskStatus {
		case IDLE_TASK:
			task.TaskId = c.MapTaskRecord[i].TaskId
			task.TaskType = MAP_TASK
			// A map task must produce one intermediate file per reduce task.
			task.NumOfOutput = c.ReduceTaskNum
			task.InputFileNames = []string{c.MapTaskRecord[i].TaskInputFile}
			c.MapTaskRecord[i].TaskStatus = IN_PROCESSING
			c.MapTaskRecord[i].TaskLaunchTime = time.Now().Unix()
			return nil
		case IN_PROCESSING:
			mapStillRunning = true
		}
	}
	if mapStillRunning {
		// No idle map task, but the map phase is not finished yet:
		// the worker must wait before any reduce work can start.
		task.TaskType = TEMP_NO_TASK
		return nil
	}

	for i := 0; i < c.ReduceTaskNum; i++ {
		if c.ReduceTaskRecord[i].TaskStatus != IDLE_TASK {
			continue
		}
		task.TaskId = c.ReduceTaskRecord[i].TaskId
		task.TaskType = REDUCE_TASK
		task.NumOfOutput = 0
		// One intermediate file from each map task feeds this reduce task.
		task.InputFileNames = make([]string, c.MapTaskNum)
		copy(task.InputFileNames, c.ReduceTaskRecord[i].InputFilePath)
		c.ReduceTaskRecord[i].TaskStatus = IN_PROCESSING
		c.ReduceTaskRecord[i].TaskLaunchTime = time.Now().Unix()
		return nil
	}

	// Every reduce task is currently in processing; retry later.
	task.TaskType = TEMP_NO_TASK
	reply.Y = 0
	return nil
}

// TaskCompleted records a worker's report that a task has finished.
//
// For a map task, the worker's R intermediate output files are fanned
// out so that file i becomes input #TaskId of reduce task i. Duplicate
// completion reports (possible when a timed-out task was reassigned and
// both workers eventually finish) are ignored: only the first report per
// task mutates state.
//
// Fixes vs. original: the opening Lock line was space-indented (gofmt
// violation in a tab-indented file), the switch carried redundant
// `break` statements (Go switch cases never fall through), and the
// unlock is now deferred so it cannot be skipped.
func (c *Coordinator) TaskCompleted(args *FinishedTask, reply *ExampleReply) error {
	c.mutex.Lock()
	defer c.mutex.Unlock()

	taskID := args.TaskId
	switch args.TaskType {
	case MAP_TASK:
		if c.MapTaskRecord[taskID].TaskStatus != COMPLETED {
			c.MapTaskRecord[taskID].TaskStatus = COMPLETED
			// Distribute this map task's intermediate files to the
			// corresponding slot of every reduce task.
			for i := 0; i < c.ReduceTaskNum; i++ {
				c.ReduceTaskRecord[i].InputFilePath[taskID] = args.OutputFileNames[i]
			}
		}
	case REDUCE_TASK:
		if c.ReduceTaskRecord[taskID].TaskStatus != COMPLETED {
			c.ReduceTaskRecord[taskID].TaskStatus = COMPLETED
		}
	}
	reply.Y = 0
	return nil
}

// Monitor is the coordinator's background failure detector. Roughly
// every 42ms it, under c.mutex:
//  1. checks whether every reduce task is COMPLETED; if so it sets
//     FinishFlag and exits (reduce tasks can only complete after all map
//     tasks, so this implies the whole job is done);
//  2. re-queues (back to IDLE_TASK) any map or reduce task that has been
//     IN_PROCESSING for at least workerTimeout seconds — the worker that
//     held it is presumed dead and another worker may pick it up.
//
// Note: no heartbeat packets are sent; liveness is inferred purely from
// task launch timestamps.
//
// Fix vs. original: `for ; ; {` replaced by the idiomatic `for {`, and
// the magic timeout constant is named.
func (c *Coordinator) Monitor() {
	// Seconds an IN_PROCESSING task may run before being reassigned.
	const workerTimeout = 10
	for {
		now := time.Now().Unix()
		c.mutex.Lock()

		allReduceDone := true
		for i := 0; i < c.ReduceTaskNum; i++ {
			if c.ReduceTaskRecord[i].TaskStatus != COMPLETED {
				allReduceDone = false
				break
			}
		}
		if allReduceDone {
			c.FinishFlag = true
			c.mutex.Unlock()
			return
		}

		for i := 0; i < c.MapTaskNum; i++ {
			if c.MapTaskRecord[i].TaskStatus == IN_PROCESSING && now-c.MapTaskRecord[i].TaskLaunchTime >= workerTimeout {
				c.MapTaskRecord[i].TaskStatus = IDLE_TASK
			}
		}
		for i := 0; i < c.ReduceTaskNum; i++ {
			if c.ReduceTaskRecord[i].TaskStatus == IN_PROCESSING && now-c.ReduceTaskRecord[i].TaskLaunchTime >= workerTimeout {
				c.ReduceTaskRecord[i].TaskStatus = IDLE_TASK
			}
		}
		c.mutex.Unlock()
		time.Sleep(42 * time.Millisecond)
	}
}


// server starts the RPC service for workers: it registers the
// coordinator's exported methods, removes any stale unix-domain socket,
// listens on a fresh one, and serves HTTP-transported RPCs from a
// background goroutine.
func (c *Coordinator) server() {
	rpc.Register(c)
	rpc.HandleHTTP()
	// A TCP alternative would be: net.Listen("tcp", ":1234")
	sockname := coordinatorSock()
	os.Remove(sockname)
	listener, err := net.Listen("unix", sockname)
	if err != nil {
		log.Fatal("listen error:", err)
	}
	go http.Serve(listener, nil)
}

// Done reports whether the entire job has finished.
// main/mrcoordinator.go calls it periodically.
//
// Fix: FinishFlag is written by Monitor while holding c.mutex; the
// original read it here without the lock, a data race (-race would flag
// it). The read now happens under the same mutex.
func (c *Coordinator) Done() bool {
	c.mutex.Lock()
	defer c.mutex.Unlock()
	return c.FinishFlag
}


// MakeCoordinator builds the coordinator for a job with one map task per
// input file and nReduce reduce tasks, then launches the background
// Monitor goroutine and the RPC server before returning.
// main/mrcoordinator.go calls this function.
func MakeCoordinator(files []string, nReduce int) *Coordinator {
	nMap := len(files)
	log.Printf("There are %v map_tasks and %v reduce_tasks", nMap, nReduce)

	c := Coordinator{
		MapTaskNum:       nMap,
		ReduceTaskNum:    nReduce,
		MapTaskRecord:    make([]MapTask, nMap),
		ReduceTaskRecord: make([]ReduceTask, nReduce),
	}
	for i, name := range files {
		c.MapTaskRecord[i] = MapTask{
			TaskId:        i,
			TaskStatus:    IDLE_TASK,
			TaskInputFile: name,
		}
	}
	for i := range c.ReduceTaskRecord {
		c.ReduceTaskRecord[i] = ReduceTask{
			TaskId:           i,
			TaskStatus:       IDLE_TASK,
			InputFilePath:    make([]string, nMap),
			ExpectedFileSize: make([]int, nMap),
		}
	}
	go c.Monitor()
	c.server()
	return &c
}
