package mr

import (
	"encoding/json"
	"errors"
	"log"
	"net"
	"net/http"
	"net/rpc"
	"os"
	"strconv"
	"sync"
	"time"
)

// taskInfo records one in-flight (handed-out) map or reduce task so the
// coordinator can validate completions and detect expired assignments.
type taskInfo struct {
	seq        int       // sequence number of the hand-out; a TaskDone with a different seq is rejected
	start_time time.Time // when the task was handed to a worker; used for expiry in checkExpireTaskAndJoin
	task_id    int       // map file index or reduce partition number
}

// Coordinator tracks the state of the whole MapReduce job: pending and
// in-flight map tasks, pending and in-flight reduce tasks, and a sequence
// counter used to invalidate completions from superseded workers.
// All mutable fields are guarded by lock.
type Coordinator struct {
	// Your definitions here.
	nReduce                  int              // number of reduce partitions
	origin_filenames         []string         // input files; index doubles as the map task id
	origin_tasks             []int            // pending map task ids (used as a stack)
	origin_doing_tasks       map[int]taskInfo // in-flight map tasks, keyed by task id
	intermidiate_tasks       []int            // pending reduce task ids (used as a stack)
	intermidiate_doing_tasks map[int]taskInfo // in-flight reduce tasks, keyed by task id
	kv_temp                  []TKeyValue      // NOTE(review): never referenced in this file — confirm it is still needed
	worker_num               int              // NOTE(review): never referenced in this file — confirm it is still needed
	map_count                int              // total number of map tasks; reported to reduce workers
	lock                     sync.Mutex       // guards every field above
	// Monotonically increasing sequence number identifying each task hand-out.
	inc_seq int
}

// Your code here -- RPC handlers for the worker to call.

// Example is a sample RPC handler demonstrating the request/reply pattern.
//
// the RPC argument and reply types are defined in rpc.go.
func (c *Coordinator) Example(args *ExampleArgs, reply *ExampleReply) error {
	x := args.X
	reply.Y = x + 1
	return nil
}

// TaskAsk is the RPC handler through which a worker requests work.
// It hands out map tasks until all have completed, then reduce tasks;
// while any task of the current phase is still in flight it replies
// "wait", and once everything is finished it replies "done".
func (c *Coordinator) TaskAsk(args *RPCRequestBody, reply *RPCResponseBody) error {
	c.lock.Lock()
	defer c.lock.Unlock()

	// Every hand-out gets a fresh sequence number so that a stale
	// completion from a superseded worker can be rejected in TaskDone.
	c.inc_seq++
	reply.Seq = c.inc_seq

	// Map phase: drain the pending map-task stack first.
	if len(c.origin_tasks) > 0 {
		taskID := c.origin_tasks[len(c.origin_tasks)-1]
		c.origin_tasks = c.origin_tasks[:len(c.origin_tasks)-1]

		filename := c.origin_filenames[taskID]
		data, err := json.Marshal(filename)
		if err != nil {
			// BUG FIX: the marshal error was previously ignored; on
			// failure, requeue the task so it is not silently lost.
			c.origin_tasks = append(c.origin_tasks, taskID)
			return err
		}

		c.origin_doing_tasks[taskID] = taskInfo{c.inc_seq, time.Now(), taskID}
		reply.Task_type = "map"
		reply.Trans_data = data
		reply.Task_id = taskID
		reply.NReduce = c.nReduce
		return nil
	}

	// Some map tasks are still in flight; reduce cannot start yet.
	if len(c.origin_doing_tasks) > 0 {
		reply.Task_type = "wait"
		return nil
	}

	// Map phase is complete — hand out pending reduce tasks.
	if len(c.intermidiate_tasks) > 0 {
		taskID := c.intermidiate_tasks[len(c.intermidiate_tasks)-1]
		c.intermidiate_tasks = c.intermidiate_tasks[:len(c.intermidiate_tasks)-1]

		c.intermidiate_doing_tasks[taskID] = taskInfo{c.inc_seq, time.Now(), taskID}
		reply.Task_type = "reduce"
		reply.Task_id = taskID
		reply.NReduce = c.nReduce
		reply.Map_count = c.map_count
		return nil
	}

	// Reduce tasks still in flight.
	if len(c.intermidiate_doing_tasks) > 0 {
		reply.Task_type = "wait"
		return nil
	}

	// Nothing pending, nothing in flight: the job is finished.
	reply.Task_type = "done"
	return nil
}

// TaskDone is the RPC handler through which a worker reports task
// completion. The completion is committed only when the reported
// sequence number matches the current hand-out for that task id —
// otherwise the task was reassigned (e.g. after expiry) and the
// stale result is rejected via reply.Commit = false.
func (c *Coordinator) TaskDone(args *RPCRequestBody, reply *RPCResponseBody) error {
	c.lock.Lock()
	defer c.lock.Unlock()
	println("done task:" + strconv.Itoa(args.Seq))

	// Select the in-flight table for the reported task type.
	var doing map[int]taskInfo
	switch args.Task_type {
	case "map":
		doing = c.origin_doing_tasks
	case "reduce":
		doing = c.intermidiate_doing_tasks
	default:
		// BUG FIX: previously os.Exit(-1) killed the whole coordinator
		// on a malformed request; report the error to the caller instead.
		return errors.New("TaskDone: unknown task type " + args.Task_type)
	}

	tinfo := doing[args.Task_id] // zero value (seq 0) if not in flight; never matches a real seq
	if tinfo.seq == args.Seq {
		delete(doing, args.Task_id)
		reply.Commit = true
	} else {
		println("Invalid Seq: " + strconv.Itoa(tinfo.seq) + " X " + strconv.Itoa(args.Seq))
		reply.Commit = false
	}

	return nil
}

// checkExpireTaskAndJoin periodically scans both in-flight task tables
// and requeues any task that has been out longer than expireAfter, so
// work assigned to a crashed or stalled worker is eventually reassigned.
// Runs forever; started as a goroutine by server().
func (c *Coordinator) checkExpireTaskAndJoin() {
	// Go constants use MixedCaps (not ALL_CAPS) and should be const.
	const expireAfter = 24 * time.Second
	const scanInterval = 2 * time.Second

	for {
		now := time.Now()
		c.lock.Lock()
		// Expired map tasks go back on the pending map stack.
		for fileIdx, tinfo := range c.origin_doing_tasks {
			if tinfo.start_time.Add(expireAfter).Before(now) {
				delete(c.origin_doing_tasks, fileIdx)
				c.origin_tasks = append(c.origin_tasks, fileIdx)
				println("task expired:" + strconv.Itoa(fileIdx))
			}
		}
		// Expired reduce tasks go back on the pending reduce stack.
		for taskID, tinfo := range c.intermidiate_doing_tasks {
			if tinfo.start_time.Add(expireAfter).Before(now) {
				delete(c.intermidiate_doing_tasks, taskID)
				c.intermidiate_tasks = append(c.intermidiate_tasks, taskID)
				println("task expired:" + strconv.Itoa(taskID))
			}
		}
		c.lock.Unlock()

		time.Sleep(scanInterval)
	}
}

// server initializes the task queues, registers the coordinator for RPC,
// and starts listening on a unix-domain socket for worker RPCs.
// It also launches the background expiry scanner.
func (c *Coordinator) server() {
	// NOTE(review): task state is (re)initialized here from os.Args rather
	// than from the files argument passed to MakeCoordinator — TODO confirm
	// callers always pass the input files on the command line.
	filenames := os.Args[1:]
	c.origin_filenames = filenames
	c.map_count = len(filenames)
	// One map task per input file; one reduce task per partition.
	c.origin_tasks = make([]int, 0, len(filenames))
	for i := range filenames {
		c.origin_tasks = append(c.origin_tasks, i)
	}
	for i := 0; i < c.nReduce; i++ {
		c.intermidiate_tasks = append(c.intermidiate_tasks, i)
	}

	// BUG FIX: the rpc.Register error was previously ignored.
	if err := rpc.Register(c); err != nil {
		log.Fatal("register error:", err)
	}
	rpc.HandleHTTP()
	//l, e := net.Listen("tcp", ":1234")
	sockname := coordinatorSock()
	os.Remove(sockname) // remove any stale socket from a previous run
	l, e := net.Listen("unix", sockname)
	if e != nil {
		log.Fatal("listen error:", e)
	}
	// log.Fatal terminates, so no else branch is needed.
	println("Listening...")
	go c.checkExpireTaskAndJoin()
	go http.Serve(l, nil)
}

// main/mrcoordinator.go calls Done() periodically to find out
// if the entire job has finished.
//
// Done reports whether every map and reduce task has completed:
// nothing pending and nothing in flight in either phase.
func (c *Coordinator) Done() bool {
	c.lock.Lock()
	defer c.lock.Unlock()
	// Idiom: return the boolean expression directly instead of
	// if cond { return true } / return false.
	return len(c.origin_tasks) == 0 &&
		len(c.origin_doing_tasks) == 0 &&
		len(c.intermidiate_tasks) == 0 &&
		len(c.intermidiate_doing_tasks) == 0
}

// create a Coordinator.
// main/mrcoordinator.go calls this function.
// nReduce is the number of reduce tasks to use.
//
// NOTE(review): the files argument is not used here; server() reads the
// input filenames from os.Args instead — confirm this is intentional.
func MakeCoordinator(files []string, nReduce int) *Coordinator {
	c := &Coordinator{
		nReduce:                  nReduce,
		origin_tasks:             []int{},
		origin_doing_tasks:       map[int]taskInfo{},
		intermidiate_tasks:       []int{},
		intermidiate_doing_tasks: map[int]taskInfo{},
		// map_count and inc_seq start at their zero values.
	}

	c.server()
	return c
}
