package mr

import (
	"fmt"
	"log"
	"net"
	"net/http"
	"net/rpc"
	"os"
	"strconv"
	"strings"
	"sync"
	"time"
)

// Package-level mutex: every worker RPC that touches the coordinator's
// shared state locks it first.
var (
	mu sync.Mutex
)

// Coordinator owns all task state and hands tasks out to workers over RPC.
type Coordinator struct {
	// Your definitions here.
	ReducerNum        int   // number of reduce tasks, from the nReduce argument
	TaskId            int   // monotonically increasing counter used to mint unique task ids
	DiskPhase         Phase // which phase (map / reduce / all-done) the whole framework is in
	TaskChannelReduce chan *Task
	TaskChannelMap    chan *Task     // buffered channels make task hand-out concurrency-safe
	taskMetaHolder    TaskMetaHolder // metadata for every task, keyed by task id
	files             []string       // input files passed in by the caller
}

// TaskMetaHolder keeps the metadata of every task.
type TaskMetaHolder struct {
	MetaMap map[int]*TaskMetaInfo // task id -> metadata, for O(1) lookup
}

// TaskMetaInfo holds the metadata of a single task.
type TaskMetaInfo struct {
	state     State     // current scheduling state of the task
	StartTime time.Time // when the task was handed to a worker; used by crash detection
	TaskAdr   *Task     // pointer to the task so it can still be marked done after being taken off the channel
}

// Your code here -- RPC handlers for the worker to call.

// an example RPC handler.
//
// the RPC argument and reply types are defined in rpc.go.
//func (c *Coordinator) Example(args *ExampleArgs, reply *ExampleReply) error {
//	reply.Y = args.X + 1
//	return nil
//}

// server starts a goroutine that listens for RPCs from worker.go over a
// unix-domain socket.
func (c *Coordinator) server() {
	rpc.Register(c)
	rpc.HandleHTTP()
	//l, e := net.Listen("tcp", ":1234")
	sockname := coordinatorSock()
	// A previous run may have left an identical socket file under /var/tmp;
	// remove it before listening.
	os.Remove(sockname)
	l, e := net.Listen("unix", sockname)
	if e != nil {
		log.Fatal("listen error:", e)
	}
	go http.Serve(l, nil)
}

// Done reports whether the whole job has finished.
// main/mrcoordinator.go calls Done() periodically and exits once it
// returns true.
func (c *Coordinator) Done() bool {
	mu.Lock()
	defer mu.Unlock()
	finished := c.DiskPhase == AllDone
	if finished {
		fmt.Println("All tasks are finished,the coordinator will be exit!!")
	}
	return finished
}

// MakeCoordinator creates and starts a Coordinator.
// main/mrcoordinator.go calls this function; nReduce is the number of
// reduce tasks to use.
func MakeCoordinator(files []string, nReduce int) *Coordinator {
	c := &Coordinator{
		files:             files,
		ReducerNum:        nReduce,
		DiskPhase:         MapPhase,
		TaskChannelMap:    make(chan *Task, len(files)),
		TaskChannelReduce: make(chan *Task, nReduce),
		taskMetaHolder: TaskMetaHolder{
			// total task count is len(files) map tasks + nReduce reduce tasks
			MetaMap: make(map[int]*TaskMetaInfo, len(files)+nReduce),
		},
	}

	// Queue the map tasks, start serving RPCs, then start crash detection.
	c.makeMapTasks(files)
	c.server()
	go c.CrashHandler()
	return c
}

// CrashHandler runs as a background goroutine. Every 2 seconds it scans all
// task metadata and re-queues any task that has been Working for 10 seconds
// or more, assuming its worker crashed. It exits once the job is AllDone.
func (c *Coordinator) CrashHandler() {
	for {
		time.Sleep(time.Second * 2)
		start := time.Now()
		mu.Lock()
		// Deliberately NOT `defer mu.Unlock()`: defer only runs when the
		// function returns, so inside this endless loop it would hold the
		// mutex forever and block every other goroutine.
		if c.DiskPhase == AllDone {
			mu.Unlock()
			break
		}
		for _, v := range c.taskMetaHolder.MetaMap {
			if v.state == Working && time.Since(v.StartTime) >= 10*time.Second {
				fmt.Printf("the task %d  is crash,take %d s\n", v.TaskAdr.TaskId, time.Since(v.StartTime)/1e9)
				// Put the task back on its phase's queue and mark it Waiting
				// so PollTask can hand it to another worker.
				switch v.TaskAdr.TaskType {
				case MapTask:
					c.TaskChannelMap <- v.TaskAdr
					v.state = Waiting
				case ReduceTask:
					c.TaskChannelReduce <- v.TaskAdr
					v.state = Waiting
				}
			}
		}
		mu.Unlock()
		duration := time.Since(start)
		fmt.Printf("一次探测协程的执行时间为：%v\n",duration)
		// fmt.Printf("当前携程的uid为:%d\n",os.Getuid())
	}
}

// makeMapTasks creates one map task per input file, records its metadata,
// and queues it on TaskChannelMap. It runs once from MakeCoordinator before
// the RPC server starts, so no locking is needed here.
func (c *Coordinator) makeMapTasks(files []string) {
	for _, v := range files {
		id := c.generateTaskId()
		task := Task{
			TaskType:   MapTask,
			TaskId:     id,
			ReducerNum: c.ReducerNum,
			FileSlice:  []string{v},
		}
		// Renamed from `TaskMetaInfo`, which shadowed the type of the same name.
		meta := TaskMetaInfo{
			state:   Waiting, // task is waiting to be executed
			TaskAdr: &task,   // keep the task's address so it can be marked done later
		}
		c.taskMetaHolder.acceptMeta(&meta)
		fmt.Println("make a map task :", task)
		c.TaskChannelMap <- &task
	}
}

// generateTaskId mints a unique task id by post-incrementing the
// coordinator's counter. Callers either run before the RPC server starts or
// hold mu (via PollTask -> toNextPhase -> makeReduceTasks).
func (c *Coordinator) generateTaskId() int {
	id := c.TaskId
	c.TaskId = id + 1
	return id
}

// PollTask is the RPC handler workers call to request work. Depending on the
// current phase it hands out a map or reduce task. When the phase's queue is
// empty it tells the worker to wait, and — once every task of the phase is
// done — advances the coordinator to the next phase.
func (c *Coordinator) PollTask(args *TaskArgs, reply *Task) error {
	// Lock so concurrent workers cannot race for the same task; defer unlocks.
	mu.Lock()
	defer mu.Unlock()
	// Dispatch on the current phase.
	switch c.DiskPhase {
	case MapPhase:
		{
			if len(c.TaskChannelMap) > 0 {
				*reply = *<-c.TaskChannelMap
				if !c.taskMetaHolder.judgeState(reply.TaskId) {
					fmt.Printf("taskid[%d] is running\n", reply.TaskId)
				}
			} else {
				// All map tasks are handed out but not all finished yet:
				// tell the worker to wait, and check whether the phase is done.
				reply.TaskType = WaittingTask
				if c.taskMetaHolder.checkTaskDone() {
					c.toNextPhase()
				}
				return nil
			}
		}
	case ReducePhase:
		{
			if len(c.TaskChannelReduce) > 0 {
				*reply = *<-c.TaskChannelReduce
				if !c.taskMetaHolder.judgeState(reply.TaskId) {
					fmt.Printf("Reduce-taskid[%d] is running\n", reply.TaskId)
				}
			} else {
				// All reduce tasks are handed out but not all finished yet:
				// tell the worker to wait, and check whether the phase is done.
				reply.TaskType = WaittingTask
				if c.taskMetaHolder.checkTaskDone() {
					c.toNextPhase()
				}
				return nil
			}
		}
	case AllDone:
		{
			reply.TaskType = ExitTask
		}
	default:
		panic("The TaskType undefined!!!")
	}
	return nil
}

// toNextPhase advances the coordinator from the map phase to the reduce
// phase (creating the reduce tasks on the way), and from the reduce phase to
// AllDone. Caller holds mu.
func (c *Coordinator) toNextPhase() {
	switch c.DiskPhase {
	case MapPhase:
		c.makeReduceTasks()
		c.DiskPhase = ReducePhase
	case ReducePhase:
		c.DiskPhase = AllDone
	}
}

// MarkFinished is the RPC handler workers call to mark a map or reduce task
// as Done. The two original cases were byte-for-byte duplicates differing
// only in the "Map"/"Reduce" label, so they are merged; log output is
// unchanged. A task not in the Working state (already done, or re-queued by
// the crash handler) is only logged, not re-marked.
func (c *Coordinator) MarkFinished(args *Task, reply *Task) error {
	mu.Lock()
	defer mu.Unlock()

	var label string
	switch args.TaskType {
	case MapTask:
		label = "Map"
	case ReduceTask:
		label = "Reduce"
	default:
		panic("The task type undefined !!!")
	}

	meta, ok := c.taskMetaHolder.MetaMap[args.TaskId]
	if ok && meta.state == Working {
		meta.state = Done
		fmt.Printf("%s task Id[%d] is finished.\n", label, args.TaskId)
	} else {
		fmt.Printf("%s task Id[%d] is finished,already ! ! !\n", label, args.TaskId)
	}
	return nil
}

// makeReduceTasks creates the reduce tasks once the map phase is finished,
// pointing each at the intermediate files for its hash bucket. Called from
// toNextPhase with mu held.
func (c *Coordinator) makeReduceTasks() {
	for i := 0; i < c.ReducerNum; i++ {
		id := c.generateTaskId()
		task := Task{
			TaskId:    id,
			TaskType:  ReduceTask,
			FileSlice: selectReduceName(i),
		}
		// Renamed from `TaskMetaInfo`, which shadowed the type of the same name.
		meta := TaskMetaInfo{
			state:   Waiting, // task is waiting to be executed
			TaskAdr: &task,   // keep the task's address so it can be marked done later
		}
		c.taskMetaHolder.acceptMeta(&meta)
		c.TaskChannelReduce <- &task
	}
}

// 从运行时的目录找中间文件，选择相应的哈希槽
func selectReduceName(reduceNum int) []string {
	var s []string
	path, _ := os.Getwd()
	files, _ := os.ReadDir(path)
	for _, file := range files {
		if strings.HasPrefix(file.Name(), "mr-tmp") && strings.HasSuffix(file.Name(), strconv.Itoa(reduceNum)) {
			s = append(s, file.Name())
		}
	}
	return s
}

// checkTaskDone reports whether every task of the current phase is finished:
// true when all map tasks are done and no reduce tasks exist yet (map phase
// complete), or when all reduce tasks are done (reduce phase complete).
// Callers hold mu.
//
// Fix: the progress Printf lacked a trailing newline, garbling the log.
func (t *TaskMetaHolder) checkTaskDone() bool {
	var (
		mapDoneNum      = 0
		mapUnDoneNum    = 0
		reduceDoneNum   = 0
		reduceUnDoneNum = 0
	)
	// Tally done/undone counts per task type.
	for _, v := range t.MetaMap {
		switch v.TaskAdr.TaskType {
		case MapTask:
			if v.state == Done {
				mapDoneNum++
			} else {
				mapUnDoneNum++
			}
		case ReduceTask:
			if v.state == Done {
				reduceDoneNum++
			} else {
				reduceUnDoneNum++
			}
		}
	}
	fmt.Printf("mapDoneNum =%d,mapUnDoneNum =%d,"+
		"reduceDoneNum =%d,reduceUnDoneNum =%d\n", mapDoneNum, mapUnDoneNum,
		reduceDoneNum, reduceUnDoneNum)
	// Map phase complete: every map task done and no reduce tasks created yet.
	if mapDoneNum > 0 && mapUnDoneNum == 0 && reduceDoneNum == 0 && reduceUnDoneNum == 0 {
		return true
	}
	// Reduce phase complete: every reduce task done.
	return reduceDoneNum > 0 && reduceUnDoneNum == 0
}

// judgeState checks whether the task identified by taskId is still Waiting
// to be scheduled. If so, it transitions the task to Working, stamps its
// start time (used by the crash detector), and returns true; otherwise it
// returns false. Callers hold mu.
func (t *TaskMetaHolder) judgeState(taskId int) bool {
	if info, ok := t.MetaMap[taskId]; ok && info.state == Waiting {
		info.state = Working
		info.StartTime = time.Now()
		return true
	}
	return false
}

// acceptMeta stores info in the holder, keyed by its task id. If a task with
// the same id already exists, nothing is stored and false is returned.
// Cleanups: use the comma-ok map idiom instead of `meta, _ :=` plus a nil
// check, return early, and rename the exported-style local `TaskInfo`.
func (t *TaskMetaHolder) acceptMeta(info *TaskMetaInfo) bool {
	taskId := info.TaskAdr.TaskId
	if _, ok := t.MetaMap[taskId]; ok {
		fmt.Println("meta contains task which id is ", taskId)
		return false
	}
	t.MetaMap[taskId] = info
	return true
}
