package mr

import (
	"6.824/raft"
	"log"
	"net"
	"net/http"
	"net/rpc"
	"os"
	"sync"
	"sync/atomic"
	"time"
)

const (
	// Task lifecycle states, stored in TaskStat.Status and driven by
	// taskSchedule / registerTask / ReportTask.
	TaskStatusReady   = 0 // created, not yet placed on taskCh
	TaskStatusQueue   = 1 // waiting in taskCh for a worker
	TaskStatusRunning = 2 // handed to a worker via GetOneTask
	TaskStatusFinish  = 3 // worker reported success
	TaskStatusErr     = 4 // worker reported failure; will be re-queued
	// MaxTaskRunTime is how long a running task may go unreported before
	// its worker is presumed dead and the task is re-queued.
	MaxTaskRunTime    = time.Second * 10
	// ScheduleInterval is the polling period of the scheduler loops.
	ScheduleInterval  = time.Millisecond * 2000
)

// Coordinator hands map and reduce tasks to workers over RPC and tracks
// their progress through the two phases of the job.
type Coordinator struct {
	// Your definitions here.
	files      []string   // input files; one map task per file
	nReduce    int        // number of reduce tasks
	taskPhase  TaskPhase  // current phase: MapPhase or ReducePhase
	taskStats  []TaskStat // per-task state for the current phase
	mu         sync.Mutex // guards taskStats, taskPhase, done, workerSeq
	done       bool       // true once the reduce phase has completed
	workerSeq  int        // last worker id handed out by RegisterWorker
	taskCh     chan Task  // runnable tasks awaiting a worker
	finishTask int32      // finished-task count; updated with atomics (also touched under mu)
	taskNum    int        // total tasks in the current phase
}

// Your code here -- RPC handlers for the worker to call.

// GetOneTask is the RPC handler workers call to obtain a runnable task.
// It pulls tasks from taskCh until one is alive and can be registered to
// the calling worker under the current phase; reply.Task is set to the
// last task pulled either way.
func (c *Coordinator) GetOneTask(args *TaskArgs, reply *TaskReply) error {
	log.Println("get task exe")
	// Bound the retries by the channel capacity, which is
	// max(nReduce, len(files)) — the original bound of c.nReduce could
	// give up too early during the map phase when there are more input
	// files than reduce slots.
	for i := 0; i < cap(c.taskCh); i++ {
		task := <-c.taskCh
		raft.DPrintf("%d get from ch %+v", c.taskPhase, task)
		reply.Task = &task
		if task.Alive {
			// registerTask refuses tasks left over from a stale phase.
			if c.registerTask(args, &task) {
				raft.DPrintf("%d get task success", args.WorkId)
				return nil
			}
		}
	}
	raft.DPrintf("%d get task error", args.WorkId)
	return nil
}

//
// an example RPC handler.
//
// the RPC argument and reply types are defined in rpc.go.
//
// Example is a sample RPC handler showing the argument/reply pattern.
// The types ExampleArgs and ExampleReply are declared in rpc.go.
func (c *Coordinator) Example(args *ExampleArgs, reply *ExampleReply) error {
	result := args.X
	result++
	reply.Y = result
	return nil
}

//
// start a thread that listens for RPCs from worker.go
//
// server starts a goroutine that listens for worker RPCs on a
// unix-domain socket.
func (c *Coordinator) server() {
	// The original ignored this error; a failed registration would leave
	// every subsequent RPC unroutable, so fail fast instead.
	if err := rpc.Register(c); err != nil {
		log.Fatal("rpc register error:", err)
	}
	rpc.HandleHTTP()
	//l, e := net.Listen("tcp", ":1234")
	sockname := coordinatorSock()
	// Best-effort removal of a stale socket from a previous run.
	os.Remove(sockname)
	l, e := net.Listen("unix", sockname)
	if e != nil {
		log.Fatal("listen error:", e)
	}
	go http.Serve(l, nil)
}

//
// main/mrcoordinator.go calls Done() periodically to find out
// if the entire job has finished.
//
// Done reports whether the entire job has finished.
// main/mrcoordinator.go polls it periodically.
func (c *Coordinator) Done() bool {
	// Your code here.
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.done
}

//
// create a Coordinator.
// main/mrcoordinator.go calls this function.
// nReduce is the number of reduce tasks to use.
//
// MakeCoordinator creates a Coordinator, starts its schedulers and RPC
// server, and returns it. main/mrcoordinator.go calls this function.
// nReduce is the number of reduce tasks to use.
func MakeCoordinator(files []string, nReduce int) *Coordinator {
	// Your code here.
	// Note: sync.Mutex's zero value is ready to use, so the explicit
	// `c.mu = sync.Mutex{}` the original performed was a no-op.
	c := Coordinator{
		files:   files,
		nReduce: nReduce,
	}
	// Buffer taskCh for whichever phase has more tasks so the per-task
	// schedulers can enqueue without blocking.
	chanSize := len(files)
	if nReduce > chanSize {
		chanSize = nReduce
	}
	c.taskCh = make(chan Task, chanSize)
	c.initMapTask()
	go c.tickSchedule()
	c.server()
	raft.DPrintf("Coordinator init success nReduce %d files %d\n", nReduce, len(files))
	return &c
}

// registerTask records that the given task is now running on the worker
// identified in args. It returns false when the task belongs to a phase
// the coordinator has already moved past, so the caller should discard
// it and fetch another.
func (c *Coordinator) registerTask(args *TaskArgs, task *Task) bool {
	raft.DPrintf("registerTask exe %+v \n", args)
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.taskPhase != task.Phase {
		// Stale task left over from the previous phase.
		return false
	}
	stat := &c.taskStats[task.Seq]
	stat.Status = TaskStatusRunning
	stat.WorkId = args.WorkId
	stat.StartTime = time.Now()
	raft.DPrintf("registerTask success\n")
	return true
}

// ReportTask is the RPC handler workers call to report the outcome of a
// task. A successful first report marks the task finished and bumps the
// finished-task counter; a failure marks it for re-queueing.
func (c *Coordinator) ReportTask(args *ReportTaskArgs, reply *ReportTaskReply) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	// The original read taskStats[args.Seq].Status for this log line
	// BEFORE acquiring the mutex — a data race with the scheduler
	// goroutines; the read now happens under the lock.
	raft.DPrintf("ReportTask exe %d ---- : %+v", c.taskStats[args.Seq].Status, args)
	if args.Done {
		// Only the first successful report counts; duplicate reports from
		// slow or retried workers must not double-increment finishTask.
		if !c.taskStats[args.Seq].complete {
			c.taskStats[args.Seq].complete = true
			c.taskStats[args.Seq].Status = TaskStatusFinish
			atomic.AddInt32(&c.finishTask, 1)
		}
	} else {
		// Failure: taskSchedule will notice and re-queue the task.
		c.taskStats[args.Seq].Status = TaskStatusErr
	}
	return nil
}

// tickSchedule drives phase transitions: it periodically checks whether
// every task in the current phase has finished, switches from the map
// phase to the reduce phase when appropriate, and marks the whole job
// done after the reduce phase completes.
func (c *Coordinator) tickSchedule() {
	for !c.Done() {
		c.mu.Lock()
		// finishTask is incremented atomically in ReportTask; loading it
		// atomically here (while also holding mu) keeps the race detector
		// happy on both access paths.
		if atomic.LoadInt32(&c.finishTask) == int32(c.taskNum) {
			if c.taskPhase == MapPhase {
				// Must unlock before initReduceTask, which re-acquires mu.
				c.mu.Unlock()
				c.initReduceTask()
				// Skip the sleep so reduce scheduling starts immediately.
				continue
			} else {
				c.done = true
			}
		}
		c.mu.Unlock()
		time.Sleep(ScheduleInterval)
	}
}

// taskSchedule supervises one task slot for the lifetime of a phase:
// it enqueues the task when ready, re-enqueues it after a worker error
// or a timeout (worker presumed dead), and exits once the task finishes
// or the whole job is done.
func (c *Coordinator) taskSchedule(taskSeq int) {
	for {
		if c.Done() {
			return
		}
		c.mu.Lock()
		switch c.taskStats[taskSeq].Status {
		case TaskStatusReady:
			// First dispatch: hand the task to GetOneTask via the channel.
			// NOTE(review): this send happens while holding mu; it relies on
			// taskCh's buffer never filling. Repeated timeout/error
			// re-queues could in principle exceed the capacity and deadlock
			// here — verify the re-queue paths cannot stack up.
			c.taskCh <- c.getTask(taskSeq)
			c.taskStats[taskSeq].Status = TaskStatusQueue
			raft.DPrintf("%d put 2 ch ready  %+v", c.taskPhase, c.getTask(taskSeq))
		case TaskStatusQueue:
			// Already waiting in the channel; nothing to do this tick.
		case TaskStatusRunning:
			// Re-issue the task if its worker has exceeded the deadline.
			if time.Since(c.taskStats[taskSeq].StartTime) > MaxTaskRunTime {
				if !c.taskStats[taskSeq].complete {
					c.taskStats[taskSeq].Status = TaskStatusQueue
					c.taskCh <- c.getTask(taskSeq)
					raft.DPrintf("%d put 2 ch timeout %+v", c.taskPhase, c.getTask(taskSeq))
				} else {
					// Completed just before the timeout check; stop supervising.
					c.mu.Unlock()
					return
				}
			}
		case TaskStatusFinish:
			// Task done; this supervisor goroutine is no longer needed.
			c.mu.Unlock()
			return
		case TaskStatusErr:
			// Worker reported failure: put the task back in the queue.
			c.taskStats[taskSeq].Status = TaskStatusQueue
			c.taskCh <- c.getTask(taskSeq)
			raft.DPrintf("%d put 2 ch error %+v", c.taskPhase, c.getTask(taskSeq))
		}
		c.mu.Unlock()
		time.Sleep(ScheduleInterval)
	}
}

// getTask builds the Task descriptor for slot taskSeq in the current
// phase. Map tasks additionally carry the name of their input file.
func (c *Coordinator) getTask(taskSeq int) Task {
	t := Task{
		NReduce: c.nReduce,
		NMaps:   len(c.files),
		Seq:     taskSeq,
		Phase:   c.taskPhase,
		Alive:   true,
	}
	// FileName stays "" (its zero value) for reduce tasks.
	if t.Phase == MapPhase {
		t.FileName = c.files[taskSeq]
	}
	return t
}

// initMapTask switches the coordinator into the map phase: one task per
// input file, each supervised by its own taskSchedule goroutine.
func (c *Coordinator) initMapTask() {
	c.mu.Lock()
	defer c.mu.Unlock()
	raft.DPrintf("Init Map Task\n")
	c.taskPhase = MapPhase
	n := len(c.files)
	c.taskStats = make([]TaskStat, n)
	c.taskNum = n
	c.finishTask = 0
	for seq := 0; seq < n; seq++ {
		go c.taskSchedule(seq)
	}
}

// initReduceTask switches the coordinator into the reduce phase: nReduce
// tasks, each supervised by its own taskSchedule goroutine.
func (c *Coordinator) initReduceTask() {
	c.mu.Lock()
	defer c.mu.Unlock()
	raft.DPrintf("Init Reduce Task")
	c.taskPhase = ReducePhase
	n := c.nReduce
	c.taskStats = make([]TaskStat, n)
	c.taskNum = n
	c.finishTask = 0
	for seq := 0; seq < n; seq++ {
		go c.taskSchedule(seq)
	}
}

// RegisterWorker is the RPC handler that hands out a fresh, unique
// worker id to each connecting worker.
func (c *Coordinator) RegisterWorker(args *RegisterArgs, reply *RegisterReply) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	next := c.workerSeq + 1
	c.workerSeq = next
	reply.WorkerId = next
	return nil
}
