package mr

import (
	"io/ioutil"
	"log"
	"path"
	"strconv"
	"sync"
	"time"
)
import "net"
import "os"
import "net/rpc"
import "net/http"

// TaskStatus holds the per-task bookkeeping the coordinator needs to
// decide whether a task may be (re)dispatched to a worker.
type TaskStatus struct {
	// DispatchTime is when the task was last handed to a worker.
	// The sentinel time.UnixMilli(0) (set in Init/SetMapTasks) marks a
	// task that has never been dispatched, making it immediately
	// eligible in getAvailableTask's timeout check.
	DispatchTime time.Time
}

const (
	// MapResultDir is the directory that collects finished map output;
	// it is wiped and recreated in Init, and FinishMapTask renames each
	// worker's output directory into it.
	MapResultDir string = "mout"
)

// Coordinator tracks the set of pending map and reduce tasks and hands
// them out to workers over RPC. All fields are guarded by global_mutex.
type Coordinator struct {
	// Your definitions here.

	// Number of workers registered so far; doubles as the next worker id.
	worker_cnt int

	// Single global lock, for simplicity; every RPC handler takes it.
	global_mutex sync.Mutex

	// All input file names (passed to reducers as Splits).
	files []string

	// Pending map tasks: input file name => dispatch status.
	// Entries are deleted as tasks complete.
	map_tasks map[string]TaskStatus

	// Pending reduce tasks: reduce bucket number (as a string) => dispatch status.
	// Entries are deleted as tasks complete.
	reduce_tasks map[string]TaskStatus
}

// Init prepares the coordinator for a fresh job: it silences logging,
// recreates the intermediate-output directory, and seeds the reduce
// task table with NReduce never-dispatched tasks.
func (c *Coordinator) Init() {
	// Logging is for debugging only; discard it in normal runs.
	log.SetOutput(ioutil.Discard)

	// Clear intermediate files left over from a previous run.
	os.RemoveAll(MapResultDir)
	if err := os.MkdirAll(MapResultDir, os.ModePerm); err != nil {
		// Without this directory FinishMapTask can never collect output,
		// so the job cannot make progress — abort instead of failing later.
		log.Fatalf("cannot create %s: %v", MapResultDir, err)
	}

	c.map_tasks = make(map[string]TaskStatus)
	c.reduce_tasks = make(map[string]TaskStatus, NReduce)
	// A DispatchTime of the Unix epoch marks a task as never dispatched,
	// which makes it immediately eligible in getAvailableTask.
	for i := 0; i < NReduce; i++ {
		c.reduce_tasks[strconv.Itoa(i)] = TaskStatus{DispatchTime: time.UnixMilli(0)}
	}
}

// SetMapTasks registers every input file as a pending, never-dispatched
// map task and remembers the file list for later reduce dispatch.
func (c *Coordinator) SetMapTasks(files []string) {
	never := time.UnixMilli(0) // sentinel: task has not been handed out yet
	for _, f := range files {
		c.map_tasks[f] = TaskStatus{DispatchTime: never}
	}
	c.files = files

	log.Printf("Map task: %d\n", len(c.map_tasks))
}

// Your code here -- RPC handlers for the worker to call.

// RegisterWorker assigns the next free worker id to a newly started
// worker and bumps the registered-worker count.
func (c *Coordinator) RegisterWorker(args *RegisterArgs, reply *RegisterReply) error {
	c.global_mutex.Lock()
	defer c.global_mutex.Unlock()

	reply.Wid = c.worker_cnt
	c.worker_cnt++
	return nil
}

// GetTask hands a task to a worker. Map tasks are drained first; only
// when none remain are reduce tasks dispatched. If every remaining task
// was dispatched within the last 10 seconds the reply stays Empty_T and
// the worker should retry later.
func (c *Coordinator) GetTask(args *TaskArgs, reply *TaskReply) error {
	c.global_mutex.Lock()
	defer c.global_mutex.Unlock()
	log.Println("GET TASK")

	// Default: nothing available right now.
	reply.Task_type = Empty_T

	switch {
	case len(c.map_tasks) > 0:
		// Still in the map phase.
		var key string
		if !getAvailableTask(&c.map_tasks, &key) {
			return nil
		}
		c.map_tasks[key] = TaskStatus{DispatchTime: time.Now()}
		reply.Task_type = Map_T
		reply.Key = key
	case len(c.reduce_tasks) > 0:
		// Map phase done; hand out reduce buckets.
		var key string
		if !getAvailableTask(&c.reduce_tasks, &key) {
			return nil
		}
		c.reduce_tasks[key] = TaskStatus{DispatchTime: time.Now()}
		reply.Task_type = Reduce_T
		reply.Key = key
		reply.Splits = c.files
	}

	return nil
}

// FinishMapTask records a completed map task by moving the worker's
// output directory into MapResultDir and dropping the task from the
// pending set. Completions for unknown keys (duplicates from slow or
// restarted workers) are silently ignored.
func (c *Coordinator) FinishMapTask(args *MapTaskFinishArgs, reply *MapTaskFinishReply) error {
	c.global_mutex.Lock()
	defer c.global_mutex.Unlock()

	// Stale or duplicate completion — the task is no longer pending.
	if _, pending := c.map_tasks[args.Key]; !pending {
		return nil
	}

	log.Printf("Finish map task: %s\n", args.Key)
	basename := path.Base(args.Key)

	// Move the worker's output directory into the collection directory,
	// replacing any partial result left by an earlier attempt.
	dst := path.Join(MapResultDir, basename)
	os.RemoveAll(dst)
	if err := os.Rename(args.Output, dst); err != nil {
		// Keep the task pending so it will be re-dispatched after timeout.
		log.Println(err)
		log.Printf("failed to rename dir from %s -> %s\n", args.Output, dst)
		return nil
	}

	log.Printf("Done with %s\n", basename)
	delete(c.map_tasks, args.Key)
	return nil
}

// FinishReduceTask records a completed reduce task by moving the
// worker's output file to its final mr-out-N name and dropping the task
// from the pending set. Unknown keys (duplicate completions) are ignored.
func (c *Coordinator) FinishReduceTask(args *ReduceTaskFinishArgs, reply *ReduceTaskFinishReply) error {
	c.global_mutex.Lock()
	defer c.global_mutex.Unlock()

	// Stale or duplicate completion — the task is no longer pending.
	if _, pending := c.reduce_tasks[args.Key]; !pending {
		return nil
	}

	log.Printf("Finish reduce: %s\n", args.Key)
	log.Printf("outfile: %s\n", args.Output)

	// Atomically publish the result under its final name, replacing any
	// partial output from an earlier attempt.
	dst := "mr-out-" + args.Key
	os.RemoveAll(dst)
	if err := os.Rename(args.Output, dst); err != nil {
		// Keep the task pending so it will be re-dispatched after timeout.
		log.Println(err)
		log.Printf("failed to rename file from %s -> %s\n", args.Output, dst)
		return nil
	}

	log.Printf("Done with Reduce #%s\n", args.Key)
	delete(c.reduce_tasks, args.Key)
	return nil
}

// getAvailableTask picks any task whose last dispatch was more than 10
// seconds ago (including never-dispatched tasks, whose DispatchTime is
// the Unix epoch) and stores its key in target. It returns false when
// every remaining task is still within its grace period. Map iteration
// order is random, so the choice among eligible tasks is arbitrary.
func getAvailableTask(tasks *map[string]TaskStatus, target *string) bool {
	// A task is eligible iff it was dispatched strictly before this cutoff.
	cutoff := time.Now().Add(-10 * time.Second)

	for key, status := range *tasks {
		if status.DispatchTime.Before(cutoff) {
			*target = key
			return true
		}
	}
	return false
}

// Example is the sample RPC handler from the lab skeleton; it replies
// with the request value plus one. The argument and reply types are
// defined in rpc.go.
func (c *Coordinator) Example(args *ExampleArgs, reply *ExampleReply) error {
	reply.Y = args.X + 1
	return nil
}

//
// server starts a goroutine that listens for RPCs from worker.go over a
// Unix-domain socket and serves them via net/rpc's HTTP transport.
// Exits the process if the socket cannot be opened.
//
func (c *Coordinator) server() {
	rpc.Register(c)
	rpc.HandleHTTP()
	//l, e := net.Listen("tcp", ":1234")
	sockname := coordinatorSock()
	// Remove a stale socket file left by a previous run; listening on an
	// existing path would fail otherwise.
	os.Remove(sockname)
	l, e := net.Listen("unix", sockname)
	if e != nil {
		log.Fatal("listen error:", e)
	}
	go http.Serve(l, nil)
}

//
// Done reports whether the entire job has finished.
// main/mrcoordinator.go calls it periodically to decide when to exit.
// The job is complete once every reduce task has been removed from the
// pending set (which can only happen after all map tasks finished).
//
func (c *Coordinator) Done() bool {
	c.global_mutex.Lock()
	finished := len(c.reduce_tasks) == 0
	c.global_mutex.Unlock()
	return finished
}

//
// MakeCoordinator creates and starts a Coordinator for the given input
// files. main/mrcoordinator.go calls this function. nReduce is the
// number of reduce tasks to use (the current implementation reads the
// package-level NReduce in Init instead of this parameter).
//
func MakeCoordinator(files []string, nReduce int) *Coordinator {
	c := &Coordinator{}

	c.Init()          // reset intermediate state and seed reduce tasks
	c.SetMapTasks(files)

	c.server() // begin serving worker RPCs
	return c
}
