package mr

import (
	"fmt"
	"log"
	"net"
	"net/http"
	"net/rpc"
	"os"
	"strings"
	"sync"
	"time"
)

// Master phases: the job runs the whole map phase to completion before
// any reduce task is handed out.
const (
	InMap = iota
	InReduce
)

// Task states: a task is Idle until assigned, InProcess while a worker
// owns it, and Completed once the worker reports success.
const (
	Idle = iota
	InProcess
	Completed
)

// Master coordinates a MapReduce job: it hands map tasks, then reduce
// tasks, to workers over RPC and tracks each task's progress.
type Master struct {
	// Your definitions here.

	// NReduce is the number of reduce buckets.
	NReduce int

	// Process is the phase the master is currently in: InMap or InReduce.
	Process int

	// Task holds the outstanding tasks for the current phase.
	Task []*Task

	// mu guards the fields above, the tasks they point to, and the
	// package-level WorkerID counter.
	mu sync.Mutex

	// done is set once every reduce task has completed.
	done bool
}

// Task describes one unit of work in the current phase (map or reduce).
type Task struct {
	// WorkId is the id of the worker the task is currently assigned to.
	WorkId   int
	// Progress is the task state: Idle, InProcess, or Completed.
	Progress int
	// File is the task input: a single input filename for a map task, or
	// an underscore-joined list of intermediate files for a reduce task.
	File     string
	// DoneFile records the underscore-joined intermediate files a
	// completed map task produced.
	DoneFile string
}

// WorkerID is the next worker id to hand out. Although package-level, it
// is only read/written while Master.mu is held by the RPC handlers, and
// it is reset to 1 when the job switches from the map to the reduce phase.
var WorkerID = 1

//var PROCESS = InMap

// Your code here -- RPC handlers for the worker to call.

//
// an example RPC handler.
//
// the RPC argument and reply types are defined in rpc.go.
//
//func (m *Master) Example(args *ExampleArgs, reply *ExampleReply) error {
//	reply.Y = args.X + 1
//	return nil
//}

// listen waits five seconds after a task is handed to a worker and, if the
// job is still in the same phase and the worker has not reported completion,
// returns the task to the idle pool so another worker can pick it up.
func (m *Master) listen(task *Task, progress int) {
	time.Sleep(5 * time.Second)
	m.mu.Lock()
	stalled := m.Process == progress && task.Progress != Completed
	if stalled {
		task.Progress = Idle
	}
	m.mu.Unlock()
}

// GetMap is the RPC handler a worker calls to request a map task. It
// replies SUCCESS with an input filename and a fresh worker id when an
// idle map task exists, ERROR when all map tasks are currently taken
// (the worker should retry), and NEXT once the map phase is over.
func (m *Master) GetMap(args *MapArgs, reply *MapReply) error {
	m.mu.Lock()
	defer m.mu.Unlock()

	if m.Process != InMap {
		reply.Status = NEXT
		return nil
	}

	for _, task := range m.Task {
		if task.Progress != Idle {
			continue
		}
		reply.NReduce = m.NReduce
		reply.Workid = WorkerID
		reply.Filename = task.File
		reply.Status = SUCCESS
		task.WorkId = WorkerID
		task.Progress = InProcess
		WorkerID++
		// Reclaim the task if the worker goes silent.
		go m.listen(task, InMap)
		return nil
	}

	// Every map task is either running or finished; ask the worker to retry.
	reply.Status = ERROR
	return nil
}

// MapDone is the RPC handler a worker calls after finishing a map task.
// It promotes the task's temporary output files (m-temp-<mapId>-<bucket>)
// to their final names (m-<mapId>-<bucket>) and records them on the task.
// When the last map task completes it rebuilds m.Task as one reduce task
// per bucket, switches the master into the reduce phase, and answers
// NEXT; otherwise it answers SUCCESS.
func (m *Master) MapDone(args *MapDoneArgs, reply *MapDoneReply) error {
	m.mu.Lock()
	defer m.mu.Unlock()

	// A completion report arriving after the phase changed carries
	// nothing left to record.
	if m.Process != InMap {
		reply.Status = NEXT
		return nil
	}

	workid := args.Workid
	task, ok := filter(m.Task, func(tk *Task) bool {
		return tk.WorkId == workid && tk.Progress == InProcess
	})
	reply.Status = SUCCESS
	if !ok {
		// The task timed out and was reassigned; ignore this late report.
		return nil
	}

	task.Progress = Completed

	// The worker joins its output file names as file1_file2_file3...
	task.DoneFile = ""
	files := strings.Split(args.Files, "_")
	for _, f := range files {
		temp := strings.Split(f, "-")
		newName := fmt.Sprintf("m-%s-%s", temp[2], temp[3])
		if err := os.Rename(fmt.Sprintf("m-temp-%s-%s", temp[2], temp[3]), newName); err != nil {
			// Best-effort promotion (the file may already be in place),
			// but a failure is worth surfacing rather than dropping.
			log.Printf("rename map output to %s: %v", newName, err)
		}
		task.DoneFile += "_" + newName
	}
	task.DoneFile = task.DoneFile[1:]

	// If any map task is still outstanding, stay in the map phase.
	for i := 0; i < len(m.Task); i++ {
		if m.Task[i].Progress != Completed {
			return nil
		}
	}

	// All map tasks are done: group intermediate files by reduce bucket,
	// the last component of m-<mapId>-<bucket>.
	buckets := make(map[string][]string)
	for i := 0; i < len(m.Task); i++ {
		files := strings.Split(m.Task[i].DoneFile, "_")
		for j := 0; j < len(files); j++ {
			file := strings.Split(files[j], "-")
			buckets[file[2]] = append(buckets[file[2]], files[j])
		}
	}

	// Replace the map tasks with one idle reduce task per bucket; each
	// bucket's files are underscore-joined for transport to the worker.
	m.Task = make([]*Task, 0, len(buckets))
	for _, bucket := range buckets {
		m.Task = append(m.Task, &Task{
			Progress: Idle,
			File:     strings.Join(bucket, "_"),
		})
	}
	reply.Status = NEXT
	m.Process = InReduce
	WorkerID = 1
	return nil
}

// GetReduce is the RPC handler a worker calls to request a reduce task.
// It replies SUCCESS with the bucket's file list and a fresh reduce id
// when an idle reduce task exists, ERROR when all reduce tasks are
// currently taken, and NEXT when the master is not in the reduce phase.
func (m *Master) GetReduce(args *ReduceArgs, reply *ReduceReply) error {
	m.mu.Lock()
	defer m.mu.Unlock()

	if m.Process != InReduce {
		reply.Status = NEXT
		return nil
	}

	for _, task := range m.Task {
		if task.Progress != Idle {
			continue
		}
		task.WorkId = WorkerID
		reply.ReduceId = WorkerID
		WorkerID++
		task.Progress = InProcess
		reply.Files = task.File
		reply.Status = SUCCESS
		reply.Start = true
		// Reclaim the task if the worker goes silent.
		go m.listen(task, InReduce)
		return nil
	}

	// Every reduce task is either running or finished; ask the worker to retry.
	reply.Status = ERROR
	return nil
}

// ReduceDone is the RPC handler a worker calls after finishing a reduce
// task. It promotes the worker's temporary output file to mr-out-<bucket>
// and marks the task complete. It answers SUCCESS while reduce tasks
// remain, and NEXT (also setting m.done) once the last one finishes.
func (m *Master) ReduceDone(args *ReduceDoneArgs, reply *ReduceDoneReply) error {
	m.mu.Lock()
	defer m.mu.Unlock()

	if m.Process != InReduce {
		reply.Status = ERROR
		return nil
	}

	workid := args.Workid
	task, ok := filter(m.Task, func(tk *Task) bool {
		return tk.WorkId == workid && tk.Progress == InProcess
	})
	if !ok {
		// The task timed out and was reassigned; ignore this late report.
		reply.Status = NEXT
		return nil
	}
	task.Progress = Completed

	// Promote the temporary output; the bucket id is the fourth
	// dash-separated component of the reported file name.
	file := args.File
	temp := strings.Split(file, "-")
	if err := os.Rename(file, fmt.Sprintf("mr-out-%s", temp[3])); err != nil {
		// Best-effort promotion, but a failure is worth surfacing.
		log.Printf("rename reduce output %s: %v", file, err)
	}

	for i := 0; i < len(m.Task); i++ {
		if m.Task[i].Progress != Completed {
			reply.Status = SUCCESS
			return nil
		}
	}
	reply.Status = NEXT
	m.done = true
	return nil
}

//func (m *Master) HeartBeat(args *HeartBeatArgs, reply *HeartBeatReply) error {
//	if _, ok := m.WorkHashMap[args.Workid]; !ok {
//		m.HeartBeatCache[args.Workid] = 10
//	}
//	go func(m *Master) {
//
//		m.HeartBeatCache
//	}(m)
//	return nil
//}

// server starts a thread that listens for RPCs from worker.go over a
// unix-domain socket; it terminates the process if the socket cannot be
// opened.
func (m *Master) server() {
	rpc.Register(m)
	rpc.HandleHTTP()
	//l, e := net.Listen("tcp", ":1234")
	sockname := masterSock()
	os.Remove(sockname)
	listener, err := net.Listen("unix", sockname)
	if err != nil {
		log.Fatal("listen error:", err)
	}
	go http.Serve(listener, nil)
}

// Done reports whether the entire job has finished; main/mrmaster.go
// calls it periodically.
func (m *Master) Done() bool {
	m.mu.Lock()
	finished := m.done
	m.mu.Unlock()
	return finished
}

// MakeMaster creates a Master with one idle map task per input file and
// starts its RPC server. main/mrmaster.go calls this function; nReduce is
// the number of reduce tasks to use.
func MakeMaster(files []string, nReduce int) *Master {
	m := Master{}
	m.NReduce = nReduce
	m.Task = make([]*Task, 0, len(files))
	for _, file := range files {
		// A fresh task is unassigned: WorkId stays 0, which matches no
		// worker since ids are handed out starting at 1 — consistent
		// with how MapDone creates reduce tasks.
		m.Task = append(m.Task, &Task{
			Progress: Idle,
			File:     file,
		})
	}
	m.Process = InMap
	m.done = false
	m.server()
	return &m
}

// filter returns the first task for which pred reports true, along with
// true; it returns (nil, false) when no task matches.
func filter(tasks []*Task, pred func(*Task) bool) (*Task, bool) {
	for _, t := range tasks {
		if pred(t) {
			return t, true
		}
	}
	return nil, false
}

//func Map(datas []map[string]string, fn func(map[string]string) map[string]string) []map[string]string {
//	ans := make([]map[string]string, 0)
//	for i := 0; i < len(datas); i++ {
//		ans = append(ans, fn(datas[i]))
//	}
//	return ans
//}
