package mr

import (
	"fmt"
	"log"
	"strconv"
	"sync"
	"time"
)
import "net"
import "os"
import "net/rpc"
import "net/http"

// Master coordinates a MapReduce job: it tracks the input, intermediate and
// result files, the registered workers, and per-task assignment/progress
// state. All mutable fields are guarded by Mux.
type Master struct {
	// Your definitions here.
	FileAddressList           []string        // locations of the input files to process
	IntermediaFileAddressSet  map[string]bool // set of intermediate file locations produced by map tasks
	ResultFileAddressSet      map[string]bool // set of result file locations produced by reduce tasks
	WorkerAddress             []string        // worker addresses; unused on a single machine
	MapTaskProgressState      map[string]bool // map task name -> finished? (false = still in progress)
	ReduceTaskProgressState   map[string]bool // reduce task name -> finished? (false = still in progress)
	FileCount                 int             // number of input files
	WorkerState               map[int]bool    // worker no -> alive?
	WorkerCount               int             // number of currently registered workers
	WorkerNoNameMap           map[int]string    // map : worker No -> worker socket name
	WorkerNameNoMap           map[string]int    // map : worker socket name -> worker No
	MapTaskAssignmentState    map[string]int    // map task name -> worker no it is assigned to
	ReduceTaskAssignmentState map[string]int    // reduce task name -> worker no it is assigned to
	WorkerTaskName            map[string]string // worker sock name -> its current task name ("" = idle)
	state                     int               // 0:map state 1:reduce state 2:finish all
	MapTaskCount              int               // count of map tasks handed out
	MapTaskFinish             int               // count of map tasks finished
	ReduceTaskFinish          int               // count of reduce tasks finished
	ReduceTaskCount           int               // count of reduce tasks handed out
	SleepTime                 int // number of wait cycles with no worker; after a fixed count of checks with no responding worker, the master exits
	l                         net.Listener // the RPC listener
	Mux                       sync.Mutex   // guards all mutable state above
}

// Your code here -- RPC handlers for the worker to call.

// LogPrintln writes logContent to stderr, prefixed with the current Unix
// timestamp in seconds.
func LogPrintln(logContent string) {
	currentTime := strconv.Itoa(int(time.Now().Unix()))
	// Bug fix: the previous Fprintf treated logContent as a format string,
	// so any "%" inside a task/file name would garble the log line.
	fmt.Fprintln(os.Stderr, currentTime+" "+logContent)
}

//
// an example RPC handler.
//
// the RPC argument and reply types are defined in rpc.go.
//
// Example is a sample RPC handler: it replies with args.X incremented by one.
// The RPC argument and reply types are defined in rpc.go.
func (m *Master) Example(args *ExampleArgs, reply *ExampleReply) error {
	reply.Y = 1 + args.X
	return nil
}

// CrashFix reclaims the task held by a crashed (flag==1) or stalled (flag==4)
// worker so it can be reassigned, and cleans up the master's bookkeeping.
//
// flag:      1 = worker crashed (ping failed), 4 = worker too slow (delayed)
// taskStage: 0 = map stage, 1 = reduce stage
// taskName:  the task the worker held when the problem was detected ("" = none)
func (m *Master) CrashFix(flag int, taskStage int, workerName string, taskName string) error {
	m.Mux.Lock()
	defer m.Mux.Unlock()

	// Bug fix: look up the worker number under the lock — WorkerNameNoMap is
	// mutated concurrently by other handlers, so the previous unlocked read
	// was a data race.
	workerNo := m.WorkerNameNoMap[workerName]

	LogPrintln("Start fix crash worker: " + workerName + " its task is :" + taskName)

	if flag == 1 {
		// The worker crashed outright:
		// 1. drop every piece of master state that refers to it, then
		// 2. release its unfinished task (below) so it can be reassigned.
		m.WorkerCount--
		delete(m.WorkerState, workerNo)
		delete(m.WorkerNameNoMap, workerName)
		delete(m.WorkerNoNameMap, workerNo)
		delete(m.WorkerTaskName, workerName)
	}

	// Release the worker's unfinished task so TaskDistrubution hands it out again.
	switch taskStage {
	case 0: // map stage
		if taskName != "" && m.MapTaskProgressState[taskName] == false { // task not finished
			delete(m.MapTaskAssignmentState, taskName)
			delete(m.MapTaskProgressState, taskName)
		}
	case 1: // reduce stage
		if taskName != "" && m.ReduceTaskProgressState[taskName] == false { // task not finished
			delete(m.ReduceTaskAssignmentState, taskName)
			delete(m.ReduceTaskProgressState, taskName)
		}
	}

	// A delayed (but alive) worker is told to abandon its current task so it
	// does not later report a result for work that has been reassigned.
	if flag == 4 && taskName != "" {
		CallShutdown(workerName)
	}
	return nil
}

// Monitor runs in its own goroutine, pinging one worker every 10 seconds, and
// escalates to CrashFix when the worker crashes or stalls on the same task
// across two consecutive pings.
//
// flag states:
//   1: ping failed -> worker crashed; start CrashFix and exit the monitor
//   2: ping ok, worker currently has no task
//   3: ping ok, worker is running a task (first observation)
//   4: ping ok, the same task is still unfinished one cycle later -> worker
//      is too slow; start CrashFix but keep monitoring
func (m *Master) Monitor(workerName string) {
	flag := 0
	LogPrintln("Monitor for " + workerName + " online!")
	for {
		reply, ok := CallPing(workerName)
		LogPrintln(workerName + " PING OK :" + strconv.FormatBool(ok))
		if ok == true {
			if reply.WorkerCurrentTask == "" {
				flag = 2
				LogPrintln(workerName + " has no task")
			} else {
				if flag == 2 || flag == 0 {
					flag = 3
					LogPrintln(workerName + " is finishing " + reply.WorkerCurrentTask)
				} else if flag == 3 {
					// The task seen on the previous ping is still running:
					// the worker is considered too slow.
					LogPrintln("Monitor think " + workerName + " sleep!")
					flag = 4
				}
			}
		} else {
			flag = 1
			LogPrintln("Monitor think " + workerName + " crash!")
		}

		if flag == 1 || flag == 4 {
			// NOTE(review): if the worker crashed, the reply is empty and the
			// recorded task name is what CrashFix must rely on.
			// Bug fix: snapshot the task name under the lock together with the
			// stage — WorkerTaskName is mutated concurrently by other
			// handlers, so the previous unlocked read was a data race.
			m.Mux.Lock()
			state := m.state
			taskName := m.WorkerTaskName[workerName]
			m.Mux.Unlock()
			go m.CrashFix(flag, state, workerName, taskName)
			if flag == 1 {
				break
			}
		}

		time.Sleep(10 * time.Second) // wait 10s between pings
	}
	// end the monitor
	LogPrintln("Monitor for " + workerName + " exit!")
}

// Regist registers a new worker: it assigns the worker a uuid (starting from
// 0), records the name<->number mappings, and starts a dedicated monitor
// goroutine for it.
func (m *Master) Regist(args *RegistArgs, reply *RegistReply) error {
	//TODO: workerAddress
	m.Mux.Lock()
	defer m.Mux.Unlock()

	m.WorkerState[m.WorkerCount] = true
	reply.WorkerUuId = m.WorkerCount
	m.WorkerNoNameMap[reply.WorkerUuId] = args.WorkerName
	// Bug fix: the reverse mapping was never populated anywhere, so CrashFix
	// could not resolve a worker's socket name back to its number.
	m.WorkerNameNoMap[args.WorkerName] = reply.WorkerUuId
	m.WorkerTaskName[args.WorkerName] = ""

	LogPrintln("A new Worker registed ! and it got uuid :" + strconv.Itoa(reply.WorkerUuId))
	m.WorkerCount = m.WorkerCount + 1
	// Start a monitor goroutine dedicated to this worker.
	go m.Monitor(args.WorkerName)

	return nil
}

//return a file as a map file
func (m *Master) TaskDistrubution(args *TaskArgs, reply *TaskReply) error {
	m.Mux.Lock()
	defer m.Mux.Unlock()

	(reply.Flag) = 0

	if m.state == 0 {
		for _, file := range m.FileAddressList {
			if _, ok := m.MapTaskAssignmentState[file]; ok == false {
				//LogPrintln("Worker " + strconv.Itoa(args.WorkerId) + " receive a map task and the file is " + file)
				m.MapTaskAssignmentState[file] = args.WorkerId
				m.MapTaskProgressState[file] = false

				m.WorkerTaskName[args.WorkerName] = file

				reply.WorkType = m.state
				reply.FileAddress = file
				reply.TaskNumber = m.MapTaskCount
				(reply.Flag) = 1

				m.MapTaskCount = m.MapTaskCount + 1
				break
			}
		}

		return nil
	} else if m.state == 1 {
		for inteFile, _ := range m.IntermediaFileAddressSet {
			if _, ok := m.ReduceTaskAssignmentState[inteFile]; ok == false {
				//LogPrintln("Worker " + strconv.Itoa(args.WorkerId) + " receive a reduce task and the file is " + inteFile)
				m.ReduceTaskAssignmentState[inteFile] = args.WorkerId
				m.ReduceTaskProgressState[inteFile] = false

				m.WorkerTaskName[args.WorkerName] = inteFile

				reply.WorkType = m.state
				reply.FileAddress = inteFile
				reply.TaskNumber = m.ReduceTaskCount
				(reply.Flag) = 1

				m.ReduceTaskCount = m.ReduceTaskCount + 1
				break
			}
		}
		return nil
	} else {
		LogPrintln("Worker " + strconv.Itoa(args.WorkerId) + " receive no task, try again... ")
		reply.WorkType = 2
		return nil
	}
}

// TaskFinish records the completion of a map or reduce task reported by a
// worker, collects the produced output file(s), and marks the worker idle.
func (m *Master) TaskFinish(args *TaskFinishArgs, reply *TaskFinishReply) error {
	m.Mux.Lock()
	defer m.Mux.Unlock()

	switch args.WorkType {
	case 0:
		// A map task finished: mark it done and union in its intermediate files.
		LogPrintln("Worker " + strconv.Itoa(args.WorkerId) + " has finished the Map task " + (args.InputFileName))
		m.MapTaskProgressState[args.InputFileName] = true
		m.IntermediaFileAddressSet = Union(m.IntermediaFileAddressSet, args.OutputFileName)
		m.MapTaskFinish++
	case 1:
		// A reduce task finished: mark it done and union in its result files.
		LogPrintln("Worker " + strconv.Itoa(args.WorkerId) + " has finished the reduce task " + (args.InputFileName))
		m.ReduceTaskProgressState[args.InputFileName] = true
		m.ResultFileAddressSet = Union(m.ResultFileAddressSet, args.OutputFileName)
		m.ReduceTaskFinish++
	}
	// The worker no longer holds a task.
	m.WorkerTaskName[m.WorkerNoNameMap[args.WorkerId]] = ""

	return nil
}

// Exit handles a worker's voluntary shutdown notification: the worker is
// marked dead and the live-worker count is decreased.
func (m *Master) Exit(args *WorkerExitArgs, reply *WorkerExitReply) error {
	m.Mux.Lock()
	defer m.Mux.Unlock()

	m.WorkerState[args.WorkerId] = false
	m.WorkerCount--

	return nil
}

//
// start a thread that listens for RPCs from worker.go
//
// server starts a goroutine that listens for RPCs from worker.go on a unix
// domain socket.
func (m *Master) server() {
	rpc.Register(m)
	rpc.HandleHTTP()
	//l, e := net.Listen("tcp", ":1234")
	sockname := masterSock()
	os.Remove(sockname)
	l, e := net.Listen("unix", sockname)
	// Bug fix: check the listen error before storing or serving the listener;
	// previously a nil listener was saved under the lock before the check.
	if e != nil {
		log.Fatal("master listen error:", e)
	}

	m.Mux.Lock()
	m.l = l
	m.Mux.Unlock()

	go http.Serve(l, nil)
}

//
// main/mrmaster.go calls () periodically to find out
// if the entire job has finished.
//
// Done is called periodically by main/mrmaster.go to find out whether the
// entire job has finished. As a side effect it advances the master's stage
// (map -> reduce -> finished) once all tasks of the current stage are done.
func (m *Master) Done() bool {
	m.Mux.Lock()
	defer m.Mux.Unlock()

	m.CheckAllTaskStage() // debug output; the author marked this for removal

	ret := true

	LogPrintln("Start check task progress....")
	// Special case: if every worker has gone away, exit after a fixed number
	// of consecutive checks without any responding worker.
	if m.WorkerCount == 0 {
		// Bug fix: SleepTime was never incremented anywhere, so the
		// "no responsive worker" exit below could never trigger.
		m.SleepTime++
		if m.SleepTime >= 20 {
			LogPrintln("No responsed worker, the master end now...")
			return true
		}
		LogPrintln("Master is waiting for worker....")
	} else {
		m.SleepTime = 0 // a worker is alive; restart the countdown
		LogPrintln("There are/is " + strconv.Itoa(m.WorkerCount) + " workers are/is working")
	}

	if m.state == 0 {
		LogPrintln("Before checking, the task stage is Mapping")
		// The map stage is complete once every handed-out map task is
		// finished (reassignment after a crash re-inserts unfinished tasks).
		for _, v := range m.MapTaskProgressState {
			if v == false {
				ret = false
				break
			}
		}
		// No map task handed out yet: the stage cannot be complete.
		if len(m.MapTaskProgressState) == 0 {
			ret = false
		}

		if ret == true {
			m.state = 1
			LogPrintln("After checking, the task stage is Reducing")
		} else {
			LogPrintln("After checking, the task stage is Mapping")
		}

	} else if m.state == 1 {

		LogPrintln("Before checking, the task stage is Reducing")
		for _, v := range m.ReduceTaskProgressState {
			if v == false {
				ret = false
				break
			}
		}

		if len(m.ReduceTaskProgressState) == 0 {
			ret = false
		}

		if ret == true {
			m.state = 2
			LogPrintln("After checking, whole task has finished, Map task count : " + strconv.Itoa(m.MapTaskCount) + " Map task finished count: " + strconv.Itoa(m.MapTaskFinish) + " Reduce task count :  " + strconv.Itoa(m.ReduceTaskCount) + " reduce task finished count: " + strconv.Itoa(m.ReduceTaskFinish))
			defer LogPrintln("Master quit now！！！！")
			return true
		} else {
			LogPrintln("After checking, the task stage is reducing")
		}
	}
	return false
}

//
// create a Master.
// main/mrmaster.go calls this function.
// nReduce is the number of reduce tasks to use.
//

//func (m *Master) CheckWorker() ([]int, bool) {
//	//TODO deploy it in a distributed system
//	errorWorkerList := make([]int, 0)
//	//.... to ping the worker by the address
//	f := false
//	if len(errorWorkerList) == 0 {
//		f = true
//	}
//	// if all worker work well, flag return 1 else 0
//	return errorWorkerList, f
//
//}

// MakeMaster creates a Master for the given input files, starts its RPC
// server, and returns it. main/mrmaster.go calls this function; nWorker is
// the expected number of workers.
func MakeMaster(files []string, nWorker int) *Master {
	LogPrintln("Master generating...")

	m := Master{
		FileAddressList:           files,
		IntermediaFileAddressSet:  make(map[string]bool),
		ResultFileAddressSet:      make(map[string]bool),
		WorkerAddress:             make([]string, nWorker),
		MapTaskProgressState:      make(map[string]bool),
		ReduceTaskProgressState:   make(map[string]bool),
		FileCount:                 len(files),
		WorkerState:               make(map[int]bool),
		WorkerCount:               0,
		MapTaskAssignmentState:    make(map[string]int),
		ReduceTaskAssignmentState: make(map[string]int),
		WorkerTaskName:            make(map[string]string),
		WorkerNameNoMap:           make(map[string]int),
		WorkerNoNameMap:           make(map[int]string),
		MapTaskCount:              0,
		ReduceTaskCount:           0,
		MapTaskFinish:             0,
		ReduceTaskFinish:          0,
		SleepTime:                 0,
	}

	m.server()
	LogPrintln("Master server online...")
	return &m
}

// CallPing pings the named worker over RPC and returns its status reply plus
// whether the call itself succeeded (false means the worker is unreachable).
func CallPing(workerName string) (*PingReply, bool) {
	args := PingArgs{}
	// Start from sentinel values so a failed call yields an obviously-empty reply.
	reply := PingReply{
		WorkerName:                     "",
		WorkerCurrentTaskStage:         -1,
		WorkerCurrentTask:              "",
		WorkerCurrentTaskProgressStage: true,
	}
	ok := WorkerCall(workerName, "SingleWorker.Ping", &args, &reply)
	return &reply, ok
}

// CheckAllTaskStage logs the assignment state of every map task (debug aid).
// Callers must hold m.Mux.
func (m *Master) CheckAllTaskStage() {

	LogPrintln("-------Map Tasks Stage------------")
	for _, file := range m.FileAddressList {
		_, ok := m.MapTaskAssignmentState[file]
		// Fixed log formatting: a separator was missing between the file
		// name and the assignment flag.
		LogPrintln("Map task name: " + file + " Assign stage: " + strconv.FormatBool(ok))
	}
}

// CallShutdown tells the named worker to abandon its current task.
func CallShutdown(workerName string) {
	args := ShutdownArgs{}
	reply := ShutdownReply{}
	WorkerCall(workerName, "SingleWorker.ShutdownCurrentTask", &args, &reply)
}
