package mr

import (
	"container/list"
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"log"
	"path"
	"path/filepath"
	"regexp"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"
)
import "net"
import "os"
import "net/rpc"
import "net/http"

// Task Status — lifecycle of a single map/reduce task.
const (
	T_UNASSIGNED = iota // task exists but has not been handed to a worker
	T_ING               // task is currently assigned and in progress
	T_FINISHED          // task completed
)

// worker Status — lifecycle of a registered worker.
// NOTE(review): W_UNCONNECTED/W_CONNECTING/W_CONNECTED/W_WAITING are not
// referenced anywhere in this file — confirm they are used elsewhere.
const (
	W_UNCONNECTED = iota
	W_CONNECTING
	W_CONNECTED
	W_IDLE // registered and ready to take a task
	W_WAITING
	W_WORKING // currently executing a task
	W_CLOSED  // shut down (explicit close, failure, or timeout)
)

// Task step — the overall phase of the job; stored in Coordinator.steps
// and advanced MAP -> REDUCE -> DONE by the watcher goroutine.
const (
	S_MAP = iota // running map tasks
	S_REDUCE     // running reduce tasks
	S_DONE       // entire job finished
)

// WorkerInfo is the coordinator's bookkeeping record for one worker.
type WorkerInfo struct {
	Status uint32         // W_* state; manipulated with sync/atomic
	Task   *Task          // task currently assigned to this worker
	ch     chan WorkerRes // carries the worker's task result to waitForDone

	// workerChMutex serializes senders on ch.
	// NOTE(review): workerStatusMutex is never locked in this file —
	// Status is handled atomically instead; confirm before relying on it.
	workerStatusMutex, workerChMutex *sync.RWMutex
}

// Task describes one unit of work (a map or reduce task) and its
// scheduling state.
type Task struct {
	TaskType string   // "map" or "reduce"
	Num      uint32   // task number within its kind
	Content  []string // input file paths for this task
	Status   uint32   // T_* state; manipulated with sync/atomic
	WorkerId int32    // id of the assigned worker, -1 when unassigned (atomic)

	taskTypeMutex, taskContentMutex *sync.RWMutex // guard TaskType / Content reads+writes
}

// RPCCalls documents the RPC surface workers are expected to call.
// NOTE(review): Coordinator does not actually satisfy this interface as
// written — the concrete handlers return error, TaskWorkResDispatch takes
// a WorkerRes, and the close method is spelled WorkerWannaClose (not
// WorkWannaClose). Verify whether this interface is still wanted.
type RPCCalls interface {
	NextOperation([]interface{}, *ResponseBody)
	WorkerInit([]interface{}, *ResponseBody)
	TaskWorkResDispatch([]interface{}, *ResponseBody)
	WorkWannaClose([]interface{}, *ResponseBody)
}

//type MapTask struct {
//	path   string
//	Status int
//}
//
//type ReduceTask struct {
//	path   string
//	Status int
//}

// Coordinator orchestrates the MapReduce job: it owns the map/reduce task
// tables, the queue of unassigned tasks, the registered workers, and the
// current phase (map / reduce / done).
type Coordinator struct {
	// Your definitions here.
	mapTasksMutex, reduceTasksMutex, unassignedTasksMutex sync.RWMutex
	mapTasksSlice                                         []*Task   // one map task per matched input file
	reduceTasksArr                                        []*Task   // nReduce reduce tasks, indexed by partition
	unassignedTasksList                                   list.List // *Task elements awaiting assignment
	ingTaskCount                                          uint32    // in-flight task count; manipulated with sync/atomic
	// overChan is signaled when the current phase has no in-flight and no
	// unassigned tasks left; the watcher goroutine then advances steps.
	overChan chan byte

	workerSliceMutex, idleWorkersListMutex sync.RWMutex
	workerSlice                            []*WorkerInfo // indexed by worker id
	idleWorkersList                        list.List // NOTE(review): never populated in this file

	steps          int32 // current phase: S_MAP/S_REDUCE/S_DONE (atomic)
	tasksWaitGroup sync.WaitGroup // NOTE(review): unused here — all uses are commented out
}

// Your code here -- RPC handlers for the worker to call.

//
// start a thread that listens for RPCs from worker.go
//
// server registers the coordinator with net/rpc, exposes it over HTTP and
// serves on a unix domain socket in a background goroutine; a listen
// failure is fatal.
func (c *Coordinator) server() {
	rpc.Register(c)
	rpc.HandleHTTP()
	sockName := coordinatorSock()
	// Ensure the socket path is free before listening.
	os.Remove(sockName)
	listener, err := net.Listen("unix", sockName)
	if err != nil {
		log.Fatal("listen error:", err)
	}
	go http.Serve(listener, nil)
}

//
// main/mrcoordinator.go calls Done() periodically to find out
// if the entire job has finished.
//
func (c *Coordinator) Done() bool {
	// steps is written with atomic stores elsewhere, so load atomically.
	currentStep := atomic.LoadInt32(&c.steps)
	return currentStep == S_DONE
}

//
// create a Coordinator.
// main/mrcoordinator.go calls this function.
// NReduce is the number of reduce tasks to use.
//
// MakeCoordinator builds one map task per matching input file plus
// nReduce reduce tasks, starts the phase-switching watcher goroutine and
// the RPC server, then returns the coordinator.
func MakeCoordinator(files []string, nReduce int) *Coordinator {
	c := Coordinator{}
	// The job starts in the map phase.
	c.steps = S_MAP
	c.idleWorkersList = list.List{}
	c.overChan = make(chan byte)

	// Locate the directory holding the input files. On linux the binary
	// is assumed to run one level below it; on macOS we may need to
	// descend into "main".
	dirPath, _ := os.Getwd()
	if runtime.GOOS == "linux" {
		dirPath = filepath.Dir(dirPath)
	} else if runtime.GOOS == "darwin" {
		if !strings.Contains(dirPath, "main") {
			dirPath = path.Join(dirPath, "main")
		}
	}
	log.Println("Coordinator: dirPath:", dirPath)
	dirFiles, err := ioutil.ReadDir(dirPath)
	if err != nil {
		log.Fatal(err)
	}

	// Initialize map tasks: one task per directory entry whose name
	// matches an input-file base name. NOTE(review): the name is used as
	// a regular expression, so dots etc. match loosely — presumably
	// intentional to allow patterns; verify.
	for i := 0; i < len(files); i++ {
		reg, _ := regexp.Compile(filepath.Base(files[i]))
		for _, f := range dirFiles {
			if f.IsDir() {
				continue
			}
			matched := reg.MatchString(f.Name())
			if matched {
				absPath := filepath.Join(dirPath, f.Name())
				var fileSlice []string
				fileSlice = append(fileSlice, absPath)
				c.createTask("map", uint32(len(c.mapTasksSlice)), &fileSlice, T_UNASSIGNED)
				// Newly created map tasks start on the unassigned queue.
				c.unassignedTasksList.PushBack(c.mapTasksSlice[len(c.mapTasksSlice)-1])
			}
		}
	}
	log.Println("Coordinator: initialed", len(c.mapTasksSlice), "map tasks")

	// Initialize reduce tasks: nReduce empty tasks. A task whose output
	// file "mr-out-N" already exists is considered finished.
	for i := 0; i < nReduce; i++ {
		var status uint32 = T_UNASSIGNED
		fileName := fmt.Sprintf("mr-out-%d", i)
		// BUGFIX: os.Stat returns a nil error when the file exists. The
		// old code tested os.IsExist(err) on the *failure* path, where
		// err is a not-exist error, so finished outputs were never
		// detected.
		if _, err := os.Stat(fileName); err == nil {
			status = T_FINISHED
		}
		c.createTask("reduce", uint32(i), new([]string), status)
	}
	log.Println("Coordinator: initialed", len(c.reduceTasksArr), "reduce tasks")

	// Watcher goroutine: each time overChan fires (no in-flight and no
	// unassigned tasks left in the current phase), advance the step
	// MAP -> REDUCE -> DONE.
	go func() {
		for {
			<-c.overChan
			// BUGFIX: c.steps is written with atomic.StoreInt32, so it
			// must also be read atomically here.
			switch atomic.LoadInt32(&c.steps) {
			case S_MAP:
				atomic.StoreInt32(&c.steps, S_REDUCE)
				log.Println("Coordinator: step routine: step switched to REDUCE")
				// Queue every reduce task that is still unassigned
				// (those with pre-existing output files stay finished).
				c.reduceTasksMutex.RLock()
				for i := 0; i < nReduce; i++ {
					// BUGFIX: Status is written atomically elsewhere.
					if atomic.LoadUint32(&c.reduceTasksArr[i].Status) == T_UNASSIGNED {
						c.unassignedTasksMutex.Lock()
						c.unassignedTasksList.PushBack(c.reduceTasksArr[i])
						c.unassignedTasksMutex.Unlock()
					}
				}
				c.reduceTasksMutex.RUnlock()
			case S_REDUCE:
				atomic.StoreInt32(&c.steps, S_DONE)
				log.Println("Coordinator: steps routine: step switched to DONE")
				return
			}
		}
	}()

	// Start the RPC server and wait for workers to connect.
	c.server()
	log.Println("Coordinator: waiting for first worker joining...")

	return &c
}

// WorkerInit is the RPC through which a new worker registers. It replies
// with an InitInfoBody carrying the worker's assigned id and the number
// of reduce partitions, and records the worker as idle.
func (c *Coordinator) WorkerInit(req []interface{}, res *ResponseBody) error {
	// Refuse new workers once the whole job is finished.
	// BUGFIX: c.steps is updated via atomic stores, so read atomically.
	if atomic.LoadInt32(&c.steps) == S_DONE {
		return errors.New("WorkerInit Error: There has no work to do")
	}
	res.Type_id = RES_INFO
	res.Desc = "init"
	// BUGFIX: compute the id and append the record under ONE critical
	// section; the old code marshaled the id under RLock and appended
	// under a separate Lock, so two concurrent inits could be handed the
	// same worker id.
	c.workerSliceMutex.Lock()
	workerId := len(c.workerSlice)
	initStr, _ := json.Marshal(InitInfoBody{workerId, len(c.reduceTasksArr)})
	c.workerSlice = append(c.workerSlice, &WorkerInfo{W_IDLE, nil, make(chan WorkerRes),
		new(sync.RWMutex), new(sync.RWMutex)})
	c.workerSliceMutex.Unlock()
	res.Content = string(initStr)

	log.Println("Coordinator: Worker", workerId, "init done")
	return nil
}

// minusIngCount atomically decrements the in-flight task counter. When it
// reaches zero and the unassigned queue is also empty, it signals overChan
// so the watcher goroutine can advance the job to its next phase.
func (c *Coordinator) minusIngCount() {
	// Adding ^uint32(0) (all ones) is a decrement by one.
	if atomic.AddUint32(&c.ingTaskCount, ^uint32(0)) != 0 {
		return
	}
	c.unassignedTasksMutex.RLock()
	if c.unassignedTasksList.Len() == 0 {
		c.overChan <- 1
	}
	c.unassignedTasksMutex.RUnlock()
}

// removeUnassignedTask deletes ele from the unassigned-task queue. The
// caller is expected to already hold unassignedTasksMutex (popNextTask
// calls this with the lock held).
func (c *Coordinator) removeUnassignedTask(ele *list.Element) {
	_ = c.unassignedTasksList.Remove(ele)
}

// createTask allocates a Task with the given kind, number, input files and
// initial status, and appends it to the matching task table. It reports
// false for an unknown task type, true otherwise.
func (c *Coordinator) createTask(taskType string, num uint32, content *[]string, status uint32) bool {
	task := &Task{
		TaskType:         taskType,
		Num:              num,
		Content:          *content,
		Status:           status,
		WorkerId:         -1, // not yet assigned to any worker
		taskTypeMutex:    new(sync.RWMutex),
		taskContentMutex: new(sync.RWMutex),
	}

	if taskType == "map" {
		c.mapTasksMutex.Lock()
		c.mapTasksSlice = append(c.mapTasksSlice, task)
		c.mapTasksMutex.Unlock()
		return true
	}
	if taskType == "reduce" {
		c.reduceTasksMutex.Lock()
		c.reduceTasksArr = append(c.reduceTasksArr, task)
		c.reduceTasksMutex.Unlock()
		return true
	}
	return false
}

// popNextTask blocks until an unassigned task is available, removes it
// from the queue, marks it in-progress, bumps the in-flight counter and
// returns it. Returns nil immediately when the whole job is already done.
//
// NOTE(review): the done check happens only once, before the wait loop;
// if the job finishes while a caller is parked here, the loop polls
// forever — verify callers cannot hit that window.
func (c *Coordinator) popNextTask() *Task {
	if atomic.LoadInt32(&c.steps) == S_DONE {
		return nil
	}
	log.Println("Coordinator: popNextTask:", "checked in working step")
	// Poll until the queue is non-empty. On break the mutex is STILL
	// HELD, so the pop below is atomic with the emptiness check; the
	// matching Unlock is after removeUnassignedTask.
	for {
		c.unassignedTasksMutex.Lock()
		if c.unassignedTasksList.Len() == 0 {
			c.unassignedTasksMutex.Unlock()

			time.Sleep(10 * time.Millisecond)
		} else {
			break
		}
	}

	ele := c.unassignedTasksList.Front()
	task := (ele.Value).(*Task)
	c.removeUnassignedTask(ele)
	c.unassignedTasksMutex.Unlock()
	atomic.StoreUint32(&task.Status, T_ING)
	atomic.AddUint32(&c.ingTaskCount, 1)
	log.Println("Coordinator: popNextTask:", "going to return task")
	return task
}

// NextOperation is the RPC a worker polls for its next instruction.
// req[0] must be the worker's id. Depending on the worker's status the
// response is a task assignment (RES_TASK), a "close" command (RES_CMD —
// job finished or worker closed), or a "waiting" notice (RES_INFO).
func (c *Coordinator) NextOperation(req []interface{}, res *ResponseBody) error {
	if len(req) != 1 {
		return errors.New("NextOperation Error: Wrong params Num")
	}
	workerId := (req[0]).(int)
	c.workerSliceMutex.RLock()
	worker := c.workerSlice[workerId]
	c.workerSliceMutex.RUnlock()
	log.Println("Coordinator: worker", workerId, "asking for next op")
	switch atomic.LoadUint32(&worker.Status) {
	case W_IDLE:
		// Blocks until a task is available; nil means the job is done.
		task := c.popNextTask()
		log.Println("Coordinator: worker", workerId, ", popped a task, going to check it")
		if task == nil {
			res.Type_id = RES_CMD
			res.Desc = "close"
			res.Content = ""
			return nil
		}
		// CAS guards against the status having changed (e.g. to
		// W_CLOSED) between the switch above and this assignment.
		if atomic.CompareAndSwapUint32(&worker.Status, W_IDLE, W_WORKING) {
			atomic.StoreInt32(&task.WorkerId, int32(workerId))
			worker.Task = task
			contentBytes, _ := json.Marshal(*task)
			res.Type_id = RES_TASK
			res.Desc = "assign Task"
			res.Content = string(contentBytes)
			log.Println("Coordinator: worker", workerId, ": task", task.Num, "assigned successfully")
			// Start the per-task timeout/result watcher.
			go c.waitForDone(task)
		} else {
			// CAS failed: status changed concurrently; ask the worker to wait.
			// NOTE(review): the task popped above is neither assigned nor
			// returned to the unassigned queue on this path — it looks
			// lost (and ingTaskCount stays bumped); verify.
			log.Println("Coordinator: worker", workerId, "no op assigned, because status is ", worker.Status)
			res.Type_id = RES_INFO
			res.Desc = "waiting"
			res.Content = fmt.Sprintf("couldn't be assigned task because of status %d", worker.Status)
			return nil
		}
	case W_CLOSED:
		res.Type_id = RES_CMD
		res.Desc = "close"
		res.Content = ""
		return nil
	default:
		log.Println("Coordinator: worker", workerId, "no op assigned, because status is ", worker.Status)
		res.Type_id = RES_INFO
		res.Desc = "waiting"
		res.Content = fmt.Sprintf("couldn't be assigned task because of status %d", worker.Status)
		return nil
	}

	log.Println("Coordinator: worker", workerId, " assigned op: ", res)
	return nil
}

// waitForDone runs once per assigned task. It creates a fresh result
// channel for the worker, arms a TIME_LIMIT timer, then blocks for either
// the worker's real result (sent via TaskWorkResDispatch/WorkerWannaClose)
// or a synthetic "expired" result from the timer goroutine, and performs
// the bookkeeping for whichever outcome arrives.
func (c *Coordinator) waitForDone(task *Task) {
	c.workerSliceMutex.RLock()
	worker := c.workerSlice[task.WorkerId]
	c.workerSliceMutex.RUnlock()
	// Fresh result channel for this task.
	worker.workerChMutex.Lock()
	worker.ch = make(chan WorkerRes)
	worker.workerChMutex.Unlock()
	tchan := make(chan struct{})

	// Timer goroutine: closing tchan cancels it once a real result wins;
	// otherwise it injects an "expired" result after TIME_LIMIT.
	go func() {
		tm := time.NewTimer(TIME_LIMIT)
		log.Println("Coordinator: waitForDone: waiting for", "worker", task.WorkerId, "done...")
		select {
		case <-tchan:
			tm.Stop()
		case <-tm.C:
			info := WorkerRes{
				task.WorkerId,
				"expired",
				make([]string, 0),
			}
			worker.ch <- info
		}
	}()

	// Block for the result: wres.Info is the outcome, wres.Content
	// carries the produced file paths on "done".
	wres := <-worker.ch
	close(tchan)
	// NOTE(review): closing worker.ch here can race with a concurrent
	// sender (TaskWorkResDispatch / WorkerWannaClose) — a send on a
	// closed channel panics. Verify no sender can still be in flight.
	close(worker.ch)

	switch wres.Info {
	case "done":
		log.Println("Coordinator: waitForDone:", "worker", task.WorkerId, "handling after task", task.Num, "done")
		c.minusIngCount()
		// Mark the task finished.
		atomic.StoreUint32(&task.Status, T_FINISHED)
		// Return the worker to the idle state.
		atomic.StoreUint32(&worker.Status, W_IDLE)
		// Post-process the task's output files.
		var taskType string
		task.taskTypeMutex.RLock()
		taskType = (*task).TaskType
		task.taskTypeMutex.RUnlock()
		switch taskType {
		case "map":
			// Each intermediate file name ends in "-<N>"; file feeds
			// reduce task N, so append it to that task's input list.
			for _, filePath := range wres.Content {
				if len(filePath) == 0 {
					continue
				}
				j := strings.LastIndex(filePath, "-")
				rd_n, err := strconv.Atoi(filePath[j+1:])
				if err != nil {
					log.Println("intermediate file filePath wrong, filePath is ", filePath)
					continue
				}
				c.reduceTasksMutex.Lock()
				c.reduceTasksArr[rd_n].Content = append(c.reduceTasksArr[rd_n].Content, filePath)
				c.reduceTasksMutex.Unlock()
			}
		case "reduce":
			// Reduce output is final; no bookkeeping needed here.
		}

	case "failed", "expired":
		// Recycle the task onto the unassigned queue, treat the worker
		// as closed, and decrement the in-flight counter.
		log.Println("Coordinator: waitForDone:", "worker", task.WorkerId, wres.Info)
		atomic.StoreUint32(&task.Status, T_UNASSIGNED)
		atomic.StoreUint32(&worker.Status, W_CLOSED)
		atomic.StoreInt32(&task.WorkerId, -1)
		c.unassignedTasksMutex.Lock()
		c.unassignedTasksList.PushBack(task)
		c.unassignedTasksMutex.Unlock()
		c.minusIngCount()
	default:
		log.Println("Coordinator: waitForDone:", "worker", task.WorkerId, "unknown info:", wres.Info)
	}

}

// TaskWorkResDispatch is the RPC through which a worker reports the result
// of its current task; the result is forwarded to the waitForDone
// goroutine listening on that worker's channel.
func (c *Coordinator) TaskWorkResDispatch(result WorkerRes, res *ResponseBody) error {
	// BUGFIX: workerSlice is appended to concurrently by WorkerInit, so
	// both the bounds check and the element read must hold the lock
	// (every other handler in this file does).
	c.workerSliceMutex.RLock()
	if result.WorkerId < 0 || result.WorkerId >= int32(len(c.workerSlice)) {
		c.workerSliceMutex.RUnlock()
		return errors.New("TaskWorkResDispatch: WorkerId is illegal")
	}
	worker := c.workerSlice[result.WorkerId]
	c.workerSliceMutex.RUnlock()

	// workerChMutex serializes senders on the per-worker channel.
	worker.workerChMutex.Lock()
	worker.ch <- result
	worker.workerChMutex.Unlock()

	return nil
}

// WorkerWannaClose is the RPC through which a worker announces it is
// shutting down. If the worker was mid-task, the task is recycled to the
// unassigned queue and the in-flight counter is decremented.
//
// BUGFIX: net/rpc only registers methods whose signature is
// (args, reply) error; without the error return this handler was never
// exposed to workers. It now matches the other handlers' signatures.
func (c *Coordinator) WorkerWannaClose(req []interface{}, res *ResponseBody) error {
	if len(req) != 1 {
		log.Panic("WorkerWannaClose param Num is illegal")
	}
	workerId := (req[0]).(int)
	// BUGFIX: read workerSlice under its lock, like the other handlers.
	c.workerSliceMutex.RLock()
	if workerId < 0 || workerId >= len(c.workerSlice) {
		c.workerSliceMutex.RUnlock()
		log.Panic("TaskWorkResDispatch: WorkerId is illegal")
	}
	worker := c.workerSlice[workerId]
	c.workerSliceMutex.RUnlock()

	// BUGFIX: Status is written atomically elsewhere; read atomically.
	switch atomic.LoadUint32(&worker.Status) {
	case W_IDLE:
		// Nothing in flight; just mark closed below.

	case W_WORKING:
		// Tell the waitForDone goroutine the task failed.
		wres := WorkerRes{
			int32(workerId),
			"failed",
			make([]string, 0),
		}
		worker.ch <- wres

		// NOTE(review): waitForDone's "failed" branch ALSO requeues this
		// task and calls minusIngCount, so the task appears to be
		// recycled twice and the counter decremented twice — verify.
		task := worker.Task
		atomic.StoreUint32(&task.Status, T_UNASSIGNED)
		atomic.StoreInt32(&task.WorkerId, -1)
		c.unassignedTasksMutex.Lock()
		c.unassignedTasksList.PushBack(task)
		c.unassignedTasksMutex.Unlock()
		c.minusIngCount()
	}
	atomic.StoreUint32(&worker.Status, W_CLOSED)
	// NOTE(review): waitForDone also closes worker.ch after receiving a
	// result; a double close panics — verify which side should own it.
	close(worker.ch)
	return nil
}
