package mr

import (
	"fmt"
	"log"
	"net"
	"net/http"
	"net/rpc"
	"os"
	"sync"
	"time"
)

// doingWorker records one in-flight task that has been handed to a
// worker and not yet committed. Entries live in Coordinator.doingMaper
// or Coordinator.doingReducer keyed by task id.
type doingWorker struct {
	timestamp time.Time // deadline (assignment time + TIMEOUT seconds); past this the task may be re-issued
	id        int       // task id (map or reduce index)
	files     []string  // input files assigned to this task
}

// Coordinator drives a MapReduce job through a small state machine
// held in state (see Request/Commit):
//
//	0 = handing out map tasks
//	1 = all map tasks handed out; waiting for commits / re-issuing crashed mappers
//	2 = handing out reduce tasks
//	3 = all reduce tasks handed out; waiting for commits / re-issuing crashed reducers
//	4 = job finished
//
// All fields are protected by lock.
type Coordinator struct {
	// Your definitions here.
	// NOTE(review): curMapId/MaxMapId/curReduceId/MaxcurReduceId are
	// read by assignMap but never written anywhere in this file —
	// confirm they are set elsewhere or remove them.
	curMapId       int
	MaxMapId       int
	curReduceId    int
	MaxcurReduceId int

	state        int                 // current phase of the state machine (see above)
	doingMaper   map[int]doingWorker // in-flight map tasks, keyed by task id
	doingReducer map[int]doingWorker // in-flight reduce tasks, keyed by task id
	nReduce      int                 // total number of reduce tasks
	iReduce      int                 // next reduce task id to hand out
	dReduce      int                 // number of reduce tasks committed (done)
	dMap         int                 // number of map tasks committed (done)
	nMap         int                 // total number of map tasks
	iMap         int                 // next map task id to hand out
	lock         sync.Mutex          // guards every field above and below
	files        []string            // current task inputs: original inputs during map, intermediate files during reduce
	mapfiles     []string            // intermediate files reported by committed map tasks
}

// assignMap reports whether another map task id is available according
// to the curMapId/MaxMapId counters.
// NOTE(review): neither counter is written in this file and nothing
// here calls assignMap — confirm it is used elsewhere or remove it.
func (c *Coordinator) assignMap() bool {
	return c.curMapId < c.MaxMapId
}

const TIMEOUT = 10

// Your code here -- RPC handlers for the worker to call.

// Example is the sample RPC handler from the lab skeleton; the
// argument and reply types are defined in rpc.go. It replies with the
// request value incremented by one.
func (c *Coordinator) Example(args *ExampleArgs, reply *ExampleReply) error {
	x := args.X
	reply.Y = x + 1
	return nil
}

// getSliceById returns the round-robin share of t belonging to task id
// out of total tasks: elements id, id+total, id+2*total, ...
// It returns an empty (non-nil) slice when id >= len(t) or when total
// is not positive.
func getSliceById(id int, total int, t []string) []string {
	files := []string{}
	// Defensive: with total <= 0 the original stride loop would never
	// advance (or would move backwards) and spin forever.
	if total <= 0 {
		return files
	}
	for i := id; i < len(t); i += total {
		files = append(files, t[i])
	}
	return files
}
// assignWorker hands the next task of kind t ("map" or "reduce") to
// the calling worker, with a TIMEOUT-second deadline, and records it
// as in-flight so it can be re-issued if the worker crashes. When the
// last task of a phase has been handed out, the state machine advances
// to the corresponding wait-for-commits state (1 after maps, 3 after
// reduces). Caller must hold c.lock.
func (c *Coordinator) assignWorker(t string, args *ExampleArgs, reply *ExampleReply) {
	endTime := time.Now().Add(time.Second * TIMEOUT)
	reply.Woker = t
	reply.EndTime = endTime
	reply.NReduce = c.nReduce
	// Consistency with checkWorkerCrash: give the worker NMap on the
	// initial assignment too, not only after a crash re-issue.
	reply.NMap = c.nMap

	wmap := c.doingMaper
	if t == "map" {
		reply.Id = c.iMap
		// Map task i processes every nMap-th input file starting at i.
		reply.Files = getSliceById(reply.Id, c.nMap, c.files)
		c.iMap++
		if c.iMap >= c.nMap {
			c.state = 1
		}
	} else if t == "reduce" {
		wmap = c.doingReducer
		reply.Id = c.iReduce
		// During the reduce phase c.files holds the intermediate files
		// installed by Commit when state moved to 2.
		reply.Files = getSliceById(reply.Id, c.nReduce, c.files)
		c.iReduce++
		if c.iReduce >= c.nReduce {
			c.state = 3
		}
	}

	// Record the assignment so a timed-out task can be re-issued.
	wmap[reply.Id] = doingWorker{timestamp: endTime, id: reply.Id, files: reply.Files}
}

// checkWorkerCrash scans the in-flight tasks of kind t ("map" or
// "reduce") and, if one has passed its deadline, re-issues it to the
// calling worker with a fresh TIMEOUT-second deadline. At most one
// task is handed out per call; if none have expired the reply is left
// untouched. Caller must hold c.lock.
func (c *Coordinator) checkWorkerCrash(t string, args *ExampleArgs, reply *ExampleReply) {
	pending := c.doingMaper
	if t == "reduce" {
		pending = c.doingReducer
	}

	for key, task := range pending {
		// Still within its deadline — leave it with its worker.
		if task.timestamp.After(time.Now()) {
			continue
		}

		deadline := time.Now().Add(time.Second * TIMEOUT)
		reply.Id = task.id
		reply.Woker = t
		reply.EndTime = deadline
		reply.Files = task.files
		reply.NReduce = c.nReduce
		reply.NMap = c.nMap

		// Push the deadline forward so the task is not handed out
		// again until this new worker has had its chance.
		task.timestamp = deadline
		pending[key] = task
		break
	}
}

// Request is the RPC a worker calls to ask for work. It dispatches on
// the coordinator's state machine:
//
//	0: hand out the next map task
//	1: all maps assigned — re-issue any map task whose worker timed out
//	2: hand out the next reduce task
//	3: all reduces assigned — re-issue any timed-out reduce task
//	4: job finished — the reply is left zeroed
func (c *Coordinator) Request(args *ExampleArgs, reply *ExampleReply) error {
	c.lock.Lock()
	defer c.lock.Unlock()

	switch c.state {
	case 0:
		// Hand out a map task.
		c.assignWorker("map", args, reply)
	case 1:
		// All map tasks assigned; watch for crashed mappers.
		c.checkWorkerCrash("map", args, reply)
	case 2:
		// Hand out a reduce task.
		c.assignWorker("reduce", args, reply)
	case 3:
		// All reduce tasks assigned; watch for crashed reducers.
		c.checkWorkerCrash("reduce", args, reply)
	case 4:
		// Job finished: nothing to assign.
	}

	return nil
}

// removeWorker is an unimplemented stub; nothing in this file calls it.
// NOTE(review): implement or delete — confirm there are no callers
// elsewhere in the package first.
func (c *Coordinator) removeWorker(q []doingWorker, args *ExampleArgs) {

}

// Commit is the RPC a worker calls to report a finished task. The
// (Id, EndTime) pair must match the recorded in-flight assignment
// exactly: a stale worker whose task was timed out and re-issued
// carries the old EndTime, so its late commit is silently ignored and
// each task is counted at most once. When the last map task commits,
// the intermediate files collected in mapfiles become the reduce
// inputs and state advances to 2; when the last reduce commits, state
// advances to 4 (done).
func (c *Coordinator) Commit(args *ExampleArgs, reply *ExampleReply) error {
	c.lock.Lock()

	defer c.lock.Unlock()
	switch args.Worker {
	case "map":
		worker, ok := c.doingMaper[args.Id]
		// Only accept the commit from the worker that currently owns
		// the task (timestamps must match exactly).
		if ok && worker.id == args.Id && worker.timestamp.Equal(args.EndTime) {
			delete(c.doingMaper, worker.id)
			c.dMap++
			// Collect the intermediate files this map task produced.
			c.mapfiles = append(c.mapfiles, args.Files...)
			fmt.Printf("files:%v\n", c.mapfiles)
		}
		// All map tasks committed and none in flight: start reduce
		// phase over the intermediate files.
		if c.dMap >= c.nMap && len(c.doingMaper) == 0 {
			c.state = 2
			c.files = c.mapfiles
		}
	case "reduce":
		worker, ok := c.doingReducer[args.Id]
		// Same ownership fencing as the map case.
		if ok && worker.id == args.Id && worker.timestamp.Equal(args.EndTime) {
			delete(c.doingReducer, worker.id)
			c.dReduce++
		}
		// All reduce tasks committed and none in flight: job done.
		if c.dReduce >= c.nReduce && len(c.doingReducer) == 0 {
			c.state = 4
		}
	}
	return nil
}

// server starts a goroutine that listens for RPCs from worker.go over
// a unix-domain socket (path from coordinatorSock, defined in rpc.go).
func (c *Coordinator) server() {
	// NOTE(review): rpc.Register's error is ignored; it fails if the
	// Coordinator's exported methods don't fit the rpc signature.
	rpc.Register(c)
	rpc.HandleHTTP()
	//l, e := net.Listen("tcp", ":1234")
	sockname := coordinatorSock()
	// Remove any stale socket left by a previous run.
	os.Remove(sockname)
	l, e := net.Listen("unix", sockname)
	if e != nil {
		log.Fatal("listen error:", e)
	}
	go http.Serve(l, nil)
}

// Done reports whether the entire job has finished, i.e. the state
// machine reached state 4 (all reduce tasks committed).
// main/mrcoordinator.go calls it periodically.
func (c *Coordinator) Done() bool {
	c.lock.Lock()
	defer c.lock.Unlock()
	// Return the condition directly; the intermediate variable added
	// nothing.
	return c.state == 4
}

// MakeCoordinator creates a Coordinator for the given input files,
// starts its RPC server, and returns it. nReduce is the number of
// reduce tasks to use. main/mrcoordinator.go calls this function.
//
// NOTE(review): nMap is hard-coded to 2, so the input files are split
// round-robin across exactly two map tasks regardless of how many
// files there are — confirm this is intentional.
func MakeCoordinator(files []string, nReduce int) *Coordinator {
	c := Coordinator{
		nMap:         2,
		nReduce:      nReduce,
		files:        files,
		doingMaper:   make(map[int]doingWorker),
		doingReducer: make(map[int]doingWorker),
	}

	c.server()
	return &c
}
