package worker

import (
	"bufio"
	"context"
	"distribute-mapreduce/common/mrtype"
	"distribute-mapreduce/grpc/generated/pb/coordinator"
	"encoding/json"
	"fmt"
	"github.com/sirupsen/logrus"
	"google.golang.org/grpc"
	"hash/fnv"
	"log"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"time"
)

// tmpFiles was meant to record intermediate file names so they could be
// removed when the worker exits; it is currently only referenced from
// commented-out code in the map path (candidate for removal).
var tmpFiles []string

// use ihash(key) % NReduce to choose the reduce
// task number for each KeyValue emitted by Map.
// ihash hashes key with 32-bit FNV-1a and clears the sign bit so the result
// is a non-negative int; use ihash(key) % NReduce to pick the reduce
// partition for each KeyValue emitted by Map.
func ihash(key string) int {
	hasher := fnv.New32a()
	_, _ = hasher.Write([]byte(key)) // Write on an fnv hash never fails
	return int(hasher.Sum32() & 0x7fffffff)
}

// Worker repeatedly pulls map/reduce tasks from the coordinator over gRPC
// and executes them with the user-supplied map and reduce functions.
type Worker struct {
	mapFunc       func(string, string) []mrtype.KeyValue
	reduceFunc    func(string, []string) string
	resultBaseDir string // output directory; NewWorker guarantees a trailing "/"

	// gRPC client used to talk to the coordinator.
	coordinatorServiceClient coordinator.CoordinatorServiceClient

	// hang is the current backoff delay in seconds, doubled by hangOver on
	// each idle/failed round and capped at 10.
	hang int
}

// NewWorker builds a Worker around the user-supplied map and reduce
// functions. The result directory is created if missing (the process exits
// if that fails) and is stored with a trailing "/" so file paths can be
// built by simple concatenation elsewhere in the worker.
func NewWorker(mapFunc mrtype.MapFunc, reduceFunc mrtype.ReduceFunc, resultBaseDir string) *Worker {
	// MkdirAll is a no-op when the directory already exists, so the previous
	// Stat/IsNotExist pre-check was both racy and silently dropped Stat
	// errors other than "not exist" (e.g. permission problems).
	if err := os.MkdirAll(resultBaseDir, 0755); err != nil {
		logrus.Fatalf("[Worker] - creating resultBaseDir %s failed, err = %v", resultBaseDir, err)
	}
	if !strings.HasSuffix(resultBaseDir, "/") {
		resultBaseDir += "/"
	}
	return &Worker{
		mapFunc:       mapFunc,
		reduceFunc:    reduceFunc,
		resultBaseDir: resultBaseDir,
	}
}

// initWorker dials the coordinator at serverAddress and installs the gRPC
// service client on the worker.
//
// The connection error (if any) is returned to the caller instead of calling
// logrus.Fatalf: the original Fatalf terminated the process, which made this
// function's error return — and the caller's error handling in Run — dead
// code.
func (w *Worker) initWorker(serverAddress string) error {
	clt, err := grpc.NewClient(serverAddress, grpc.WithInsecure())
	if err != nil {
		return fmt.Errorf("dialing coordinator %s: %w", serverAddress, err)
	}
	w.coordinatorServiceClient = coordinator.NewCoordinatorServiceClient(clt)
	return nil
}

// Run connects to the coordinator at serverAddress and then loops, requesting
// and executing tasks until an EXIT task (or an unrecoverable RPC failure)
// signals shutdown through the done channel.
func (w *Worker) Run(serverAddress string) error {
	if err := w.initWorker(serverAddress); err != nil {
		logrus.Errorf("[Worker] - init worker failed, err = %v", err)
		return err
	}

	// Buffered so a single shutdown signal never blocks the sender.
	done := make(chan any, 1)
	for {
		select {
		case <-done:
			return nil
		default:
			w.CallGetTask(done)
		}
	}
}

// hangOver 主要是为了实现指数退避机制
func (w *Worker) hangOver() {
	if w.hang == 0 {
		w.hang = 1
	} else {
		w.hang = w.hang * 2
	}
	if w.hang > 10 {
		w.hang = 10
	}
	time.Sleep(time.Duration(w.hang) * time.Second)
	//logrus.Infof("[Worker] - hang over %d seconds", w.hang)
}

// CallGetTask requests one task from the coordinator and executes it. When
// the RPC itself fails, the worker backs off once and then signals shutdown
// on done (an unreachable coordinator is treated as fatal).
func (w *Worker) CallGetTask(done chan any) {
	logrus.Infof("[Worker] - getting task")
	reply, err := w.coordinatorServiceClient.GetTask(context.Background(), &coordinator.GetTaskParam{})
	if err != nil {
		logrus.Errorf("[Worker] - get task failed, err = %v", err)
		// retry policy: sleep with backoff, then give up and exit.
		w.hangOver()
		done <- 1
		return
	}

	logrus.Infof("[Worker] - get task, reply, type = %v", reply.TaskType)
	w.executeTask(done, reply)
}
// executeTask dispatches the coordinator's reply to the handler matching its
// task type. EXIT signals shutdown through done; WAIT sleeps with backoff.
func (w *Worker) executeTask(done chan any, reply *coordinator.GetTaskReply) {
	switch reply.TaskType {
	case coordinator.MRTaskType_MAP:
		w.runMapTask(reply)
	case coordinator.MRTaskType_REDUCE:
		w.runReduceTask(reply)
	case coordinator.MRTaskType_WAIT:
		logrus.Infof("[Worker] - wait for task, hang over %d seconds", w.hang)
		w.hangOver()
	case coordinator.MRTaskType_EXIT:
		logrus.Infof("[Worker] - all tasks done, exit")
		done <- 1
	default:
		logrus.Errorf("[Worker] - unknown task type, task type = %s", reply.TaskType)
	}
}

// runMapTask runs the user map function over reply.Filename and partitions
// the emitted key/value pairs into NReduce intermediate files named
// "mr-<MapTaskId>-<reduceIdx>", then reports completion to the coordinator.
func (w *Worker) runMapTask(reply *coordinator.GetTaskReply) {
	content, err := os.ReadFile(reply.Filename)
	if err != nil {
		log.Fatalf("cant read %v", reply.Filename)
	}
	kvs := w.mapFunc(reply.Filename, string(content))

	// Remove any leftover "mr-<MapTaskId>-*" files from a crashed earlier
	// attempt of this same map task: the files are opened in append mode
	// below, so stale partial output would otherwise be duplicated when the
	// task is re-executed (needed to pass the crash test).
	matches, _ := filepath.Glob(fmt.Sprintf("%smr-%d-*", w.resultBaseDir, reply.MapTaskId))
	for _, match := range matches {
		if err := os.Remove(match); err != nil {
			log.Fatalf("cant remove %v", match)
		}
	}

	// Open every intermediate file up front — one per reduce partition —
	// each wrapped in a buffered JSON encoder.
	openFiles := make([]*os.File, 0, int(reply.NReduce))
	bufWriters := make([]*bufio.Writer, 0, int(reply.NReduce))
	encoders := make([]*json.Encoder, 0, int(reply.NReduce))
	for i := 0; i < int(reply.NReduce); i++ {
		name := fmt.Sprintf("%smr-%d-%d", w.resultBaseDir, reply.MapTaskId, i)
		f, err := os.OpenFile(name, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
		if err != nil {
			for _, opened := range openFiles {
				opened.Close()
			}
			log.Fatalf("cant create %v", name)
		}
		// BUG FIX: the handle was never recorded before, so the error-path
		// cleanup above closed nothing and the files were never closed.
		openFiles = append(openFiles, f)
		bw := bufio.NewWriterSize(f, 10*1024)
		bufWriters = append(bufWriters, bw)
		encoders = append(encoders, json.NewEncoder(bw))
	}

	for _, kv := range kvs {
		reduceIdx := ihash(kv.Key) % int(reply.NReduce)
		if err := encoders[reduceIdx].Encode(&kv); err != nil {
			log.Fatalf("cant encode kv %v", kv)
		}
	}

	// BUG FIX: flush the buffered writers and close the files. Without the
	// Flush, up to 10KiB of encoded pairs per partition stayed in the bufio
	// buffer and was silently lost when the worker moved on.
	for _, bw := range bufWriters {
		if err := bw.Flush(); err != nil {
			log.Fatalf("cant flush intermediate file, err = %v", err)
		}
	}
	for _, f := range openFiles {
		f.Close()
	}

	logrus.Infof("[worker] - map task done, mapTaskId = %d, filename = %s", reply.MapTaskId, reply.Filename)
	// Report completion so the coordinator can schedule reduce work.
	if _, err = w.coordinatorServiceClient.FinishMapTask(context.Background(), &coordinator.FinishMapTaskParam{
		MapTaskId: reply.MapTaskId,
		Filename:  reply.Filename,
	}); err != nil {
		logrus.Errorf("[Worker] - finish map task failed, err = %v", err)
	}
}

// runReduceTask gathers every intermediate file for reply.ReduceId, sorts the
// decoded key/value pairs, applies the user reduce function to each group of
// equal keys and writes "mr-out-<ReduceId>", then reports completion to the
// coordinator.
func (w *Worker) runReduceTask(reply *coordinator.GetTaskReply) {
	files, err := filepath.Glob(fmt.Sprintf("%smr-*-%d", w.resultBaseDir, reply.ReduceId))
	if err != nil {
		log.Fatalf("cant glob %v", fmt.Sprintf("mr-*-%d", reply.ReduceId))
	}
	kva := make([]mrtype.KeyValue, 0)
	for _, fileName := range files {
		file, err := os.Open(fileName)
		if err != nil {
			log.Fatalf("cant open %v", fileName)
		}
		dec := json.NewDecoder(file)
		for {
			var kv mrtype.KeyValue
			// Decode until any error; EOF is the normal terminator.
			if err := dec.Decode(&kv); err != nil {
				break
			}
			kva = append(kva, kv)
		}
		// BUG FIX: close each input once drained (descriptors leaked before).
		file.Close()
	}
	sort.Sort(mrtype.ByKey(kva))

	// os.Create truncates, wiping any partial output from a crashed attempt.
	ofile, err := os.Create(fmt.Sprintf("%smr-out-%d", w.resultBaseDir, reply.ReduceId))
	if err != nil {
		logrus.Fatalf("cant create %v", fmt.Sprintf("mr-out-%d", reply.ReduceId))
	}
	defer ofile.Close()

	// Walk runs of equal keys in the sorted slice and reduce each run.
	i := 0
	for i < len(kva) {
		j := i + 1
		for j < len(kva) && kva[j].Key == kva[i].Key {
			j++
		}
		var values []string
		for k := i; k < j; k++ {
			values = append(values, kva[k].Value)
		}
		output := w.reduceFunc(kva[i].Key, values)

		// this is the correct format for each line of Reduce output.
		_, _ = fmt.Fprintf(ofile, "%v %v\n", kva[i].Key, output)
		i = j
	}

	if _, err = w.coordinatorServiceClient.FinishReduceTask(context.Background(), &coordinator.FinishReduceTaskParam{
		ReduceId: reply.ReduceId,
	}); err != nil {
		logrus.Errorf("[Worker] - finish reduce task failed, err = %v", err)
	}
}

// Hello performs a smoke-test RPC against the coordinator and logs the reply
// message; failures are logged and otherwise ignored.
func (w *Worker) Hello() {
	reply, err := w.coordinatorServiceClient.Hello(context.Background(), &coordinator.HelloParam{})
	if err != nil {
		logrus.Errorf("[Worker] - hello failed, err = %v", err)
		return
	}
	logrus.Infof("[Worker] - hello success, reply = %v", reply.Msg)
}
