package mr

import (
	"container/heap"
	"fmt"
	"hash/fnv"
	"io"
	"io/ioutil"
	"log"
	"net/rpc"
	"os"
	"sort"
	"strings"
	"sync"
	"time"
	"unicode"
)

// KeyValue is a single key/value pair: emitted by a map function, and
// consumed (grouped by key) by a reduce function.
type KeyValue struct {
	Key   string
	Value string
}

// UserMapFn is the application-supplied map function: it receives an input
// file's name and contents and returns the emitted key/value pairs.
type UserMapFn func(string, string) []KeyValue

// UserReduceFn is the application-supplied reduce function: it receives a
// key and every value observed for that key, and returns the reduced value.
type UserReduceFn func(string, []string) string

// ihash hashes key with 32-bit FNV-1a and clears the sign bit so the result
// is always a non-negative int. Use ihash(key) % NReduce to choose the
// reduce task number for each KeyValue emitted by Map.
func ihash(key string) int {
	hasher := fnv.New32a()
	hasher.Write([]byte(key))
	sum := hasher.Sum32()
	return int(sum & 0x7fffffff)
}

// IWorker is the common interface of MapWorker and ReduceWorker:
// Run executes a single assigned task from start to finish.
type IWorker interface {
	Run()
}

// MapWorker executes one map task.
type MapWorker struct {
	index int       // map task number assigned by the master
	fn    UserMapFn // application's map function
}

// KeyValueSlice implements sort.Interface over key/value pairs,
// ordering them lexicographically by Key.
type KeyValueSlice []KeyValue

// Len reports the number of pairs.
func (x KeyValueSlice) Len() int { return len(x) }

// Less orders pairs by key.
func (x KeyValueSlice) Less(i, j int) bool { return x[i].Key < x[j].Key }

// Swap exchanges the pairs at positions i and j.
func (x KeyValueSlice) Swap(i, j int) { x[i], x[j] = x[j], x[i] }

// intermediateName returns the canonical path of the intermediate file this
// map task produces for reduce partition i: "mr-<mapIndex>-<reduceIndex>".
func (w *MapWorker) intermediateName(i int) string {
	name := fmt.Sprintf("mr-%d-%d", w.index, i)
	return name
}

// flush partitions kvs into numReduce buckets by key hash, sorts each
// bucket, and writes bucket i to the intermediate file mr-<map>-<i>.
// Each bucket is written by its own goroutine. Writes go through a temp
// file that is renamed into place only after a successful close, so
// readers never observe a partially-written intermediate file. Any I/O
// failure aborts the worker via log.Fatalf.
func (w *MapWorker) flush(kvs KeyValueSlice, numReduce int) {
	bucket := make([]KeyValueSlice, numReduce)
	for i := 0; i < numReduce; i++ {
		bucket[i] = make(KeyValueSlice, 0)
	}

	// assign each pair to its reduce partition
	for _, kv := range kvs {
		i := ihash(kv.Key) % numReduce
		bucket[i] = append(bucket[i], kv)
	}

	wg := sync.WaitGroup{}
	for i := 0; i < numReduce; i++ {
		wg.Add(1)

		go func(i int) {
			defer wg.Done()

			sort.Sort(bucket[i])

			fp, err := ioutil.TempFile(".", "mr-tmp-*")
			if err != nil {
				log.Fatalf("cannot open temp file: %s", err)
			}

			for _, kv := range bucket[i] {
				_, err := fp.WriteString(fmt.Sprintf("%v %v\n", kv.Key, kv.Value))
				if err != nil {
					log.Fatalf("cannot write intermediate results to temp file: %s", err)
				}
			}

			if err := fp.Close(); err != nil {
				log.Fatalf("cannot close temp file: %s", err)
			}

			// Rename only after a successful close, and check the error:
			// the original deferred os.Rename discarded failures, which
			// would silently leave the intermediate file missing.
			if err := os.Rename(fp.Name(), w.intermediateName(i)); err != nil {
				log.Fatalf("cannot rename temp file: %s", err)
			}
		}(i)
	}
	wg.Wait()
}

// report notifies the master that this map task finished and hands over
// the paths of the numReduce intermediate files it produced.
func (w *MapWorker) report(numReduce int) {
	paths := make([]string, 0, numReduce)
	for i := 0; i < numReduce; i++ {
		paths = append(paths, w.intermediateName(i))
	}

	reply := ReportMapTaskReply{}
	args := ReportMapTaskArgs{Index: w.index, Paths: paths}
	if ok := call("Master.ReportMapTask", &args, &reply); !ok {
		log.Fatal("failed to send report to master.")
	}
}

// Run fetches this map task's input from the master, applies the user map
// function, writes the partitioned intermediate files, and reports back.
func (w *MapWorker) Run() {
	reply := FetchMapTaskReply{}
	args := FetchMapTaskArgs{Index: w.index}
	if ok := call("Master.FetchMapTask", &args, &reply); !ok {
		log.Fatal("failed to fetch map task data.")
	}

	log.Printf("begin map task #%d", w.index)

	emitted := w.fn(reply.Path, reply.Data)
	w.flush(emitted, reply.NumReduce)
	w.report(reply.NumReduce)

	log.Printf("end map task #%d", w.index)
}

// ReduceWorker executes one reduce task: it gathers this partition's
// intermediate files from every map task, merges them, applies the user
// reduce function, and writes mr-out-<index>.
type ReduceWorker struct {
	// index is the reduce partition number assigned by the master.
	index  int
	// fn is the application's reduce function.
	fn     UserReduceFn
	// buffer holds one KeyValueSlice per parsed intermediate file
	// (merge relies on each slice being key-sorted).
	buffer []KeyValueSlice
	// kvs is the fully merged, key-sorted sequence of all pairs.
	kvs    KeyValueSlice
}

// parseFile reads one intermediate file and decodes it into key/value
// pairs. The format is whitespace-separated tokens consumed pairwise:
// token 2k is a key and token 2k+1 is its value; a trailing unpaired
// token is ignored. Any I/O failure aborts the worker.
func parseFile(path string) KeyValueSlice {
	log.Printf("parsing %q...", path)

	fp, err := os.Open(path)
	if err != nil {
		// include the underlying error: the original messages printed only
		// the path, hiding the actual cause (permissions, missing file, ...).
		log.Fatalf("cannot open file %q: %s", path, err)
	}
	defer fp.Close()

	data, err := io.ReadAll(fp)
	if err != nil {
		log.Fatalf("cannot read file %q: %s", path, err)
	}

	predicate := func(c rune) bool { return unicode.IsSpace(c) }
	tokens := strings.FieldsFunc(string(data), predicate)
	slice := make(KeyValueSlice, 0, len(tokens)/2)
	for i := 0; i+1 < len(tokens); i += 2 {
		pair := KeyValue{Key: tokens[i], Value: tokens[i+1]}
		slice = append(slice, pair)
	}

	return slice
}

// collect repeatedly polls the master for the list of intermediate-file
// paths belonging to this reduce partition, parses each newly announced
// file in its own goroutine, and accumulates the results into w.buffer.
// It returns nil once paths for all reply.NumMap map tasks have been
// dispatched, or an error after 10 polls that brought no new paths.
//
// NOTE(review): w.buffer ends up in goroutine-completion order, not path
// order — merge() must not rely on buffer ordering (it doesn't).
func (w *ReduceWorker) collect() error {
	w.buffer = make([]KeyValueSlice, 0)
	ch := make(chan KeyValueSlice)

	// Single consumer goroutine: serializes appends to w.buffer so the
	// parser goroutines below never touch the slice concurrently.
	done := make(chan bool)
	go func() {
		for kvs := range ch {
			w.buffer = append(w.buffer, kvs)
		}
		done <- true
	}()

	i := 0
	wg := sync.WaitGroup{}

	// Shutdown order matters: wait for every parser to finish sending,
	// then close the channel, then wait for the consumer to drain it.
	// Runs on both the success and the error return path.
	defer func() {
		wg.Wait()
		close(ch)
		<-done
	}()

	noreplyCount := 0
	for {
		args := FetchReduceTaskArgs{Index: w.index}
		reply := FetchReduceTaskReply{}
		if !call("Master.FetchReduceTask", &args, &reply) {
			log.Fatal("failed to fetch reduce task data.")
		}

		// reply.Paths is treated as append-only across polls; i marks how
		// many paths were already dispatched, so only the new suffix is
		// parsed here.
		size := len(reply.Paths)
		noreply := i == size
		for ; i < size; i++ {
			path := reply.Paths[i]

			wg.Add(1)
			go func() {
				ch <- parseFile(path)
				wg.Done()
			}()
		}

		// All map tasks have reported their files: nothing more will come.
		if reply.NumMap == size {
			break
		} else if reply.NumMap < size {
			log.Fatal("unexpected: reply.NumMap is smaller than buffer size.")
		}

		// Back off for a second when the poll brought nothing new; give up
		// after 10 such empty polls (not necessarily consecutive — the
		// counter is never reset) and let the caller abandon the task.
		if noreply {
			noreplyCount++
			if noreplyCount >= 10 {
				return fmt.Errorf("no data from master.")
			}
			time.Sleep(time.Second)
		}
	}

	return nil
}

type HeapItem struct {
	key   string
	index int
}

type Heap []HeapItem

func (h Heap) Len() int {
	return len(h)
}

func (h Heap) Less(i, j int) bool {
	return h[i].key < h[j].key
}

func (h Heap) Swap(i, j int) {
	h[i], h[j] = h[j], h[i]
}

func (h *Heap) Push(x interface{}) {
	*h = append(*h, x.(HeapItem))
}

func (h *Heap) Pop() (v interface{}) {
	v, *h = (*h)[len(*h)-1], (*h)[:len(*h)-1]
	return
}

// merge performs a k-way merge of the slices in w.buffer into the single
// key-sorted slice w.kvs, using a min-heap keyed on each slice's current
// smallest key. Assumes every slice in w.buffer is already key-sorted
// (the map side sorts each bucket before writing it).
func (w *ReduceWorker) merge() {
	log.Printf("reduce task #%d: merging files...", w.index)

	// Seed the heap with the head of every non-empty slice; size
	// accumulates the total number of pairs to merge.
	size := 0
	h := make(Heap, 0, len(w.buffer))
	for i, slice := range w.buffer {
		size += len(slice)
		if len(slice) == 0 {
			continue
		}

		heap.Push(&h, HeapItem{key: slice[0].Key, index: i})
	}

	// pos[j] is the next unconsumed position in w.buffer[j].
	w.kvs = make(KeyValueSlice, size)
	pos := make([]int, len(w.buffer))
	for i := 0; i < size; i++ {
		if len(h) == 0 {
			log.Fatal("expect the heap is non-empty.")
		}

		// Take the globally smallest remaining key, then re-push that
		// slice's next element (if any) so the heap stays current.
		u := heap.Pop(&h).(HeapItem)
		j := u.index
		w.kvs[i] = w.buffer[j][pos[j]]

		pos[j]++
		if pos[j] < len(w.buffer[j]) {
			heap.Push(&h, HeapItem{key: w.buffer[j][pos[j]].Key, index: j})
		}
	}

	// Sanity check: exactly size elements were consumed, so every slice
	// must be exhausted and the heap drained.
	if len(h) != 0 {
		log.Fatal("expect the heap has become empty.")
	}
}

// outputName returns the final output file for this reduce partition:
// "mr-out-<index>".
func (w *ReduceWorker) outputName() string {
	name := fmt.Sprintf("mr-out-%d", w.index)
	return name
}

// report tells the master that this reduce task has completed.
func (w *ReduceWorker) report() {
	reply := ReportReduceTaskReply{}
	args := ReportReduceTaskArgs{Index: w.index}
	if ok := call("Master.ReportReduceTask", &args, &reply); !ok {
		log.Fatal("failed to send report to master.")
	}
}

// Run executes the whole reduce task: collect this partition's
// intermediate files, merge them into one sorted sequence, apply the user
// reduce function to each group of equal keys, publish the result as
// mr-out-<index> via an atomic rename, and report back to the master.
func (w *ReduceWorker) Run() {
	log.Printf("begin reduce task #%d.", w.index)

	err := w.collect()
	if err != nil {
		// The master stopped announcing paths; abandon this task (without
		// crashing) so the worker can pick up other work.
		log.Print(err)
		return
	}
	w.merge()

	// Write through a temp file and rename it into place so readers never
	// observe a partially-written output file.
	fp, err := ioutil.TempFile(".", "mr-reduce-tmp-*")
	if err != nil {
		log.Fatalf("cannot open temp file: %s", err)
	}

	// w.kvs is sorted, so equal keys are adjacent: [i, j) is one key group.
	for i, j := 0, 0; i < len(w.kvs); i = j {
		key := w.kvs[i].Key
		j = i + 1
		for j < len(w.kvs) && w.kvs[j].Key == key {
			j++
		}

		slice := make([]string, j-i)
		for k := i; k < j; k++ {
			slice[k-i] = w.kvs[k].Value
		}
		value := w.fn(key, slice)

		_, err := fp.WriteString(fmt.Sprintf("%v %v\n", key, value))
		if err != nil {
			log.Fatalf("cannot write to temp file: %s", err)
		}
	}

	err = fp.Close()
	if err != nil {
		log.Fatalf("cannot close temp file: %s", err)
	}

	// Rename only after a successful close, and check the error: the
	// original deferred os.Rename discarded failures, which would silently
	// lose the task's output.
	if err := os.Rename(fp.Name(), w.outputName()); err != nil {
		log.Fatalf("cannot rename temp file: %s", err)
	}

	w.report()

	log.Printf("end reduce task #%d.", w.index)
}

//
// Worker is the entry point called by main/mrworker.go. It loops forever:
// ask the master for a task, run it, repeat; when no task is available,
// idle for a second before asking again.
//
func Worker(mapf UserMapFn, reducef UserReduceFn) {
	setupLog(fmt.Sprintf("worker %d", os.Getpid()))
	log.Print("worker started.")

	for {
		args := FetchTaskArgs{}
		reply := FetchTaskReply{}
		if !call("Master.FetchTask", &args, &reply) {
			log.Fatal("failed to fetch task from master.")
		}

		// No task handed out: report liveness and poll again shortly.
		if !reply.Any {
			log.Print("this worker is alive.")
			time.Sleep(time.Second)
			continue
		}

		var w IWorker
		switch reply.Kind {
		case TaskKindMap:
			log.Printf("got map task #%d.", reply.Index)
			w = &MapWorker{index: reply.Index, fn: mapf}
		case TaskKindReduce:
			log.Printf("got reduce task #%d.", reply.Index)
			w = &ReduceWorker{index: reply.Index, fn: reducef}
		default:
			log.Fatalf("unknown task kind: %d.", reply.Kind)
		}

		w.Run()
	}
}

//
// call sends an RPC request to the master over the unix-domain socket and
// waits for the response.
// usually returns true.
// returns false if something goes wrong (master unreachable, RPC error);
// callers decide whether a failure is fatal.
//
func call(rpcname string, args interface{}, reply interface{}) bool {
	// c, err := rpc.DialHTTP("tcp", "127.0.0.1"+":1234")
	sockname := masterSock()
	c, err := rpc.DialHTTP("unix", sockname)
	if err != nil {
		// Previously log.Fatal, which contradicted the documented
		// "returns false if something goes wrong" contract; report the
		// failure and let the caller react (every caller treats a false
		// return as fatal anyway).
		log.Print("dialing:", err)
		return false
	}
	defer c.Close()

	err = c.Call(rpcname, args, reply)
	if err == nil {
		return true
	}

	// log.Print rather than fmt.Println, for consistency with the rest of
	// the worker's logging.
	log.Print(err)
	return false
}
