package mapreduce

import (
	"fmt"
	"net"
	"strconv"
	"sync"
)

// Master holds all the state the master needs while coordinating a
// Map/Reduce job: the job description, the registered workers, and the
// channels used for shutdown and completion signaling.
type Master struct {
	sync.Mutex			// embedded; protects the mutable fields below

	address string		// master's TCP address, e.g. "localhost:8989"
	doneChannel	chan bool	// signals job completion to anyone waiting

	newCond *sync.Cond	// signals when Register() adds to workers[]; uses the embedded Mutex as its Locker
	workers []string 	// each worker's Unix-domain socket name -- i.e. its RPC address

	jobName	string		// name of the currently executing job
	files 	[]string	// input files, one per map task
	nReduce	int			// number of reduce partitions

	l net.Listener		// RPC listener; nil until the master serves RPCs

	shutdown chan struct{}	// closed to tell long-running loops to stop
	stats	 []int		// per-task statistics, filled in when the job finishes
}

// newMaster allocates and initializes a Master that will coordinate a
// job at the given RPC address. The done and shutdown channels are
// created here so callers can immediately wait on them.
func newMaster(address string) (mr *Master) {
	mr = &Master{
		address:     address,
		shutdown:    make(chan struct{}),
		doneChannel: make(chan bool),
	}
	// The condition variable locks the Master's own embedded Mutex.
	mr.newCond = sync.NewCond(mr)
	return
}

// Sequential runs the map and reduce tasks one at a time, in order, in
// the caller's goroutine: all map tasks first, then all reduce tasks.
// mapF and reduceF are the application's map and reduce functions.
func Sequential(
	jobName string,
	files 	[]string,
	nReduce int,
	mapF    func(string, string) []KeyValue,
	reduceF func(string, []string) string,
) (mr *Master) {
	mr = newMaster("mr-master-1")

	// Execute every task of the given phase back-to-back, no parallelism.
	schedule := func(phase jobPhase) {
		if phase == mapPhase {
			for taskNum, inFile := range files {
				DoMap(mr.jobName, taskNum, inFile, mr.nReduce, mapF)
				fmt.Printf("DoMap %d %s\n", taskNum, inFile)
			}
		} else if phase == reducePhase {
			for taskNum := 0; taskNum < mr.nReduce; taskNum++ {
				mergeName := "F:/tmp/mr/out." + jobName + "_" + strconv.Itoa(taskNum)
				DoReduce(mr.jobName, taskNum, mergeName, len(mr.files), reduceF)
				fmt.Printf("DoReduce %d %s\n", taskNum, mergeName)
			}
		}
	}

	// Record a single aggregate statistic: total number of tasks run.
	finish := func() {
		mr.stats = []int{len(files) + nReduce}
	}

	mr.run(jobName, files, nReduce, schedule, finish)
	return mr
}

// run executes a Map/Reduce job on the Master.
//
// It records the job parameters, asks schedule to run all tasks of the
// map phase and then all tasks of the reduce phase, and finally calls
// finish to let the caller collect statistics and clean up.
func (mr *Master) run(
	jobName string,
	files []string,
	nReduce int,
	schedule func(phase jobPhase),
	finish func(),
) {
	mr.jobName = jobName
	mr.files = files
	mr.nReduce = nReduce

	fmt.Printf("%s: Starting Map/Reduce task %s\n", mr.address, mr.jobName)

	// Phases must run strictly in order: reduce consumes map output.
	schedule(mapPhase)
	schedule(reducePhase)
	finish()

	// Fixed typo in the user-visible message ("compeleted" -> "completed").
	fmt.Printf("%s: Map/Reduce task completed\n", mr.address)
}
