package mapreduce

import (
	"fmt"
	"sync"
)
// TaskStatus tracks the lifecycle of a single task as seen by the scheduler.
type TaskStatus int

const (
	// Initial: the task has not been handed to any worker yet.
	Initial TaskStatus = iota
	// MapAssigned: the task has been claimed by a worker.
	MapAssigned
	// MapFinished: declared for completeness; not set by the schedulers in this file.
	MapFinished
)
// WorkerStatus distinguishes idle workers from busy ones so that workers
// can be reused across tasks. It is only referenced by the commented-out
// schedulePrev draft below.
type WorkerStatus int

const (
	// Idle: the worker is available for a new task.
	Idle WorkerStatus = iota
	// Inprogress: the worker is currently executing a task.
	Inprogress
)
//
// schedule() starts and waits for all tasks in the given phase (mapPhase
// or reducePhase). the mapFiles argument holds the names of the files that
// are the inputs to the map phase, one per map task. nReduce is the
// number of reduce tasks. the registerChan argument yields a stream
// of registered workers; each item is the worker's RPC address,
// suitable for passing to call(). registerChan will yield all
// existing registered workers (if any) and new ones as they register.
//
func schedule(jobName string, mapFiles []string, nReduce int, phase jobPhase, registerChan chan string) {
	var ntasks int
	var nOther int // number of inputs (for reduce) or outputs (for map)
	switch phase {
	case mapPhase:
		ntasks = len(mapFiles)
		nOther = nReduce
	case reducePhase:
		ntasks = nReduce
		nOther = len(mapFiles)
	}
	fmt.Printf("Schedule: %v %v tasks (%d I/Os)\n", ntasks, phase, nOther)

	// One goroutine per task: each claims an idle worker from registerChan,
	// runs the task via RPC, and retries on another worker if the RPC fails
	// (worker-failure handling for Part IV). All shared state lives in the
	// channels and the WaitGroup, so no mutex is needed — this fixes the
	// earlier unsynchronized read of the finished-task counter.
	var wg sync.WaitGroup
	wg.Add(ntasks)
	for task := 0; task < ntasks; task++ {
		go func(taskNumber int) {
			defer wg.Done()
			args := DoTaskArgs{
				JobName:       jobName,
				Phase:         phase,
				TaskNumber:    taskNumber,
				NumOtherPhase: nOther,
			}
			// File is only meaningful in the map phase. Indexing mapFiles
			// with a reduce task number could be out of range when
			// nReduce > len(mapFiles).
			if phase == mapPhase {
				args.File = mapFiles[taskNumber]
			}
			for {
				worker := <-registerChan
				if call(worker, "Worker.DoTask", &args, nil) {
					// Hand the worker back asynchronously: for the last
					// tasks nobody may be left receiving from registerChan,
					// and a synchronous send would block wg.Done forever.
					go func() { registerChan <- worker }()
					return
				}
				// RPC failed: assume the worker is dead (do not reuse it)
				// and retry the same task on the next available worker.
				fmt.Printf("Schedule: %v task %d failed on %s; retrying\n",
					phase, taskNumber, worker)
			}
		}(task)
	}
	// Block until every task has completed successfully.
	wg.Wait()
	fmt.Printf("Schedule: %v done\n", phase)
}
/*
schedule_III below passes the Part III test; it is kept with the _III
suffix for reference. To test it on its own, rename it to schedule.
*/
// schedule_III is the Part III version of the scheduler, kept for reference
// (rename it to schedule to run it). It hands each ready worker the next
// unstarted task until finished_task_num reaches ntasks.
//
// NOTE(review): known limitations, addressed in the active schedule():
//   - finished_task_num is read at the top of the taskChannel case without
//     holding mutex, racing with the locked increments below.
//   - on a failed call() the task stays MapAssigned forever and wg.Done is
//     never called, so wg.Wait() can hang (no Part IV fault tolerance).
//   - File is mapFiles[suitable_index] even in the reduce phase; this
//     indexes out of range when nReduce > len(mapFiles).
func schedule_III(jobName string, mapFiles []string, nReduce int, phase jobPhase, registerChan chan string) {
	var ntasks int
	var n_other int // number of inputs (for reduce) or outputs (for map)
	switch phase {
	case mapPhase:
		ntasks = len(mapFiles)
		n_other = nReduce
	case reducePhase:
		ntasks = nReduce
		n_other = len(mapFiles)
	}
	fmt.Printf("Schedule: %v %v tasks (%d I/Os)\n", ntasks, phase, n_other)
	// Track per-task status; all tasks start unassigned.
	taskStatus :=make([]TaskStatus,ntasks)
	for i:=0;i<len(taskStatus);i++{
		taskStatus[i]=Initial
	}
	// Counts successfully completed tasks; guarded by mutex (except the
	// read at the top of the taskChannel case — see NOTE above).
	var finished_task_num = 0;
	// wg is decremented once per completed task; Wait() below blocks
	// until all ntasks are done.
	var wg sync.WaitGroup
	wg.Add(ntasks)
	// mutex protects taskStatus and finished_task_num.
	var mutex sync.Mutex
	// taskChannel merges newly registered workers with workers that just
	// finished a task. It is buffered so the forwarding goroutines below
	// are less likely to block.
	taskChannel:=make(chan string,2)
	var finishFlag = false
	for{
		select{
		case workerAddr:=<-registerChan:
			fmt.Printf("[%s]incoming a new worker:%s\n",phase,workerAddr)
			// Forward the new worker into the shared ready-worker channel.
			go func(){taskChannel<-workerAddr}()
		case readyWorker:=<-taskChannel:
			// All tasks done: arrange to exit the loop after this iteration.
			if finished_task_num>=ntasks{
				finishFlag = true;
			}
			fmt.Printf("[%s]incoming a ready worker:%s\n",phase,readyWorker)
			go func() {
				suitable_index := -1
				// Claim the first unassigned task under the mutex, since
				// multiple of these goroutines scan taskStatus concurrently.
				mutex.Lock()
				for i := 0; i < ntasks; i++ {
					if taskStatus[i] == Initial {
						suitable_index = i
						taskStatus[suitable_index] = MapAssigned
						break;
					} else {
						continue
					}
				}
				mutex.Unlock()
				if suitable_index!=-1{

					req := &DoTaskArgs{
						JobName:       jobName,
						File:          mapFiles[suitable_index],
						Phase:         phase,
						TaskNumber:    suitable_index,
						NumOtherPhase: n_other,
					}
					isSuc := call(readyWorker, "Worker.DoTask", req, nil)
					if isSuc {
						// Task succeeded: bump the completion counter.
						mutex.Lock()
						finished_task_num += 1
						mutex.Unlock()
						// Count this task as done.
						wg.Done()
						// Recycle the worker for the next task.
						go func(){taskChannel<-readyWorker}()
					}else{
						// NOTE(review): failure is silently dropped — the
						// task is never reset to Initial and the worker is
						// never recycled.
					}
				}
			}()
		}
		if finishFlag{
			break;
		}
	}
	wg.Wait();
	fmt.Printf("TaskFinished\n")
	// All ntasks tasks have to be scheduled on workers. Once all tasks
	// have completed successfully, schedule() should return.
	//
	// Your code here (Part III, Part IV).
	//
	fmt.Printf("Schedule: %v done\n", phase)
}

// schedulePrev is an earlier, abandoned draft of the scheduler, kept for
// historical reference only — do not use it.
//
// NOTE(review): why this draft is broken:
//   - the select below is executed exactly once (it is not inside a loop),
//     so at most one channel event is ever handled.
//   - wg.Add(1) happens inside the spawned goroutine, racing wg.Wait():
//     Wait() can return before any work has been counted.
//   - the `break` in the taskChannel case only exits the select, not a loop.
//   - NumOtherPhase is hard-coded to nReduce, which is wrong for the
//     reduce phase (should be len(mapFiles)).
func schedulePrev(jobName string, mapFiles []string, nReduce int, phase jobPhase, registerChan chan string) {
	var ntasks int
	var n_other int // number of inputs (for reduce) or outputs (for map)
	switch phase {
	case mapPhase:
		ntasks = len(mapFiles)
		n_other = nReduce
	case reducePhase:
		ntasks = nReduce
		n_other = len(mapFiles)
	}

	fmt.Printf("Schedule: %v %v tasks (%d I/Os)\n", ntasks, phase, n_other)
	// Ask workers to process their tasks.

	// Approach: get a sequential version working first, then parallelize
	// with goroutines.

	// Track per-map-file task status; all start unassigned.
	subFileStatus :=make([]TaskStatus,len(mapFiles))
	for i:=0;i<len(mapFiles);i++{
		subFileStatus[i]=Initial
	}
	//record worker status
	//workerStatus:=make(map[string]WorkerStatus);

	// Ranging over a channel both receives from it and blocks like a receive.
	var taskNumId = 0;
	// wait all mapTaskDone
	var wg sync.WaitGroup
	// mutex to protect shared data
	var mutex sync.Mutex
	// First attempt (commented out): handle each newly registered worker.
	//for workerAddr:= range registerChan{
	//	//worker address
	//	fmt.Println("incoming a new worker:%s",workerAddr)
	//	//run in parallel
	//	go func() {
	//		mutex.Lock()
	//		defer mutex.Unlock()
	//		workerStatus[workerAddr] = Idle
	//		//2.iterate tasks to find a suitable one
	//		for i := 0; i < len(mapFiles); i++ {
	//			if subFileStatus[i] == Initial {
	//				wg.Add(1)
	//				req := &DoTaskArgs{
	//					JobName:       jobName,
	//					File:          mapFiles[i],
	//					Phase:         mapPhase,
	//					TaskNumber:    taskNumId,
	//					NumOtherPhase: nReduce,
	//				}
	//				//change subFilesStatus
	//				subFileStatus[i] = MapAssigned
	//				taskNumId += 1
	//				isSuc := call(workerAddr, "Worker.DoTask", req, nil)
	//				if isSuc {
	//					//finish one task
	//					wg.Done()
	//				}
	//			} else {
	//				continue
	//			}
	//		}
	//	}()
	//}
	// taskChannel merges registerChan with workers that finished a task.
	// NOTE(review): this select runs only once — see function comment.
	taskChannel:=make(chan string)
	select{
	case workerAddr:=<-registerChan:
		fmt.Printf("incoming a new worker:%s",workerAddr)
		// Forward the new worker into the shared ready-worker channel.
		go func(){taskChannel<-workerAddr}()
	case readyWorker:=<-taskChannel:
		// NOTE(review): this break exits the select, not a loop.
		if taskNumId>len(mapFiles){
			break;
		}
		fmt.Printf("incoming a ready worker:%s",readyWorker)
		go func() {
			mutex.Lock()
			defer mutex.Unlock()
			//workerStatus[workerAddr] = Idle
			// Find an unassigned task and run it on this worker.
			// NOTE(review): wg.Add here races wg.Wait below.
			for i := 0; i < len(mapFiles); i++ {
				if subFileStatus[i] == Initial {
					wg.Add(1)
					req := &DoTaskArgs{
						JobName:       jobName,
						File:          mapFiles[i],
						Phase:         phase,
						TaskNumber:    taskNumId,
						NumOtherPhase: nReduce,
					}
					// Mark the task assigned before issuing the RPC.
					subFileStatus[i] = MapAssigned
					taskNumId += 1
					isSuc := call(readyWorker, "Worker.DoTask", req, nil)
					if isSuc {
						// Task done: count it off.
						wg.Done()
						// Recycle the worker for the next task.
						go func(){taskChannel<-readyWorker}()
					}else{
						fmt.Printf("[Eror]failed to call%s",readyWorker)
					}
				} else {
					continue
				}
			}
		}()
	}
	wg.Wait();
	fmt.Printf("TaskFinished")
	// All ntasks tasks have to be scheduled on workers. Once all tasks
	// have completed successfully, schedule() should return.
	//
	// Your code here (Part III, Part IV).
	//
	fmt.Printf("Schedule: %v done\n", phase)
}