package shardctrler

import (
	"6.5840/raft"
	"log"
	"sort"
	"sync/atomic"
	"time"
)
import "6.5840/labrpc"
import "sync"
import "6.5840/labgob"

const (
	// RPCHandleTimeOut bounds how long an RPC handler waits for its
	// submitted op to be applied before giving up with ErrHandleOpTimeOut.
	RPCHandleTimeOut = time.Millisecond * 2000
)

// Debug gates all DPrintf output for this package.
const Debug = false

// DPrintf logs a formatted message through the standard logger, but only
// when the package-level Debug flag is enabled. The returned count and
// error are always the zero values.
func DPrintf(format string, a ...interface{}) (n int, err error) {
	if !Debug {
		return
	}
	log.Printf(format, a...)
	return
}

// ShardCtrler is a replicated shard-configuration service: client RPCs are
// funneled through Raft so every replica applies the same sequence of
// configuration changes.
type ShardCtrler struct {
	mu      sync.Mutex
	me      int                // index of this server among its peers
	rf      *raft.Raft         // underlying Raft instance
	dead    int32              // shutdown flag, read via atomic in killed()
	applyCh chan raft.ApplyMsg // committed entries delivered by Raft

	// Your data here.
	waiCh      map[int]*chan Result // log index -> channel the waiting RPC handler receives on
	historyMap map[int64]*Result    // client id -> result of that client's last applied op (duplicate suppression)

	configs []Config // indexed by config num
}

// OpType identifies which shardctrler operation an Op carries.
type OpType string

// The four operations the controller supports, mirroring the RPC handlers.
const (
	OpJoin  OpType = "Join"
	OpLeave OpType = "Leave"
	OpMove  OpType = "Move"
	OpQuery OpType = "Query"
)

// Op is the command replicated through the Raft log. OpCmd selects which
// of the per-operation field groups below is meaningful; the rest are left
// at their zero values.
type Op struct {
	// Your data here.
	OpCmd OpType
	// Join
	Servers map[int][]string // gid -> server names of the joining groups
	// Leave
	GIDs []int // gids of the departing groups
	// Move
	Shard int
	GID   int
	// Query
	Num int // desired config number

	Seq      uint64 // per-client sequence number, for duplicate detection
	ClientId int64
}

// Result is the outcome of an applied Op. It is cached per client in
// historyMap for duplicate suppression and handed to the waiting RPC
// handler through its waiCh channel.
type Result struct {
	LastSeq uint64 // Seq of the op that produced this result
	Config  Config // populated only for Query ops
	Err     Err
	ResTerm int // Raft term observed when the op was applied; detects stale leaders
}

// HandleOp submits a client operation to Raft and waits for it to be
// applied, returning the execution result.
//
// If the client's last completed Seq matches this request, the cached
// result is returned without re-submitting (duplicate suppression).
// Otherwise the op is handed to Raft and a channel keyed by the log index
// is registered so Execute can deliver the result once the entry commits.
// Failure modes: ErrWrongLeader when this node is not the leader,
// ErrHandleOpTimeOut when no result arrives within RPCHandleTimeOut,
// ErrChanClosed when the wait channel was torn down, and
// ErrLeaderOutdated when the applied entry's term differs from the term
// at Start time (a leadership change may have replaced the entry).
func (sc *ShardCtrler) HandleOp(opArgs *Op) Result {
	// Fast path: this exact request already executed; return the cached result.
	sc.mu.Lock()
	if history, ok := sc.historyMap[opArgs.ClientId]; ok && history.LastSeq == opArgs.Seq {
		sc.mu.Unlock()
		DPrintf("已经执行过%s，直接取出结果返回\n", opArgs.OpCmd)
		return *history
	}
	sc.mu.Unlock()

	startIndex, startTerm, isLeader := sc.rf.Start(*opArgs)
	if !isLeader {
		DPrintf("非Leader节点\n")
		return Result{Err: ErrWrongLeader}
	}

	// Register a result channel under the log index Raft assigned us.
	sc.mu.Lock()
	newCh := make(chan Result)
	sc.waiCh[startIndex] = &newCh
	sc.mu.Unlock()

	defer func() {
		// Deregister and close under the lock; Execute guards its send with
		// a recover, so a close racing with the send is tolerated.
		sc.mu.Lock()
		delete(sc.waiCh, startIndex)
		close(newCh)
		sc.mu.Unlock()
	}()

	res := Result{}
	select {
	case <-time.After(RPCHandleTimeOut):
		res.Err = ErrHandleOpTimeOut
		DPrintf("处理时间超时，shardctrler %v 处理操作 %s，ClientId %v Seq %v\n", sc.me, opArgs.OpCmd, opArgs.ClientId, opArgs.Seq)
		return res
	case msg, ok := <-newCh:
		if ok && msg.ResTerm == startTerm {
			res = msg
			res.Err = OK
			DPrintf("成功执行操作%s\n", opArgs.OpCmd)
			return res
		} else if !ok {
			res.Err = ErrChanClosed
			DPrintf("通信管道关闭，shardctrler %v 处理操作 %s，ClientId %v Seq %v\n", sc.me, opArgs.OpCmd, opArgs.ClientId, opArgs.Seq)
			return res
		} else {
			res.Err = ErrLeaderOutdated
			// BUGFIX: record the term actually observed at apply time; the
			// log below previously printed the zero value because res.ResTerm
			// was never assigned in this branch.
			res.ResTerm = msg.ResTerm
			DPrintf("Term前后不一致，可能Leader过期了，shardctrler %v 处理操作 %s，ClientId %v Seq %v，现在的Term是%v，开始的Term是%v\n",
				sc.me, opArgs.OpCmd, opArgs.ClientId, opArgs.Seq, res.ResTerm, startTerm)
			return res
		}
	}
}

// Join handles the Join RPC: the groups in args.Servers are added to the
// configuration via the replicated log. WrongLeader is set only when this
// node is not the Raft leader.
func (sc *ShardCtrler) Join(args *JoinArgs, reply *JoinReply) {
	// Your code here.
	op := Op{
		OpCmd:    OpJoin,
		Servers:  args.Servers,
		Seq:      args.Seq,
		ClientId: args.ClientId,
	}
	result := sc.HandleOp(&op)
	if result.Err == ErrWrongLeader {
		reply.WrongLeader = true
	}
	reply.Err = result.Err
}
// CreateNewConfig builds the successor configuration for a Join: the
// replica groups in newGroups are admitted (in deterministic gid order)
// and shards are rebalanced toward the new groups. Groups beyond the
// NShards cap are parked in the CachedGid buffer rather than Groups.
// The input configs slice is not modified.
func CreateNewConfig(configs []Config, newGroups map[int][]string) Config {
	lastConfig := configs[len(configs)-1]
	newConfig := Config{
		Num:       lastConfig.Num + 1,
		Shards:    lastConfig.Shards, // array copy: starts from the previous shard->gid mapping
		Groups:    make(map[int][]string),
		CachedGid: make(map[int][]string),
	}

	// Deep-copy group/cache membership from the previous config.
	deep_copy_map(&lastConfig, &newConfig)

	// The gids pushed into the cache must be chosen identically on every
	// replica, so iterate the map keys in sorted order.
	total_new_gids := make([]int, 0)
	for gid := range newGroups {
		total_new_gids = append(total_new_gids, gid)
	}
	sort.Ints(total_new_gids)

	// Admit new groups until Groups holds NShards entries; overflow goes
	// into the cache to be promoted later when groups leave.
	new_gids := make([]int, 0)
	for _, gid := range total_new_gids {
		if len(newConfig.Groups) < NShards {
			newConfig.Groups[gid] = newGroups[gid]
			new_gids = append(new_gids, gid)
		} else {
			newConfig.add_cache(gid, newGroups[gid])
		}
	}

	// first config shard or previous config is empty
	if len(lastConfig.Groups) == 0 {
		// NOTE(review): if newGroups is empty here, len(new_gids) == 0 and
		// the modulo below would panic — presumably clients never Join with
		// an empty group map; confirm against the clerk.
		for shard := 0; shard < NShards; shard++ {
			idx := shard % len(new_gids)
			newConfig.Shards[shard] = new_gids[idx]
		}
	} else {
		// Rebalance: repeatedly steal one shard from a group that currently
		// owns the maximum number of shards and hand it to a new group,
		// until that new group is within one shard of the maximum.
		map_shard_len, max_map_gid_len, max_map_gid_count := get_max_map_len_count(&newConfig)

		for _, new_gid := range new_gids {
			map_shard_len[new_gid] = 0
			idx := 0
			for max_map_gid_len > map_shard_len[new_gid]+1 {
				old_gid := newConfig.Shards[idx]

				if map_shard_len[old_gid] == max_map_gid_len {
					// old_gid owns the most shards; reassign this shard to the new group.
					newConfig.Shards[idx] = new_gid
					max_map_gid_count -= map_shard_len[old_gid]
					map_shard_len[old_gid]--
					map_shard_len[new_gid]++
					if max_map_gid_count == 0 {
						// Every previously-maximal group has now lost a shard;
						// recompute the maximum before continuing.
						map_shard_len, max_map_gid_len, max_map_gid_count = get_max_map_len_count(&newConfig)
					}
				}
				idx++
				idx %= NShards
			}
		}
	}

	return newConfig
}

// Leave handles the Leave RPC: the groups listed in args.GIDs are removed
// from the configuration via the replicated log. WrongLeader is set only
// when this node is not the Raft leader.
func (sc *ShardCtrler) Leave(args *LeaveArgs, reply *LeaveReply) {
	// Your code here.
	op := Op{
		OpCmd:    OpLeave,
		GIDs:     args.GIDs,
		Seq:      args.Seq,
		ClientId: args.ClientId,
	}
	result := sc.HandleOp(&op)
	if result.Err == ErrWrongLeader {
		reply.WrongLeader = true
	}
	reply.Err = result.Err
}
// RemoveGidServers builds the successor configuration for a Leave: the
// groups in gids are removed (from the cache buffer first, then from
// Groups), cached groups are promoted into Groups to fill the vacancies,
// and shards orphaned by the removals are dealt to the least-loaded
// remaining groups. If no group remains, every shard maps to GID 0
// (the "unassigned" sentinel). The input configs slice is not modified.
func RemoveGidServers(configs []Config, gids []int) Config {
	if len(configs) == 0 {
		panic("len(configs)==0")
	}

	lastConfig := configs[len(configs)-1]
	newConfig := Config{
		Num:       lastConfig.Num + 1,
		Shards:    lastConfig.Shards, // array copy of the previous shard->gid mapping
		Groups:    make(map[int][]string),
		CachedGid: make(map[int][]string),
	}

	// Deep-copy group/cache membership from the previous config.
	deep_copy_map(&lastConfig, &newConfig)

	// First drop leaving gids that only live in the cache buffer.
	newConfig.remove_cache(gids)

	// Then remove them from Groups, zeroing the shards they owned.
	newConfig.remove_group(gids)

	// Promote remaining cached gids into Groups; the helper must use a
	// deterministic order so every replica promotes the same gids.
	newConfig.move_cache()

	// get_min_arr must likewise be deterministic. Orphaned shards (still
	// mapped to 0) are handed to the least-loaded groups round-robin,
	// refreshing the candidate list whenever it is exhausted.
	min_gid_arr := get_min_arr(&newConfig)
	if len(newConfig.Groups) > 0 {
		for shard, map_gid := range newConfig.Shards {
			if map_gid != 0 {
				continue
			}
			newConfig.Shards[shard] = min_gid_arr[0]
			min_gid_arr = min_gid_arr[1:]

			if len(min_gid_arr) == 0 {
				min_gid_arr = get_min_arr(&newConfig)
			}
		}
	} else {
		// No groups left: park every shard on the sentinel gid 0.
		newConfig.Shards = [NShards]int{}
	}

	return newConfig
}

// Move handles the Move RPC: shard args.Shard is pinned to group args.GID
// via the replicated log. WrongLeader is set only when this node is not
// the Raft leader.
func (sc *ShardCtrler) Move(args *MoveArgs, reply *MoveReply) {
	// Your code here.
	op := Op{
		OpCmd:    OpMove,
		Shard:    args.Shard,
		GID:      args.GID,
		Seq:      args.Seq,
		ClientId: args.ClientId,
	}
	result := sc.HandleOp(&op)
	if result.Err == ErrWrongLeader {
		reply.WrongLeader = true
	}
	reply.Err = result.Err
}
// MoveShard2Gid builds the successor configuration for a Move: shard
// shardId is reassigned to group gidId, everything else is carried over
// unchanged. The input configs slice is not modified.
func MoveShard2Gid(configs []Config, shardId int, gidId int) Config {
	if len(configs) == 0 {
		panic("len(configs)==0")
	}

	lastConfig := configs[len(configs)-1]
	newConfig := Config{
		Num:       lastConfig.Num + 1,
		Shards:    lastConfig.Shards, // [NShards]int is an array: this copies the whole mapping
		Groups:    make(map[int][]string),
		CachedGid: make(map[int][]string),
	}

	// Deep-copy group/cache membership from the previous config.
	deep_copy_map(&lastConfig, &newConfig)

	// Shards already holds a copy of the old mapping (array assignment
	// above), so only the moved shard needs to change; the original
	// element-by-element re-copy loop was redundant.
	newConfig.Shards[shardId] = gidId

	return newConfig
}

// Query handles the Query RPC: it fetches the configuration numbered
// args.Num (-1 for the latest) through the replicated log so the answer
// reflects all previously committed changes. WrongLeader is set only when
// this node is not the Raft leader.
func (sc *ShardCtrler) Query(args *QueryArgs, reply *QueryReply) {
	// Your code here.
	op := Op{
		OpCmd:    OpQuery,
		Num:      args.Num,
		Seq:      args.Seq,
		ClientId: args.ClientId,
	}
	result := sc.HandleOp(&op)
	if result.Err == ErrWrongLeader {
		reply.WrongLeader = true
	}
	reply.Err = result.Err
	reply.Config = result.Config
}
// QueryConfig returns the configuration numbered num. num == -1 selects
// the latest configuration, as does any num at or beyond the newest
// config's number. An empty history yields the zero config (Num 0).
func QueryConfig(configs []Config, num int) Config {
	if len(configs) == 0 {
		return Config{Num: 0}
	}

	lastConfig := configs[len(configs)-1]
	// -1 is the documented "latest" sentinel. Treat every other negative
	// value the same way instead of letting configs[num] panic on a bad
	// index (robustness fix; non-negative behavior is unchanged).
	if num < 0 || num >= lastConfig.Num {
		return lastConfig
	}

	return configs[num]
}

// Execute is the long-running applier goroutine: it drains applyCh,
// applies each committed Op to the controller state exactly once, caches
// the result per client, and forwards it to any RPC handler waiting on
// that log index.
func (sc *ShardCtrler) Execute() {
	for sc.killed() == false {
		msg := <-sc.applyCh
		if msg.CommandValid {
			op := msg.Command.(Op)
			sc.mu.Lock()

			res := Result{}
			// flag is true when the op is new and must actually be applied.
			flag := false
			if history, ok := sc.historyMap[op.ClientId]; ok {
				if history.LastSeq == op.Seq {
					// Duplicate of this client's last completed request:
					// reuse the cached result instead of re-applying.
					res = *history
				} else if history.LastSeq < op.Seq {
					flag = true
				}
				// history.LastSeq > op.Seq: stale retry, ignored (res stays zero).
			} else {
				flag = true
			}

			if flag {
				res = sc.applyState(&op)
				if op.OpCmd != OpQuery {
					DPrintf("ShardCtrler %v HandleOp: ClientId %v Seq %v Servers=%+v, Gids=%+v, Shard=%v, Gid=%v, Num=%v，处理后的configs[%v].Shards=%+v\n",
						sc.me, op.ClientId, op.Seq, op.Servers, op.GIDs, op.Shard, op.GID, op.Num, len(sc.configs)-1, sc.configs[len(sc.configs)-1].Shards)
				}
				// NOTE(review): ResTerm is taken from msg.SnapshotTerm even for
				// command messages — presumably this project's raft.ApplyMsg
				// carries the entry's term there; confirm against the raft package.
				res.ResTerm = msg.SnapshotTerm
				sc.historyMap[op.ClientId] = &res
			}

			ch, ok := sc.waiCh[msg.CommandIndex]
			sc.mu.Unlock()
			if ok {
				// Deliver outside the lock. The waiting handler may close the
				// channel on timeout, so a send-on-closed-channel panic is
				// absorbed by the recover below.
				func() {
					defer func() {
						if r := recover(); r != nil {
							DPrintf("")
						}
					}()
					res.ResTerm = msg.SnapshotTerm

					*ch <- res
				}()
			}
		}
	}
}

// applyState executes a committed Op against the controller state and
// returns its Result. Query reads the requested configuration; Join,
// Leave, and Move each derive a successor configuration and append it
// (CheckAppendConfig rejects duplicates). An unrecognized OpCmd yields a
// zero-valued Result. Callers must hold sc.mu.
func (sc *ShardCtrler) applyState(op *Op) Result {
	result := Result{LastSeq: op.Seq}

	switch op.OpCmd {
	case OpQuery:
		result.Config = QueryConfig(sc.configs, op.Num)
		result.Err = OK
	case OpJoin:
		sc.CheckAppendConfig(CreateNewConfig(sc.configs, op.Servers))
		result.Err = OK
	case OpLeave:
		sc.CheckAppendConfig(RemoveGidServers(sc.configs, op.GIDs))
		result.Err = OK
	case OpMove:
		sc.CheckAppendConfig(MoveShard2Gid(sc.configs, op.Shard, op.GID))
		result.Err = OK
	}

	return result
}

// CheckAppendConfig appends newConfig to the history only when its number
// is strictly newer than the current latest config, so re-applied
// duplicate ops cannot append the same configuration twice.
// Callers must hold sc.mu.
func (sc *ShardCtrler) CheckAppendConfig(newConfig Config) {
	latest := sc.configs[len(sc.configs)-1]
	if newConfig.Num <= latest.Num {
		return
	}
	sc.configs = append(sc.configs, newConfig)
}

// the tester calls Kill() when a ShardCtrler instance won't
// be needed again. Kill marks this instance dead so the Execute loop can
// observe shutdown via killed(), then stops the underlying Raft instance.
func (sc *ShardCtrler) Kill() {
	// BUGFIX: dead was never set, so killed() always returned false and the
	// Execute goroutine could keep running forever after shutdown.
	atomic.StoreInt32(&sc.dead, 1)
	sc.rf.Kill()
	// Your code here, if desired.
}

// killed reports whether Kill has been called on this instance.
// Safe to call concurrently: dead is read atomically.
func (sc *ShardCtrler) killed() bool {
	return atomic.LoadInt32(&sc.dead) == 1
}

// Raft exposes the underlying Raft instance; needed by the shardkv tester.
func (sc *ShardCtrler) Raft() *raft.Raft {
	return sc.rf
}

// servers[] contains the ports of the set of
// servers that will cooperate via Raft to
// form the fault-tolerant shardctrler service.
// me is the index of the current server in servers[].
//
// StartServer wires up the controller: config 0 starts with no groups
// (all shards implicitly on the sentinel gid 0), Op is registered with
// labgob before Raft starts so committed commands can be decoded, and the
// Execute applier goroutine is launched last.
func StartServer(servers []*labrpc.ClientEnd, me int, persister *raft.Persister) *ShardCtrler {
	sc := new(ShardCtrler)
	sc.me = me

	sc.configs = make([]Config, 1)
	sc.configs[0].Groups = map[int][]string{}

	labgob.Register(Op{})
	sc.applyCh = make(chan raft.ApplyMsg)
	sc.rf = raft.Make(servers, me, persister, sc.applyCh)

	// Your code here.
	sc.historyMap = make(map[int64]*Result)
	sc.waiCh = make(map[int]*chan Result)

	// Applier goroutine; runs until killed() reports shutdown.
	go sc.Execute()

	return sc
}
