package shardctrler

import (
	"6.824/raft"
	"fmt"
	"log"
	"os"
	"sort"
	"time"
)
import "6.824/labrpc"
import "sync"
import "6.824/labgob"

// ShardCtrler is a Raft-replicated configuration service: it stores the
// history of shard-to-group configurations and serves Join/Leave/Move/Query.
type ShardCtrler struct {
	mu      sync.Mutex
	me      int
	rf      *raft.Raft
	applyCh chan raft.ApplyMsg

	// Your data here.
	// Highest command id applied per client, used to drop duplicate requests.
	ClientCmdSeq map[int64]int64
	// Per-instance logger (prefixed with this server's id).
	logger *log.Logger
	// Channel used to hand the applied result back to the RPC handler
	// waiting on a given log index.
	pendingChs map[int]chan *Reply
	// The op the handler submitted at a given log index; compared against
	// the op actually applied there to detect leadership changes.
	pendingOps map[int]*Op
	// Index of the newest applied log entry; Raft may re-deliver entries
	// after reading back persisted state, and those must be skipped.
	LastApplied int

	configs []Config // indexed by config num
}

// Operation types carried in Op.Type / Args.Type.
const (
	Join int = iota
	Leave
	Move
	Query
)

// Op is the single command type replicated through Raft to keep all
// controller replicas in sync; one struct covers all four request kinds,
// with only the fields relevant to Type populated.
type Op struct {
	// Your data here.
	Type    int
	Servers map[int][]string // new GID -> servers mappings
	GIDs    []int
	Shard   int
	GID     int
	Num     int // desired config number

	ClientId int64
	CmdId    int64
}

// checkDuplicated reports whether op was already executed for its client:
// true when the client's last applied command id is at least op.CmdId.
// Caller must hold sc.mu.
func (sc *ShardCtrler) checkDuplicated(op *Op) bool {
	lastSeq, seen := sc.ClientCmdSeq[op.ClientId]
	if !seen {
		return false
	}
	return lastSeq >= op.CmdId
}

// clearOld drops the pending channel/op bookkeeping for a log index once
// the request waiting on it is done (answered, timed out, or superseded).
func (sc *ShardCtrler) clearOld(index int) {
	sc.mu.Lock()
	defer sc.mu.Unlock()
	delete(sc.pendingOps, index)
	delete(sc.pendingChs, index)
}

// HandleRequest is the common path for all four controller RPCs: it submits
// the operation to Raft and waits for the applier to execute it, replying
// ErrWrongLeader on timeout or loss of leadership.
func (sc *ShardCtrler) HandleRequest(args *Args, reply *Reply) {
	sc.mu.Lock()

	op := Op{
		Type:     args.Type,
		Servers:  args.Servers,
		GIDs:     args.GIDs,
		Shard:    args.Shard,
		GID:      args.GID,
		Num:      args.Num,
		ClientId: args.ClientId,
		CmdId:    args.CmdId,
	}

	// Reject duplicates before calling Start to keep redundant entries out
	// of the Raft log; Query is read-only and never deduplicated.
	if args.Type != Query && sc.checkDuplicated(&op) {
		reply.Err = ErrDuplicated
		sc.mu.Unlock()
		sc.logger.Printf("请求重复: ClientId=%d, CmdId=%d", args.ClientId%10000, args.CmdId)
		return
	}

	index, term, isLeader := sc.rf.Start(op)

	if !isLeader {
		sc.mu.Unlock()
		reply.WrongLeader = true
		reply.Err = ErrWrongLeader
		return
	}

	switch op.Type {
	case Join:
		sc.logger.Printf("收到Join请求: ClientId=%d", args.ClientId%10000)
	case Leave:
		sc.logger.Printf("收到Leave请求: ClientId=%d", args.ClientId%10000)
	case Move:
		sc.logger.Printf("收到Move请求: ClientId=%d", args.ClientId%10000)
	case Query:
		sc.logger.Printf("收到Query请求: ClientId=%d", args.ClientId%10000)
	default:
		sc.logger.Printf("收到未知类型请求: Type=%d, ClientId=%d", op.Type, args.ClientId%10000)
	}

	// Capacity 1 so the applier's sender goroutine (doCmd does
	// `go func() { ch <- reply }()`) can always complete its send and exit
	// even after this handler has timed out and abandoned the channel; an
	// unbuffered channel would leak that goroutine forever.
	ch := make(chan *Reply, 1)
	sc.pendingChs[index] = ch
	sc.pendingOps[index] = &op
	sc.mu.Unlock()

	// Overall deadline for this request.
	timer := time.NewTimer(500 * time.Millisecond)
	defer timer.Stop()
	// Periodic leadership check. A single reused ticker avoids allocating a
	// fresh timer (time.After) on every select iteration.
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()

	for {
		select {
		// The applier finished the command before the deadline.
		case result := <-ch:
			sc.clearOld(index)
			reply.Err, reply.Config = result.Err, result.Config
			sc.logger.Printf("请求处理完成: index=%d, ClientId=%d, Err=%v",
				index, args.ClientId%10000, result.Err)
			return
		// Deadline expired: this node may be a stale leader in a minority
		// partition, so tell the client to find the real leader.
		case <-timer.C:
			sc.clearOld(index)
			reply.Err = ErrWrongLeader
			reply.WrongLeader = true
			sc.logger.Printf("请求超时: index=%d, ClientId=%d", index, args.ClientId%10000)
			return
		// Every 100ms verify we are still leader in the same term; even if
		// still leader, a term change means our entry may have been dropped.
		case <-ticker.C:
			if sc.rf.StateChanged(term) {
				sc.clearOld(index)
				reply.Err = ErrWrongLeader
				reply.WrongLeader = true
				sc.logger.Printf("领导者变更: index=%d, 旧term=%d", index, term)
				return
			}
		}
	}
}

// createConfig builds the next configuration as a copy of the latest one
// with its number incremented. The Groups map is copied entry by entry so
// the new config can be mutated without touching history; Shards is an
// array and therefore copied by value. Caller must hold sc.mu.
func (sc *ShardCtrler) createConfig() *Config {
	latest := sc.configs[len(sc.configs)-1]
	next := &Config{
		Num:    latest.Num + 1,
		Shards: latest.Shards,
		Groups: make(map[int][]string, len(latest.Groups)),
	}
	for gid, servers := range latest.Groups {
		next.Groups[gid] = servers
	}
	sc.logger.Printf("创建新配置: 旧编号=%d → 新编号=%d", latest.Num, next.Num)
	return next
}

// shuffleShard rebalances the NShards shards across the groups in
// config.Groups as evenly as possible while moving only surplus shards.
// GIDs are processed in sorted order so every replica computes the exact
// same assignment (Go map iteration order is randomized).
func (sc *ShardCtrler) shuffleShard(config *Config) {
	// All groups may have left (after a Leave); park every shard on the
	// invalid GID 0.
	if len(config.Groups) == 0 {
		for i := 0; i < NShards; i++ {
			config.Shards[i] = 0
		}
		sc.logger.Printf("无可用组，重置所有分片到0")
		return
	}

	var gids []int
	for gid := range config.Groups {
		gids = append(gids, gid)
	}
	// Fixed (sorted) order keeps the allocation deterministic.
	sort.Ints(gids)
	sc.logger.Printf("开始分片分配: 目标GIDs=%v, 分片总数=%d", gids, NShards)

	base := NShards / len(gids)
	extra := NShards % len(gids)

	// Target shard count per group: base each, and the first `extra`
	// groups (in sorted order) take one more to absorb the remainder.
	targetCount := make(map[int]int)
	for i, gid := range gids {
		targetCount[gid] = base
		if i < extra {
			targetCount[gid]++
		}
		sc.logger.Printf("GID=%d 目标分片数: %d", gid, targetCount[gid])
	}

	// Shards each surviving group currently keeps.
	curCount := make(map[int]int)
	// Shards that must be handed to some group below.
	reallocated := make([]int, 0)

	for shard, gid := range config.Shards {
		if gid == 0 {
			reallocated = append(reallocated, shard)
		} else if _, exists := targetCount[gid]; !exists { // owner group no longer exists: free this shard
			reallocated = append(reallocated, shard)
		} else {
			curCount[gid]++
			// Over target: the surplus shards join the free pool.
			if curCount[gid] > targetCount[gid] {
				// Could reset to 0 here for safety, but unnecessary: every
				// freed shard is reassigned in the loop below.
				//config.Shards[shard] = 0
				reallocated = append(reallocated, shard)
			}
		}
	}

	sc.logger.Printf("待分配分片数: %d, 待分配列表=%v", len(reallocated), reallocated)

	for _, gid := range gids {
		need := targetCount[gid] - curCount[gid] // shards still owed to this group
		if need <= 0 {
			continue
		}
		sc.logger.Printf("GID=%d 需要补充分片: %d个", gid, need)
		// Take shards from the free pool in order.
		for i := 0; i < need && len(reallocated) > 0; i++ {
			shard := reallocated[0]
			reallocated = reallocated[1:]
			config.Shards[shard] = gid
			curCount[gid]++
			sc.logger.Printf("分片分配: shard=%d → GID=%d", shard, gid)
		}
	}
}

// doCmd applies one committed operation to the controller state and, when a
// handler on this node is still waiting on this log index, hands it the
// result. Caller (the applier) must hold sc.mu.
func (sc *ShardCtrler) doCmd(op *Op, index int) {
	reply := new(Reply)

	if op.Type == Query {
		// Num == -1, or any number at/beyond len(configs), means "latest";
		// equality is out of range too because configs[0] is the initial
		// empty config.
		targetNum := op.Num
		if op.Num == -1 || op.Num >= len(sc.configs) {
			targetNum = len(sc.configs) - 1
		}
		reply.Err, reply.Config = OK, sc.configs[targetNum]
		// configs[i].Num == i, so targetNum itself is the config number
		// actually served (previous code logged targetNum+1, off by one).
		sc.logger.Printf("查询配置: 请求编号=%d → 实际编号=%d", op.Num, targetNum)
	} else if sc.checkDuplicated(op) {
		// Second dedup check at apply time: the same command can be
		// submitted multiple times before its first copy is applied.
		reply.Err = ErrDuplicated
		sc.logger.Printf("命令重复执行: ClientId=%d, CmdId=%d", op.ClientId%10000, op.CmdId)
	} else {
		config := sc.createConfig()
		switch op.Type {
		case Move:
			config.Shards[op.Shard] = op.GID
			sc.logger.Printf("移动分片: shard=%d → GID=%d", op.Shard, op.GID)
		case Join:
			for gid, servers := range op.Servers {
				config.Groups[gid] = servers
				sc.logger.Printf("加入新组: GID=%d, 服务器列表=%v", gid, servers)
			}
			sc.shuffleShard(config)
		case Leave:
			for _, gid := range op.GIDs {
				delete(config.Groups, gid)
				sc.logger.Printf("移除组: GID=%d", gid)
			}
			sc.shuffleShard(config)
		}
		reply.Err = OK

		sc.configs = append(sc.configs, *config)
		sc.ClientCmdSeq[op.ClientId] = op.CmdId
		sc.logger.Printf("配置更新完成: 新配置编号=%d, 共%d个配置", config.Num, len(sc.configs))
	}

	// Wake the waiting handler, but only if the op parked at this index is
	// the very command just applied — after a leadership change a different
	// request may be waiting on the same index.
	ch, chExists := sc.pendingChs[index]
	oldOp, opExists := sc.pendingOps[index]

	if chExists && opExists &&
		oldOp.ClientId == op.ClientId &&
		oldOp.CmdId == op.CmdId {
		sc.logger.Printf("发送命令结果到通道: index=%d, ClientId=%d, CmdId=%d",
			index, op.ClientId%10000, op.CmdId)
		// Send from a goroutine so the apply loop never blocks on delivery.
		go func() { ch <- reply }()
	}
}

// applier loops forever consuming entries committed by Raft and executes
// each exactly once. Entries at or below LastApplied are skipped: after a
// restart Raft re-delivers log entries from its persisted state.
// (No killed() hook is provided by the lab skeleton, so the loop never exits.)
func (sc *ShardCtrler) applier() {
	for {
		msg := <-sc.applyCh
		if !msg.CommandValid {
			continue
		}
		sc.mu.Lock()
		if idx := msg.CommandIndex; idx > sc.LastApplied {
			// Record progress first, then apply the command.
			sc.LastApplied = idx
			op := msg.Command.(Op)
			sc.doCmd(&op, idx)
		}
		sc.mu.Unlock()
	}
}

// Join adds the given GID -> servers groups by forwarding the RPC into the
// common HandleRequest path.
func (sc *ShardCtrler) Join(args *JoinArgs, reply *JoinReply) {
	// Your code here.
	var resp Reply
	sc.HandleRequest(&Args{
		Type:     Join,
		Servers:  args.Servers,
		ClientId: args.ClientId,
		CmdId:    args.CmdId,
	}, &resp)
	reply.Err, reply.WrongLeader = resp.Err, resp.WrongLeader
}

// Leave removes the listed GIDs by forwarding the RPC into the common
// HandleRequest path.
func (sc *ShardCtrler) Leave(args *LeaveArgs, reply *LeaveReply) {
	// Your code here.
	var resp Reply
	sc.HandleRequest(&Args{
		Type:     Leave,
		GIDs:     args.GIDs,
		ClientId: args.ClientId,
		CmdId:    args.CmdId,
	}, &resp)
	reply.Err, reply.WrongLeader = resp.Err, resp.WrongLeader
}

// Move pins one shard to a specific group by forwarding the RPC into the
// common HandleRequest path.
func (sc *ShardCtrler) Move(args *MoveArgs, reply *MoveReply) {
	// Your code here.
	var resp Reply
	sc.HandleRequest(&Args{
		Type:     Move,
		Shard:    args.Shard,
		GID:      args.GID,
		ClientId: args.ClientId,
		CmdId:    args.CmdId,
	}, &resp)
	reply.Err, reply.WrongLeader = resp.Err, resp.WrongLeader
}

// Query fetches the configuration numbered args.Num (-1 for the latest) by
// forwarding the RPC into the common HandleRequest path.
func (sc *ShardCtrler) Query(args *QueryArgs, reply *QueryReply) {
	// Your code here.
	var resp Reply
	sc.HandleRequest(&Args{
		Type:     Query,
		Num:      args.Num,
		ClientId: args.ClientId,
		CmdId:    args.CmdId,
	}, &resp)
	reply.Err, reply.Config = resp.Err, resp.Config
	reply.WrongLeader = resp.WrongLeader
}

//
// the tester calls Kill() when a ShardCtrler instance won't
// be needed again. you are not required to do anything
// in Kill(), but it might be convenient to (for example)
// turn off debug output from this instance.
//
func (sc *ShardCtrler) Kill() {
	// Stop the underlying Raft peer; the applier goroutine is left running
	// (the lab skeleton provides no killed() hook to observe shutdown).
	sc.rf.Kill()
	// Your code here, if desired.
}

// needed by shardkv tester
// Raft exposes the underlying Raft instance.
func (sc *ShardCtrler) Raft() *raft.Raft {
	return sc.rf
}

//
// servers[] contains the ports of the set of
// servers that will cooperate via Raft to
// form the fault-tolerant shardctrler service.
// me is the index of the current server in servers[].
//
func StartServer(servers []*labrpc.ClientEnd, me int, persister *raft.Persister) *ShardCtrler {
	// Raft must be able to decode Op values out of the log.
	labgob.Register(Op{})

	sc := &ShardCtrler{
		me:           me,
		applyCh:      make(chan raft.ApplyMsg),
		ClientCmdSeq: make(map[int64]int64),   // duplicate-detection table
		pendingChs:   make(map[int]chan *Reply), // result channels keyed by log index
		pendingOps:   make(map[int]*Op),         // submitted ops keyed by log index
		LastApplied:  0,
	}

	// Config #0 is the initial empty configuration.
	sc.configs = make([]Config, 1)
	sc.configs[0].Groups = map[int][]string{}

	sc.rf = raft.Make(servers, me, persister, sc.applyCh)
	sc.logger = log.New(os.Stdout, fmt.Sprintf("ShardCtrler%d: ", me), log.LstdFlags|log.Lmicroseconds)

	// Your code here.
	go sc.applier()

	return sc
}
