package shardkv

import (
	"6.824/labrpc"
	"bytes"
	"fmt"
	"log"
	"os"
	"sync/atomic"
	"time"
)
import "6.824/raft"
import "sync"
import "6.824/labgob"
import "6.824/shardctrler"

// Op is the command payload replicated through Raft for client operations.
// It is registered with labgob in StartServer.
// NOTE(review): currently empty — the server appears to replicate OpCommand
// values instead (see applier); confirm whether Op is still needed.
type Op struct {
	// Your definitions here.
	// Field names must start with capital letters,
	// otherwise RPC will break.
}

// ShardKV is one replica of a sharded key/value server. It owns a subset of
// shards determined by the shard controller's current configuration, applies
// commands committed by its underlying Raft peer, and hands off / pulls
// shards when the configuration changes.
type ShardKV struct {
	mu           sync.RWMutex
	me           int
	rf           *raft.Raft
	applyCh      chan raft.ApplyMsg
	make_end     func(string) *labrpc.ClientEnd
	gid          int
	ctrlers      []*labrpc.ClientEnd
	maxraftstate int // snapshot if log grows this big

	// Your definitions here.

	// Whether this replica has been killed (set by Kill, read by Killed).
	dead int32

	// Persister shared with the underlying Raft peer (same instance).
	persister *raft.Persister

	// Channel per pending log index, used to deliver the reply for the
	// command proposed at that index back to the waiting RPC handler.
	pendingChs map[int]chan *Reply
	// The command proposed at each pending log index.
	pendingOps map[int]*OpCommand

	// Cache of shard-deletion commands this node has received.
	pendingDelete map[int]*DeleteInfo

	// Whether a snapshot is already being taken, to avoid redundant extra
	// snapshots while one is in progress.
	snapshotInProgress int32

	// Per-shard data stored by this group.
	Shards [shardctrler.NShards]Shard

	// Clerk used to query the shard controller for configurations.
	mck *shardctrler.Clerk

	// Previous configuration, used when pulling shards from other groups.
	prevConfig shardctrler.Config

	// Most recently fetched configuration.
	curConfig shardctrler.Config

	logger *log.Logger // per-server prefixed logger

	// Index of the last log entry applied to the state machine.
	LastApplied int

	// Latest executed command sequence number per client, for duplicate
	// detection.
	ClientCmdSeq map[int64]int64
}

//定期检查插入一个空命令，用于强制同步不同节点的日志，可能进入了下一个term但是一直没有本term的日志，导致此前的日志一直无法提交
func (kv *ShardKV) insertEmpty() {
	for !kv.Killed() {
		if _, isLeader := kv.rf.GetState(); isLeader {
			if kv.rf.HasNoCurTermLog() {
				kv.rf.Start(EmptyCommand{})
			}
		}
		time.Sleep(100 * time.Millisecond)
	}
}

// applyEmtyCommand handles a committed EmptyCommand. Empty commands exist
// only to force log agreement (see insertEmpty); there is no state-machine
// effect, so this is intentionally a no-op.
// NOTE(review): name has a typo ("Emty"); renaming would also touch the
// call site in applier, so it is left unchanged here.
func (kv *ShardKV) applyEmtyCommand() {

}

// checkDuplicated reports whether op has already been executed: true when a
// sequence number at least as large as op.CmdId is recorded for op.ClientId.
// Assumes the caller synchronizes access to ClientCmdSeq — TODO confirm.
func (kv *ShardKV) checkDuplicated(op *OpCommand) bool {
	lastSeq, seen := kv.ClientCmdSeq[op.ClientId]
	if !seen {
		return false
	}
	return lastSeq >= op.CmdId
}

// clearDelete drops the pending shard-deletion entry recorded for the given
// log index, then logs the cleanup (outside the lock, as before).
func (kv *ShardKV) clearDelete(index int) {
	func() {
		kv.mu.Lock()
		defer kv.mu.Unlock()
		delete(kv.pendingDelete, index)
	}()
	kv.logger.Printf("清理过期或已处理删除等待信息: index=%d", index)
}

// createSnapshot serializes the replicated state (shard data, client dedup
// table, last applied index, and both configurations) into a byte slice for
// Raft to persist. The encode order must match the decode order in
// ReadAndApplySnapshot. The caller is expected to hold kv.mu (the applier
// invokes it under the lock — TODO confirm no other callers).
func (kv *ShardKV) createSnapshot() []byte {
	w := new(bytes.Buffer)
	e := labgob.NewEncoder(w)
	// Fix: the original ignored every Encode error, so a failed encode would
	// silently install a truncated, corrupt snapshot. Fail loudly instead,
	// matching the Fatalf convention used on the decode side.
	for _, field := range []interface{}{
		kv.Shards,
		kv.ClientCmdSeq,
		kv.LastApplied,
		kv.prevConfig,
		kv.curConfig,
	} {
		if err := e.Encode(field); err != nil {
			kv.logger.Fatalf("Encode Error\n")
		}
	}
	return w.Bytes()
}

// ReadAndApplySnapshot decodes a snapshot produced by createSnapshot and
// installs it, unless this server has already applied a log index at or
// beyond the snapshot's (a concurrent applier may have raced ahead, making
// the snapshot stale). Decode failures are fatal.
func (kv *ShardKV) ReadAndApplySnapshot(data []byte) {
	if len(data) == 0 { // bootstrap without any state?
		kv.logger.Printf("ReadAndApplySnapshot：快照大小为0，无法应用快照")
		return
	}

	dec := labgob.NewDecoder(bytes.NewBuffer(data))

	var (
		shards       [shardctrler.NShards]Shard
		clientCmdSeq map[int64]int64
		lastApplied  int
		prevConfig   shardctrler.Config
		curConfig    shardctrler.Config
	)
	// Decode in the exact order createSnapshot encoded.
	for _, target := range []interface{}{&shards, &clientCmdSeq, &lastApplied, &prevConfig, &curConfig} {
		if err := dec.Decode(target); err != nil {
			kv.logger.Fatalf("Decode Error\n")
		}
	}

	kv.mu.Lock()
	defer kv.mu.Unlock()

	// The snapshot may be stale relative to entries applied concurrently.
	if kv.LastApplied >= lastApplied {
		kv.logger.Printf("ReadAndApplySnapshot： KVServer %d: ignore snapshot (index %d) since lastApplied is %d",
			kv.me, lastApplied, kv.LastApplied)
		return
	}

	kv.Shards = shards
	kv.ClientCmdSeq = clientCmdSeq
	kv.LastApplied = lastApplied
	kv.prevConfig = prevConfig
	kv.curConfig = curConfig

	kv.logger.Printf("ReadAndApplySnapshot： 应用快照成功，LastApplied为%d，prev配置版本号为%d，cur配置版本号为%d",
		lastApplied, prevConfig.Num, curConfig.Num)
}

// clearOld removes the waiting channel and pending command recorded for the
// given log index, then logs the cleanup (outside the lock, as before).
func (kv *ShardKV) clearOld(index int) {
	func() {
		kv.mu.Lock()
		defer kv.mu.Unlock()
		delete(kv.pendingChs, index)
		delete(kv.pendingOps, index)
	}()
	kv.logger.Printf("清理过期等待信息: index=%d", index)
}

// applier consumes committed messages from the Raft apply channel and
// applies them to the state machine. Command messages are applied at most
// once (guarded by LastApplied) under kv.mu; snapshot messages are handed to
// ReadAndApplySnapshot. It also triggers a snapshot whenever the persisted
// Raft state exceeds maxraftstate.
func (kv *ShardKV) applier() {
	for !kv.Killed() {
		for msg := range kv.applyCh {
			if msg.CommandValid {
				index := msg.CommandIndex
				kv.mu.Lock()
				if index > kv.LastApplied {
					kv.logger.Printf("applier: 准备处理新命令: 索引=%d（当前LastApplied=%d）", index, kv.LastApplied)
					kv.LastApplied = index
					// Dispatch on the concrete command type.
					switch msg.Command.(type) {
					case ConfigCommand:
						kv.applyConfig(&msg)
					case OpCommand:
						kv.applyCmd(&msg, index)
					case ShardCommand:
						kv.appllyShardCommand(&msg, index)
					case EmptyCommand:
						kv.applyEmtyCommand()
					default:
						kv.logger.Printf("未知命令: 索引=%d",
							index)
					}
					// Snapshot when enabled and the Raft state has grown past
					// the threshold. This check runs under kv.mu, so back-to-back
					// snapshots can still occur if the previous Snapshot() call
					// has not yet shrunk the persisted state.
					if kv.maxraftstate != -1 && kv.persister.RaftStateSize() >= kv.maxraftstate {
						origin := kv.persister.RaftStateSize()
						// NOTE(review): snapshotInProgress is set here but never
						// cleared in this file — confirm whether it is still used.
						atomic.StoreInt32(&kv.snapshotInProgress, 1)
						kv.logger.Printf("applier: 处理索引%d时发现当前raft节点持久化数据超过阈值，创建并更新节点快照信息", index)
						// Build the snapshot from current state and hand it to Raft;
						// Raft decides whether it actually needs to be saved.
						data := kv.createSnapshot()
						kv.rf.Snapshot(index, data)
						kv.logger.Printf("applier: 创建索引%d处快照之后，持久化数据大小由%d -> %d", index, origin, kv.persister.RaftStateSize())
					}
				} else {
					kv.logger.Printf("applier: 跳过已处理命令: 索引=%d（当前LastApplied=%d）", index, kv.LastApplied)
				}
				kv.mu.Unlock()
			} else if msg.SnapshotValid {
				// Fix: the original read kv.LastApplied here without holding
				// kv.mu, racing with the command branch that writes it under
				// the lock. Take a consistent reading before logging.
				kv.mu.RLock()
				lastApplied := kv.LastApplied
				kv.mu.RUnlock()
				kv.logger.Printf("applier: 开始处理快照: 索引=%d（当前LastApplied=%d）", msg.SnapshotIndex, lastApplied)
				kv.ReadAndApplySnapshot(msg.Snapshot)
			}
		}
	}
}

//
// the tester calls Kill() when a ShardKV instance won't
// be needed again. you are not required to do anything
// in Kill(), but it might be convenient to (for example)
// turn off debug output from this instance.
//
// Kill shuts down this ShardKV instance: it kills the underlying Raft peer
// first, then marks this replica dead so long-running goroutines (applier,
// insertEmpty, …) observe Killed() and exit.
func (kv *ShardKV) Kill() {
	kv.rf.Kill()
	atomic.StoreInt32(&kv.dead, 1)
}

// Killed reports whether Kill has been called on this instance.
func (kv *ShardKV) Killed() bool {
	dead := atomic.LoadInt32(&kv.dead)
	return dead == 1
}

//
// servers[] contains the ports of the servers in this group.
//
// me is the index of the current server in servers[].
//
// the k/v server should store snapshots through the underlying Raft
// implementation, which should call persister.SaveStateAndSnapshot() to
// atomically save the Raft state along with the snapshot.
//
// the k/v server should snapshot when Raft's saved state exceeds
// maxraftstate bytes, in order to allow Raft to garbage-collect its
// log. if maxraftstate is -1, you don't need to snapshot.
//
// gid is this group's GID, for interacting with the shardctrler.
//
// pass ctrlers[] to shardctrler.MakeClerk() so you can send
// RPCs to the shardctrler.
//
// make_end(servername) turns a server name from a
// Config.Groups[gid][i] into a labrpc.ClientEnd on which you can
// send RPCs. You'll need this to send RPCs to other groups.
//
// look at client.go for examples of how to use ctrlers[]
// and make_end() to send RPCs to the group owning a specific shard.
//
// StartServer() must return quickly, so it should start goroutines
// for any long-running work.
//
// StartServer creates and starts a ShardKV replica.
//
// servers[] holds the ports of the servers in this group; me is this
// server's index in servers[]. persister is shared with the underlying Raft
// peer. maxraftstate is the Raft-state size threshold for snapshotting (-1
// disables snapshots). gid is this group's GID. ctrlers is passed to
// shardctrler.MakeClerk for talking to the shard controller. make_end turns
// a server name from Config.Groups into a labrpc.ClientEnd for cross-group
// RPCs.
//
// StartServer returns quickly; all long-running work happens in goroutines.
func StartServer(servers []*labrpc.ClientEnd, me int, persister *raft.Persister, maxraftstate int, gid int, ctrlers []*labrpc.ClientEnd, make_end func(string) *labrpc.ClientEnd) *ShardKV {
	// Register every structure Go's RPC library must marshal/unmarshal.
	labgob.Register(Op{})
	labgob.Register(OpCommand{})
	labgob.Register(ConfigCommand{})
	labgob.Register(ShardCommand{})
	labgob.Register(EmptyCommand{})

	kv := new(ShardKV)
	kv.me = me
	kv.maxraftstate = maxraftstate
	kv.make_end = make_end
	kv.gid = gid
	kv.ctrlers = ctrlers

	kv.applyCh = make(chan raft.ApplyMsg)
	kv.rf = raft.Make(servers, me, persister, kv.applyCh)

	kv.mck = shardctrler.MakeClerk(kv.ctrlers)
	// Fix: LastApplied was assigned 0 twice in the original; once is enough
	// (and ReadAndApplySnapshot below may raise it from the saved snapshot).
	kv.LastApplied = 0

	kv.prevConfig = shardctrler.Config{Num: 0}
	kv.curConfig = shardctrler.Config{Num: 0}

	// Initialize the per-shard databases.
	for i := range kv.Shards {
		kv.Shards[i].DB = make(map[string]string)
	}

	kv.ClientCmdSeq = make(map[int64]int64)
	kv.logger = log.New(os.Stdout, fmt.Sprintf("gid%dserver%d: ", gid, me), log.LstdFlags|log.Lmicroseconds)
	kv.logger.Printf("启动服务器，组为%d", gid)
	kv.pendingChs = make(map[int]chan *Reply)
	kv.pendingOps = make(map[int]*OpCommand)
	kv.pendingDelete = make(map[int]*DeleteInfo)
	kv.persister = persister

	// Restore state from any existing snapshot after all maps are in place.
	kv.ReadAndApplySnapshot(persister.ReadSnapshot())

	// Background loops: apply committed entries, track configuration
	// changes, migrate shards, and force current-term log agreement.
	go kv.applier()

	go kv.pullConfig()

	go kv.pullShard()

	go kv.notifyShard()

	go kv.insertEmpty()

	return kv
}
