package shardkv

import (
	"bytes"
	"runtime"
	"strconv"
	"sync"
	"sync/atomic"
	"time"

	"6.824/labgob"
	"6.824/labrpc"
	"6.824/raft"
	"6.824/shardctrler"
)

type Op struct {
	// Your definitions here.
	// Field names must start with capital letters,
	// otherwise RPC will break.
	OpType      string      // one of: "Get", "Put", "Append", "SaveShardLog", "SendOneComplete", "AppendNewConfig", "AskShardDataToSend"
	Args        interface{} // op-specific payload (KeyValue, SaveShardLogArgs, MapShard2Gid, shardctrler.Config, or AskShardDataToSendArgs); concrete types are labgob-registered in StartServer
	ClinetId    int64       // issuing client's id (misspelling kept: it must match the *Args structs used over RPC)
	IdentifySeq int64       // per-client sequence number, used for duplicate detection in selectApplyLogs
}

type ShardKV struct {
	mu           sync.Mutex
	me           int
	rf           *raft.Raft
	applyCh      chan raft.ApplyMsg
	make_end     func(string) *labrpc.ClientEnd
	gid          int
	ctrlers      []*labrpc.ClientEnd
	maxraftstate int // snapshot if log grows this big

	// Your definitions here.
	dead       int32               // set to 1 by Kill(); daemons poll it via killed()
	ClientId   int64               //persist - identity this server uses when submitting its own ops (handoff/config entries)
	data       []map[string]string //persist - per-shard key/value store, indexed by shard id
	applyIndex int                 //persist - raft index of the last command applied to the state machine
	fromIndex  int                 // raft index the replyChannels window is anchored at; advanced on snapshot

	identifySeqs   []map[int64]int64 //persist - per-shard map client id -> last applied sequence (duplicate detection)
	replyChannels  []chan ApplyResult // slot (index - fromIndex) carries the apply result back to the waiting RPC handler
	config         shardctrler.Config //persist - configuration currently in force
	toUpdateConfig shardctrler.Config //persist - next configuration being migrated to (equals config when stable)

	needUpdateConfigCh chan bool // signal: handoffDiffDaemon should (re)examine shard handoff work
	serverPersistCh    chan bool // signal: doSnapshotOnceDaemon should take a snapshot
}

// adjustIndex converts an absolute raft log index into an offset inside
// kv.replyChannels, which is anchored at kv.fromIndex (the last snapshot point).
func (kv *ShardKV) adjustIndex(index int) int {
	offset := index - kv.fromIndex
	return offset
}

// waitLogApply blocks until the raft entry at `index` has been applied by
// applyLogDaemon (or 2s pass), then maps the result onto an Err for the RPC
// caller. selectApplyLogs encodes failures as sentinel terms:
// -1 -> ErrWrongGroup, -2 -> "not suit" (config-version check failed),
// -3 -> ErrNoKey; any other term mismatch means leadership changed and the
// entry was superseded ("apply failed").
func (kv *ShardKV) waitLogApply(index int, term int) (Err, ApplyResult) {
	kv.mu.Lock()
	if kv.adjustIndex(index) <= 0 {
		// index already compacted into a snapshot: its reply channel is gone,
		// so the result can never be observed here.
		kv.mu.Unlock()
		return Err("timeout"), ApplyResult{}
	}

	// Ensure a buffered(1) channel exists at this adjusted index, growing the
	// slice with nil placeholders up to index-1 if needed.
	if len(kv.replyChannels) > kv.adjustIndex(index) {
		kv.replyChannels[kv.adjustIndex(index)] = make(chan ApplyResult, 1)
	} else {
		for len(kv.replyChannels)-1 < kv.adjustIndex(index-1) {
			kv.replyChannels = append(kv.replyChannels, nil)
		}
		kv.replyChannels = append(kv.replyChannels, make(chan ApplyResult, 1))
	}

	toReadChannel := kv.replyChannels[kv.adjustIndex(index)]
	kv.mu.Unlock()

	select {
	case applyResult := <-toReadChannel:
		toReadChannel <- applyResult // give back: buffer size 1 lets another waiter on the same index read it too
		if applyResult.term != term {
			if applyResult.term == -1 {
				return ErrWrongGroup, applyResult
			} else if applyResult.term == -2 {
				// config-version check failed on apply
				return Err("not suit"), ApplyResult{}
			} else if applyResult.term == -3 {
				return ErrNoKey, ApplyResult{}
			}

			return Err("apply failed"), ApplyResult{}
		}

		// append success
		return OK, applyResult

	case <-time.After(2000 * time.Millisecond):
		// timeout: 2000ms
		return Err("timeout"), ApplyResult{}
	}
}

// Get serves a client read. The op is committed through raft so every replica
// observes the same value; ownership of the key's shard is pre-checked to
// fail fast with ErrWrongGroup.
func (kv *ShardKV) Get(args *GetArgs, reply *GetReply) {
	// Your code here.
	kv.mu.Lock()
	shard := int(args.Key[0]) % shardctrler.NShards
	// Reject unless we own the shard now, or a reconfiguration is in flight
	// and the next config assigns it to this group.
	stable := kv.config.Num == kv.toUpdateConfig.Num
	ownsNow := kv.config.Shards[shard] == kv.gid
	ownsNext := kv.toUpdateConfig.Shards[shard] == kv.gid
	if (stable && !ownsNow) || (!stable && !ownsNow && !ownsNext) {
		kv.mu.Unlock()
		runtime.Gosched() // yield briefly so the retrying client doesn't hammer us
		reply.Err = ErrWrongGroup
		return
	}

	op := Op{"Get", KeyValue{kv.config.Num, args.Key, ""}, args.ClinetId, args.IdentifySeq}
	index, term, isLeader := kv.rf.Start(op)
	kv.mu.Unlock()
	if !isLeader {
		reply.Err = ErrWrongLeader
		return
	}

	err, res := kv.waitLogApply(index, term)
	if err != OK {
		reply.Err = err
		return
	}
	reply.Err = OK
	reply.Value = res.ret.(string)
}

// PutAppend serves client Put/Append writes; args.Op selects which. The op is
// replicated through raft, and the current config.Num is stamped into it so a
// stale write is rejected at apply time.
func (kv *ShardKV) PutAppend(args *PutAppendArgs, reply *PutAppendReply) {
	// Your code here.
	kv.mu.Lock()
	shard := int(args.Key[0]) % shardctrler.NShards
	// Reject unless we own the shard now, or will own it after the pending
	// reconfiguration completes.
	stable := kv.config.Num == kv.toUpdateConfig.Num
	ownsNow := kv.config.Shards[shard] == kv.gid
	ownsNext := kv.toUpdateConfig.Shards[shard] == kv.gid
	if (stable && !ownsNow) || (!stable && !ownsNow && !ownsNext) {
		kv.mu.Unlock()
		runtime.Gosched() // yield briefly so the retrying client doesn't hammer us
		reply.Err = ErrWrongGroup
		return
	}

	op := Op{args.Op, KeyValue{kv.config.Num, args.Key, args.Value}, args.ClinetId, args.IdentifySeq}
	index, term, isLeader := kv.rf.Start(op)
	kv.mu.Unlock()
	if !isLeader {
		reply.Err = ErrWrongLeader
		return
	}

	err, _ := kv.waitLogApply(index, term)
	reply.Err = err
}

// checkConfigDiffAndUpdate promotes toUpdateConfig to the active config once
// every shard assignment already matches and the version is exactly one
// ahead. Caller must hold kv.mu.
func (kv *ShardKV) checkConfigDiffAndUpdate() {
	if kv.config.Num+1 != kv.toUpdateConfig.Num {
		return
	}
	if kv.config.Shards != kv.toUpdateConfig.Shards {
		return
	}
	kv.config.Num = kv.toUpdateConfig.Num
	kv.config.Groups = kv.toUpdateConfig.Groups // sharing the map is fine; fresh queries build new ones
}

// selectApplyLogs applies one committed raft entry to the replicated state
// machine and, if an RPC handler is waiting on this index, hands it the
// result. Caller must hold kv.mu. On success the waiter receives the entry's
// real `term`; failures are signalled with sentinel terms that waitLogApply
// decodes: -1 = wrong group / stale config num, -2 = config-version check
// failed, -3 = key not found (Get only).
func (kv *ShardKV) selectApplyLogs(cmd Op, term int) {
	// Idempotency guard: a Put/Append whose (client, seq) was already applied
	// is acknowledged again without mutating state.
	if cmd.OpType == "Put" || cmd.OpType == "Append" {
		shard_idx := int(cmd.Args.(KeyValue).Key[0]) % shardctrler.NShards
		if kv.identifySeqs[shard_idx][cmd.ClinetId] == cmd.IdentifySeq {
			kv.applyIndex++
			if len(kv.replyChannels) > kv.adjustIndex(kv.applyIndex) && kv.replyChannels[kv.adjustIndex(kv.applyIndex)] != nil {
				kv.replyChannels[kv.adjustIndex(kv.applyIndex)] <- ApplyResult{term, nil}
			}
			return
		}
	}

	getOpRetValue := ""
	var toSendData map[string]string
	var toSendShardIdentifySeqs map[int64]int64

	kv.checkConfigDiffAndUpdate()

	switch cmd.OpType {
	case "Put":
		shard_idx := int(cmd.Args.(KeyValue).Key[0]) % shardctrler.NShards
		if kv.config.Shards[shard_idx] != kv.gid || kv.config.Num != cmd.Args.(KeyValue).Num {
			// We don't own the shard, or the op was issued under a different
			// config: refuse to apply and report -1.
			term = -1
		} else {
			kv.data[shard_idx][cmd.Args.(KeyValue).Key] = cmd.Args.(KeyValue).Value
		}

	case "Append":
		shard_idx := int(cmd.Args.(KeyValue).Key[0]) % shardctrler.NShards
		if kv.config.Shards[shard_idx] != kv.gid || kv.config.Num != cmd.Args.(KeyValue).Num {
			term = -1
		} else {
			kv.data[shard_idx][cmd.Args.(KeyValue).Key] =
				kv.data[shard_idx][cmd.Args.(KeyValue).Key] + cmd.Args.(KeyValue).Value
		}

	case "Get":
		shard_idx := int(cmd.Args.(KeyValue).Key[0]) % shardctrler.NShards
		if kv.config.Shards[shard_idx] != kv.gid || kv.config.Num != cmd.Args.(KeyValue).Num {
			term = -1
		} else {
			t_getOpRetValue, ok := kv.data[shard_idx][cmd.Args.(KeyValue).Key]
			if ok {
				getOpRetValue = t_getOpRetValue
			} else {
				term = -3 // no key
			}
		}

	case "SaveShardLog":
		// Another group pushed shard data to us for the next config.
		if kv.config.Num+1 == cmd.Args.(SaveShardLogArgs).Num {
			if kv.config.Shards[cmd.Args.(SaveShardLogArgs).ShardId] != kv.gid {
				kv.config.Shards[cmd.Args.(SaveShardLogArgs).ShardId] = kv.gid
				kv.data[cmd.Args.(SaveShardLogArgs).ShardId] = cloneStr2StrMap(cmd.Args.(SaveShardLogArgs).ShardData)
				kv.identifySeqs[cmd.Args.(SaveShardLogArgs).ShardId] = cloneInt64Int64Map(cmd.Args.(SaveShardLogArgs).ShardIdentifySeqs)
				kv.checkConfigDiffAndUpdate()
			}
		} else if kv.config.Num+1 > cmd.Args.(SaveShardLogArgs).Num {
			// An older transfer we already accepted; the sender may have lost
			// the ack, so treat this retry as success.
		} else {
			// kv.config.Num+1 < cmd.Args.(SaveShardLogArgs).Num: data from a
			// future config must never be accepted.
			term = -2 // -2 = version check failed
		}

	case "SendOneComplete":
		// We finished pushing a shard to its new owner; record the handoff.
		if kv.config.Num+1 == cmd.Args.(MapShard2Gid).Num {
			if kv.config.Shards[cmd.Args.(MapShard2Gid).ShardId] == -1 {
				kv.config.Shards[cmd.Args.(MapShard2Gid).ShardId] = cmd.Args.(MapShard2Gid).Gid
				kv.data[cmd.Args.(MapShard2Gid).ShardId] = make(map[string]string) // shard handed off; clear our copy
				kv.checkConfigDiffAndUpdate()
			}
		} else if kv.config.Num+1 > cmd.Args.(MapShard2Gid).Num {
			// Older config: this handoff already completed.
		} else {
			// kv.config.Num+1 < cmd.Args.(MapShard2Gid).Num:
			// a future config's completion must never be accepted.
			term = -2
		}

	case "AppendNewConfig":
		// Adopt the next configuration (strictly config.Num+1, at most once).
		if kv.config.Num+1 == cmd.Args.(shardctrler.Config).Num && cmd.Args.(shardctrler.Config).Num > kv.toUpdateConfig.Num {
			kv.toUpdateConfig = cmd.Args.(shardctrler.Config)
			if kv.config.Num == 0 && cmd.Args.(shardctrler.Config).Num == 1 {
				// Very first config: adopt the shard map directly, no data to move.
				kv.config.Shards = cmd.Args.(shardctrler.Config).Shards
			}

			for i := 0; i < shardctrler.NShards; i++ {
				if kv.config.Shards[i] != kv.toUpdateConfig.Shards[i] {
					if kv.config.Shards[i] != -1 && kv.config.Shards[i] != kv.gid && kv.toUpdateConfig.Shards[i] != kv.gid {
						// Shard moves between two other groups; nothing for us
						// to send or receive, mark it done immediately.
						kv.config.Shards[i] = kv.toUpdateConfig.Shards[i]
					}
				}
			}
			kv.checkConfigDiffAndUpdate()

			// Wake the handoff daemon to start pushing shards we must give up.
			clearBoolenChannel(kv.needUpdateConfigCh)
			kv.needUpdateConfigCh <- true
		} else {
			term = -2
		}

	case "AskShardDataToSend":
		// Snapshot a shard's data for handoff and mark it in-flight.
		if kv.config.Num+1 == cmd.Args.(AskShardDataToSendArgs).Num {
			kv.config.Shards[cmd.Args.(AskShardDataToSendArgs).ShardId] = -1 // -1: shard currently owned by nobody (transfer in flight)
			toSendData = cloneStr2StrMap(kv.data[cmd.Args.(AskShardDataToSendArgs).ShardId])
			toSendShardIdentifySeqs = cloneInt64Int64Map(kv.identifySeqs[cmd.Args.(AskShardDataToSendArgs).ShardId])
		} else {
			term = -2
		}

	default:
		panic("Unknown OP")
	}

	// Record the duplicate-detection sequence only for successful writes.
	if term >= 0 && (cmd.OpType == "Put" || cmd.OpType == "Append") {
		shard_idx := int(cmd.Args.(KeyValue).Key[0]) % shardctrler.NShards
		kv.identifySeqs[shard_idx][cmd.ClinetId] = cmd.IdentifySeq
	}

	kv.applyIndex++

	// Deliver the result (possibly a sentinel term) to a waiting RPC handler.
	if len(kv.replyChannels) > kv.adjustIndex(kv.applyIndex) && kv.replyChannels[kv.adjustIndex(kv.applyIndex)] != nil {
		switch cmd.OpType {
		case "AskShardDataToSend":
			kv.replyChannels[kv.adjustIndex(kv.applyIndex)] <- ApplyResult{term, ShardDataSendPack{toSendData, toSendShardIdentifySeqs}}
		case "Get":
			kv.replyChannels[kv.adjustIndex(kv.applyIndex)] <- ApplyResult{term, getOpRetValue}
		default:
			kv.replyChannels[kv.adjustIndex(kv.applyIndex)] <- ApplyResult{term, nil}
		}
	}
}

// applyLogDaemon consumes committed entries from raft's applyCh and feeds
// them through selectApplyLogs (or installs snapshots). It also signals the
// snapshot daemon after roughly maxraftstate/100 applied commands.
func (kv *ShardKV) applyLogDaemon() {
	apply_count := 0
	for msg := range kv.applyCh {
		if kv.killed() {
			return
		}

		if msg.SnapshotValid {
			kv.mu.Lock()
			kv.applySnapshotData(msg.Snapshot)
			kv.mu.Unlock()
			continue
		}

		cmd := msg.Command.(Op)
		term := msg.CommandTerm

		kv.mu.Lock()
		// BUGFIX: kv.applyIndex is written by selectApplyLogs and read by the
		// snapshot daemon under kv.mu; asserting on it before taking the lock
		// was a data race. Check the no-gap invariant inside the critical
		// section instead.
		raft.Assert(msg.CommandIndex == kv.applyIndex+1, "not msg.CommandIndex==kv.applyIndex+1: "+strconv.Itoa(msg.CommandIndex)+"-"+strconv.Itoa(kv.applyIndex))
		kv.selectApplyLogs(cmd, term)
		kv.mu.Unlock()

		apply_count++
		if kv.maxraftstate != -1 && apply_count > kv.maxraftstate/100 {
			apply_count = 0
			clearBoolenChannel(kv.serverPersistCh)
			kv.serverPersistCh <- true
		}

	}
}

// cloneStr2StrMap returns an independent copy of src, pre-sized to avoid
// rehashing while filling. A nil src yields an empty non-nil map, which
// callers rely on when installing received shard data.
func cloneStr2StrMap(src map[string]string) map[string]string {
	dst := make(map[string]string, len(src))
	for key, value := range src {
		dst[key] = value
	}
	return dst
}

// cloneInt64Int64Map returns an independent copy of src, pre-sized to avoid
// rehashing while filling. A nil src yields an empty non-nil map.
func cloneInt64Int64Map(src map[int64]int64) map[int64]int64 {
	dst := make(map[int64]int64, len(src))
	for key, value := range src {
		dst[key] = value
	}
	return dst
}

// requestSendDiffLog pushes shard `shardId` (for config `send_num`) to group
// `gid`. It first commits an AskShardDataToSend entry so every replica agrees
// on the exact data snapshot being handed off, then tries the receiver's
// servers until one accepts, and finally commits SendOneComplete to record
// the transfer. On failure it re-signals the handoff daemon so the transfer
// is retried.
func (kv *ShardKV) requestSendDiffLog(send_num int, gid int, shardId int) {

	kv.mu.Lock()
	index, term, isLeader := kv.rf.Start(Op{"AskShardDataToSend", AskShardDataToSendArgs{send_num, shardId}, kv.ClientId, nrand()})
	kv.mu.Unlock()
	if !isLeader {
		return
	}

	applyErr, applyResult := kv.waitLogApply(index, term)

	if applyErr != OK {
		// Could not commit the handoff marker; ask the daemon to retry later.
		clearBoolenChannel(kv.needUpdateConfigCh)
		kv.needUpdateConfigCh <- true
		return
	}

	kv.mu.Lock()
	if servers, ok := kv.toUpdateConfig.Groups[gid]; ok { // config cannot advance past toUpdateConfig while this transfer is pending, so no re-read needed
		// try each server for the shard.
		args := SaveShardLogArgs{send_num, shardId, cloneStr2StrMap(applyResult.ret.(ShardDataSendPack).SendData), cloneInt64Int64Map(applyResult.ret.(ShardDataSendPack).SendShardIdentifySeqs), kv.ClientId, nrand()}
		kv.mu.Unlock()

		notokCount, totalNotokCount := 0, 0
		for si := 0; ; {
			// BUGFIX: without this check the retry loop could outlive Kill()
			// indefinitely while the receiver keeps answering soft errors.
			if kv.killed() {
				return
			}

			si = si % len(servers)
			srv := kv.make_end(servers[si])
			reply := SaveShardLogReply{Err("blank")}
			ok := srv.Call("ShardKV.SaveShardLog", &args, &reply)
			if ok {
				notokCount, totalNotokCount = 0, 0
				if reply.Err == OK {
					kv.mu.Lock()
					if kv.config.Shards[shardId] != -1 { // another goroutine already completed this handoff
						kv.mu.Unlock()
						return
					}

					index, term, isLeader := kv.rf.Start(Op{"SendOneComplete", MapShard2Gid{send_num, shardId, gid}, kv.ClientId, nrand()})
					kv.mu.Unlock()
					if !isLeader {
						return
					}

					applyRes, _ := kv.waitLogApply(index, term)

					if applyRes != OK {
						clearBoolenChannel(kv.needUpdateConfigCh)
						kv.needUpdateConfigCh <- true
					}
					return

				} else if reply.Err == Err("wait me") {
					// Receiver is behind on configs; give it time to catch up.
					time.Sleep(50 * time.Millisecond)
				} else if reply.Err == ErrWrongLeader {
					si++
				} else if reply.Err == Err("not suit") {
					// Version check failed on the receiver; restart the handoff.
					clearBoolenChannel(kv.needUpdateConfigCh)
					kv.needUpdateConfigCh <- true
					return
				} else {
					// BUGFIX: soft errors such as "timeout"/"apply failed" used
					// to retry immediately with no pause, busy-spinning against
					// the same server. Back off briefly instead.
					time.Sleep(20 * time.Millisecond)
				}
			} else {
				notokCount++
				totalNotokCount++
				if notokCount > 3 {
					notokCount = 0
					si++
				}

				if totalNotokCount > 20 {
					// Receiver group looks unreachable; let the daemon retry later.
					clearBoolenChannel(kv.needUpdateConfigCh)
					kv.needUpdateConfigCh <- true
					return
				}

				time.Sleep(20 * time.Millisecond)
			}
		}
	} else {
		kv.mu.Unlock()
	}

	raft.Assert(false, strconv.Itoa(kv.gid)+" Should not reach here")
}

// handoffDiffDaemon waits for reconfiguration signals and, when this replica
// is the leader mid-migration, spawns one sender goroutine per shard that
// must be handed off to another group.
func (kv *ShardKV) handoffDiffDaemon() {
	for range kv.needUpdateConfigCh {
		if kv.killed() {
			return
		}

		kv.mu.Lock()
		clearBoolenChannel(kv.needUpdateConfigCh)

		if _, isLeader := kv.rf.GetState(); !isLeader {
			kv.mu.Unlock()
			continue
		}

		if kv.config.Num+1 != kv.toUpdateConfig.Num {
			// No migration in progress.
			kv.mu.Unlock()
			continue
		}

		sendNum := kv.config.Num + 1
		type handoff struct{ gid, shard int }
		var pending []handoff
		for shard := 0; shard < shardctrler.NShards; shard++ {
			cur, next := kv.config.Shards[shard], kv.toUpdateConfig.Shards[shard]
			if cur == next {
				continue
			}
			// cur == -1 means an earlier send attempt started but may not
			// have finished; resend in that case too.
			if cur == kv.gid || cur == -1 {
				pending = append(pending, handoff{next, shard})
			}
		}
		kv.mu.Unlock()

		for _, h := range pending {
			go kv.requestSendDiffLog(sendNum, h.gid, h.shard)
		}
	}
}

// clearBoolenChannel drains every value currently buffered in ch without
// blocking: it returns as soon as a receive would block.
func clearBoolenChannel(ch chan bool) {
	for drained := false; !drained; {
		select {
		case <-ch:
			// discarded one buffered value; keep draining
		default:
			drained = true
		}
	}
}

// applySnapshotData restores server state from a raft snapshot. Caller must
// hold kv.mu (StartServer calls it before the daemons run, which is safe).
// A nil/empty snapshot (fresh start) is ignored, as is a snapshot that fails
// to decode — installing partially-decoded state would corrupt the replica.
func (kv *ShardKV) applySnapshotData(snapshotData []byte) {
	if snapshotData == nil || len(snapshotData) < 1 {
		return
	}

	r := bytes.NewBuffer(snapshotData)
	d := labgob.NewDecoder(r)

	var clientId int64
	var data []map[string]string
	var applyIndex int
	var identifySeqs []map[int64]int64
	var config shardctrler.Config
	var toUpdateConfig shardctrler.Config
	// BUGFIX: decode errors were silently ignored, which would overwrite live
	// state with zero values on a corrupt snapshot. Decode everything first
	// and refuse to apply if any field fails (order must match makeSnapshotData).
	if d.Decode(&clientId) != nil ||
		d.Decode(&data) != nil ||
		d.Decode(&applyIndex) != nil ||
		d.Decode(&identifySeqs) != nil ||
		d.Decode(&config) != nil ||
		d.Decode(&toUpdateConfig) != nil {
		return
	}

	kv.ClientId = clientId
	kv.data = data
	kv.applyIndex = applyIndex
	kv.identifySeqs = identifySeqs
	kv.fromIndex = applyIndex
	kv.config = config
	kv.toUpdateConfig = toUpdateConfig

	// Waiters keyed to pre-snapshot indices are obsolete; reset the window.
	kv.replyChannels = []chan ApplyResult{nil}

	// Wake the handoff daemon: the restored config may be mid-migration.
	clearBoolenChannel(kv.needUpdateConfigCh)
	kv.needUpdateConfigCh <- true
}

// makeSnapshotData serializes the persistent server state, in exactly the
// field order applySnapshotData decodes it. Caller must hold kv.mu.
func (kv *ShardKV) makeSnapshotData() []byte {
	buf := new(bytes.Buffer)
	enc := labgob.NewEncoder(buf)

	enc.Encode(kv.ClientId)
	enc.Encode(kv.data)
	enc.Encode(kv.applyIndex)
	enc.Encode(kv.identifySeqs)
	enc.Encode(kv.config)
	enc.Encode(kv.toUpdateConfig)

	return buf.Bytes()
}

// doSnapshotOnceDaemon waits for persist signals from applyLogDaemon, asks
// raft to snapshot up to the last applied index, and trims the reply-channel
// window accordingly.
func (kv *ShardKV) doSnapshotOnceDaemon() {
	if kv.maxraftstate == -1 {
		// Snapshotting disabled.
		return
	}

	for range kv.serverPersistCh {
		// BUGFIX: every other daemon checks killed(); without this, the
		// goroutine keeps snapshotting a dead server forever after Kill().
		if kv.killed() {
			return
		}

		time.Sleep(100 * time.Millisecond)

		kv.mu.Lock()
		if kv.rf.ZeroLogIndex() >= kv.applyIndex {
			// Log already compacted past applyIndex; nothing to do.
			kv.mu.Unlock()
			continue
		}

		kv.rf.Snapshot(kv.applyIndex, kv.makeSnapshotData())

		// Drop reply channels for indices now covered by the snapshot.
		if kv.adjustIndex(kv.applyIndex) < len(kv.replyChannels) {
			kv.replyChannels = kv.replyChannels[kv.adjustIndex(kv.applyIndex):]
		} else {
			kv.replyChannels = []chan ApplyResult{nil}
		}

		kv.fromIndex = kv.applyIndex
		kv.mu.Unlock()
	}

}

// SaveShardLog is the RPC handler through which another group pushes shard
// data to us during reconfiguration. The args are committed through raft so
// every replica of this group installs the shard identically.
func (kv *ShardKV) SaveShardLog(args *SaveShardLogArgs, reply *SaveShardLogReply) {
	kv.mu.Lock()
	op := Op{"SaveShardLog", *args, args.ClinetId, args.IdentifySeq}
	index, term, isLeader := kv.rf.Start(op)
	kv.mu.Unlock()

	if !isLeader {
		reply.Err = ErrWrongLeader
		return
	}

	err, _ := kv.waitLogApply(index, term)
	reply.Err = err
}

// qureyConfigDaemon periodically polls the shardctrler (leader only) for the
// next configuration and commits it through raft as an AppendNewConfig op.
// Configurations are consumed strictly one version at a time; while a
// reconfiguration is still in flight it only nudges the handoff daemon.
func (kv *ShardKV) qureyConfigDaemon() {
	ctrLeaderIdx := 0
	const queryPeriod = 100 * time.Millisecond
	queryTimer := time.NewTimer(2 * queryPeriod)
	queryCountNoGoing := 0

QueryCycle:
	for range queryTimer.C {
		if kv.killed() {
			return
		}

		kv.mu.Lock()
		// Only the leader needs to poll for new configs.
		_, isLeader := kv.rf.GetState()
		if !isLeader {
			kv.mu.Unlock()
			queryTimer.Reset(queryPeriod)
			continue QueryCycle
		}

		if kv.config.Num+1 == kv.toUpdateConfig.Num {
			// Previous reconfiguration not finished yet; every 5 idle polls,
			// nudge the handoff daemon in case it stalled.
			queryCountNoGoing++
			if queryCountNoGoing >= 5 {
				queryCountNoGoing = 0
				clearBoolenChannel(kv.needUpdateConfigCh)
				kv.needUpdateConfigCh <- true
			}
			kv.mu.Unlock()
			queryTimer.Reset(queryPeriod)
			continue QueryCycle
		}

		queryCountNoGoing = 0

		args := shardctrler.QueryArgs{Num: kv.config.Num + 1, ClinetId: kv.ClientId, IdentifySeq: nrand()}
		kv.mu.Unlock()
		notokCount := 0
		for {
			srv := kv.ctrlers[ctrLeaderIdx]
			var reply shardctrler.QueryReply
			ok := srv.Call("ShardCtrler.Query", &args, &reply)

			if ok {
				notokCount = 0
				kv.mu.Lock()
				if !reply.WrongLeader {
					if reply.Err == OK {
						if reply.Config.Num == 0 {
							// Requested config does not exist yet.
							kv.mu.Unlock()
							break
						}

						if kv.config.Num >= reply.Config.Num {
							// Already at (or past) this config version.
							kv.mu.Unlock()
							break
						}

						index, term, isLeader := kv.rf.Start(Op{"AppendNewConfig", reply.Config, kv.ClientId, nrand()})
						kv.mu.Unlock()
						if !isLeader {
							break
						}

						kv.waitLogApply(index, term)
						break
					}
					// NOTE(review): when the leader answers with reply.Err != OK
					// this loop retries immediately with no sleep — confirm the
					// ctrler's Query only ever returns OK from a correct leader.
				} else {
					// Wrong controller leader; rotate to the next one.
					ctrLeaderIdx = (ctrLeaderIdx + 1) % len(kv.ctrlers)
				}
				kv.mu.Unlock()
			} else {
				// RPC failed; after a few misses, try the next controller.
				notokCount++
				if notokCount > 4 {
					notokCount = 0
					ctrLeaderIdx = (ctrLeaderIdx + 1) % len(kv.ctrlers)
				}

				time.Sleep(10 * time.Millisecond)
			}
		}

		queryTimer.Reset(queryPeriod)
	}
}

// the tester calls Kill() when a ShardKV instance won't
// be needed again. you are not required to do anything
// in Kill(), but it might be convenient to (for example)
// turn off debug output from this instance.
func (kv *ShardKV) Kill() {
	kv.rf.Kill()
	// Your code here, if desired.
	atomic.StoreInt32(&kv.dead, 1) // daemons poll killed() and exit on their next iteration
}

// killed reports whether Kill has been called on this server.
func (kv *ShardKV) killed() bool {
	return atomic.LoadInt32(&kv.dead) == 1
}

// servers[] contains the ports of the servers in this group.
//
// me is the index of the current server in servers[].
//
// the k/v server should store snapshots through the underlying Raft
// implementation, which should call persister.SaveStateAndSnapshot() to
// atomically save the Raft state along with the snapshot.
//
// the k/v server should snapshot when Raft's saved state exceeds
// maxraftstate bytes, in order to allow Raft to garbage-collect its
// log. if maxraftstate is -1, you don't need to snapshot.
//
// gid is this group's GID, for interacting with the shardctrler.
//
// pass ctrlers[] to shardctrler.MakeClerk() so you can send
// RPCs to the shardctrler.
//
// make_end(servername) turns a server name from a
// Config.Groups[gid][i] into a labrpc.ClientEnd on which you can
// send RPCs. You'll need this to send RPCs to other groups.
//
// look at client.go for examples of how to use ctrlers[]
// and make_end() to send RPCs to the group owning a specific shard.
//
// StartServer() must return quickly, so it should start goroutines
// for any long-running work.
// StartServer wires up one ShardKV replica: state maps, labgob registrations
// for every Op payload, the underlying raft instance, snapshot restore, and
// the four background daemons. It must return quickly.
func StartServer(servers []*labrpc.ClientEnd, me int, persister *raft.Persister, maxraftstate int, gid int, ctrlers []*labrpc.ClientEnd, make_end func(string) *labrpc.ClientEnd) *ShardKV {
	// call labgob.Register on structures you want
	// Go's RPC library to marshall/unmarshall.
	labgob.Register(Op{})

	kv := new(ShardKV)
	kv.me = me
	kv.maxraftstate = maxraftstate
	kv.make_end = make_end
	kv.gid = gid
	kv.ctrlers = ctrlers

	// Your initialization code here.
	kv.dead = 0
	// Config 0: every shard assigned to gid 0 (no group owns anything yet).
	kv.config = shardctrler.Config{Num: 0, Shards: [shardctrler.NShards]int{0}, Groups: nil}
	kv.data = make([]map[string]string, shardctrler.NShards)
	for i := 0; i < shardctrler.NShards; i++ {
		kv.data[i] = make(map[string]string)
	}

	kv.identifySeqs = []map[int64]int64{}
	for i := 0; i < shardctrler.NShards; i++ {
		kv.identifySeqs = append(kv.identifySeqs, make(map[int64]int64))
	}

	// Register every concrete type that can travel inside Op.Args so labgob
	// can decode them on other replicas.
	labgob.Register(shardctrler.Config{})
	labgob.Register(KeyValue{})
	labgob.Register(SaveShardLogArgs{})
	labgob.Register(MapShard2Gid{})
	labgob.Register(AskShardDataToSendArgs{})

	// Use something like this to talk to the shardctrler:
	// kv.mck = shardctrler.MakeClerk(kv.ctrlers)
	kv.applyIndex = 0
	kv.fromIndex = 0
	kv.ClientId = nrand()
	kv.applyCh = make(chan raft.ApplyMsg)
	kv.rf = raft.Make(servers, me, persister, kv.applyCh)
	kv.needUpdateConfigCh = make(chan bool, 32)
	kv.replyChannels = []chan ApplyResult{nil} // slot 0 is a placeholder; raft indices start at 1
	kv.serverPersistCh = make(chan bool, 16)

	// Restore persisted state (if any) before the daemons start consuming it.
	kv.applySnapshotData(kv.rf.SnapshotForServer())

	go kv.qureyConfigDaemon()
	go kv.applyLogDaemon()
	go kv.handoffDiffDaemon()
	go kv.doSnapshotOnceDaemon()

	return kv
}
