package raft

//
// this is an outline of the API that raft must expose to
// the service (or tester). see comments below for
// each of these functions for more details.
//
// rf = Make(...)
//   create a new Raft server.
// rf.Start(command interface{}) (index, term, isleader)
//   start agreement on a new log entry
// rf.GetState() (term, isLeader)
//   ask a Raft for its current term, and whether it thinks it is leader
// ApplyMsg
//   each time a new entry is committed to the log, each Raft peer
//   should send an ApplyMsg to the service (or tester)
//   in the same server.
//

import (
	"6.824/labgob"
	"bytes"
	"container/list"
	"fmt"
	"log"
	"strconv"

	//"debug/dwarf"
	"math/rand"
	"time"

	//	"bytes"
	sync "github.com/sasha-s/go-deadlock"
	//"sync"
	"sync/atomic"
	//	"6.824/labgob"
	"6.824/labrpc"
)

//
// as each Raft peer becomes aware that successive log entries are
// committed, the peer should send an ApplyMsg to the service (or
// tester) on the same server, via the applyCh passed to Make(). set
// CommandValid to true to indicate that the ApplyMsg contains a newly
// committed log entry.
//
// in part 2D you'll want to send other kinds of messages (e.g.,
// snapshots) on the applyCh, but set CommandValid to false for these
// other uses.
//
type ApplyMsg struct {
	CommandValid bool        // true when this message carries a newly committed log entry
	Command      interface{} // the committed state-machine command
	CommandIndex int         // log index of the committed command

	// For 2D:
	SnapshotValid bool   // true when this message carries a snapshot instead of a command
	Snapshot      []byte // raw snapshot bytes
	SnapshotTerm  int    // term of the last entry covered by the snapshot
	SnapshotIndex int    // index of the last entry covered by the snapshot
}

// Entry is a single Raft log entry.
type Entry struct {
	Term    int         // term in which the entry was created
	Index   int         // logical log index (may differ from the slice position after log compaction)
	Command interface{} // state-machine command carried by the entry
}

// Server roles. A peer is in exactly one of these states at any time.
const (
	FOLLOWER = iota
	CANDIDATE
	LEADER
)

// StatusToStr returns a human-readable name for a server status, used
// in debug logging. Unknown values are reported explicitly instead of
// silently producing an empty string (the original returned "" for any
// unrecognized status, which made bad-status bugs invisible in logs).
func StatusToStr(status int) string {
	switch status {
	case FOLLOWER:
		return "FOLLOWER"
	case CANDIDATE:
		return "CANDIDATE"
	case LEADER:
		return "LEADER"
	default:
		return "UNKNOWN(" + strconv.Itoa(status) + ")"
	}
}

// Election timeouts start from ELECTION_TIMEOUT_BASE; ELECTION_TIMEOUT_RANGE
// is presumably a randomization span in milliseconds added on top of the
// base (the election-timer code is not in this chunk — confirm there).
// Heartbeats must fire well within the base timeout so followers do not
// start spurious elections.
const ELECTION_TIMEOUT_BASE time.Duration = 220 * time.Millisecond
const ELECTION_TIMEOUT_RANGE = 150
const HEARTBEAT_INTERVAL time.Duration = 100 * time.Millisecond

// doneAndStopGo pairs the two channels used to coordinate the shutdown
// of a background goroutine: StopChan asks it to stop and DoneChan
// reports completion. (The exact protocol lives in the vote/heartbeat
// routines, which are not visible in this chunk — confirm there.)
type doneAndStopGo struct {
	DoneChan chan int
	StopChan chan int
}

// votedInfo records a granted vote: the id of the server voted for and
// the term in which the vote was cast.
type votedInfo struct {
	Id   int
	Term int
}

//
// A Go object implementing a single Raft peer.
//
type Raft struct {
	mu        sync.RWMutex        // Lock to protect shared access to this peer's state
	peers     []*labrpc.ClientEnd // RPC end points of all peers
	outers    []int               // indexes of servers excluded from the cluster
	persister *Persister          // Object to hold this peer's persisted state
	me        int                 // this peer's index into peers[]
	leader    int                 // index of the current leader, from this peer's view (-1 when unknown)
	dead      int32               // set by Kill()

	// Your data here (2A, 2B, 2C).
	// Look at the paper's Figure 2 for a description of what
	// state a Raft server must maintain.
	status     int // FOLLOWER / CANDIDATE / LEADER
	votesCount int // votes received in the current election
	// Persistent state on all servers
	currentTerm                                                              int
	votedFor                                                                 *votedInfo // nil when no vote has been cast
	logs                                                                     []Entry
	logsTermFirstIndex                                                       []int // for each term, the index of that term's first log entry
	statusMutex, currentTermMutex, votedForMutex, votesCountMutex, logsMutex sync.RWMutex

	// Volatile state on all servers
	commitIndex int
	lastApplied int

	// Volatile state on leaders
	nextIndex  []int
	matchIndex []int

	applyChan              chan ApplyMsg // committed entries and snapshots are delivered to the service here
	appendEntriesRecvdChan chan int      // signals receipt of a valid leader RPC (AppendEntries/InstallSnapshot)
	statusChgChan          chan int
	votedChan              chan int
	killedChan             chan int
	readyForInterrupt      int32
	// votes routines related
	votesValid   int32 // unused
	votesGoChan  *doneAndStopGo
	votesGoMutex sync.RWMutex
	// hb routines related
	hbGoChan  *doneAndStopGo
	hbGoMutex sync.RWMutex
	// interruption
	intCond sync.Cond
	goOn    bool
	// could be appended in concurrent situation
	isCopying  int32
	startCond  sync.Cond
	appendChan chan int // carries new entry indexes (>0), heartbeat (0) and kill (-1) signals

	// for debug
	serverPrefix string
	prefix       string
	prefixMutex  sync.RWMutex
	dCurTerm     uint64 // atomically readable mirror of currentTerm, for lock-free log prefixes

	// for GetState
	//LeaderNum  int
	//stateMutex sync.RWMutex

	// for log compact
	lastIncludedIndex int // logical index covered by the latest snapshot
	realIndexInLogs   map[int]int
}

// getGoOn reads the goOn flag under the interrupt-condition lock.
func (rf *Raft) getGoOn() bool {
	rf.intCond.L.Lock()
	defer rf.intCond.L.Unlock()
	return rf.goOn
}

// setGoOn updates the goOn flag under the interrupt-condition lock.
func (rf *Raft) setGoOn(goOn bool) {
	rf.intCond.L.Lock()
	defer rf.intCond.L.Unlock()
	rf.goOn = goOn
}

// getPrefix builds the debug-log prefix: the configured prefix plus the
// current term and, when ShowDead is set and the peer is killed, a DEAD
// marker.
func (rf *Raft) getPrefix() string {
	curTerm := atomic.LoadUint64(&rf.dCurTerm)
	rf.prefixMutex.RLock()
	defer rf.prefixMutex.RUnlock()
	result := fmt.Sprintf("%s_T%d", rf.prefix, curTerm)
	if ShowDead && rf.killed() {
		result += "(DEAD)"
	}
	return result + ": "
}

// setPrefix stores the debug-log prefix (serverPrefix + pf); it is a
// no-op when debugging is disabled.
func (rf *Raft) setPrefix(pf string) {
	if !Debug {
		return
	}
	rf.prefixMutex.Lock()
	defer rf.prefixMutex.Unlock()
	rf.prefix = rf.serverPrefix + pf
}

// dPrintf logs a debug message with this peer's current prefix; it is a
// no-op when debugging is disabled.
func (rf *Raft) dPrintf(format string, a ...interface{}) {
	if Debug {
		DPrintf(rf.getPrefix()+format, a...)
	}
}

// GetState returns the current term and whether this server believes
// it is the leader.
func (rf *Raft) GetState() (int, bool) {
	rf.mu.RLock()
	defer rf.mu.RUnlock()
	return rf.currentTerm, rf.leader == rf.me
}

// getLastEntryIndex returns the logical index of the last log entry,
// optionally taking the read lock.
func (rf *Raft) getLastEntryIndex(needLock bool) int {
	if needLock {
		rf.mu.RLock()
		defer rf.mu.RUnlock()
	}
	last := len(rf.logs) - 1
	return rf.logs[last].Index
}

// leaderWork launches the leader-side replication pipeline: one
// goroutine turns append/heartbeat signals into tasks, the other sends
// them to the followers.
func (rf *Raft) leaderWork() {
	ch := make(chan *AppendTask)
	go rf.appendTaskProcessor(&ch)
	go rf.appendTaskGenerator(&ch)
}

// initStatus resets the per-role state when entering the given status:
// FOLLOWER clears vote bookkeeping; CANDIDATE starts a new term and
// votes for itself; LEADER claims leadership, reinitializes
// nextIndex/matchIndex, and drains stale append signals.
func (rf *Raft) initStatus(status int, needLock bool) {
	if needLock {
		rf.mu.Lock()
		defer rf.mu.Unlock()
	}
	switch status {
	case FOLLOWER:
		rf.votedFor = nil
		rf.votesCount = 0
		//rf.nextIndex = nil
		//rf.matchIndex = nil
		//rf.leader = -1
	case CANDIDATE:
		// new election: bump the term and vote for ourselves
		rf.incTerm(false)
		rf.leader = -1
		rf.votedFor = &votedInfo{
			rf.me,
			rf.getTerm(false),
		}
		rf.votesCount = 1
		//rf.nextIndex = nil
		//rf.matchIndex = nil
	case LEADER:
		rf.leader = rf.me
		rf.votedFor = nil
		rf.votesCount = 0
		// optimistically assume every follower is up to date (Figure 2)
		for i := range rf.nextIndex {
			rf.nextIndex[i] = rf.getLastEntryIndex(false) + 1
		}
		rf.matchIndex = make([]int, len(rf.peers))
		//for rf.appendChan != nil {
		//	time.Sleep(2 * time.Millisecond)
		//}
		//rf.appendChan = make(chan int, 1)
		// drain any signals left over from a previous leadership stint
		for len(rf.appendChan) > 0 {
			<-rf.appendChan
		}
	}
	//rf.persist(false)
	rf.dPrintf("initStatus, status %s init finished", StatusToStr(status))
}

// changeStatus transitions this peer to toStatus. needSendToChan
// notifies routines tied to the old role (via statusChgChan plus an
// intCond broadcast) so they can wind down; needInit additionally
// resets role-specific state via initStatus. Becoming LEADER also
// starts the replication goroutines and queues an immediate heartbeat.
func (rf *Raft) changeStatus(toStatus int, needLock bool, needInit bool, needSendToChan bool) {
	if needLock {
		rf.mu.Lock()
		defer rf.mu.Unlock()
	}

	// stop routines that were running under the previous role
	if needSendToChan {
		rf.sendToChan(&rf.statusChgChan, toStatus, true)
		rf.dPrintf("changeStatus, send sig to statusChgChan")
		rf.intCond.Broadcast()
		rf.dPrintf("changeStatus, broadcast to interrupt")
	}

	rf.dPrintf("changeStatus, changing to %s", StatusToStr(toStatus))
	rf.status = toStatus
	rf.setPrefix(StatusToStr(toStatus))

	if needInit {
		rf.initStatus(toStatus, false)
	}
	if toStatus == LEADER {
		rf.leaderWork()
		// send a heartbeat immediately
		rf.appendChan <- 0
	}
}

// sendToChan forwards info to the given channel. When checkInterrupt is
// set, the send only happens while the server is ready for interrupts
// (readyForInterrupt == 1). A nil channel is skipped instead of
// blocking forever.
func (rf *Raft) sendToChan(ch *chan int, info int, checkInterrupt bool) {
	if checkInterrupt && atomic.LoadInt32(&rf.readyForInterrupt) != 1 {
		return
	}
	// BUG FIX: the original guarded on ch != nil, but every caller passes
	// the address of a struct field, which is never nil; the meaningful
	// check is on the channel value itself — sending on a nil channel
	// would block forever.
	if ch == nil || *ch == nil {
		return
	}
	*ch <- info
}

// Term get/set
// getTerm returns currentTerm, optionally under the read lock.
func (rf *Raft) getTerm(needLock bool) int {
	if !needLock {
		return rf.currentTerm
	}
	rf.mu.RLock()
	defer rf.mu.RUnlock()
	return rf.currentTerm
}

// incTerm advances currentTerm by one, optionally under the lock.
func (rf *Raft) incTerm(needLock bool) {
	if needLock {
		rf.mu.Lock()
		defer rf.mu.Unlock()
	}
	next := rf.currentTerm + 1
	rf.storeTerm(next, false)
}

// storeTerm raises currentTerm to newTerm (it never lowers it) and
// grows logsTermFirstIndex so it can be indexed by any term up to
// newTerm. The dCurTerm mirror is updated atomically so debug logging
// can read the term without taking rf.mu.
func (rf *Raft) storeTerm(newTerm int, needLock bool) {
	if needLock {
		rf.mu.Lock()
		defer rf.mu.Unlock()
	}
	// grow the per-term first-index table before publishing the term
	d := newTerm + 1 - len(rf.logsTermFirstIndex)
	if d > 0 {
		rf.logsTermFirstIndex = append(rf.logsTermFirstIndex, make([]int, d)...)
		rf.dPrintf("storeTerm, logsTermFirstIndex recorded to Term %d", len(rf.logsTermFirstIndex)-1)
	}
	if rf.currentTerm < newTerm {
		rf.currentTerm = newTerm
		atomic.StoreUint64(&rf.dCurTerm, uint64(newTerm))
	}
}

// getIndexFromEntry2Logs maps a logical entry index to a position in
// the rf.logs slice.
// "index" is the logical log index, not a slice position; after log
// compaction an Entry's Index is always >= its slice position.
// Returns the slice position for that logical index.
func (rf *Raft) getIndexFromEntry2Logs(index int, needLock bool) int {
	if needLock {
		rf.mu.RLock()
		defer rf.mu.RUnlock()
	}
	count := len(rf.logs)
	lastEntryIndex := rf.getLastEntryIndex(false)
	// index may exceed count, but not the last entry's logical index
	if index > lastEntryIndex {
		// out of range: extrapolate past the end of the slice
		return index - lastEntryIndex + count - 1
	}
	// fast path: no snapshot taken, so logical index == slice position
	if index < count && rf.logs[index].Index == index {
		return index
	}

	// binary-search positions [1, count-1] for the matching Index
	l, r := 1, count-1
	for l < r {
		mid := (l + r) >> 1
		mi := rf.logs[mid].Index
		if mi == index {
			return mid
		} else if mi > index {
			r = mid
		} else {
			l = mid + 1
		}
	}

	return l
}

//
// save Raft's persistent state to stable storage,
// where it can later be retrieved after a crash and restart.
// see paper's Figure 2 for a description of what should be persistent.
// Persisted here: currentTerm, votedFor (a nil vote is encoded with
// Id == -1, matching readPersist), logs, and logsTermFirstIndex.
//
func (rf *Raft) persist(needLock bool) {
	// Your code here (2C).
	// Example:
	w := new(bytes.Buffer)
	e := labgob.NewEncoder(w)
	if needLock {
		rf.mu.RLock()
		defer rf.mu.RUnlock()
	}

	// encode votedFor by value; Id -1 marks "no vote cast"
	var votedFor votedInfo
	if rf.votedFor != nil {
		votedFor = *rf.votedFor
	} else {
		votedFor = votedInfo{
			-1,
			0,
		}
	}
	if e.Encode(rf.getTerm(false)) != nil ||
		e.Encode(votedFor) != nil ||
		e.Encode(rf.logs) != nil ||
		e.Encode(rf.logsTermFirstIndex) != nil {
		rf.dPrintf("persist, encode failed")
		return
	}
	data := w.Bytes()
	rf.persister.SaveRaftState(data)
	rf.dPrintf("persist, Successfully")
}

//
// restore previously persisted state: currentTerm, votedFor, logs, and
// logsTermFirstIndex, in the exact order written by persist.
//
func (rf *Raft) readPersist(data []byte) {
	if data == nil || len(data) < 1 { // bootstrap without any state?
		return
	}
	// Your code here (2C).
	rf.mu.Lock()
	defer rf.mu.Unlock()
	r := bytes.NewBuffer(data)
	d := labgob.NewDecoder(r)
	var curTerm int
	votedFor := new(votedInfo)
	var logs []Entry
	var logsTermIndex []int
	err1 := d.Decode(&curTerm)
	err2 := d.Decode(votedFor)
	err3 := d.Decode(&logs)
	err4 := d.Decode(&logsTermIndex)
	if err1 != nil || err2 != nil || err3 != nil || err4 != nil {
		// BUG FIX: the original message dropped err4, hiding any failure
		// while decoding logsTermFirstIndex.
		rf.dPrintf("readPersist, decode failed, curTerm: %v, votedFor: %v, logs: %v, logsTermFirstIndex: %v", err1, err2, err3, err4)
		return
	}
	rf.storeTerm(curTerm, false)
	// a persisted Id of -1 encodes "no vote cast" (see persist)
	if votedFor.Id == -1 {
		rf.votedFor = nil
	} else {
		rf.votedFor = votedFor
	}
	rf.logs = logs
	rf.logsTermFirstIndex = logsTermIndex
	rf.dPrintf("readPersist, Successfully")
}

//
// A service wants to switch to snapshot.  Only do so if Raft hasn't
// have more recent info since it communicate the snapshot on applyCh.
// Returns true when the snapshot was installed (log trimmed through
// lastIncludedIndex), false when the snapshot is stale.
//
func (rf *Raft) CondInstallSnapshot(lastIncludedTerm int, lastIncludedIndex int, snapshot []byte) bool {
	// Your code here (2D).
	rf.mu.Lock()
	defer rf.mu.Unlock()
	// stale: we already cover this index with a snapshot
	if lastIncludedIndex <= rf.lastIncludedIndex {
		return false
	}
	i := rf.getIndexFromEntry2Logs(lastIncludedIndex, false)
	if i == 0 {
		return false
	}

	curTerm := rf.getTerm(false)

	// decode the last command covered by the snapshot.
	// BUG FIX: the original wrote `cmd = d.Decode(&cmd)`, which replaced
	// the decoded command with the error return value of Decode.
	buf := bytes.NewBuffer(snapshot)
	d := labgob.NewDecoder(buf)
	var cmd interface{}
	if err := d.Decode(&cmd); err != nil {
		cmd = nil
	}
	term := rf.logs[i].Term
	// build the synthetic entry representing the snapshot
	snapshotEntry := Entry{
		term,
		lastIncludedIndex,
		cmd,
	}

	conflict := lastIncludedTerm != rf.logs[i].Term

	// keep the suffix only when it does not conflict with the snapshot
	if i < len(rf.logs)-1 && !conflict {
		rf.logs = append([]Entry{rf.logs[0], snapshotEntry}, rf.logs[i+1:]...)
	} else {
		rf.logs = []Entry{rf.logs[0], snapshotEntry}
	}
	// rebuild per-term first-index records below the snapshot's term
	if term < curTerm && !conflict {
		rf.logsTermFirstIndex = append(make([]int, term+1), rf.logsTermFirstIndex[term+1:]...)
	} else {
		rf.logsTermFirstIndex = make([]int, term+1)
	}
	// the first entry of the snapshot's term is the snapshot itself,
	// which lives at slice position 1
	rf.logsTermFirstIndex[term] = 1
	// remember the latest snapshot's last covered logical index
	rf.lastIncludedIndex = lastIncludedIndex

	return true
}

// the service says it has created a snapshot that has
// all info up to and including index. this means the
// service no longer needs the log through (and including)
// that index. Raft should now trim its log as much as possible.
func (rf *Raft) Snapshot(index int, snapshot []byte) {
	if rf.killed() {
		return
	}
	// Your code here (2D).
	rf.mu.Lock()
	defer rf.mu.Unlock()
	// ignore indexes already covered by the latest snapshot, or beyond the log
	if index <= rf.lastIncludedIndex || index > rf.getLastEntryIndex(false) {
		return
	}

	curTerm := rf.getTerm(false)

	// decode the last command covered by the snapshot.
	// BUG FIX: the original wrote `cmd = d.Decode(&cmd)`, which replaced
	// the decoded command with the error return value of Decode.
	buf := bytes.NewBuffer(snapshot)
	d := labgob.NewDecoder(buf)
	var cmd interface{}
	if err := d.Decode(&cmd); err != nil {
		cmd = nil
	}
	i := rf.getIndexFromEntry2Logs(index, false)
	term := rf.logs[i].Term
	// build the synthetic entry representing the snapshot
	snapshotEntry := Entry{
		term,
		index,
		cmd,
	}

	// drop everything before index (including any older snapshot) and
	// splice the snapshot entry in right after the dummy head
	if i < len(rf.logs)-1 {
		rf.logs = append([]Entry{rf.logs[0], snapshotEntry}, rf.logs[i+1:]...)
	} else {
		rf.logs = []Entry{rf.logs[0], snapshotEntry}
	}
	// clear first-index records for terms before the snapshot's term
	if term < curTerm {
		rf.logsTermFirstIndex = append(make([]int, term+1), rf.logsTermFirstIndex[term+1:]...)
	} else {
		rf.logsTermFirstIndex = make([]int, term+1)
	}
	// the first entry of the snapshot's term is the snapshot itself,
	// which lives at slice position 1
	rf.logsTermFirstIndex[term] = 1
	// remember the latest snapshot's last covered logical index
	rf.lastIncludedIndex = index
}

// InstallSnapshotArgs is sent by the leader to push a snapshot that
// covers the log up to and including LastIncludedIndex/LastIncludedTerm.
type InstallSnapshotArgs struct {
	Term              int
	LeaderId          int
	LastIncludedIndex int
	LastIncludedTerm  int
	Snapshot          []byte
}

// InstallSnapshotReply returns the receiver's current term.
type InstallSnapshotReply struct {
	Term int
}

// sendInstallSnapshot issues the InstallSnapshot RPC to the given peer
// and reports whether a reply was received.
func (rf *Raft) sendInstallSnapshot(server int, args *InstallSnapshotArgs, reply *InstallSnapshotReply) bool {
	return rf.peers[server].Call("Raft.InstallSnapshot", args, reply)
}

// InstallSnapshot is the RPC handler through which the leader pushes a
// snapshot to this peer. It performs the standard term checks (stepping
// down to FOLLOWER when needed) and forwards the snapshot to the
// service via applyChan; CondInstallSnapshot later decides whether to
// actually switch to it.
func (rf *Raft) InstallSnapshot(args *InstallSnapshotArgs, reply *InstallSnapshotReply) {
	rf.mu.Lock()
	defer rf.mu.Unlock()

	curTerm := rf.getTerm(false)
	if args == nil {
		reply.Term = curTerm
		return
	}

	// term bookkeeping
	if args.Term < curTerm {
		// the leader's term is older than ours: reject [1]
		reply.Term = curTerm
		rf.dPrintf("InstallSnapshot, its currentTerm is %d, > %d of leader", curTerm, args.Term)
		return
	}
	// the leader's term is >= ours
	if curTerm < args.Term {
		// BUG FIX: log before overwriting curTerm — the original logged
		// after the assignment, so "from" always showed the new term.
		rf.dPrintf("InstallSnapshot, its currentTerm updated from %d to %d", curTerm, args.Term)
		rf.storeTerm(args.Term, false)
		curTerm = args.Term
		//isTermFirst = true
	}
	reply.Term = args.Term

	// adopt the sender as leader
	rf.leader = args.LeaderId
	// at this point rf.currentTerm == args.Term
	switch rf.status {
	case FOLLOWER:
		rf.sendToChan(&rf.appendEntriesRecvdChan, 1, true)
		if rf.votedFor != nil {
			// first RPC from this leader: reset vote-related state
			rf.dPrintf("InstallSnapshot, first time recvd AppendEntries from leader")
			rf.initStatus(FOLLOWER, false)
		}
	case CANDIDATE, LEADER:
		// not a follower: must step down [2]
		rf.dPrintf("InstallSnapshot, its currentTerm <= leader's, should back to FOLLOWER")
		rf.changeStatus(FOLLOWER, false, true, true)
	}

	if len(args.Snapshot) == 0 {
		return
	}

	applyMsg := ApplyMsg{
		SnapshotValid: true,
		Snapshot:      args.Snapshot,
		SnapshotTerm:  args.LastIncludedTerm,
		SnapshotIndex: args.LastIncludedIndex,
	}

	// NOTE(review): this send happens while rf.mu is held; if the service
	// reading applyChan ever calls back into Raft, it can deadlock — verify.
	rf.applyChan <- applyMsg
}

//
// example RequestVote RPC arguments structure.
// field names must start with capital letters!
//
type RequestVoteArgs struct {
	// Your data here (2A, 2B).
	Term         int // candidate's term
	CandidateId  int // candidate requesting the vote
	LastLogIndex int // index of the candidate's last log entry
	LastLogTerm  int // term of the candidate's last log entry
}

//
// example RequestVote RPC reply structure.
// field names must start with capital letters!
//
type RequestVoteReply struct {
	// Your data here (2A).
	Term        int  // receiver's term, for the candidate to update itself
	VoteGranted bool // true means the candidate received this vote
}

//
// RequestVote RPC handler: decides whether to grant our vote to the
// candidate, following the election rules of Raft Figure 2 (term
// comparison, at most one vote per term, and the up-to-date check on
// the last log entry's term and index).
//
func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {
	// Your code here (2A, 2B).

	rf.mu.Lock()

	defer rf.mu.Unlock()

	rf.dPrintf("RequestVote, get vote request from S%d", args.CandidateId)
	// term bookkeeping
	curTerm := rf.getTerm(false)
	if args.Term < curTerm {
		reply.VoteGranted = false
		reply.Term = curTerm
		rf.dPrintf("RequestVote, its currentTerm > S%d's", args.CandidateId)
		return
	}
	// from here on, args.Term >= rf.currentTerm
	reply.Term = args.Term
	needSendToVotedChan := true
	if args.Term > curTerm {
		rf.storeTerm(args.Term, false)
		rf.dPrintf("RequestVote, its currentTerm < S%d's, so updated from %d to %d", args.CandidateId, curTerm, args.Term)
		curTerm = args.Term
		if rf.status != FOLLOWER {
			rf.leader = -1
			rf.changeStatus(FOLLOWER, false, true, true)
			needSendToVotedChan = false
			//return
		}
	} else if rf.status == LEADER {
		// equal terms and we are the leader: refuse
		reply.VoteGranted = false
		return
	}

	switch rf.status {
	// either we were already a follower, or we are a stale leader/candidate
	// that just stepped down (then our last entry's term is necessarily
	// smaller than the candidate's)
	case FOLLOWER:
		// already voted for someone else in a term at least as new: refuse
		if rf.votedFor != nil && rf.votedFor.Id != args.CandidateId && rf.votedFor.Term >= args.Term {
			rf.dPrintf("RequestVote, voted yet, won't vote to S%d", args.CandidateId)
			reply.VoteGranted = false
			break
		}
		// up-to-date check on our last entry: refuse if ours is newer
		entry := rf.logs[len(rf.logs)-1]
		// last-entry terms differ
		if entry.Term != args.LastLogTerm {
			// ours is older
			if entry.Term < args.LastLogTerm {
				rf.dPrintf("RequestVote, its last log entry term < S%d's", args.CandidateId)
				// todo handle duplicate votes?
				// BUG FIX: the original format string had no verb for
				// CandidateId, producing %!(EXTRA ...) noise in the log.
				rf.dPrintf("RequestVote, so VOTE to S%d", args.CandidateId)
				reply.VoteGranted = true
				rf.votedFor = &votedInfo{
					args.CandidateId,
					args.Term,
				}
			} else {
				// ours is newer
				rf.dPrintf("RequestVote, its last log entry term >= S%d's, won't vote", args.CandidateId)
				//reply.Term = rf.currentTerm
				reply.VoteGranted = false
			}
		} else {
			// equal last-entry terms: compare last indexes
			rf.dPrintf("RequestVote, its last log entry term is the same as S%d's", args.CandidateId)
			if entry.Index <= args.LastLogIndex {
				// our index is not larger: grant the vote
				rf.dPrintf("RequestVote, its last log entry index <= S%d's, so VOTE", args.CandidateId)
				reply.VoteGranted = true
				rf.votedFor = &votedInfo{
					args.CandidateId,
					args.Term,
				}
			} else {
				// our index is larger: refuse
				rf.dPrintf("RequestVote, its last log entry index > S%d's, so not vote", args.CandidateId)
				reply.VoteGranted = false
			}
		}
		if needSendToVotedChan && reply.VoteGranted == true {
			// reset timer
			rf.sendToChan(&rf.votedChan, args.CandidateId, true)
		}
	case CANDIDATE:
		reply.VoteGranted = false
	case LEADER:
	}

	if reply.VoteGranted == true {
		rf.persist(false)
		rf.dPrintf("RequestVote, persist states")
	}
}

//
// example code to send a RequestVote RPC to a server.
// server is the index of the target server in rf.peers[].
// expects RPC arguments in args.
// fills in *reply with RPC reply, so caller should
// pass &reply.
// the types of the args and reply passed to Call() must be
// the same as the types of the arguments declared in the
// handler function (including whether they are pointers).
//
// The labrpc package simulates a lossy network, in which servers
// may be unreachable, and in which requests and replies may be lost.
// Call() sends a request and waits for a reply. If a reply arrives
// within a timeout interval, Call() returns true; otherwise
// Call() returns false. Thus Call() may not return for a while.
// A false return can be caused by a dead server, a live server that
// can't be reached, a lost request, or a lost reply.
//
// Call() is guaranteed to return (perhaps after a delay) *except* if the
// handler function on the server side does not return.  Thus there
// is no need to implement your own timeouts around Call().
//
// look at the comments in ../labrpc/labrpc.go for more details.
//
// if you're having trouble getting RPC to work, check that you've
// capitalized all field names in structs passed over RPC, and
// that the caller passes the address of the reply struct with &, not
// the struct itself.
//
// sendRequestVote issues the RequestVote RPC to the given peer and
// reports whether a reply was received.
func (rf *Raft) sendRequestVote(server int, args *RequestVoteArgs, reply *RequestVoteReply) bool {
	return rf.peers[server].Call("Raft.RequestVote", args, reply)
}

// AppendTask asks the task processor to replicate the log up to
// RightIndex at the given term (a RightIndex of 0 corresponds to a
// heartbeat signal from appendChan).
type AppendTask struct {
	Term       int
	RightIndex int
}

type AppendEntriesArgs struct {
	Term         int
	LeaderId     int
	PrevLogIndex int // index of the entry immediately preceding the first new entry, i.e. the last already-present entry's index
	PrevLogTerm  int
	Entries      []Entry
	LeaderCommit int
}

// AppendEntriesReply carries the follower's verdict. When the
// consistency check fails, ConflictTerm/FirstIndex implement the
// fast-backup optimization: the term of the conflicting entry and the
// first index recorded for that term (-1 when unused).
type AppendEntriesReply struct {
	Term         int
	Success      bool
	ConflictTerm int
	FirstIndex   int
}

// AppendEntries is the RPC handler through which the leader replicates
// log entries (or, with no entries, sends a heartbeat). It enforces the
// term rules, runs the consistency check on PrevLogIndex/PrevLogTerm
// with the conflicting-term fast-backup optimization, appends new
// entries, persists, and applies newly committed entries to applyChan.
func (rf *Raft) AppendEntries(args *AppendEntriesArgs, reply *AppendEntriesReply) {
	reply.ConflictTerm = -1
	reply.FirstIndex = -1
	reply.Success = true

	rf.mu.Lock()
	defer rf.mu.Unlock()
	curTerm := rf.getTerm(false)
	//isTermFirst := false
	// term bookkeeping
	if args.Term < curTerm {
		// the leader's term is older than ours: reject [1]
		reply.Term = curTerm
		rf.dPrintf("AppendEntries, its currentTerm is %d, > %d of leader", curTerm, args.Term)
		return
	}
	// the leader's term is >= ours
	if curTerm < args.Term {
		// BUG FIX: log before overwriting curTerm — the original logged
		// after the assignment, so "from" always showed the new term.
		rf.dPrintf("AppendEntries, its currentTerm updated from %d to %d", curTerm, args.Term)
		rf.storeTerm(args.Term, false)
		curTerm = args.Term
		//isTermFirst = true
	}
	reply.Term = args.Term

	// adopt the sender as leader
	rf.leader = args.LeaderId
	// at this point rf.currentTerm == args.Term
	switch rf.status {
	case FOLLOWER:
		rf.sendToChan(&rf.appendEntriesRecvdChan, 1, true)
		if rf.votedFor == nil || rf.votedFor.Id != rf.leader {
			// first AppendEntries from this leader: record it in votedFor
			rf.votedFor = &votedInfo{
				rf.leader,
				curTerm,
			}
			rf.dPrintf("AppendEntries, first time recvd AppendEntries from leader")
		}
	case CANDIDATE, LEADER:
		// not a follower: must step down [2]
		rf.dPrintf("AppendEntries, its currentTerm <= leader's, should back to FOLLOWER")
		rf.changeStatus(FOLLOWER, false, true, true)
		// then continue with the normal follower handling
	}

	// from here on we are a follower and the RPC is valid
	rf.dPrintf("AppendEntries, it's a valid RPC")

	// consistency check: compare the entry preceding the new ones; on a
	// mismatch, back up a whole term at a time (fast-backup optimization)
	rf.dPrintf("AppendEntries, append handling...\n%#v", *args)
	lastIndex := rf.getLastEntryIndex(false)
	// prev log entry conflicts [3]
	iPrevInLog := rf.getIndexFromEntry2Logs(args.PrevLogIndex, false)
	if args.PrevLogIndex > lastIndex || rf.logs[iPrevInLog].Term != args.PrevLogTerm {
		if Debug {
			var thisTermStr string
			if args.PrevLogIndex > lastIndex {
				thisTermStr = "NULL"
			} else {
				thisTermStr = strconv.Itoa(rf.logs[iPrevInLog].Term)
			}
			rf.dPrintf("AppendEntries, Conflict, PrevLogIndex is %d, this related term is %s", args.PrevLogIndex, thisTermStr)
		}
		if args.PrevLogIndex > lastIndex {
			reply.ConflictTerm = args.PrevLogTerm
			reply.FirstIndex = lastIndex + 1
		} else {
			reply.ConflictTerm = rf.logs[iPrevInLog].Term
			reply.FirstIndex = rf.logsTermFirstIndex[reply.ConflictTerm]
			// delete every entry from the conflict onwards
			if reply.FirstIndex > 0 {
				iFirstInLog := rf.getIndexFromEntry2Logs(reply.FirstIndex, false)
				rf.logs = rf.logs[:iFirstInLog]
				rf.logsTermFirstIndex = append(rf.logsTermFirstIndex[:reply.ConflictTerm],
					make([]int, len(rf.logsTermFirstIndex)-reply.ConflictTerm)...)
			}
		}
		reply.Success = false
	} else if len(args.Entries) > 0 {
		// append at our nextIndex and beyond
		rf.dPrintf("AppendEntries, no conflict, remove entries after PrevLogIndex %d", args.PrevLogIndex)

		lastAppendEntry := &args.Entries[len(args.Entries)-1]
		// skip when the prev entry is from the current term AND the last
		// new entry does not reach past our last one (we have everything)
		if rf.logs[iPrevInLog].Term != rf.getTerm(false) || lastAppendEntry.Index > rf.getLastEntryIndex(false) {
			// keep only the prefix through PrevLogIndex
			rf.logs = rf.logs[:iPrevInLog+1]
			lastTerm := rf.logs[iPrevInLog].Term
			d := lastAppendEntry.Term - lastTerm
			if d > 0 {
				rf.logsTermFirstIndex = append(rf.logsTermFirstIndex[:lastTerm+1], make([]int, d)...)
			}
			// terms after PrevLogIndex may differ entirely from what we had:
			// walk the new entries and record each term's first index
			for _, entry := range args.Entries {
				if entry.Term != lastTerm {
					rf.logsTermFirstIndex[entry.Term] = entry.Index
					lastTerm = entry.Term
				}
			}

			rf.logs = append(rf.logs, args.Entries...)
			rf.dPrintf("AppendEntries, Append Entries Successfully. Now Logs = %+v", rf.logs)
		}
	}

	rf.dPrintf("AppendEntries, persist states")
	rf.persist(false)

	// everything through leaderCommit must be applied:
	// If leaderCommit > commitIndex, set commitIndex = min(leaderCommit, index of last new entry)
	if reply.Success {
		// consistency with the leader is guaranteed at this point
		if args.LeaderCommit > rf.commitIndex {
			// entries through leaderCommit are majority-replicated: commit
			rf.commitIndex = Min(args.LeaderCommit, rf.getLastEntryIndex(false))
			rf.dPrintf("AppendEntries, updated commitIndex to %d", rf.commitIndex)
		}
		if rf.lastApplied >= rf.commitIndex {
			return
		}
		// apply the newly committed entries to the state machine
		rf.dPrintf("AppendEntries, going to apply...")
		applyMsg := ApplyMsg{
			CommandValid: true,
		}
		iLastApplyInLog := rf.getIndexFromEntry2Logs(rf.lastApplied, false)
		iCommitInLog := rf.getIndexFromEntry2Logs(rf.commitIndex, false)
		for i := iLastApplyInLog + 1; i <= iCommitInLog; i++ {
			// BUG FIX: report the entry's logical index, not its slice
			// position — the two differ once the log has been compacted
			// (they coincide only while no snapshot exists).
			applyMsg.Command = rf.logs[i].Command
			applyMsg.CommandIndex = rf.logs[i].Index
			rf.applyChan <- applyMsg
			rf.lastApplied = rf.logs[i].Index
			rf.dPrintf("AppendEntries, applied cmd%d successfully", rf.logs[i].Index)
		}
	}
}

// sendAppendEntries issues the AppendEntries RPC to the given peer and
// reports whether a reply was received.
func (rf *Raft) sendAppendEntries(server int, args *AppendEntriesArgs, reply *AppendEntriesReply) bool {
	return rf.peers[server].Call("Raft.AppendEntries", args, reply)
}

//
// the service using Raft (e.g. a k/v server) wants to start
// agreement on the next command to be appended to Raft's log. if this
// server isn't the leader, returns false. otherwise start the
// agreement and return immediately. there is no guarantee that this
// command will ever be committed to the Raft log, since the leader
// may fail or lose an election. even if the Raft instance has been killed,
// this function should return gracefully.
//
// the first return value is the index that the command will appear at
// if it's ever committed. the second return value is the current
// term. the third return value is true if this server believes it is
// the leader.
//
func (rf *Raft) Start(command interface{}) (int, int, bool) {
	// Your code here (2B).
	term, isLeader := rf.GetState()
	if !isLeader || rf.killed() {
		return -1, term, isLeader
	}

	// from here on this server believes it is the leader
	intChan := make(chan struct{})
	doneChan := make(chan struct{})
	var goOn int32 = 1

	// interrupt watcher: a broadcast on intCond (e.g. from changeStatus)
	// aborts the append below.
	// NOTE(review): when doneChan wins the select, this goroutine remains
	// blocked in Wait until the next broadcast — confirm that is intended.
	go func() {
		rf.intCond.L.Lock()
		rf.intCond.Wait()
		rf.intCond.L.Unlock()
		close(intChan)
	}()

	var index int
	var index1 = -1
	go func() {
		rf.mu.Lock()
		if rf.leader == rf.me && atomic.LoadInt32(&goOn) == 1 {
			//var commits uint32 = 0
			// index the new entry will occupy
			index1 = rf.getLastEntryIndex(false) + 1
			// append to our own log first
			term = rf.getTerm(false)
			entry := Entry{
				term,
				index1,
				command,
			}
			// re-check the interrupt flag right before mutating state
			if atomic.LoadInt32(&goOn) == 1 {
				rf.logs = append(rf.logs, entry)
				rf.logsTermFirstIndex[term] = index1
				rf.dPrintf("Start, appended entry in leader, %#v", entry)
				rf.persist(false)
				rf.dPrintf("Start, persist states in leader")
			}
			//atomic.AddUint32(&commits, 1)
		}
		rf.mu.Unlock()
		close(doneChan)
		//atomic.StoreInt32(&rf.isCopying, 0)
	}()

	select {
	case <-doneChan:
		index = index1
	case <-intChan:
		atomic.CompareAndSwapInt32(&goOn, 1, 0)
		index = -1
		rf.dPrintf("Start, Interrupted")
	}

	if index == -1 {
		isLeader = false
	}

	// wake the replication pipeline for the new entry (or -1 on abort)
	rf.appendChan <- index

	return index, term, isLeader
}

// appendTaskGenerator runs while this peer is leader. It collects entry
// indexes and heartbeat(0)/kill(-1) signals from rf.appendChan,
// coalesces duplicates, and emits AppendTask values on taskChan until
// it is interrupted by an intCond broadcast.
func (rf *Raft) appendTaskGenerator(taskChan *chan *AppendTask) {
	intChan := make(chan struct{})
	doneChan := make(chan struct{})
	var goOn int32 = 1

	go func() {
		rf.intCond.L.Lock()
		rf.intCond.Wait()
		rf.intCond.L.Unlock()
		close(intChan)
	}()

	// recvdSignal: -2 = nothing pending, 0 = heartbeat, -1 = kill
	var recvdSignal int64 = -2
	var index int64 = 0
	var lastIndex int64 = 0

	// receive appended-entry indexes, or heartbeat/kill signals
	go func() {
		for {
			aIndex := int64(<-rf.appendChan)
			rf.dPrintf("appendTaskGenerator, recv index %d from appendChan", aIndex)
			if aIndex > 0 {
				if aIndex > lastIndex {
					atomic.StoreInt64(&index, aIndex)
					lastIndex = aIndex
				}
			} else {
				// if recvdSignal is -2 (nothing pending) we can just swap in
				if !atomic.CompareAndSwapInt64(&recvdSignal, -2, aIndex) {
					// otherwise only a pending 0 may be replaced by -1; a
					// pending -1 is never overwritten (kill has priority)
					atomic.CompareAndSwapInt64(&recvdSignal, 0, aIndex)
				}
				if aIndex == -1 {
					return
				}
			}
		}
	}()

	go func() {
		for {
			aIndex := 0
			signal := atomic.LoadInt64(&recvdSignal)
			// clear the slot once read so a new signal can come in; a
			// duplicate of the same signal arriving right now is ignored
			atomic.CompareAndSwapInt64(&recvdSignal, signal, -2)
			// no signal pending
			if signal == -2 {
				// take the pending index and clear it
				appendIndex := atomic.LoadInt64(&index)
				// the same index arriving in this window is likewise ignored
				atomic.CompareAndSwapInt64(&index, appendIndex, 0)
				// there are appended entries to replicate
				if appendIndex != 0 {
					aIndex = int(appendIndex)
				} else {
					continue
				}
			} else {
				// a signal takes priority over pending indexes
				aIndex = int(signal)
			}

			if aIndex < 0 {
				break
			}

			rf.dPrintf("appendTaskGenerator, going to handle aIndex: %d", aIndex)

			task := &AppendTask{
				rf.getTerm(true),
				aIndex,
			}
			if atomic.LoadInt32(&goOn) == 1 {
				rf.dPrintf("appendTaskGenerator, going to send task %+v", *task)
				*taskChan <- task
				if aIndex > 0 {
					time.Sleep(3 * time.Millisecond)
				}
			}
		}
		close(doneChan)
	}()

	<-intChan
	atomic.CompareAndSwapInt32(&goOn, 1, 0)
	rf.dPrintf("appendTaskGenerator, Interrupted")
	rf.appendChan <- -1
	<-doneChan

	close(*taskChan)
	rf.dPrintf("appendTaskGenerator, Exit")
}

// appendTaskProcessor consumes *AppendTask messages from taskChan (fed by
// appendTaskGenerator), fans them out as AppendEntries RPCs to every
// follower, counts commit acknowledgements in a sync.Map, and — once a
// majority holds an entry — advances commitIndex and applies entries on
// the leader. Per-follower task queues (tasks[i]) serialize replication
// to each follower; heartbeats bypass the queues.
func (rf *Raft) appendTaskProcessor(taskChan *chan *AppendTask) {
	rf.mu.RLock()
	tasks := make([]list.List, len(rf.peers))
	tasksDoneChan := make(chan [2]int, len(rf.peers)*2)
	rf.mu.RUnlock()
	//var tasksDoneChanMutex sync.Mutex
	var copyHandlersNum uint32
	var commits sync.Map
	var tasksMutex sync.RWMutex

	intChan := make(chan struct{})
	doneChan1 := make(chan int)
	doneChan2 := make(chan int)
	var goOn int32 = 1

	// Interrupt watcher: converts an intCond broadcast into a channel close.
	go func() {
		rf.intCond.L.Lock()
		rf.intCond.Wait()
		rf.intCond.L.Unlock()
		close(intChan)
	}()

	//getTaskCount := func(id int) int{
	//	tasksMutex.RLock()
	//	count := tasks[id].Len()
	//	tasksMutex.RUnlock()
	//	return count
	//}

	// afterCommitHandler runs after follower `id` acknowledged entries up to
	// RightIndex; commitCount is the number of replicas (leader included)
	// known to hold them. Exactly when the count first exceeds half, the
	// leader advances commitIndex and applies the newly committed entries.
	afterCommitHandler := func(id int, RightIndex int, commitCount int) {
		// Replication succeeded; once a majority has it, the leader may
		// apply it locally.
		if atomic.LoadInt32(&goOn) == 1 {
			rf.mu.Lock()
			rf.matchIndex[id] = Max(rf.matchIndex[id], RightIndex)
			// If over half, advance commitIndex and apply.
			halfCount := (len(rf.peers) - len(rf.outers)) >> 1
			if atomic.LoadInt32(&goOn) == 1 && commitCount == halfCount+1 {
				rf.dPrintf("appendTaskProcessor::afterCommitHandler, commits just over half, going to apply...")
				if rf.commitIndex < RightIndex {
					// commitIndex < RightIndex means some entries are not yet
					// committed: apply them to the state machine now.
					rf.commitIndex = RightIndex
					applyMsg := ApplyMsg{
						true,
						nil,
						0,
						false,
						nil,
						0,
						0,
					}
					rf.dPrintf("appendTaskProcessor::afterCommitHandler, applying cmd[%d, %d]", rf.lastApplied+1, rf.commitIndex)
					beginLogIndex := rf.getIndexFromEntry2Logs(rf.lastApplied+1, false)
					endLogIndex := rf.getIndexFromEntry2Logs(rf.commitIndex, false)

					for i := beginLogIndex; i <= endLogIndex; i++ {
						applyMsg.Command = rf.logs[i].Command
						// NOTE(review): CommandIndex is set to the logs-slice
						// position i, not the entry's logical index; these only
						// coincide while lastIncludedIndex == 0 — confirm for 2D.
						applyMsg.CommandIndex = i
						// NOTE(review): sending on applyChan while holding
						// rf.mu can block if the service drains slowly — verify
						// the consumer never needs rf.mu.
						rf.applyChan <- applyMsg
					}
					rf.dPrintf("appendTaskProcessor::afterCommitHandler, applied cmd[%d, %d] done", rf.lastApplied, rf.commitIndex)
					rf.lastApplied = rf.commitIndex
					//for ; rf.lastApplied < rf.commitIndex; rf.lastApplied++ {
					//	applyMsg.Command = rf.logs[rf.getIndexFromEntry2Logs(rf.lastApplied+1, false)].Command
					//	applyMsg.CommandIndex = rf.lastApplied + 1
					//	rf.applyChan <- applyMsg
					//}
					//rf.lastApplied = rf.commitIndex
				}
			}
			rf.mu.Unlock()

			rf.dPrintf("appendTaskProcessor::afterCommitHandler, S%d handling done", id)
		}
	}

	// copyHandler sends one AppendEntries RPC (entries or heartbeat) to
	// follower `id`, retrying on RPC failure and backing up nextIndex on
	// log conflicts, until success, demotion, or interruption.
	copyHandler := func(id int, args *AppendEntriesArgs) {
		// RightIndex stays 0 for a heartbeat (no entries).
		RightIndex := 0
		flag := 0
		if len(args.Entries) > 0 {
			RightIndex = args.Entries[len(args.Entries)-1].Index
		} else {
			if args.PrevLogTerm != rf.getTerm(true) {
				flag = 1
			} else {
				flag = 2
			}
		}
		iRight := rf.getIndexFromEntry2Logs(RightIndex, true)
		t := 1
		if Debug && RightIndex >= args.PrevLogIndex {
			rf.dPrintf("appendTaskProcessor::copyHandler, trying to copy [%d, %d] to S%d, %#v", args.PrevLogIndex+1, RightIndex, id, *args)
		}

		rf.persist(true)
		rf.dPrintf("appendTaskProcessor::copyHandler, persisted states before send")
		// Send (with retries).
		for atomic.LoadInt32(&goOn) == 1 {
			needSleep := false
			var reply AppendEntriesReply
			// Even when reply.Success is true, our term may be stale.
			ok := rf.sendAppendEntries(id, args, &reply)
			rf.mu.Lock()
			if atomic.LoadInt32(&goOn) == 1 && !ok {
				// RPC failed; back off briefly and retry.
				needSleep = true
				rf.dPrintf("appendTaskProcessor::copyHandler, AppendEntries RPC to S%d failed", id)
			} else if atomic.LoadInt32(&goOn) == 1 && reply.Term > rf.getTerm(false) { // [1]
				// Our term is behind: step down to follower.
				atomic.CompareAndSwapInt32(&goOn, 1, 0)
				rf.storeTerm(reply.Term, false)
				rf.leader = -1
				rf.changeStatus(FOLLOWER, false, true, true)
				rf.dPrintf("appendTaskProcessor::copyHandler, not leader yet, return")
				rf.mu.Unlock()
				break
			} else if atomic.LoadInt32(&goOn) == 1 && !reply.Success { // [2],[3]
				// Log conflict on the follower.
				// A conflict can also arise from a heartbeat probe.
				// todo handle heartbeat-time conflicts
				if RightIndex == 0 {
					RightIndex = args.PrevLogIndex
					iRight = rf.getIndexFromEntry2Logs(RightIndex, false)
				}
				// todo is this optimization correct?
				iNextIndex := Max(1, Min(reply.FirstIndex, rf.logsTermFirstIndex[reply.ConflictTerm]))
				iLog := rf.getIndexFromEntry2Logs(iNextIndex, false)
				// Rebuild the request starting from the backed-up index.
				args = &AppendEntriesArgs{
					rf.getTerm(false),
					rf.leader,
					iNextIndex - 1,
					rf.logs[iLog-1].Term,
					rf.logs[iLog : iRight+1],
					rf.commitIndex,
				}
				rf.dPrintf("appendTaskProcessor::copyHandler, there has Conflict from S%d, prepare new data [%d, %d], %#v", id, args.PrevLogIndex+1, RightIndex, *args)
			} else {
				// No conflict: done.
				rf.mu.Unlock()
				break
			}
			rf.mu.Unlock()

			if needSleep {
				time.Sleep(5 * time.Millisecond)
			}
			t++
			rf.dPrintf("appendTaskProcessor::copyHandler, trying the %dth time to copy to S%d", t, id)
		}
		if atomic.LoadInt32(&goOn) == 1 {
			// Heartbeats are fully handled right here (they never entered a queue).
			if flag > 0 {
				// The heartbeat went through a conflict round.
				if RightIndex > 0 {
					if flag == 1 {
						rf.mu.Lock()
						rf.matchIndex[id] = Max(rf.matchIndex[id], RightIndex)
						rf.mu.Unlock()
					} else {
						if old, ok := commits.Load(RightIndex); ok {
							commits.Store(RightIndex, old.(int)+1)
							rf.dPrintf("appendTaskProcessor, task(upto %d) commits count %d now", RightIndex, old.(int)+1)
							afterCommitHandler(id, RightIndex, old.(int)+1)
						}
					}
				}
			} else {
				//tasksDoneChanMutex.Lock()
				tasksDoneChan <- [2]int{id, RightIndex}
				//tasksDoneChanMutex.Unlock()
			}
		}
		// Decrement the live-handler counter (add of ^uint32(0) == -1).
		atomic.AddUint32(&copyHandlersNum, ^uint32(0))
	}

	// Pop completed tasks and start the next one per follower.
	go func() {
		for {
			done, ok := <-tasksDoneChan
			if !ok || atomic.LoadInt32(&goOn) == 0 {
				break
			}
			go func(Sid int, DoneRightIndex int) {
				if DoneRightIndex > 0 {
					rf.dPrintf("appendTaskProcessor, successfully copied(upto %d) to S%d", DoneRightIndex, Sid)
					if old, ok := commits.Load(DoneRightIndex); ok {
						commits.Store(DoneRightIndex, old.(int)+1)
						rf.dPrintf("appendTaskProcessor, task(upto %d) commits count %d now", DoneRightIndex, old.(int)+1)
						afterCommitHandler(Sid, DoneRightIndex, old.(int)+1)
					}

					// Task finished: pop it from this follower's queue.
					tasksMutex.Lock()
					ele := tasks[Sid].Front()
					if ele != nil {
						tasks[Sid].Remove(ele)
					} else {
						tasksMutex.Unlock()
						return
					}
					tasksMutex.Unlock()
				}

				// Fetch the next task for this follower.
				var args *AppendEntriesArgs = nil
				tasksMutex.Lock()
				if tasks[Sid].Len() > 0 {
					ele := tasks[Sid].Front()
					if ele != nil {
						args = ele.Value.(*AppendEntriesArgs)
					}
					tasks[Sid].Remove(ele)
				}
				tasksMutex.Unlock()
				//needRet = atomic.LoadUint64(&totalTasksCount) == 0

				if Debug && args == nil {
					rf.dPrintf("appendTaskProcessor, there is no task for S%d", Sid)
				}

				if args != nil && atomic.LoadInt32(&goOn) == 1 {
					rf.dPrintf("appendTaskProcessor, for S%d got next task %+v", Sid, *args)
					atomic.AddUint32(&copyHandlersNum, 1)
					go copyHandler(Sid, args)
				}
			}(done[0], done[1])
		}
		doneChan1 <- 1
		//}()
		//}
	}()

	// Wait for tasks from appendTaskGenerator and push them onto the
	// per-follower queues (heartbeats are dispatched directly).
	go func() {
		for atomic.LoadInt32(&goOn) == 1 {
			task, ok := <-*taskChan
			if !ok || atomic.LoadInt32(&goOn) == 0 {
				break
			}
			if task == nil {
				continue
			}
			rf.dPrintf("appendTaskProcessor, recv task %+v", *task)
			if task.RightIndex > 0 {
				// Seed the commit counter with the leader's own copy.
				commits.Store(task.RightIndex, 1)
			}

			// Distribute the concrete work to each follower.
			for i := 0; i < len(rf.peers) && atomic.LoadInt32(&goOn) == 1; i++ {
				rf.mu.Lock()
				if rf.leader == i {
					rf.mu.Unlock()
					continue
				}
				nextIndex := rf.nextIndex[i]
				if atomic.LoadInt32(&goOn) == 0 {
					rf.mu.Unlock()
					break
				}

				iLog := rf.getIndexFromEntry2Logs(nextIndex, false)
				iRightInLog := rf.getIndexFromEntry2Logs(task.RightIndex, false)
				args := &AppendEntriesArgs{
					task.Term,
					rf.leader,
					nextIndex - 1,
					rf.logs[iLog-1].Term,
					[]Entry{},
					rf.commitIndex,
				}
				if task.RightIndex >= nextIndex {
					args.Entries = rf.logs[iLog : iRightInLog+1]
					rf.nextIndex[i] = task.RightIndex + 1
				}
				rf.mu.Unlock()

				if atomic.LoadInt32(&goOn) == 1 {
					if task.RightIndex < nextIndex {
						// Heartbeat: send directly, bypassing the task queue.
						rf.dPrintf("appendTaskProcessor, go to handle heartbeat")
						go copyHandler(i, args)
					} else {
						rf.dPrintf("appendTaskProcessor, push server task to Q%d, %+v", i, *args)
						tasksMutex.Lock()
						tasks[i].PushBack(args)
						if atomic.LoadInt32(&goOn) == 1 {
							if tasks[i].Len() == 1 {
								// The -1 merely kicks tasksDoneChan so the pop
								// goroutine starts this fresh queue.
								tasksDoneChan <- [2]int{i, -1}
							}
						}
						tasksMutex.Unlock()
					}
				}
			}
		}
		doneChan2 <- 1
	}()

	<-intChan
	atomic.CompareAndSwapInt32(&goOn, 1, 0)
	rf.dPrintf("appendTaskProcessor, Interrupted")
	<-doneChan2
	// Wait for all in-flight copyHandlers to finish before closing the channel
	// they might send on.
	for atomic.LoadUint32(&copyHandlersNum) > 0 {
		time.Sleep(2 * time.Millisecond)
	}
	//tasksDoneChanMutex.Lock()
	close(tasksDoneChan)
	//tasksDoneChanMutex.Unlock()
	<-doneChan1
	rf.dPrintf("appendTaskProcessor, Exit")
}

//
// the tester doesn't halt goroutines created by Raft after each test,
// but it does call the Kill() method. your code can use killed() to
// check whether Kill() has been called. the use of atomic avoids the
// need for a lock.
//
// the issue is that long-running goroutines use memory and may chew
// up CPU time, perhaps causing later tests to fail and generating
// confusing debug output. any goroutine with a long-running loop
// should call killed() to check whether it should stop.
//
// Kill marks this peer as dead, notifies the ticker via killedChan, and
// wakes any goroutine parked on intCond. Safe to call multiple times and
// from multiple goroutines.
func (rf *Raft) Kill() {
	// CAS makes Kill idempotent AND race-free: the original load-then-store
	// let two concurrent callers both pass the guard, and the second send on
	// the 1-buffered killedChan would block forever.
	if !atomic.CompareAndSwapInt32(&rf.dead, 0, 1) {
		return
	}
	rf.dPrintf("Kill, going to KILL...")
	// Your code here, if desired.
	rf.killedChan <- 1
	// Wake goroutines blocked in rf.intCond.Wait() so they can observe dead.
	rf.intCond.Broadcast()
}

// killed reports whether Kill() has been called on this peer.
func (rf *Raft) killed() bool {
	return atomic.LoadInt32(&rf.dead) == 1
}

// stopVoteRoutines signals the in-flight vote goroutines (if any) to stop.
//
// needLock is currently unused (kept for signature compatibility with the
// sibling stop helpers); needVoteLock controls whether votesGoMutex is
// acquired here — pass false when the caller already holds it.
func (rf *Raft) stopVoteRoutines(needLock bool, needVoteLock bool) {
	if needVoteLock {
		rf.votesGoMutex.Lock()
		defer rf.votesGoMutex.Unlock()
	}

	if rf.votesGoChan == nil {
		return
	}

	rf.dPrintf("stopVoteRoutines, send to stop chan")
	// Non-blocking send. The previous `len(StopChan) == 0` check-then-send
	// was a TOCTOU race: two unlocked callers could both observe an empty
	// buffer and both send, and the second would block forever on the
	// 1-slot channel. select/default delivers at most one signal and never
	// blocks.
	select {
	case rf.votesGoChan.StopChan <- 0:
	default:
	}
	//rf.sendToChan(&rf.votesGoChan.StopChan, 0, false)
}

// stopHbRoutines signals the heartbeat goroutine (if any) to stop.
//
// needLock is currently unused; needHbLock controls whether hbGoMutex is
// acquired here — pass false when the caller already holds it.
func (rf *Raft) stopHbRoutines(needLock bool, needHbLock bool) {
	if needHbLock {
		rf.hbGoMutex.Lock()
		defer rf.hbGoMutex.Unlock()
	}

	if ch := rf.hbGoChan; ch != nil {
		rf.dPrintf("stopHbRoutines, send to stop chan")
		rf.sendToChan(&ch.StopChan, 0, false)
	}
}

// stopAllRoutines stops every interruptible background routine. At present
// only the vote routines are stopped; heartbeat routines are intentionally
// left running.
func (rf *Raft) stopAllRoutines(needLock bool, needActionLock bool) {
	rf.stopVoteRoutines(needLock, needActionLock)
}

// finishVotesGoChan clears the isOkToGoOn flag and tears down the vote
// coordination channels, if they still exist.
func (rf *Raft) finishVotesGoChan(isOkToGoOn *int32) {
	rf.votesGoMutex.Lock()
	defer rf.votesGoMutex.Unlock()

	atomic.CompareAndSwapInt32(isOkToGoOn, 1, 0)
	ch := rf.votesGoChan
	if ch == nil {
		return
	}
	rf.votesGoChan = nil
	close(ch.DoneChan)
	close(ch.StopChan)
}

// finishHbGoChan tears down the heartbeat coordination channels, if they
// still exist.
func (rf *Raft) finishHbGoChan() {
	rf.hbGoMutex.Lock()
	defer rf.hbGoMutex.Unlock()

	ch := rf.hbGoChan
	if ch == nil {
		return
	}
	rf.hbGoChan = nil
	close(ch.DoneChan)
	close(ch.StopChan)
}

// ticker drives this peer's timers: while not leader it runs a randomized
// election timeout, while leader it runs the heartbeat interval. The timer
// can be cut short by a status change, a valid AppendEntries or
// RequestVote RPC, or Kill() — each delivered on its own channel and
// multiplexed in the select below. On an uninterrupted timeout, a
// follower/candidate starts an election and a leader triggers a heartbeat.
func (rf *Raft) ticker() {
	for rf.killed() == false {
		// Your code here to check if a leader election should
		// be started and to randomize sleeping time using
		// time.Sleep().
		atomic.CompareAndSwapInt32(&rf.readyForInterrupt, 1, 0)
		toChan := make(chan struct{})
		var isOkToGoOn int32 = 1
		//var isJustNowChangedToLeader int32 = 0

		// Timer goroutine: sleeps for the randomized election timeout
		// (non-leader) or the heartbeat interval (leader), then closes toChan.
		go func() {
			if rf.killed() {
				return
			}

			rf.mu.RLock()
			status := rf.status
			rf.mu.RUnlock()

			if status != LEADER {
				// Election timeout timer.
				atomic.StoreInt32(&rf.readyForInterrupt, 1)
				etr0 := rand.New(rand.NewSource(int64(rf.me)))
				etr1 := rand.New(rand.NewSource(time.Now().UnixNano() + etr0.Int63()))
				rg := etr1.Intn(ELECTION_TIMEOUT_RANGE)
				timeout := time.Duration(rg)*time.Millisecond + ELECTION_TIMEOUT_BASE
				rf.dPrintf("ticker, start election timer, timeout is %d", timeout.Milliseconds())
				//fmt.Printf("ticker, nano time is %d start election timer, timeout is %d\n", time.Now().UnixNano(), timeout.Milliseconds())
				time.Sleep(timeout)
			} else {
				// Heartbeat interval timer.
				//if atomic.LoadInt32(&isJustNowChangedToLeader) == 1 {
				//	// Just became leader: send a heartbeat immediately.
				//	atomic.StoreInt32(&isJustNowChangedToLeader, 0)
				//} else {
				//	atomic.StoreInt32(&rf.readyForInterrupt, 1)
				//	rf.dPrintf("ticker, start heartbeat timer")
				//	time.Sleep(HEARTBEAT_INTERVAL)
				//}

				atomic.StoreInt32(&rf.readyForInterrupt, 1)
				rf.dPrintf("ticker, start heartbeat timer")
				time.Sleep(HEARTBEAT_INTERVAL)
			}
			close(toChan)
		}()

		select {
		case <-toChan:
			// Timer expired without interruption.
			atomic.StoreInt32(&rf.readyForInterrupt, 0)
			rf.stopAllRoutines(true, true)
			rf.mu.Lock()
			switch rf.status {
			case FOLLOWER:
				atomic.CompareAndSwapInt32(&isOkToGoOn, 0, 1)
				rf.leader = -1
				rf.changeStatus(CANDIDATE, false, false, false)
				fallthrough
			case CANDIDATE:
				rf.dPrintf("ticker, Election Timeout")
				// Timed out: start an election.
				rf.initStatus(CANDIDATE, false)
				// Unlock early so the outer loop restarts (resets the timer)
				// as soon as possible.
				rf.dPrintf("ticker, init election")
				rf.mu.Unlock()

				go func() {
					if rf.killed() {
						return
					}
					rf.votesGoMutex.Lock()
					if rf.votesGoChan != nil {
						rf.votesGoChan.StopChan <- 1
						for rf.votesGoChan != nil {
							rf.votesGoMutex.Unlock()
							time.Sleep(3 * time.Millisecond)
							rf.votesGoMutex.Lock()
						}
					}
					rf.votesGoChan = &doneAndStopGo{
						DoneChan: make(chan int, 1),
						StopChan: make(chan int, 1),
					}
					voteGo := rf.votesGoChan
					rf.votesGoMutex.Unlock()
					if atomic.LoadInt32(&isOkToGoOn) == 1 {
						go func() {
							if rf.killed() {
								return
							}

							rf.mu.Lock()

							wg := sync.WaitGroup{}
							// todo exclude outers
							lastIndex := rf.getLastEntryIndex(false)
							args := RequestVoteArgs{
								Term:         rf.getTerm(false),
								CandidateId:  rf.me,
								LastLogIndex: lastIndex,
								LastLogTerm:  rf.logs[lastIndex].Term,
							}
							lastPeersCount := len(rf.peers)
							wg.Add(lastPeersCount)
							rf.dPrintf("ticker, sending vote request...")
							rf.persist(false)
							rf.dPrintf("ticker, persisted states before request votes")
							for i, _ := range rf.peers {
								valid := rf.leader != i && rf.me != i
								rf.mu.Unlock()
								if rf.killed() {
									return
								}
								if valid && atomic.LoadInt32(&isOkToGoOn) == 1 {
									// Fire RequestVote RPCs immediately so
									// canvassing proceeds in parallel.
									go func(index int) {
										if rf.killed() {
											return
										}
										var reply RequestVoteReply
										t := 1
										rf.dPrintf("ticker, try the %dst sending vote request to S%d", t, index)
										for !rf.sendRequestVote(index, &args, &reply) {
											if atomic.LoadInt32(&isOkToGoOn) != 1 {
												return
											}
											time.Sleep(10 * time.Millisecond)
											if atomic.LoadInt32(&isOkToGoOn) != 1 {
												return
											}
											reply = RequestVoteReply{}
											t += 1
											rf.dPrintf("ticker, try the %dth sending vote request to S%d", t, index)
										}
										if atomic.LoadInt32(&isOkToGoOn) != 1 || rf.killed() {
											return
										}

										rf.mu.Lock()
										d := reply.Term - rf.getTerm(false)
										if d > 0 {
											// The reply's term is greater than ours,
											// so this candidate must step back to
											// follower.
											rf.storeTerm(reply.Term, false)
											//rf.logsTermFirstIndex = append(rf.logsTermFirstIndex, make([]int, d)...)
											rf.changeStatus(FOLLOWER, false, true, true)
											rf.dPrintf("ticker, didn't got voted from S%d, such term of the vote reply is newer, that must back to Follower", index)
										} else if atomic.LoadInt32(&isOkToGoOn) == 1 {
											// Start counting the votes.
											if !reply.VoteGranted {
												// The vote was refused.
												rf.dPrintf("ticker, failed to get vote from S%d, reply term is %d", index, reply.Term)
												rf.storeTerm(reply.Term, false)
												//rf.logsTermFirstIndex = append(rf.logsTermFirstIndex, make([]int, d)...)
											} else if atomic.LoadInt32(&isOkToGoOn) == 1 {
												rf.votesCount++
												rf.dPrintf("ticker, got vote from S%d, now total votes count %d", index, rf.votesCount)
												if atomic.LoadInt32(&isOkToGoOn) == 1 && rf.votesCount > ((len(rf.peers)-len(rf.outers))>>1) {
													//rf.changeStatus(LEADER, false)
													atomic.StoreInt32(&isOkToGoOn, 0)
													rf.dPrintf("ticker, becoming LEADER")
													rf.leader = rf.me
													rf.changeStatus(LEADER, false, true, true)
												}
											}
										} else {
											rf.dPrintf("ticker, needn't count vote from S%d, since been interrupted", index)
										}
										rf.mu.Unlock()

										wg.Done()
										rf.dPrintf("ticker, vote request to S%d handling done", index)
									}(i)
								} else {
									// i is the leader or ourselves, or we have
									// already been interrupted.
									rf.dPrintf("ticker, needn't send vote request to S%d", i)
									wg.Done()
								}

								rf.mu.Lock()

								nowPeersCount := len(rf.peers)
								if lastPeersCount != nowPeersCount {
									wg.Add(nowPeersCount - lastPeersCount)
									lastPeersCount = nowPeersCount
								}
							}
							rf.mu.Unlock()

							wg.Wait()
							rf.votesGoMutex.Lock()
							if rf.votesGoChan != nil && atomic.LoadInt32(&isOkToGoOn) == 1 {
								voteGo.DoneChan <- 1
							}
							rf.votesGoMutex.Unlock()
						}()

						select {
						case <-voteGo.DoneChan:
							rf.finishVotesGoChan(&isOkToGoOn)
							rf.dPrintf("ticker, all vote request handling done")
						case <-voteGo.StopChan:
							rf.finishVotesGoChan(&isOkToGoOn)
							rf.dPrintf("ticker, vote routines interrupted")
						}
					}
				}()
				//continue
				//rf.mu.Unlock()

			case LEADER:
				// Send a heartbeat (0 on appendChan means heartbeat).
				rf.dPrintf("ticker, Heartbeat Timeout")
				rf.mu.Unlock()
				rf.appendChan <- 0
			}

		case <-rf.statusChgChan:
			atomic.StoreInt32(&rf.readyForInterrupt, 0)
			//if status == LEADER {
			//	atomic.CompareAndSwapInt32(&isJustNowChangedToLeader, 0, 1)
			//	go rf.stopVoteRoutines(true, true)
			//} else {
			//	rf.stopAllRoutines(true, true)
			//}

			rf.stopAllRoutines(true, true)

		case <-rf.appendEntriesRecvdChan:
			// A valid AppendEntries RPC was received: reset the timer.
			atomic.StoreInt32(&rf.readyForInterrupt, 0)
			rf.stopAllRoutines(true, true)

		case <-rf.votedChan:
			// We granted a vote, i.e. a valid RequestVote RPC: reset the timer.
			atomic.StoreInt32(&rf.readyForInterrupt, 0)
			rf.stopAllRoutines(true, true)

		case <-rf.killedChan:
			// Kill() was called.
			rf.dPrintf("ticker, killed interruption")
			atomic.StoreInt32(&rf.readyForInterrupt, 0)
			rf.stopAllRoutines(true, true)
			close(rf.killedChan)
		}

	}
	rf.dPrintf("ticker, it's killed, so return")
}

//
// the service or tester wants to create a Raft server. the ports
// of all the Raft servers (including this one) are in peers[]. this
// server's port is peers[me]. all the servers' peers[] arrays
// have the same order. persister is a place for this server to
// save its persistent state, and also initially holds the most
// recent saved state, if any. applyCh is a channel on which the
// tester or service expects Raft to send ApplyMsg messages.
// Make() must return quickly, so it should start goroutines
// for any long-running work.
//
func Make(peers []*labrpc.ClientEnd, me int,
	persister *Persister, applyCh chan ApplyMsg) *Raft {
	rf := &Raft{}
	rf.peers = peers
	rf.persister = persister
	rf.me = me
	// -1 means "leader unknown".
	rf.leader = -1
	//rf.LeaderNum = -1
	rf.outers = make([]int, 0)
	rf.dead = 0
	rf.dCurTerm = 0

	// Your initialization code here (2A, 2B, 2C).
	// logs[0] is a sentinel entry (term 0, index 0) so real entries start
	// at index 1.
	rf.logs = make([]Entry, 1)
	rf.logs[0] = Entry{
		0,
		0,
		nil,
	}
	rf.logsTermFirstIndex = make([]int, 1)

	rf.status = FOLLOWER
	rf.votedFor = nil
	rf.votesCount = 0
	rf.currentTerm = 0
	rf.applyChan = applyCh
	// Interrupt channels consumed by ticker(); buffered (size 1) so a
	// single pending notification never blocks the sender.
	rf.appendEntriesRecvdChan = make(chan int, 1)
	rf.statusChgChan = make(chan int, 1)
	rf.votedChan = make(chan int, 1)
	rf.killedChan = make(chan int, 1)
	rf.votesGoChan = nil
	rf.hbGoChan = nil
	rf.votesValid = 1
	rf.readyForInterrupt = 0
	rf.appendChan = make(chan int, 2)

	rf.nextIndex = make([]int, len(peers))
	// NOTE(review): matchIndex starts nil — presumably allocated when this
	// peer becomes leader; confirm before relying on it elsewhere.
	rf.matchIndex = nil

	rf.commitIndex = 0
	rf.lastApplied = 0

	// Condition variable used to interrupt the append task goroutines.
	intLocker := new(sync.Mutex)
	rf.intCond = sync.Cond{}
	rf.intCond.L = intLocker

	// 2D snapshot bookkeeping: realIndexInLogs maps a logical entry index
	// to its position in the logs slice.
	rf.lastIncludedIndex = 0
	rf.realIndexInLogs = make(map[int]int, 1)
	rf.realIndexInLogs[0] = 0

	if Debug {
		rd := rand.New(rand.NewSource(time.Now().UnixNano()))
		rf.serverPrefix = fmt.Sprintf("S%d(%X)_", me, rd.Int31())
		rf.prefix = fmt.Sprintf("%s%s", rf.serverPrefix, "FOLLOWER")
		log.SetFlags(log.Ltime | log.Lmicroseconds)
	}

	// initialize from state persisted before a crash
	rf.readPersist(persister.ReadRaftState())

	// start ticker goroutine to start elections
	go rf.ticker()

	return rf
}
