

package raft

//
// this is an outline of the API that raft must expose to
// the service (or tester). see comments below for
// each of these functions for more details.
//
// rf = Make(...)
//   create a new Raft server.
// rf.Start(command interface{}) (index, Term, isleader)
//   start agreement on a new log entry
// rf.GetState() (Term, isLeader)
//   ask a Raft for its current Term, and whether it thinks it is leader
// ApplyMsg
//   each time a new entry is committed to the log, each Raft peer
//   should send an ApplyMsg to the service (or tester)
//   in the same server.
//

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"labgob"
	"sync"
	"sync/atomic"
	"time"
)
import "labrpc"

// import "bytes"
// import "labgob"

// Tuning knobs for timing, logging, and RPC fan-out behavior.
const(
	LockTimeout = 10*time.Millisecond // threshold used by lock-hold detection (see LockDetectEnabled)
	//RPCTimeout = 3500*time.Millisecond
	//
	//SingleRPCTimeout = 3000*time.Millisecond

	// Overall deadline for one logical RPC in the *_v1 senders (covers all retries).
	RPCTimeout = 100*time.Millisecond

	// Per-attempt deadline; NOTE(review): currently unused effectively (see sendRequestVote_v1).
	SingleRPCTimeout = 100*time.Millisecond

	HeartBeatInterval = 100 * time.Millisecond // leader heartbeat period
	LogEnabled = false // enables the verbose formal_log debug output
	LockDetectEnabled = false // enables lock contention/holding diagnostics
	ReqParallel = 1 // duplicate concurrent RPCs fired per logical request in *_v1 senders
	MaxRetry = 2 // labrpc Call attempts per duplicate RPC
)

// ServerStatus names the three Raft roles a peer can occupy.
type ServerStatus string

// Role constants. Each carries an explicit ServerStatus type (the original
// left Follower and Leader as untyped string constants), so assigning one to
// anything other than a ServerStatus now requires an explicit conversion.
// The underlying values are unchanged, so comparisons and logging behave
// exactly as before.
const (
	Candidate ServerStatus = "Candidate"
	Follower  ServerStatus = "Follower"
	Leader    ServerStatus = "Leader"
)

//
// as each Raft peer becomes aware that successive log entries are
// committed, the peer should send an ApplyMsg to the service (or
// tester) on the same server, via the applyCh passed to Make(). set
// CommandValid to true to indicate that the ApplyMsg contains a newly
// committed log entry.
//
// in Lab 3 you'll want to send other kinds of messages (e.g.,
// snapshots) on the applyCh; at that point you can add fields to
// ApplyMsg, but set CommandValid to false for these other uses.
//
type ApplyMsg struct {
	CommandValid bool        // true when this message carries a newly committed log entry
	Command      interface{} // the client command stored in the committed entry
	CommandIndex int         // raft log index of the entry (1-based, per Start's convention)
}
// Entry is a single raft log entry: the client command together with the
// term in which the leader received it (paper Figure 2).
type Entry struct{
	Term int
	Command interface{}
}

//
// A Go object implementing a single Raft peer.
//
type Raft struct {
	mu        sync.Mutex          // Lock to protect shared access to this peer's state
	peers     []*labrpc.ClientEnd // RPC end points of all peers
	persister *Persister          // Object to hold this peer's persisted state
	me        int                 // this peer's index into peers[]

	// Your data here (2A, 2B, 2C).
	// Look at the paper's Figure 2 for a description of what
	// state a Raft server must maintain.

	//configure as <raft extended> figure2
	currentTerm int // latest term this peer has seen (persisted)
	//map[Term]votefor
	votedFor int // CandidatId voted for in currentTerm, -1 if none (persisted)
	log []Entry // log entries; raft index i lives at log[i-1] (persisted)

	commitIndex int // highest raft index known committed
	lastApplied int // highest raft index applied to the state machine
	nextIndex []int // per peer: next raft index to send (leader only)
	matchIndex []int // per peer: highest raft index known replicated (leader only)


	timeContext *TimeContext // bundled timers/intervals (see TimeContext)
	lockName string // name of the lock section currently held (diagnostics)
	lockTick time.Time // when the current lock section was entered (diagnostics)
	// fast-path leader flag; read with atomics in ticker_v1
	isLeader int32
	state ServerStatus // current role: Follower, Candidate, or Leader

	//time about
	heart *time.Timer
	elec_timer *time.Timer
	election_timeout time.Duration
	heart_tick time.Duration
	heart_timer_initial bool
	heart_begin time.Time


	preemptedCh chan bool // signaled when this peer's election is preempted
	timeReset bool


	lockChan chan string
	unlockChan chan string

	leaderChan chan string
	terminateChan chan string // Kill() pushes here to stop background loops

	logChan chan interface{}

	// per raft index: which peers have replicated that entry (leader bookkeeping)
	entryReplicaRec map[int] []int

	committedCh chan ApplyMsg // outbound channel of committed entries to the service

	commitIndexChangeCh chan int // notified whenever commitIndex advances

	isTerminated bool // set by Kill(); loops check it under rf.mu

	logFd string // path of this peer's debug-log file

	prevTicker time.Time // last time a heartbeat/vote reset the election clock

	logsyner sync.Mutex // serializes writes to the debug log
	logInfo string
	electionInterval time.Duration

	Votes int // votes gathered in the current election
	dead int32 // set by the (commented-out) atomic Kill variant

}


// String renders a one-line dump of this peer's mutable state for the debug logs.
func (rf *Raft) String() string {
	return fmt.Sprintf("{ID:%d,Term:%d,State:%s,commitIndex:%d,votedFor:%d,prevTicker:%v,length of log:%d,nextIndex:\t%v,matchIndex:\t%v,\nLog:%v,\nentryReplica:%v}\n",
		rf.me, rf.currentTerm, rf.state, rf.commitIndex, rf.votedFor, rf.prevTicker,
		len(rf.log), rf.nextIndex, rf.matchIndex, rf.log, rf.entryReplicaRec)
}
// return currentTerm and whether this server
// believes it is the leader.
// GetState reports this peer's current term and whether it believes it is the leader.
func (rf *Raft) GetState() (int, bool) {
	rf.lock("Raft Get State")
	defer rf.unlock("Raft Get State")
	return rf.currentTerm, rf.state == Leader
}
//
// save Raft's persistent state to stable storage,
// where it can later be retrieved after a crash and restart.
// see paper's Figure 2 for a description of what should be persistent.
//
// persist serializes the Figure 2 persistent fields — currentTerm, votedFor
// and log — and hands the snapshot to the Persister. Caller must hold rf.mu.
func (rf *Raft) persist() {
	var buf bytes.Buffer
	enc := labgob.NewEncoder(&buf)
	enc.Encode(rf.currentTerm)
	enc.Encode(rf.votedFor)
	enc.Encode(rf.log)
	rf.persister.SaveRaftState(buf.Bytes())
}
//
// restore previously persisted state.
//
// readPersist restores currentTerm, votedFor and log from a snapshot produced
// by persist(). An empty/nil snapshot (fresh start) is a no-op; a corrupt one
// panics, since continuing with partial state would violate safety.
func (rf *Raft) readPersist(data []byte) {
	if len(data) < 1 { // bootstrap without any state?
		return
	}
	dec := labgob.NewDecoder(bytes.NewBuffer(data))
	var savedTerm, savedVote int
	var savedLog []Entry
	if dec.Decode(&savedTerm) != nil ||
		dec.Decode(&savedVote) != nil ||
		dec.Decode(&savedLog) != nil {
		panic("Error to Restore Persistent State!")
	}
	rf.currentTerm = savedTerm
	rf.votedFor = savedVote
	rf.log = savedLog
}


// AppendEntryArgs is the leader's AppendEntries request (paper Figure 2),
// extended with debugging metadata (TimeStamp, PrevTicker, LastApply).
type AppendEntryArgs struct {
	// Your data here (2A, 2B).
	Term         int // leader's term
	LeaderId   int // so followers can identify/log the leader
	PrevLogIndex int	//used to record raft cluster log
	PrevLogTerm  int // term of the entry at PrevLogIndex
	Entries []Entry // entries to replicate; empty for heartbeat
	LeaderCommit int // leader's commitIndex
	TimeStamp string // debug: when the request was built
	PrevTicker time.Time // debug: leader's last election-clock reset
	LastApply int // debug: leader's lastApplied
}
// AppendEntryReply is the follower's AppendEntries response, including the
// XTerm/XIndex/XLen fields used for fast log backtracking.
type AppendEntryReply struct {
	// Your data here (2A).
	Term        int // follower's currentTerm, for leader to update itself
	Success bool // true iff PrevLogIndex/PrevLogTerm matched
	//accelerate log match by treat term as a unit
	XTerm int // term of the conflicting entry (when a conflict exists)
	XIndex int // first index stored for XTerm
	XLen int // follower log length when it is shorter than PrevLogIndex; -1 otherwise
	TimeStamp string // debug: when the reply was filled
	Me int // debug: responder's id
	PrevTicker time.Time // debug: responder's last election-clock reset
	LastApply int // debug: responder's lastApplied
	CommitIndex int // debug: responder's commitIndex
}


// AppendEntryServ bundles one AppendEntry request/reply pair with the index
// of the peer it targets, for post-processing by the reply handler.
type AppendEntryServ struct{
	request *AppendEntryArgs
	reply *AppendEntryReply
	index int

}



// String renders the request for debug logs; Entries is abbreviated to its
// first and last element so heartbeats and bulk replication stay readable.
func (args *AppendEntryArgs) String() string {
	var entriesRepr string
	switch len(args.Entries) {
	case 0:
		entriesRepr = "[]"
	case 1:
		entriesRepr = fmt.Sprintf("[{%v}]", args.Entries[0])
	default:
		entriesRepr = fmt.Sprintf("[{%v}...[%v]]", args.Entries[0], args.Entries[len(args.Entries)-1])
	}
	return fmt.Sprintf("{TimeStamp:%s,Term:%d,\tLeaderId:%d,\tPrevTicker:%v,PrevLogIndex:%d,PrevLogTerm:%d,LeaderCommit:%d,LastApply:%d,Entries:%v}\n",
		args.TimeStamp, args.Term, args.LeaderId, args.PrevTicker, args.PrevLogIndex,
		args.PrevLogTerm, args.LeaderCommit, args.LastApply, entriesRepr)
}


// String renders the reply for debug logs.
func (reply *AppendEntryReply) String() string {
	s := fmt.Sprintf("{TimeStamp:%s,Term:%d,\tMe:%d,\t\tPrevTicker:%v,Success:%v,XTerm:%d,XIndex:%d,XLen:%d,LastApply:%d,CommitIndex:%d}\n",
		reply.TimeStamp, reply.Term, reply.Me, reply.PrevTicker, reply.Success,
		reply.XTerm, reply.XIndex, reply.XLen, reply.LastApply, reply.CommitIndex)
	return s
}
//
// example RequestVote RPC arguments structure.
// field names must start with capital letters!
//
type RequestVoteArgs struct {
	// Your data here (2A, 2B).
	Term         int // candidate's term
	CandidatId   int // candidate requesting the vote (NOTE: field name is a typo kept for RPC compatibility)
	LastLogIndex int // raft index of candidate's last log entry (0 if log empty)
	LastLogTerm  int // term of candidate's last log entry (0 if log empty)
	TimeStamp string // debug: when the request was built
	PrevTicker time.Time // debug: candidate's last election-clock reset
	LastApply int // debug: candidate's lastApplied
}
//
// example RequestVote RPC reply structure.
// field names must start with capital letters!
//
type RequestVoteReply struct {
	// Your data here (2A).
	Term        int // responder's currentTerm, for the candidate to update itself
	VoteGranted bool // true iff the responder granted its vote
	TimeStamp string // debug: when the reply was filled
	Me int // debug: responder's id
	PrevTicker time.Time // debug: responder's last election-clock reset
	LastApply int // debug: responder's lastApplied
}
// String renders the vote request for debug logs.
func (args *RequestVoteArgs) String() string {
	s := fmt.Sprintf("{TimeStamp:%s,Term:%d,CandidatId:%d,PrevTicker:%v,LastLogIndex:%d,LastLogTerm:%d,LastApply:%d}",
		args.TimeStamp, args.Term, args.CandidatId, args.PrevTicker,
		args.LastLogIndex, args.LastLogTerm, args.LastApply)
	return s
}
// String renders the vote reply for debug logs.
func (reply *RequestVoteReply) String() string {
	s := fmt.Sprintf("{TimeStamp:%s,Term:%d,Me:%d,PrevTicker:%v,VoteGranted:%v,LastApply:%d}",
		reply.TimeStamp, reply.Term, reply.Me, reply.PrevTicker,
		reply.VoteGranted, reply.LastApply)
	return s
}
//func getRandFuncId(){
//
//}
//
// example RequestVote RPC handler.
//
// RequestVote is the RequestVote RPC handler (paper §5.2, §5.4.1).
// Grants a vote iff the candidate's term is current, this peer has not
// already voted for someone else this term, and the candidate's log is at
// least as up-to-date as ours. Granting a vote resets the election timer.
func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply){
	// Your code here (2A, 2B)
	randId:= fmt.Sprintf("ID:(RequestVote-%s)",getCurTimeStamp())
	if LogEnabled{
		rf.Log(DebugL, fmt.Sprintf("-----------Incoming RequestVote Start:ID:%s-----------\narg:%s\n", randId,args))
	}
	start:=time.Now()
	lockName:=fmt.Sprintf("{%d}.RequestVote",args.CandidatId)
	rf.lock(lockName)
	defer rf.unlock(lockName)
	// Defers run LIFO: the reply metadata is stamped first, then state is
	// persisted, then the lock is released.
	defer rf.persist()
	defer func(reply *RequestVoteReply){reply.TimeStamp=getCurTimeStamp();reply.Me=rf.me;reply.PrevTicker=rf.prevTicker;reply.LastApply = rf.lastApplied
	}(reply)
	formal_log:=""
	if LogEnabled {
		formal_log+=fmt.Sprintf("duration:%v\n",time.Since(start))
		formal_log += fmt.Sprintf("-----------RequestVote Start:ID:%s-----------\narg:%s\nbefore RPC rf:%s\n", randId,args, rf)
	}

	/**
		Term act as a logical clock,Exchange Term
	 */
	// Report our (possibly pre-TermCheck) term so the candidate can step down.
	reply.Term = rf.currentTerm
	/**
		Deal With Term
	*/
	// TermCheck rejects stale terms; a newer term demotes us to Follower.
	if !rf.TermCheck(args.Term){
		reply.VoteGranted = false
		if LogEnabled {
			formal_log += fmt.Sprintf("reply:%v\n-----------RequestVote End-----------\n", reply)
			rf.Log(DebugL,formal_log)
		}
		return
	}
	/**
		Deal With Vote
	 */
	// One vote per term: only grant if we haven't voted, or already voted
	// for this same candidate (idempotent re-grant).
	validTermCheck:=false
	if rf.votedFor==-1 ||rf.votedFor==args.CandidatId{
		validTermCheck = true
	}
	/**
		add term compare
	 */
	if !validTermCheck{
		reply.VoteGranted = false
		if LogEnabled {
			formal_log += fmt.Sprintf("reply:%v\n-----------RequestVote End-----------\n", reply)
			rf.Log(DebugL,formal_log)
		}
		return;
	}
	/**
		term check passed,check log
	 */
	// Election restriction (§5.4.1): the candidate's last entry must have a
	// higher term than ours, or the same term with a log at least as long.
	validLogCheck:=false
	if len(rf.log)==0{									//A_{0} compare always return true
		validLogCheck=true
	}else{
		if rf.log[len(rf.log)-1].Term<args.LastLogTerm{	//A_{n} compare between follower and candidate
			validLogCheck = true
		}else{											//same term,compare log index
			if rf.log[len(rf.log)-1].Term==args.LastLogTerm && len(rf.log)<=args.LastLogIndex{
				validLogCheck = true
			}
		}
	}
	if validLogCheck&&validTermCheck{
		reply.VoteGranted = true
		rf.votedFor = args.CandidatId
		// Granting a vote counts as hearing from a live candidate, so the
		// election timer restarts (prevents immediate self-candidacy).
		rf.changeToFollowerAndResetTimer()
		if LogEnabled {
			formal_log += "[Grant Vote to peers]:Reset Election Timer"
		}
	}else{
		reply.VoteGranted = false
	}
	if LogEnabled {
		formal_log+=fmt.Sprintf("duration:%v\n",time.Since(start))
		formal_log += fmt.Sprintf("\nAfter RPC rf:%s\n", rf)
		formal_log += fmt.Sprintf("reply:%v\n-----------RequestVote End-----------\n", reply)
	}
	rf.Log(DebugL,formal_log)
	return
}
/**
	Term is used to exchange info,logical lock
 */
// TermCheck implements the term-as-logical-clock rule: a strictly newer peer
// term demotes this peer to Follower (clearing its vote) and is accepted; an
// equal term is accepted unchanged; a stale term is rejected.
// Caller must hold rf.mu.
func (rf *Raft) TermCheck(peerTerm int) bool {
	if peerTerm > rf.currentTerm {
		rf.state = Follower
		rf.currentTerm = peerTerm
		rf.votedFor = -1
		return true
	}
	return peerTerm == rf.currentTerm
}
//
// AppendEntry RPC handler (heartbeat and log replication).
//
// AppendEntry handles both heartbeats and log replication from the leader:
// it validates the leader's term, checks the PrevLogIndex/PrevLogTerm match,
// reconciles conflicting entries, and advances commitIndex from LeaderCommit.
func (rf *Raft) AppendEntry(args *AppendEntryArgs, reply *AppendEntryReply) {
	// Your code here (2A, 2B).
	randId:= fmt.Sprintf("ID:(AppendEntry-%s)",getCurTimeStamp())
	if LogEnabled {
		rf.Log(DebugL,fmt.Sprintf("-----------Incoming AppendEntry StartID:%s -----------\narg:%s\n",randId,args))
	}

	start:=time.Now()
	lockName:=fmt.Sprintf("{%d}.AppendEntry",args.LeaderId)
	rf.lock(lockName)
	defer rf.unlock(lockName)
	// Defers run LIFO: stamp reply metadata, persist, then unlock.
	defer rf.persist()
	defer func(reply *AppendEntryReply){reply.TimeStamp=getCurTimeStamp();reply.Me=rf.me;reply.PrevTicker=rf.prevTicker;reply.LastApply = rf.lastApplied;reply.CommitIndex=rf.commitIndex}(reply)
	formal_log:=""


	/**
		Deal With Term
	*/
	if LogEnabled {
		formal_log+=fmt.Sprintf("duration:%v\n",time.Since(start))
		formal_log+=fmt.Sprintf("-----------AppendEntry StartID:%s -----------\narg:%s\nbefore RPC rf:%s\n",randId,args,rf)
	}
	reply.Term = rf.currentTerm

	// Stale leader term: reject outright.
	// NOTE(review): formal_log built here is never flushed via rf.Log before
	// this early return — confirm whether that is intentional.
	if !rf.TermCheck(args.Term){
		reply.Success = false
		if LogEnabled {
			formal_log+=fmt.Sprintf("duration:%v\n",time.Since(start))
			formal_log+=fmt.Sprintf("reply:%v\n-----------AppendEntry End-----------\n",reply)
		}
		return
	}
	// A current (or newer, already absorbed by TermCheck) leader is alive:
	// fall back to Follower and restart the election timer.
	if args.Term>=rf.currentTerm{
		//rf.currentTerm = args.Term
		// TODO nextIndex and matchindex reset?
		rf.changeToFollowerAndResetTimer()
		if LogEnabled {
			formal_log += fmt.Sprintf("\n======Receive Leader Heart Beat,Reset Election Timer,[%d->%d]\n", args.LeaderId, rf.me)
		}
	}

	/**
		deal with log between leader and follower
	 */

	/**
		A_{n} compare
		treat A_{0}:{index:0,Term:0}
	*/
	logUpperIndex:=len(rf.log)
	//if args.PrevLogIndex==0{					//A_{0} compare alway return true
	//	rf.log = append(rf.log, args.Entries[0:]...)
	//	reply.Success = true
	//}else{										//A_{n} compare
	//
	//}
	/**
			absent or conflict
		 */
	// Consistency check: either the leader anchors at the sentinel index 0,
	// or our entry at PrevLogIndex exists and carries PrevLogTerm.
	if args.PrevLogIndex==0||(args.PrevLogIndex<=logUpperIndex && rf.log[getLogIndex((args.PrevLogIndex))].Term==args.PrevLogTerm){
		//reply.Success = true										//A_{n} matched
		///**
		//	add by alan 4-21,if rf contains args.Entries,Don't truncate log,in case of leader rpc latency
		//	log can only flow from leader to follower
		//
		// */
		//if len(rf.log)>args.PrevLogIndex && rf.log[(args.PrevLogIndex)-1].Term==args.Term {			//alert here
		//	if LogEnabled {
		//		formal_log += fmt.Sprintf("\nstale info,don't truncate:[%v->%v]\n", rf.log, rf.log[0:(args.PrevLogIndex+1)-1])
		//	}
		//	//add by alan 4-21,if rf contains args.Entries,Don't truncate log,in case of leader rpc latency
		//	reply.Success = false
		//	reply.XLen = len(rf.log)
		//}

		conflict_flag :=false
		//A_{n} matched,deal with A_{n+1:}
		// Walk the incoming entries: skip entries we already hold with the
		// same term; on the first term mismatch, truncate from there and
		// append the leader's suffix; if our log simply ends, append the rest.
		for index,entry := range(args.Entries) {
			cur_leader_index := (args.PrevLogIndex+1)+index							//index is relative to A_{n+1}
			//compare joint part of leader and follower log
			if cur_leader_index <= len(rf.log) {
				if entry.Term == rf.log[getLogIndex(cur_leader_index)].Term {
					continue
				} else {
					conflict_flag = true

					rf.log = rf.log[0:getLogIndex(cur_leader_index)]
					if LogEnabled {
						formal_log += fmt.Sprintf("\ntruncate log:[%v->%v]\nconflict_flag:%v", rf.log, rf.log[0:getLogIndex(cur_leader_index)], conflict_flag)
					}
					rf.log = append(rf.log, args.Entries[index:]...)
					break;
				}
				//leader more than follower,no confict entry found
			} else {
				//if LogEnabled {
				//	formal_log += fmt.Sprintf("\ntruncate log:[%v->%v]\nconflict_flag:%v", rf.log, rf.log[0:getLogIndex(cur_leader_index)], conflict_flag)
				//}
				rf.log = append(rf.log, args.Entries[index:]...)
				break;
			}
		}
		//if no conflict fond in log,follower should reply success
		reply.Success = true
	}else{
		// Consistency check failed: fill the fast-backtracking hints.
		if args.PrevLogIndex>logUpperIndex{
			reply.XLen = logUpperIndex								//shorter than leader
		}else{
			// Safe to index: this branch implies 1 <= PrevLogIndex <= len(rf.log).
			reply.XTerm = rf.log[(args.PrevLogIndex)-1].Term		//PrevTerm
			reply.XLen = -1
			for index := args.PrevLogIndex;index>0;index--{
				if rf.log[(index-1)].Term == reply.XTerm{
					reply.XIndex = index							//decrement until prevTerm Changed
				}else{
					break;
				}
			}
		}
	}
	/**
	A_{n} matched,Log Duplication and conflict detect,Transaction need to be idempotent
	 */
	if reply.Success{
		//if len(rf.log)>args.PrevLogIndex{		//more log than leader,truncate follower log
		//	if LogEnabled {
		//		formal_log += fmt.Sprintf("\ntruncate log:[%v->%v]\n", rf.log, rf.log[0:(args.PrevLogIndex+1)-1])
		//	}
		//	//modify by alan in 4-22
		//	//log_copy := make([]Entry,args.PrevLogIndex)
		//	//copy(log_copy,rf.log[0:(args.PrevLogIndex+1)-1])
		//	rf.log = rf.log[0:(args.PrevLogIndex+1)-1]
		//}
		///**
		//	right boundary,raft index is [0:args.PrevLogIndex+1],array index is[0:(args.PrevLogIndex+1)-1]
		//	simply truncate raft log,Raft Accumulative Confirm
		// */
		//
		//
		///**
		//A_{n} matched,set A_{n+1}
		//transaction here need to be idempotent
		// */
		//if len(args.Entries)!=0{
		//	//if len(rf.log)<args.PrevLogIndex+1{	//ensure capacity
		//	//	rf.log=append(rf.log,Entry{})
		//	//}
		//
		//	//delete by alan 4-15,in case of leader crash,commitIndex will fall to 0
		//	rf.log = append(rf.log,args.Entries...)
		//
		//	//if args.PrevLogIndex<rf.commitIndex{
		//	//	left_boundary:=rf.commitIndex-args.PrevLogIndex
		//	//	//right_boundary:=
		//	//	rf.log = append(rf.log,args.Entries[left_boundary:len(args.Entries)]...)
		//	//}else{
		//	//	rf.log = append(rf.log,args.Entries...)
		//	//}
		//
		//	//rf.log = append(rf.log,args.Entries...)
		//	//rf.log[args.PrevLogIndex]=args.Entries[0]
		//}

		/**
		Raft to Applych,
		A_{n} Matched,Apply Msg base on LeaderCommit,apply range [rf.commitIndex+1:rf.LeaderCommit+1]
		*/
		// Advance commitIndex, clamped to our log length, and notify the
		// applier goroutine asynchronously so the RPC doesn't block on it.
		// NOTE(review): commitIndex is assigned twice here (raw LeaderCommit,
		// then the min-clamped value); the first assignment looks redundant.
		if args.LeaderCommit>rf.commitIndex{
			rf.commitIndex = args.LeaderCommit
			if LogEnabled {
				formal_log += fmt.Sprintf("\nApply:%d,log:%v\n", rf.commitIndex, rf.log)
			}
			newCommitIndex := min(args.LeaderCommit,len(rf.log))
			rf.commitIndex = newCommitIndex
			go func(commitIndex int){rf.commitIndexChangeCh<-commitIndex}(rf.commitIndex)
			//delete by alan in 4-18,bug caused by concept confusion
			/**
				ensure range capacity
			 */
			//if rf.commitIndex<len(rf.log){
			//	rf.commitIndex = len(rf.log)
			//	formal_log+=fmt.Sprintf("\nApply:%d,log:%v\n",rf.commitIndex,rf.log)
			//
			//	go func(commitIndex int){rf.commitIndexChangeCh<-commitIndex}(rf.commitIndex)
			//
			//
			//
			//
			//	//command:=rf.log[(rf.commitIndex)-1].Command
			//	//commandIndex := rf.commitIndex
			//	//commandValid := true
			//	////TODO prevTerm index,batch executed?
			//	//go func(command interface{},commandIndex int,commandValid bool){
			//	//	rf.committedCh<-ApplyMsg{
			//	//		Command:command,
			//	//		CommandIndex:commandIndex,					//raft index here,not array index,LeaderID here
			//	//		CommandValid:commandValid}
			//	//}(command,commandIndex,commandValid)
			//
			//}
		}

	}
	if LogEnabled {
		formal_log+=fmt.Sprintf("duration:%v\n",time.Since(start))
		formal_log += fmt.Sprintf("\nAfter RPC rf:%s\n", rf)
		formal_log += fmt.Sprintf("reply:%v\n-----------AppendEntry End-----------\n", reply)
	}
	//add by alan 4-19,append entry rpc is time consume
	rf.prevTicker = time.Now()
	rf.Log(DebugL,formal_log)
	return
}
/**
	map index between raft index(start from 1) and array index(start from 0)
 */
// getLogIndex converts a 1-based raft log index into the 0-based index of
// the backing slice.
func getLogIndex(raftIdx int) int {
	return raftIdx - 1
}

// min returns the smaller of two ints.
func min(left int, right int) int {
	if right < left {
		return right
	}
	return left
}

//
// example code to send a RequestVote RPC to a server.
// server is the index of the target server in rf.peers[].
// expects RPC arguments in args.
// fills in *reply with RPC reply, so caller should
// pass &reply.
// the types of the args and reply passed to Call() must be
// the same as the types of the arguments declared in the
// handler function (including whether they are pointers).
//
// The labrpc package simulates a lossy network, in which servers
// may be unreachable, and in which requests and replies may be lost.
// Call() sends a request and waits for a reply. If a reply arrives
// within a timeout interval, Call() returns true; otherwise
// Call() returns false. Thus Call() may not return for a while.
// A false return can be caused by a dead server, a live server that
// can't be reached, a lost request, or a lost reply.
//
// Call() is guaranteed to return (perhaps after a delay) *except* if the
// handler function on the server side does not return.  Thus there
// is no need to implement your own timeouts around Call().
//
// look at the comments in ../labrpc/labrpc.go for more details.
//
// if you're having trouble getting RPC to work, check that you've
// capitalized all field names in structs passed over RPC, and
// that the caller passes the address of the reply struct with &, not
// the struct itself.
//
// sendRequestVote issues a single blocking RequestVote RPC to peers[server];
// it returns false if labrpc could not deliver the request or the reply.
func (rf *Raft) sendRequestVote(server int, args *RequestVoteArgs, reply *RequestVoteReply) bool {
	return rf.peers[server].Call("Raft.RequestVote", args, reply)
}

// sendAppendEntry issues a single blocking AppendEntry RPC to peers[server];
// it returns false if labrpc could not deliver the request or the reply.
func (rf *Raft) sendAppendEntry(server int, args *AppendEntryArgs, reply *AppendEntryReply) bool {
	return rf.peers[server].Call("Raft.AppendEntry", args, reply)
}

// deepCopy clones src into dst by round-tripping through gob encoding.
// dst must be a pointer to a gob-decodable value of src's type.
func deepCopy(dst, src interface{}) error {
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(src); err != nil {
		return err
	}
	return gob.NewDecoder(bytes.NewReader(buf.Bytes())).Decode(dst)
}


// sendRequestVote_v1 fires ReqParallel concurrent RequestVote RPCs at
// peers[server], each retried up to MaxRetry times, and returns the first
// reply that arrives within RPCTimeout (copying VoteGranted/Term into reply).
// Fixes vs. the original: the worker goroutine now uses its serverArg
// parameter instead of silently capturing the outer `server`; the no-op
// time.AfterFunc (a timer whose callback did nothing) is removed; and the
// pointless infinite `for` wrapper around a select that always returns is
// dropped.
func (rf *Raft) sendRequestVote_v1(server int, args *RequestVoteArgs, reply *RequestVoteReply) bool {
	// Buffered to ReqParallel so every worker can deliver its reply without
	// blocking, even after this function has returned — no goroutine leak.
	replyChan := make(chan *RequestVoteReply, ReqParallel)
	for i := 0; i < ReqParallel; i++ {
		curReply := &RequestVoteReply{}
		// Best-effort seed of the scratch reply; on error the worker simply
		// starts from a zero-valued reply, which Call overwrites anyway.
		deepCopy(curReply, reply)
		go func(argsArg *RequestVoteArgs, serverArg int, replyArg *RequestVoteReply) {
			for try := 0; try < MaxRetry; try++ {
				if rf.peers[serverArg].Call("Raft.RequestVote", argsArg, replyArg) {
					replyChan <- replyArg
					return
				}
			}
		}(args, server, curReply)
	}
	select {
	case curReply := <-replyChan: // first valid reply wins
		reply.VoteGranted = curReply.VoteGranted
		reply.Term = curReply.Term
		return true
	case <-time.After(RPCTimeout): // overall deadline for all attempts
		return false
	}
}

// sendAppendEntry_v1 fires ReqParallel concurrent AppendEntry RPCs at
// peers[server], each retried up to MaxRetry times, and returns the first
// reply that arrives within RPCTimeout, copying the consensus-relevant
// fields (Term, Success, XLen, XIndex, XTerm) into reply.
// Fixes vs. the original: the no-op time.AfterFunc (a timer whose callback
// did nothing) is removed, and the pointless infinite `for` wrapper around
// a select that always returns on its first iteration is dropped.
func (rf *Raft) sendAppendEntry_v1(server int, args *AppendEntryArgs, reply *AppendEntryReply) bool {
	// Buffered to ReqParallel so every worker can deliver its reply without
	// blocking, even after this function has returned — no goroutine leak.
	replyChan := make(chan *AppendEntryReply, ReqParallel)
	for i := 0; i < ReqParallel; i++ {
		curReply := &AppendEntryReply{}
		// Best-effort seed of the scratch reply; on error the worker simply
		// starts from a zero-valued reply, which Call overwrites anyway.
		deepCopy(curReply, reply)
		go func(argsArg *AppendEntryArgs, serverArg int, replyArg *AppendEntryReply) {
			for try := 0; try < MaxRetry; try++ {
				if rf.peers[serverArg].Call("Raft.AppendEntry", argsArg, replyArg) {
					replyChan <- replyArg
					return
				}
			}
		}(args, server, curReply)
	}
	select {
	case curReply := <-replyChan: // first valid reply wins
		reply.Term = curReply.Term
		reply.Success = curReply.Success
		reply.XLen = curReply.XLen
		reply.XIndex = curReply.XIndex
		reply.XTerm = curReply.XTerm
		return true
	case <-time.After(RPCTimeout): // overall deadline for all attempts
		return false
	}
}



//
// the service using Raft (e.g. a k/v server) wants to start
// agreement on the next command to be appended to Raft's log. if this
// server isn't the leader, returns false. otherwise start the
// agreement and return immediately. there is no guarantee that this
// command will ever be committed to the Raft log, since the leader
// may fail or lose an election. even if the Raft instance has been killed,
// this function should return gracefully.
//
// the first return value is the index that the command will appear at
// if it's ever committed. the second return value is the current
// Term. the third return value is true if this server believes it is
// the leader.
//
// Start proposes a new command. On a leader it appends the entry to the
// local log (replication happens asynchronously) and returns the 1-based
// raft index the entry will occupy if committed, the current term, and true.
// On a non-leader it returns (-1, currentTerm, false).
func (rf *Raft) Start(command interface{}) (int, int, bool) {
	// Your code here (2B).
	rf.lock("start append")
	defer rf.unlock("start append")
	defer rf.persist() // runs before unlock (defers are LIFO)
	if rf.state != Leader {
		return -1, rf.currentTerm, false
	}
	// Raft log must grow monotonically; index is 1-based.
	rf.log = append(rf.log, Entry{Term: rf.currentTerm, Command: command})
	index := len(rf.log)
	term := rf.currentTerm
	isLeader := true

	//An1_saved:=make([]int,0)
	//An1_saved = append(An1_saved,rf.me)
	//rf.entryReplicaRec[index]=An1_saved
	//rf.Log(DebugL,fmt.Sprintf("[Replica Count]-Initialize,%s",rf))

	rf.Log(DebugL, fmt.Sprintf("Start Result:{index:%d,term:%d,isLeader:%v}", index, term, isLeader))
	return index, term, isLeader
}
//
// the tester calls Kill() when a Raft instance won't
// be needed again. you are not required to do anything
// in Kill(), but it might be convenient to (for example)
// turn off debug output from this instance.
//
// Kill marks this instance terminated, signals the background loops via
// terminateChan (asynchronously, so Kill never blocks), and flushes the
// debug log. Called by the tester when the instance is no longer needed.
func (rf *Raft) Kill() {
	rf.lock("Kill Raft Instance")
	defer rf.unlock("Kill Raft Instance")
	rf.isTerminated = true
	go func() { rf.terminateChan <- "Signal Kill" }()
	rf.flushLog()
}


//// atomic store/load gives the required data visibility here
//func (rf *Raft) Kill() {
//	atomic.StoreInt32(&rf.dead, 1)
//	// Your code here, if desired.
//}
//
//func (rf *Raft) killed() bool {
//	z := atomic.LoadInt32(&rf.dead)
//	return z == 1
//}

//
// the service or tester wants to create a Raft server. the ports
// of all the Raft servers (including this one) are in peers[]. this
// server's port is peers[me]. all the servers' peers[] arrays
// have the same order. persister is a place for this server to
// save its persistent state, and also initially holds the most
// recent saved state, if any. applyCh is a channel on which the
// tester or service expects Raft to send ApplyMsg messages.
// Make() must return quickly, so it should start goroutines
// for any long-running work.
//


// ticker_v1 sleeps for a randomized election timeout, then starts an
// election (TransRequestVote_v1) if no heartbeat has refreshed prevTicker
// within that window and this peer is not the leader. The atomic isLeader
// check is a lock-free fast path that skips the mutex entirely on leaders.
// Fixes vs. the original: idiomatic `for { }` instead of `for true`, the
// dead trailing `else { continue }` removed, and stale commented-out code
// dropped.
func (rf *Raft) ticker_v1() {
	for {
		// Randomized timeout breaks ties between simultaneous candidates.
		interval := genRandElectionDuration()
		if LogEnabled {
			rf.Log(DebugL, fmt.Sprintf("interval:%d", interval))
		}
		time.Sleep(interval)
		// Fast path: leaders never time out into an election.
		if atomic.LoadInt32(&rf.isLeader) == 1 {
			continue
		}
		rf.lock("check terminated & get ticker")
		if rf.isTerminated {
			rf.unlock("check terminated & get ticker")
			return
		}
		// prevTicker is refreshed by heartbeats and vote grants; a gap
		// longer than the timeout means the leader is presumed dead.
		duration := time.Since(rf.prevTicker)
		if LogEnabled {
			rf.Log(DebugL, fmt.Sprintf("[Duration Judge]rf:%s,duration:%v,interval:%d", rf, duration, interval))
		}
		state := rf.state
		rf.unlock("check terminated & get ticker")
		if duration > interval && state != Leader {
			rf.TransRequestVote_v1()
		}
	}
}


// The ticker goroutine starts a new election if this peer hasn't received
// heartbeats recently: it sleeps for a randomized election timeout, then
// kicks off TransRequestVote_v1 when prevTicker is stale and this peer is
// not the leader.
// Fixes vs. the original: idiomatic `for { }` instead of `for true`, the
// dead trailing `else { continue }` removed, stale commented-out code
// dropped, and the "heartsbeats" typo in the doc comment corrected.
func (rf *Raft) ticker() {
	for {
		// Randomized timeout breaks ties between simultaneous candidates.
		interval := genRandElectionDuration()
		if LogEnabled {
			rf.Log(DebugL, fmt.Sprintf("interval:%d", interval))
		}
		time.Sleep(interval)
		rf.lock("check terminated & get ticker")
		if rf.isTerminated {
			rf.unlock("check terminated & get ticker")
			return
		}
		// prevTicker is refreshed by heartbeats and vote grants; a gap
		// longer than the timeout means the leader is presumed dead.
		duration := time.Since(rf.prevTicker)
		if LogEnabled {
			rf.Log(DebugL, fmt.Sprintf("[Duration Judge]rf:%s,duration:%v,interval:%d", rf, duration, interval))
		}
		state := rf.state
		rf.unlock("check terminated & get ticker")
		if duration > interval && state != Leader {
			rf.TransRequestVote_v1()
		}
	}
}

//func Serve1(rf *Raft){
//	for{
//		elec_timeout:=time.Duration(ElectionTimeout+rand.Int63n(100))*time.Millisecond
//		context,cancel := context.WithTimeout(context.Background(),elec_timeout)
//		select{
//		case <-context.Done():
//			TransRequestVote(rf)						//vote in sequential
//		case <-rf.preemptedCh:			//preempted
//			rf.lock("Cancel  Election Context")
//			cancel()
//			rf.timeReset = true
//			rf.unlock("Cancel  Election Context")
//		}
//	}
//}


// TimeContext groups this peer's timers and timing parameters
// (election and heartbeat) in one place.
type TimeContext struct {
	//mu sync.Mutex				//delete for code consistency
	format string // timestamp format used when logging times
	elec_timer *time.Timer // fires when the election timeout elapses
	heart_timer *time.Timer // fires when the next heartbeat is due
	elec_interval time.Duration // configured election timeout
	heart_interval time.Duration // configured heartbeat period
	RPCTimeout time.Duration // deadline applied to outgoing RPCs

	prev_elec_tick time.Time // last election-clock reset
	prev_heart_tick time.Time // last heartbeat sent/received
	heart_initial bool // whether heart_timer has been started yet
	instant_interval time.Duration
}
// TransContrl tracks the progress of one fan-out "transaction" (an election
// or replication round) across its reply-handling goroutines.
// NOTE: the name is a typo of TransControl, kept because other code uses it.
type TransContrl struct{
	mu sync.Mutex // guards all fields below across reply goroutines
	accept int // positive responses gathered so far
	finished int // total responses gathered so far
	accessed []int								//peers who respond within RPCTimeout
	transTermFlag bool // set once the transaction has concluded
}
// becomeCandidate transitions this peer into the Candidate role: it bumps
// currentTerm, votes for itself, and seeds the vote count with its own vote.
// Only a Follower or an existing Candidate may (re)enter candidacy; any
// other state is rejected. Caller must hold rf.mu.
func (rf *Raft) becomeCandidate() bool {
	if rf.state != Follower && rf.state != Candidate {
		rf.Log(ProductL, "状态切换失败，Candidate")
		return false
	}
	rf.state = Candidate
	rf.currentTerm += 1
	rf.votedFor = rf.me
	rf.Votes = 1
	return true
}
// becomeFollower is an unimplemented stub that always reports failure.
// NOTE(review): demotion to Follower is currently performed by TermCheck
// instead — confirm whether this helper is meant to replace that path.
func(rf *Raft)becomeFollower(term int) bool{
	return false
}
// becomeLeader promotes a Candidate to Leader and reinitializes the
// replication bookkeeping from Figure 2: nextIndex[peer] = last log index+1
// and matchIndex[peer] = 0 for every peer except ourselves. Returns false
// if this peer is not currently a Candidate. Caller must hold rf.mu.
func (rf *Raft) becomeLeader() bool {
	if rf.state != Candidate {
		rf.Log(ProductL, "状态切换失败，Leader")
		return false
	}
	rf.state = Leader
	rf.isLeader = 1
	next := len(rf.log) + 1 // A_{n+1}: first index we will send to each peer
	for peer := range rf.nextIndex {
		if peer != rf.me {
			rf.nextIndex[peer] = next
		}
	}
	for peer := range rf.matchIndex {
		if peer != rf.me {
			rf.matchIndex[peer] = 0
		}
	}
	fmt.Printf("The Leader is Selected:%d,it's Term:%d,it's log in %s\n", rf.me, rf.currentTerm, rf.logFd)
	return true
}

// getLogIndex (method) is an empty placeholder; index translation is done by
// the package-level getLogIndex function instead.
func(rf *Raft)getLogIndex(){

}

// getRaftLogTerm returns the Term of the log entry at the given raft index.
// Raft indices start at 1; the backing slice is 0-based.
func(rf *Raft)getRaftLogTerm(index int) int{
	return rf.log[index-1].Term
}
// TransRequestVote_v1 runs one election attempt as a locked transaction:
// it promotes this peer to Candidate (bumping the term and self-voting),
// persists, builds the RequestVote arguments from the last log entry, and
// fans out one goroutine per peer; replies are funneled through
// handleRequestVoteReply keyed on rawTerm so stale elections are ignored.
func (rf *Raft) TransRequestVote_v1(){
	rf.lock("Vote Start")
	defer rf.unlock("Vote Start")
	//if rf.killed(){
	//	return
	//}

	if rf.isTerminated{
		return
	}
	// Persist only if candidacy actually changed term/votedFor.
	if(rf.becomeCandidate()){
		rf.persist()
	}else{
		return
	}
	var rawTerm = rf.currentTerm								//Start Point; treat election as a transaction, rawTerm is a key variable
	request:=& RequestVoteArgs{CandidatId: rf.me, Term:rawTerm}
	if len(rf.log)==0{											//A_{0},raft index start from 1
		request.LastLogIndex = 0
		request.LastLogTerm = 0
	}else{
		request.LastLogIndex = len(rf.log)						//A_{n}
		request.LastLogTerm = rf.getRaftLogTerm(request.LastLogIndex)
	}

	request.TimeStamp = getCurTimeStamp()						// timestamp aids network monitoring/tracing
	request.PrevTicker = rf.prevTicker
	request.LastApply = rf.lastApplied

	for i,_:=range rf.peers{									//check configuration,majority,not group
		if i==rf.me{
			continue
		}
		// One goroutine per peer; rawTerm travels with it so a late reply
		// can be recognized as belonging to this (possibly stale) election.
		go func(rfArg *Raft,rawTermArg int,indexArg int,requestArg *RequestVoteArgs){
			reply:=& RequestVoteReply{}
			if rfArg.sendRequestVote(indexArg,requestArg,reply){
				rfArg.handleRequestVoteReply(reply,rawTermArg)
			}
		}(rf,rawTerm,i,request)
	}
}


// handleRequestVoteReply processes one RequestVote response for the election
// started at term rawTerm. If the election has been preempted (newer term or
// stale rawTerm) the reply is discarded; otherwise granted votes are tallied
// and, on reaching a strict majority of the full cluster, this peer becomes
// leader, sends an immediate heartbeat, and starts the replication loop.
// Fixes vs. the original: the lock name was built with a zero-argument
// fmt.Sprintf (flagged by go vet), superfluous parentheses around the if
// conditions are removed, and dead commented-out code is dropped.
func (rf *Raft) handleRequestVoteReply(reply *RequestVoteReply, rawTerm int) {
	lockName := "RV Reply" // constant string — no formatting needed
	rf.lock(lockName)
	defer rf.unlock(lockName)
	// Bail out if a newer term has preempted this election.
	if rf.preemptivHandler_v1(reply, rawTerm) {
		return
	}
	if reply.VoteGranted {
		rf.Votes += 1
		// Majority over the whole configuration, not just responders.
		if rf.Votes >= len(rf.peers)/2+1 {
			// Candidate -> Leader transition.
			if rf.becomeLeader() {
				rf.Instant_AE_v1() // immediate heartbeat to assert authority
				go rf.AE_Loop()    // periodic replication loop
			}
		}
	}
}
// handleAppendEntryReply processes one AppendEntry response for the
// replication round started at term rawTerm: preempted/stale rounds are
// discarded, otherwise the reply is handed to AppendEntryTransHandler.
// Fix vs. the original: the lock name was built with a zero-argument
// fmt.Sprintf (flagged by go vet); a plain string literal suffices.
func (rf *Raft) handleAppendEntryReply(appendEntryServ *AppendEntryServ, rawTerm int) {
	lockName := "AE Reply" // constant string — no formatting needed
	rf.lock(lockName)
	defer rf.unlock(lockName)
	if rf.preemptivHandler4AE_v1(appendEntryServ.reply, rawTerm) {
		return
	}
	rf.AppendEntryTransHandler(appendEntryServ)
}

/**
	TransRequestVote runs one full election round, treated as a transaction
	keyed by the term it was started in (rawTerm). The peer's own mutex
	guards Raft state; a per-election TransContrl guards the vote counters
	shared with the sender goroutines.
*/
func (rf *Raft) TransRequestVote() {
	rf.lock("Vote Start")
	if LogEnabled {
		rf.Log(DebugL, fmt.Sprintf("TransRequestVote:%s", rf))
	}
	if rf.isTerminated {
		if LogEnabled {
			rf.Log(ProductL, fmt.Sprintf("Terminated:%v", rf.isTerminated))
		}
		rf.unlock("Vote Exit in Terminate State")
		return
	}
	// Follower or Candidate may start a new election: bump the term and
	// vote for self (persisted before any RPC goes out).
	if rf.state == Follower || rf.state == Candidate {
		rf.state = Candidate
		rf.currentTerm += 1
		rf.votedFor = rf.me
		rf.persist()
	} else {
		if LogEnabled {
			rf.Log(ProductL, fmt.Sprintf("[Fatal Error],invalid state:%s", rf.state))
		}
		// BUG FIX: the original returned here while still holding the
		// mutex taken at "Vote Start", deadlocking every subsequent
		// lock() on this peer.
		rf.unlock("Vote Exit in Invalid State")
		return
	}
	var rawTerm = rf.currentTerm // election term; the whole round is keyed by it
	var peerCount = len(rf.peers)
	/**
	vote bookkeeping; this peer votes for itself
	*/
	elecControl := &TransContrl{}
	elecControl.accept = 1
	elecControl.finished = 1
	elecControl.accessed = []int{}    // peers that responded within RPCTimeout
	elecControl.transTermFlag = false // set when the election transaction has ended

	var preemptiveFlag = false

	request := &RequestVoteArgs{CandidatId: rf.me, Term: rawTerm}
	if len(rf.log) == 0 { // A_{0}: raft indices start from 1
		request.LastLogIndex = 0
		request.LastLogTerm = 0
	} else {
		request.LastLogIndex = len(rf.log) // A_{n}
		request.LastLogTerm = rf.log[len(rf.log)-1].Term
	}
	request.TimeStamp = getCurTimeStamp() // for network monitoring
	request.PrevTicker = rf.prevTicker
	request.LastApply = rf.lastApplied

	replyChan := make(chan *RequestVoteReply, peerCount-1) // buffered so senders never block
	for i := range rf.peers {                              // majority is over the peer count
		if i == rf.me {
			continue
		}
		go func(indexArg int, requestArg *RequestVoteArgs, transCtrl *TransContrl) {
			reply := &RequestVoteReply{}
			if rf.sendRequestVote(indexArg, requestArg, reply) {
				transCtrl.mu.Lock()
				if LogEnabled {
					// FIX: append to the transCtrl parameter instead of
					// the captured elecControl (same object, but the
					// mixed usage was fragile).
					transCtrl.accessed = append(transCtrl.accessed, indexArg)
					rf.Log(DebugL, fmt.Sprintf("RequestVote RPC of %d:\nrequest:\t%v\nreply:\t%v", indexArg, requestArg, reply))
				}
				if !transCtrl.transTermFlag { // avoid sending on a closed channel
					replyChan <- reply
				}
				transCtrl.mu.Unlock()
			} else {
				if LogEnabled {
					rf.Log(DebugL, fmt.Sprintf("RequestVote RPC of %d:\t\nrequest:%v,No Valid Reply", indexArg, requestArg))
				}
			}
		}(i, request, elecControl)
	}
	RPCTimer := make(chan int, 1)
	go func() {
		time.Sleep(RPCTimeout)
		RPCTimer <- 1
	}()
	rf.unlock("Start Vote")
	/**
	reply-draining loop: runs until preempted, majority reached, all
	peers answered, or RPCTimeout fires
	*/
	for {
		if elecControl.transTermFlag {
			close(replyChan)
			break // end of the election transaction
		}
		select {
		case reply := <-replyChan:
			elecControl.mu.Lock()
			lockName := "RV Reply" // no-op Sprintf removed (staticcheck S1039)
			rf.lock(lockName)
			preemptiveFlag = preemptivHandler(rf, reply, rawTerm)
			if preemptiveFlag {
				elecControl.transTermFlag = true
				rf.unlock(lockName)
				elecControl.mu.Unlock()
				break // exits the select; the loop header observes transTermFlag
			}
			// count a valid reply
			elecControl.finished += 1
			if elecControl.finished == peerCount { // all requests finished: stop waiting
				elecControl.transTermFlag = true
			}
			if reply.VoteGranted {
				elecControl.accept += 1
			}
			if elecControl.accept >= peerCount/2+1 { // majority of votes received
				elecControl.transTermFlag = true
			}
			rf.unlock(lockName)
			elecControl.mu.Unlock()

		case <-RPCTimer: // election transaction timeout
			elecControl.mu.Lock()
			if LogEnabled {
				rf.Log(DebugL, "TransRequestVote RPC time exceed RPCTimeout")
			}
			elecControl.transTermFlag = true
			elecControl.mu.Unlock()
		}
	}
	if preemptiveFlag {
		if LogEnabled {
			rf.Log(ProductL, "Preempted by peers!give up RequestVote!")
		}
		return
	}
	if elecControl.accept >= peerCount/2+1 {
		rf.lock("Candidate->Leader")
		// Re-check the term under the lock: a newer term means this
		// election is stale and must be abandoned.
		preemptiveFlag = rf.currentTerm > rawTerm
		if preemptiveFlag {
			rf.unlock("Candidate->Leader,abort")
			return
		}

		rf.changeToLeader()
		if LogEnabled {
			elecControl.mu.Lock()
			rf.Log(ProductL, fmt.Sprintf("[VoteSum]{ID:%d,Term:%d},Result:%v,Accessed:%v", rf.me, rf.currentTerm, rf.state == Leader, elecControl.accessed))
			elecControl.mu.Unlock()
		}

		fmt.Printf("The Leader is Selected:%d,it's Term:%d,it's log in %s\n", rf.me, rf.currentTerm, rf.logFd)
		if LogEnabled {
			rf.Log(ProductL, fmt.Sprintf("The Leader is Selected:%d\n", rf.me))
		}

		// Send one heartbeat immediately. NOTE: Instant_AE releases the
		// "Candidate->Leader" lock internally (deliberate lock handoff),
		// so there is no unlock here.
		rf.Instant_AE()

		AEWrapper(rf)
	}
}
// Instant_AE broadcasts one AppendEntries round immediately after winning
// an election (legacy code path invoked from TransRequestVote).
//
// Locking contract: the CALLER enters holding the "Candidate->Leader"
// lock; this function releases it (see rf.unlock near the end) before
// returning — a deliberate cross-function lock handoff.
//
// NOTE(review): the sender goroutines push replies into replyChan, but
// the reply-processing loop that used to follow the early `return true`
// is commented out, so replies on this path are effectively dropped (the
// buffered channel keeps the senders from blocking). Confirm this is
// intended for this legacy path; AE()/AE_v1() are the live handlers.
func (rf *Raft)Instant_AE() bool{
	rf.Log(DebugL,fmt.Sprintf("AE:%s",rf))
	if rf.state!=Leader ||rf.isTerminated{
		if LogEnabled {
			rf.Log(DebugL, fmt.Sprintf("Not a Leader,Give up AE!"))
		}
		return false
	}
	var peerCount = len(rf.peers)
	var rawTerm  = rf.currentTerm

	// Per-round bookkeeping shared with the sender goroutines.
	var aeControl=& TransContrl{}
	aeControl.finished = 1
	aeControl.transTermFlag = false
	aeControl.accessed=[]int{}

	replyChan:=make(chan *AppendEntryServ,peerCount)  	//buffered channel made it nonblock
	for i,_:=range rf.peers{							//majority is over the peer count, not a group
		if i==rf.me{
			continue
		}
		/**
			Caution! extract args in the monitor region (mutex held), not in the goroutine!
		 */
		request:=& AppendEntryArgs{LeaderId: rf.me, Term:rawTerm}
		next_index :=rf.nextIndex[i]
		request.PrevLogIndex = next_index-1
		if request.PrevLogIndex==0{
			request.PrevLogTerm = 0											//A_{0},index for raft,start from 1
		}else{
			request.PrevLogTerm = rf.log[(request.PrevLogIndex)-1].Term		//A_{n},array index start from 0
		}

		//for liveness, simply send entries as raft slice [nextIndex:];
		//deep copy, because slices are not safe to share across goroutines
		entry_slice:= rf.log[(request.PrevLogIndex+1)-1:(len(rf.log)+1)-1]
		entry_copy :=make([]Entry,len(entry_slice))
		copy(entry_copy,entry_slice)
		request.Entries = entry_copy
		/**
		leader's current commit point, so followers can apply in step
		 */
		request.LeaderCommit = rf.commitIndex
		request.TimeStamp = getCurTimeStamp()
		request.PrevTicker = rf.prevTicker
		request.LastApply = rf.lastApplied

		go func(indexArg int,request *AppendEntryArgs,transCtrl *TransContrl){
			/**
			A_{n}: log index starts from 1; A_{0} is an empty placeholder
			 */

			reply:=& AppendEntryReply{}
			if rf.sendAppendEntry(indexArg,request,reply){
				transCtrl.mu.Lock()
				if LogEnabled{
					transCtrl.accessed = append(transCtrl.accessed,indexArg)
					rf.Log(DebugL,fmt.Sprintf("AE RPC of %d:\nrequest:\t%v\nreply:\t%v",indexArg,request,reply))
				}
				if !transCtrl.transTermFlag{			//avoid sending on a closed channel
					replyChan<-&AppendEntryServ{request:request,reply:reply,index:indexArg}
				}
				transCtrl.mu.Unlock()
			}else{
				if LogEnabled{
					rf.Log(DebugL,fmt.Sprintf("AE RPC of %d:\t,request:\t,%v,No Valid Reply",indexArg,request))
				}
			}
		}(i,request,aeControl)
	}
	RPCTimer := make(chan int,1)
	go func(){
		time.Sleep(RPCTimeout)
		RPCTimer<-1
	}()

	// Release the lock handed over from TransRequestVote.
	rf.unlock("Candidate->Leader")
	return true

	// (A commented-out reply-processing loop, mirroring the one in AE(),
	// previously followed here; the dead comment block was condensed for
	// readability.)
}
// Instant_AE_v1 broadcasts one AppendEntries round (heartbeat plus any
// pending log entries) to every peer, dispatching each reply to
// handleAppendEntryReply asynchronously.
//
// Caller must hold rf's mutex. Returns false if this peer is not the
// leader or has been terminated (no RPCs are sent in that case).
func (rf *Raft) Instant_AE_v1() bool {
	rf.Log(DebugL, fmt.Sprintf("AE:%s", rf))
	if rf.state != Leader || rf.isTerminated {
		if LogEnabled {
			rf.Log(DebugL, fmt.Sprintf("Not a Leader,Give up AE!"))
		}
		return false
	}
	var rawTerm = rf.currentTerm
	// FIX: removed the aeControl TransContrl that was allocated and
	// initialized but never used on this code path (replies are handled
	// by handleAppendEntryReply, not by a local draining loop).
	for i := range rf.peers { // majority is over the peer count
		if i == rf.me {
			continue
		}
		/**
			Caution! extract args in the monitor region (mutex held), not in the goroutine!
		*/
		request := &AppendEntryArgs{LeaderId: rf.me, Term: rawTerm}
		next_index := rf.nextIndex[i]
		request.PrevLogIndex = next_index - 1
		if request.PrevLogIndex == 0 {
			request.PrevLogTerm = 0 // A_{0}: raft index starts from 1
		} else {
			request.PrevLogTerm = rf.log[(request.PrevLogIndex)-1].Term // array index starts from 0
		}

		// For liveness, send everything from nextIndex onward. Deep copy
		// the slice: it is not safe to share across goroutines.
		entry_slice := rf.log[(request.PrevLogIndex+1)-1 : (len(rf.log)+1)-1]
		entry_copy := make([]Entry, len(entry_slice))
		copy(entry_copy, entry_slice)
		request.Entries = entry_copy
		request.LeaderCommit = rf.commitIndex
		request.TimeStamp = getCurTimeStamp()
		request.PrevTicker = rf.prevTicker
		request.LastApply = rf.lastApplied

		go func(indexArg int, requestArg *AppendEntryArgs, rawTermArg int) {
			reply := &AppendEntryReply{}
			// FIX: use the requestArg parameter consistently; the
			// original mixed it with the captured loop-local `request`
			// (same object per iteration, but fragile).
			if rf.sendAppendEntry(indexArg, requestArg, reply) {
				rf.handleAppendEntryReply(&AppendEntryServ{request: requestArg, reply: reply, index: indexArg}, rawTermArg)
			} else {
				if LogEnabled {
					rf.Log(DebugL, fmt.Sprintf("AE RPC of %d:\t,request:\t,%v,No Valid Reply", indexArg, requestArg))
				}
			}
		}(i, request, rawTerm)
	}
	return true
}

/**
	AE_Loop drives the periodic AppendEntries heartbeat: it fires one
	AE_v1 round every HeartBeatInterval and exits as soon as this peer
	stops being the leader or is terminated.
 */
func (rf *Raft) AE_Loop() {
	for {
		rf.Log(DebugL, "------------------------Send Heart Beat------------------------")
		rf.lock("Heart Beat")
		stillLeading := rf.state == Leader && !rf.isTerminated
		if !stillLeading {
			if LogEnabled {
				rf.Log(DebugL, "------------------------Stop Heart Beat------------------------")
			}
			rf.unlock("Heart Beat")
			return
		}
		rf.unlock("Heart Beat")

		rf.AE_v1()

		time.Sleep(HeartBeatInterval)
	}
}

/**
	AEWrapper is the heartbeat driver for the legacy AE() path: it spawns
	one AE round in a fresh goroutine every HeartBeatInterval until this
	peer loses leadership or is terminated.
 */
func AEWrapper(rf *Raft) {
	for {
		rf.Log(DebugL, "------------------------Send Heart Beat------------------------")
		rf.lock("Heart Beat")
		stillLeading := rf.state == Leader && !rf.isTerminated
		if !stillLeading {
			if LogEnabled {
				rf.Log(DebugL, "------------------------Stop Heart Beat------------------------")
			}
			rf.unlock("Heart Beat")
			return
		}
		rf.unlock("Heart Beat")

		go rf.AE()

		time.Sleep(HeartBeatInterval)
	}
}
// AE takes rf's mutex, broadcasts one AppendEntries round to every peer,
// and then synchronously drains the replies until it is preempted by a
// higher term, all peers have answered, or RPCTimeout fires.
// Returns false when this peer is not the leader or was preempted.
func (rf *Raft)AE() bool{

	rf.lock("AE Start")
	rf.Log(DebugL,fmt.Sprintf("AE:%s",rf))
	if rf.state!=Leader ||rf.isTerminated{
		if LogEnabled {
			rf.Log(DebugL, fmt.Sprintf("Not a Leader,Give up AE!"))
		}
		rf.unlock("AE Start")
		return false
	}
	var peerCount = len(rf.peers)
	var rawTerm  = rf.currentTerm

	// Per-round bookkeeping shared with the sender goroutines.
	var aeControl=& TransContrl{}
	aeControl.finished = 1
	aeControl.transTermFlag = false
	aeControl.accessed=[]int{}

	var preemptiveFlag = false

	replyChan:=make(chan *AppendEntryServ,peerCount)  	//buffered channel made it nonblock
	for i,_:=range rf.peers{							//majority is over the peer count, not a group
		if i==rf.me{
			continue
		}
		/**
			Caution! extract args in the monitor region (mutex held), not in the goroutine!
		 */
		request:=& AppendEntryArgs{LeaderId: rf.me, Term:rawTerm}
		next_index :=rf.nextIndex[i]
		request.PrevLogIndex = next_index-1
		if request.PrevLogIndex==0{
			request.PrevLogTerm = 0											//A_{0},index for raft,start from 1
		}else{
			request.PrevLogTerm = rf.log[(request.PrevLogIndex)-1].Term		//A_{n},array index start from 0
		}

		//for liveness, simply send entries as raft slice [nextIndex:];
		//deep copy, because slices are not safe to share across goroutines
		entry_slice:= rf.log[(request.PrevLogIndex+1)-1:(len(rf.log)+1)-1]
		entry_copy :=make([]Entry,len(entry_slice))
		copy(entry_copy,entry_slice)
		request.Entries = entry_copy
		/**
		leader's current commit point, so followers can apply in step
		 */
		request.LeaderCommit = rf.commitIndex
		request.TimeStamp = getCurTimeStamp()
		request.PrevTicker = rf.prevTicker
		request.LastApply = rf.lastApplied

		go func(indexArg int,request *AppendEntryArgs,transCtrl *TransContrl){
			/**
			A_{n}: log index starts from 1; A_{0} is an empty placeholder
			 */

			reply:=& AppendEntryReply{}
			if rf.sendAppendEntry(indexArg,request,reply){
				transCtrl.mu.Lock()
				if LogEnabled{
					transCtrl.accessed = append(transCtrl.accessed,indexArg)
					rf.Log(DebugL,fmt.Sprintf("AE RPC of %d:\nrequest:\t%v\nreply:\t%v",indexArg,request,reply))
				}
				if !transCtrl.transTermFlag{			//avoid sending on a closed channel
					replyChan<-&AppendEntryServ{request:request,reply:reply,index:indexArg}
				}
				transCtrl.mu.Unlock()
			}else{
				if LogEnabled{
					rf.Log(DebugL,fmt.Sprintf("AE RPC of %d:\t,request:\t,%v,No Valid Reply",indexArg,request))
				}
			}
		}(i,request,aeControl)
	}
	RPCTimer := make(chan int,1)
	go func(){
		time.Sleep(RPCTimeout)
		RPCTimer<-1
	}()
	rf.unlock("AE Start")
	/**
	reply-draining loop
	*/
	for{
		if aeControl.transTermFlag{
			close(replyChan)
			break;										//break for loop
		}
		select{
		case appendEntryServ:=<-replyChan:
			aeControl.mu.Lock()
			lockName:=fmt.Sprintf("AE Reply.{%d}",appendEntryServ.index)
			rf.lock(lockName)
			preemptiveFlag = preemptivHandler4AE(rf,appendEntryServ.reply,rawTerm)
			if preemptiveFlag{
				aeControl.transTermFlag = true
				rf.unlock(lockName)
				aeControl.mu.Unlock()
				break;									//exits only the select; the loop header then sees transTermFlag
			}
			aeControl.finished+=1
			if aeControl.finished==peerCount { //if all requests have finished,abort wait
				aeControl.transTermFlag = true
			}
			// Advance nextIndex/matchIndex/commitIndex for this peer.
			rf.AppendEntryTransHandler(appendEntryServ)

			rf.unlock(lockName)
			aeControl.mu.Unlock()


		case <-RPCTimer:							//AE transaction timeout
			aeControl.mu.Lock()
			aeControl.transTermFlag = true
			aeControl.mu.Unlock()
		}
	}
	// Preempted by a higher term: report failure to the caller.
	if preemptiveFlag{
		return false
	}
	return true
}

// AE_v1 takes rf's mutex and broadcasts one AppendEntries round
// (heartbeat plus any pending entries) to all peers. Replies are handled
// asynchronously by handleAppendEntryReply.
// Returns false when this peer is not the leader or has been terminated.
func (rf *Raft) AE_v1() bool {

	rf.lock("AE Start")

	rf.Log(DebugL, fmt.Sprintf("AE:%s", rf))
	if rf.state != Leader || rf.isTerminated {
		if LogEnabled {
			rf.Log(DebugL, fmt.Sprintf("Not a Leader,Give up AE!"))
		}
		rf.unlock("AE Start")
		return false
	}
	var rawTerm = rf.currentTerm
	for i := range rf.peers { // majority is over the peer count
		if i == rf.me {
			continue
		}
		/**
			Caution! extract args in the monitor region (mutex held), not in the goroutine!
		*/
		request := &AppendEntryArgs{LeaderId: rf.me, Term: rawTerm}
		next_index := rf.nextIndex[i]
		request.PrevLogIndex = next_index - 1
		if request.PrevLogIndex == 0 {
			request.PrevLogTerm = 0 // A_{0}: raft index starts from 1
		} else {
			request.PrevLogTerm = rf.log[(request.PrevLogIndex)-1].Term // array index starts from 0
		}

		// For liveness, send everything from nextIndex onward. Deep copy
		// the slice: it is not safe to share across goroutines.
		entry_slice := rf.log[(request.PrevLogIndex+1)-1 : (len(rf.log)+1)-1]
		entry_copy := make([]Entry, len(entry_slice))
		copy(entry_copy, entry_slice)
		request.Entries = entry_copy
		request.LeaderCommit = rf.commitIndex
		request.TimeStamp = getCurTimeStamp()
		request.PrevTicker = rf.prevTicker
		request.LastApply = rf.lastApplied

		go func(indexArg int, requestArg *AppendEntryArgs, rawTermArg int) {
			reply := &AppendEntryReply{}
			// FIX: use the requestArg parameter consistently; the
			// original mixed it with the captured loop-local `request`
			// (same object per iteration, but fragile).
			if rf.sendAppendEntry(indexArg, requestArg, reply) {
				rf.handleAppendEntryReply(&AppendEntryServ{request: requestArg, reply: reply, index: indexArg}, rawTermArg)
			} else {
				if LogEnabled {
					rf.Log(DebugL, fmt.Sprintf("AE RPC of %d:\t,request:\t,%v,No Valid Reply", indexArg, requestArg))
				}
			}
		}(i, request, rawTerm)
	}
	// FIX: removed the dead RPCTimer channel and its goroutine — nothing
	// ever received from it, so each round only leaked a sleeping
	// goroutine and an orphaned buffered channel.
	rf.unlock("AE Start")
	return true
}
/**
	AppendEntryTransHandler applies one AppendEntries reply for one peer:
	it advances nextIndex/matchIndex and re-evaluates commitIndex on
	success, or fast-backtracks nextIndex (XLen/XTerm/XIndex scheme) on a
	log mismatch. Caller must hold rf's mutex. request and reply form a
	one-to-one pair for a particular A_{n}.
 */
func (rf *Raft) AppendEntryTransHandler(appendEntryServ *AppendEntryServ) {
	peer_index := appendEntryServ.index
	peer_reply := appendEntryServ.reply
	peer_request := appendEntryServ.request

	// Consistency guard: if nextIndex moved since this RPC was sent, the
	// reply is stale — drop it to keep the transaction consistent.
	if rf.nextIndex[peer_index] != peer_request.PrevLogIndex+1 { // A_{n} should be the same
		if LogEnabled {
			rf.Log(DebugL, fmt.Sprintf("A_{%d} changed!,abort AE handler", peer_request.PrevLogIndex))
		}
		return
	}

	if peer_reply.Success {
		/**
		A_{n} matched: advance nextIndex past the shipped entries. An
		empty Entries slice acts as a heartbeat placeholder.
		*/
		An1_index := peer_request.PrevLogIndex + 1 // first appended raft index
		if An1_index == rf.nextIndex[peer_index] {
			rf.nextIndex[peer_index] = An1_index + len(peer_request.Entries)
		}
		// matchIndex only moves forward: monotonic updates keep the
		// handler idempotent. A_{0} is special (always matches).
		if peer_request.PrevLogIndex+len(peer_request.Entries) >= rf.matchIndex[peer_index] {
			rf.matchIndex[peer_index] = peer_request.PrevLogIndex + len(peer_request.Entries)
		}
		/**
		re-evaluate the leader's commitIndex after matchIndex advanced
		*/
		rf.dealWithCommitIndex_v1(peer_request.PrevLogIndex, peer_index, peer_request.Entries)
	} else {
		/**
		A_{n} mismatch: fast backup. Lower bound is 0; A_{0} always
		matches.
		*/
		if peer_reply.XLen != -1 { // follower log is too short
			if LogEnabled {
				rf.Log(DebugL, fmt.Sprintf("log match fast back track,according to XLen:%d->%d", rf.nextIndex[peer_index], peer_reply.XLen+1))
			}
			rf.nextIndex[peer_index] = peer_reply.XLen + 1
		} else {
			// Does the follower's conflicting XTerm appear in the
			// leader's log?
			XTerm_Flag := false
			for index := peer_request.PrevLogIndex; index > 0; index-- {
				if rf.log[(index)-1].Term == peer_reply.XTerm {
					// FIX: capture the old value before overwriting it,
					// so the trace shows the true old->new transition
					// (the original logged the already-updated value).
					oldNext := rf.nextIndex[peer_index]
					rf.nextIndex[peer_index] = index + 1
					if LogEnabled {
						rf.Log(DebugL, fmt.Sprintf("log match fast back track,according to XTerm:%d->%d", oldNext, index+1))
					}
					XTerm_Flag = true
					break
				}
				if rf.log[(index)-1].Term < peer_reply.XTerm {
					break // entry term already below XTerm: stop scanning
				}
			}
			if !XTerm_Flag {
				// FIX: same old-value capture as above for the trace.
				oldNext := rf.nextIndex[peer_index]
				rf.nextIndex[peer_index] = peer_reply.XIndex // jump over the absent term
				if LogEnabled {
					rf.Log(DebugL, fmt.Sprintf("log match fast back track,according to XIndex:%d->%d", oldNext, peer_reply.XIndex))
				}
			}
		}
	}
}
// contains reports whether value occurs in set.
func contains(set []int, value int) bool {
	for _, member := range set {
		if member == value {
			return true
		}
	}
	return false
}
/**
	dealWithCommitIndex (legacy v0): per-entry replica counting.
	For each entry shipped starting at raft index An+1, records that
	peer_index now holds it, and once a majority holds an entry advances
	commitIndex (monotonically) and signals the applier goroutine.
	Caller must hold rf's mutex.
 */
func (rf *Raft) dealWithCommitIndex(An int, peer_index int, entrys []Entry) {
	for index := range entrys { // idiom: `for i, _ :=` -> `for i :=`
		An1_index := An + 1 + index
		// FIX: redundant blank identifier in the map read removed
		// (staticcheck S1005).
		An1_saved := rf.entryReplicaRec[An1_index]
		if !contains(An1_saved, peer_index) {
			An1_saved = append(An1_saved, peer_index)
			rf.entryReplicaRec[An1_index] = An1_saved
			// Commit by majority; the leader itself counts as one holder.
			if len(An1_saved) >= len(rf.peers)/2+1 {
				if LogEnabled {
					rf.Log(DebugL, fmt.Sprintf("[Commit By Majority]:%s", rf))
				}
				// commitIndex must stay monotonic and sequential.
				if An1_index > rf.commitIndex {
					rf.commitIndex = An1_index

					// Notify the applier without blocking this handler.
					go func(commitIndex int) { rf.commitIndexChangeCh <- commitIndex }(rf.commitIndex)
				}
			} else {
				if LogEnabled {
					rf.Log(DebugL, fmt.Sprintf("[Replica Count]-Minority,total:%d,appended:%d", (len(rf.peers)+1)/2, len(An1_saved)))
				}
			}
		} else {
			if LogEnabled {
				rf.Log(DebugL, fmt.Sprintf("[Replica Count]-Duplicate,[%v],%d", An1_saved, peer_index))
			}
		}
	}
}

//func (rf *Raft) dealWithCommitIndex_v1(An int,peer_index int,entrys []Entry){
//	/**
//		deal with commit index
//	*/
//	nextCommitIndex:=rf.commitIndex+1
//	count:=0
//	for _,peerMatchIndex:=range rf.matchIndex{
//		if peerMatchIndex>=nextCommitIndex{	//update should be executed in batch
//			count+=1
//		}
//	}
//	if count>=len(rf.peers)/2{				//server count by 1
//		rf.commitIndex=nextCommitIndex
//		rf.Log(DebugL,fmt.Sprintf("[CommitIndex Update]:%d->%d,count:%d\n%s",nextCommitIndex-1,nextCommitIndex,count,rf))
//		go func(commitIndex int){rf.commitIndexChangeCh<-commitIndex}(rf.commitIndex)
//	}
//}

/**
	dealWithCommitIndex_v1 recomputes the leader's commitIndex after a
	matchIndex advanced: it scans every candidate index above commitIndex
	and commits the highest one replicated on a majority. Per the Raft
	paper (§5.4.2, figure 8), the leader may only commit an entry by
	counting replicas if that ENTRY belongs to the leader's current term.
	Caller must hold rf's mutex. An/peer_index/entrys are unused here but
	kept for interface compatibility with the v0 handler.
 */
func (rf *Raft) dealWithCommitIndex_v1(An int, peer_index int, entrys []Entry) {
	new_index := rf.commitIndex
	for i := rf.commitIndex + 1; i <= len(rf.log); i++ {
		// FIX: the current-term check must inspect entry i itself, not
		// the peer's last matched entry (rf.log[peerMatchIndex-1] in the
		// original) — otherwise an old-term entry can be committed by
		// replica counting, which is unsafe (Raft figure 8).
		if rf.log[i-1].Term != rf.currentTerm {
			continue
		}
		count := 1 // the leader itself holds every entry of its own log
		for _, peerMatchIndex := range rf.matchIndex {
			if peerMatchIndex >= i {
				count += 1
			}
		}
		if count >= len(rf.peers)/2+1 { // majority (leader counted above)
			new_index = i
		}
	}
	if new_index != rf.commitIndex {
		if LogEnabled {
			// FIX: log the actual old commitIndex; batch advances mean
			// the old value is not necessarily new_index-1.
			rf.Log(DebugL, fmt.Sprintf("[CommitIndex Update,%d->%d]:%s", rf.commitIndex, new_index, rf))
		}
		rf.commitIndex = new_index
		// Notify the applier without blocking this handler.
		go func(commitIndex int) { rf.commitIndexChangeCh <- commitIndex }(rf.commitIndex)
	}
}
// LockEntry records the name of a held lock and the time it was taken,
// used by the lock-tracing diagnostics.
type LockEntry struct {
	lockName string
	lockTime string
}

// String renders the entry for lock-trace output; it yields the empty
// string whenever logging is disabled.
func (locEntry *LockEntry) String() string {
	if !LogEnabled {
		return ""
	}
	return fmt.Sprintf("{lockName:%s,lockTime:%s}", locEntry.lockName, locEntry.lockTime)
}
//
// the service or tester wants to create a Raft server. the ports
// of all the Raft servers (including this one) are in peers[]. this
// server's port is peers[me]. all the servers' peers[] arrays
// have the same order. persister is a place for this server to
// save its persistent state, and also initially holds the most
// recent saved state, if any. applyCh is a channel on which the
// tester or service expects Raft to send ApplyMsg messages.
// Make() must return quickly, so it should start goroutines
// for any long-running work.
//
func Make(peers []*labrpc.ClientEnd, me int,
	persister *Persister, applyCh chan ApplyMsg) *Raft {
	rf := &Raft{}
	// NOTE(review): rf.mu is held for the whole of Make; the goroutines
	// started below use rf.lock(), so they block on the mutex until Make
	// returns — confirm this ordering is intended.
	rf.mu.Lock()
	defer rf.mu.Unlock()
	rf.peers = peers
	rf.persister = persister
	rf.me = me
	// Your initialization code here (2A, 2B, 2C).

	/**
	Raft FSM State change
	 */
	// Every peer boots as a Follower with term 0 and no vote cast;
	// a randomized election timeout avoids split votes at startup.
	rf.state = Follower
	rf.currentTerm=0
	rf.votedFor=-1
	rf.prevTicker = time.Now()
	rf.electionInterval=genRandElectionDuration()

	// buffered so Kill() can signal termination without blocking
	rf.terminateChan = make(chan string,1)
	/**
	append make it length increment dynamic
	log applied
	*/
	rf.log = make([]Entry,0)
	rf.committedCh = make(chan ApplyMsg,10)			//buffered make it nonblock,and sequential
	rf.commitIndexChangeCh = make(chan int,1)
	rf.commitIndex = 0								//log entry index start from 1
	rf.lastApplied = 0
	/**
	used for raft cluster log replication
	 */
	// nextIndex/matchIndex are leader-only state; they are re-initialized
	// in changeToLeader() on every election win.
	rf.nextIndex = make([]int,len(rf.peers))
	rf.matchIndex = make([]int,len(rf.peers))
	/**
	Lock Detect
	 */
	rf.lockChan = make(chan string,1)
	rf.unlockChan = make(chan string,1)
	/**
	Log fd
	 */
	// per-instance log file name, e.g. "log_2_<unix-ts>.txt"
	iniLogFd:=fmt.Sprintf("log_%d_%d.txt", me,getLogName())
	rf.logFd=iniLogFd
	if LogEnabled{
		rf.Log(ProductL,fmt.Sprintf("Raft Instance Start,ID:%d\n\n",rf.me))
	}
	rf.logInfo=""
	// Main Goroutine
	go rf.ticker()												//prev version:go Serve(rf)


	//go rf.ticker_v1()


	/**
		Apply log to config's log
	 */
	// Apply goroutine: watches commitIndexChangeCh, copies newly committed
	// entries out of rf.log under the lock into a private slice, then
	// delivers them to applyCh in index order without holding the lock.
	go func(){
		applyIndex:=1									//raft index start from 1
		raftCommitIndex :=0
		logSlice:=make([]Entry,0)						//used as log copy
		for{
			select{
			case <-rf.terminateChan:
				if LogEnabled{
					rf.Log(ProductL, fmt.Sprintf("\n-----------Raft Terminated-----------\n"))
				}
				return;
			/**
				commitIndex Changed,apply msg [rf.commit,len(rf.log),leaderCommit]
			 */
			case newlyCommitIndex:=<-rf.commitIndexChangeCh:
				if newlyCommitIndex>raftCommitIndex{		//monotonic

					rf.lock("Extract Msg From Log")
					if LogEnabled {
						rf.Log(DebugL, fmt.Sprintf("[Apply]CommitIndex Changed:[%d->%d],Append Log:[%d:%d]", raftCommitIndex, newlyCommitIndex, (raftCommitIndex+1)-1, (newlyCommitIndex+1)-1))
					}
					// copy entries (raftCommitIndex, newlyCommitIndex]
					// (raft indices are 1-based, slice indices 0-based)
					upperIndex:=(newlyCommitIndex+1)-1
					if upperIndex==len(rf.log){
						logSlice=append(logSlice,rf.log[(raftCommitIndex+1)-1:]...)
					}else {
						logSlice = append(logSlice, rf.log[(raftCommitIndex+1)-1:upperIndex]...)
					}
					rf.unlock("Extract Msg From Log")
					raftCommitIndex = newlyCommitIndex
				}
				/**
					Cause in raft's log [0:CommitIndex+1] will not changed,it is concurrent safe
				 */
				if LogEnabled {
					rf.Log(DebugL, fmt.Sprintf("[Apply]Apply Range:[%d->%d]", applyIndex, raftCommitIndex))
				}
				// applyIndex tracks i exactly (incremented once per iteration),
				// so logSlice[applyIndex-1] is the entry being applied
				for i:=applyIndex;i<=raftCommitIndex;i++{				//apply in a sequential way,synchronized
					if LogEnabled {
						rf.Log(DebugL, fmt.Sprintf("[Apply]Apply Single Term:[%v]", logSlice[(applyIndex)-1]))
					}
					command:=logSlice[(applyIndex)-1].Command
					commandIndex :=applyIndex

					commandValid := true
					//TODO prevTerm index,batch executed?
					applyCh<-ApplyMsg{
						Command:command,
						CommandIndex:commandIndex,					//raft index here,not array index,LeaderID here
						CommandValid:commandValid}
					rf.lock("Apply Index Changed")
					rf.lastApplied = applyIndex
					rf.unlock("Apply Index Changed")


					applyIndex+=1
				}
			}
		}
	}()
	// Lock Detect
	// Optional debugging goroutine: records the sequence of lock/unlock
	// events published by rf.lock()/rf.unlock() for deadlock diagnosis.
	if LockDetectEnabled{												//TODO can be replaced by bool const,like LOCK_DETECT_ENABLED
		go func(){
			//lock_timeout := make(chan string,1)
			//var lock_start_time time.Time
			lockSeq:=make([]LockEntry,0)
			//unlockSeq:=make([]LockEntry,0)
			//var lock_end_time time.Time
			//lock_finished:=false
			lockName := ""
			unlockName:=""
			for{

				select {
				case <-rf.terminateChan:
					if LogEnabled {
						rf.Log(ProductL, fmt.Sprintf("\n-----------Raft Terminated-----------\n"))
					}
					return;
				case lockName = <-rf.lockChan:
					//fmt.Printf("tart Lock:%s\n",lockName)
					//go func(lockNameArg string) {
					//	time.Sleep(LockTimeout)
					//	lock_timeout <- lockNameArg
					//}(lockName)
					//lock_start_time = time.Now()
					lockEntry:=LockEntry{}
					lockEntry.lockName="+"+lockName
					lockEntry.lockTime = getCurTimeStamp()
					lockSeq=append(lockSeq,lockEntry)
					if LogEnabled {
						rf.Log(DebugL, fmt.Sprintf("LockSequence:%v\n",lockSeq))
					}
					//lock_finished = false
				case unlockName = <-rf.unlockChan:
					//lock_finished = true
					//lock_end_time = time.Now()
					lockEntry:=LockEntry{}
					lockEntry.lockName="-"+unlockName
					lockEntry.lockTime = getCurTimeStamp()
					lockSeq=append(lockSeq,lockEntry)
					if LogEnabled {
						rf.Log(DebugL, fmt.Sprintf("LockSequence:%v\n",lockSeq))
					}
				//case timeoutLock:=<-lock_timeout:
				//	if !lock_finished {
				//		if LogEnabled {
				//			rf.Log(DebugL, fmt.Sprintf("[Raft %d],%s timeout\n", rf.me, timeoutLock))
				//		}
				//	}
				}
			}
		}()
	}
	// initialize from state persisted before a crash
	// NOTE(review): readPersist runs after the goroutines above are started;
	// they contend on rf.mu (still held here), so state is restored before
	// Make returns — confirm readPersist does not itself take rf.mu.
	rf.readPersist(persister.ReadRaftState())
	if rf.logFd!=iniLogFd{
		if LogEnabled {
			rf.Log(DebugL, fmt.Sprintf("[Raft %d],iniFd->rawFd[%s->%s]\n", rf.me,iniLogFd,rf.logFd))
		}
	}
	return rf
}



/**
  changeToLeader promotes this peer to Leader and reinitializes the
  per-peer replication bookkeeping: nextIndex becomes last-log-index+1
  and matchIndex becomes 0 for every follower (own slots are skipped).
  Caller must hold rf.mu.
 */
func (rf *Raft)changeToLeader(){
	rf.state = Leader
	firstToSend:=len(rf.log)+1					//A_{n+1}: first index to replicate to each follower
	for peer:=range rf.peers{
		if peer==rf.me{
			continue
		}
		rf.nextIndex[peer]=firstToSend
		rf.matchIndex[peer]=0					//nothing known to be replicated yet
	}
	//add noop entry
	//rf.log=append(rf.log,Entry{Command:-1,Term:rf.currentTerm})
}
/**
  	change state to a follower
	Demotes this peer without touching the election timer; use
	changeToFollowerAndResetTimer when the timer must also be reset
	(vote granted / leader heartbeat / timer expiry).
	Caller must hold rf.mu.
 */
func (rf *Raft)changeToFollower(){
	//TODO reset election timer?
	rf.state = Follower
}
/**
  	change state to a follower formally
	for liveness it is called only in 3 situations:
	1:GrantVote to peer
	2:Receive Leader HeartBeat
	3:ElectionTimer Expired
	Caller must hold rf.mu. Refreshing prevTicker restarts the election
	countdown measured by the ticker goroutine.
 */
func (rf *Raft)changeToFollowerAndResetTimer(){
	rf.state = Follower
	rf.prevTicker = time.Now()					// restart the election countdown
	//rf.resetElectionTimer()
}

/**
	genRandElectionDuration draws a random election timeout in
	[150ms, 400ms); the randomized spread keeps split votes rare.
 */
func genRandElectionDuration() time.Duration{
	const baseMs = 150
	jitterMs := globalRand.Intn(250)
	return time.Duration(baseMs+jitterMs) * time.Millisecond
}
/**
	preemptivHandler reports whether this RequestVote round has been
	preempted. Passive preemption: currentTerm already moved past rawTerm
	while the RPC was in flight. Active preemption: the reply carries a
	newer term, in which case we adopt it, clear votedFor, fall back to
	Follower and persist. Caller must hold rf.mu.
*/
func preemptivHandler(rf *Raft,reply *RequestVoteReply,rawTerm int) bool{
	if rf.currentTerm!=rawTerm{
		return true								//preempted by peer in a passive way
	}
	if reply.Term<=rf.currentTerm{
		return false							//still in charge of this round
	}
	//preempted by peer in a positive way: step down to the newer term
	rf.currentTerm = reply.Term
	rf.votedFor = -1
	rf.changeToFollower()
	rf.persist()
	return true
}
// preemptivHandler_v1 is the method-receiver variant of the RequestVote
// preemption check; it additionally clears the running vote tally when
// stepping down to a newer term. Caller must hold rf.mu.
func (rf *Raft)preemptivHandler_v1(reply *RequestVoteReply,rawTerm int) bool{
	switch{
	case rf.currentTerm!=rawTerm:
		//preempted by peer in a passive way: term moved under us
		return true
	case reply.Term>rf.currentTerm:
		//preempted by peer in a positive way: adopt the newer term
		rf.state=Follower
		rf.currentTerm = reply.Term
		rf.votedFor = -1
		rf.Votes=0
		rf.persist()
		return true
	default:
		return false
	}
}
/**
	preemptivHandler4AE checks an AppendEntries reply for preemption.
	Passive path (term changed during the RPC) returns true WITHOUT
	touching rf.state — only the active path (reply carries a newer
	term) steps the leader down. Caller must hold rf.mu.
*/
func preemptivHandler4AE(rf *Raft,reply *AppendEntryReply,rawTerm int) bool{
	if rf.currentTerm!=rawTerm{
		//preempted by peer in a passive way; state intentionally untouched
		return true
	}
	if reply.Term<=rf.currentTerm{
		return false
	}
	//preempted by peer in a positive way: persist the new term, then demote
	rf.currentTerm = reply.Term
	rf.votedFor = -1
	rf.persist()
	rf.state = Follower
	return true
}

/**
	preemptivHandler4AE_v1 is the method-receiver variant of the
	AppendEntries preemption check. As in the free-function form, the
	passive path (term changed while the RPC was in flight) leaves
	rf.state untouched; only a newer term in the reply demotes us.
	Caller must hold rf.mu.
*/
func (rf *Raft)preemptivHandler4AE_v1(reply *AppendEntryReply,rawTerm int) bool{
	if rf.currentTerm!=rawTerm{
		return true								//passively preempted
	}
	if reply.Term<=rf.currentTerm{
		return false
	}
	//actively preempted: adopt newer term, persist, then step down
	rf.currentTerm = reply.Term
	rf.votedFor = -1
	rf.persist()
	rf.state = Follower
	return true
}
// getCurTimeStamp returns the current wall-clock time rendered with
// millisecond precision, e.g. "2021-05-04 15:04:05.000".
func getCurTimeStamp() string {
	return time.Now().Format("2006-01-02 15:04:05.000")
}
// getLogName returns the current Unix time in seconds, used to make
// per-instance log file names unique across restarts.
func getLogName() int64 {
	return time.Now().Unix()
}
// resetElectionTimer re-arms the election timer with a fresh random
// duration.
// NOTE(review): Reset after Stop without draining the timer's channel can
// leave a stale expiry pending if the timer had already fired — confirm
// the reader of elec_timer's channel tolerates this (see time.Timer docs).
func (rf *Raft)resetElectionTimer(){
	rf.timeContext.elec_timer.Stop()
	rf.timeContext.elec_timer.Reset(genRandElectionDuration())
}

// resetHeartBeatTimer re-arms the heartbeat timer. An initial reset uses
// instant_interval (presumably near-immediate so a fresh leader announces
// itself at once — confirm), later resets use the regular heart_interval.
func (rf *Raft)resetHeartBeatTimer(initialFlag bool){
	interval := rf.timeContext.heart_interval
	if initialFlag {
		interval = rf.timeContext.instant_interval
	}
	rf.timeContext.heart_timer.Stop()
	rf.timeContext.heart_timer.Reset(interval)
}
/**
	lock acquires the peer's state mutex and records the critical-section
	name and entry time for the optional lock-detection trace.
*/
func (rf *Raft) lock(lockName string){
	rf.mu.Lock()
	rf.lockName = lockName
	rf.lockTick = time.Now()
	if !LockDetectEnabled {
		return
	}
	// publish the acquire event asynchronously so tracing never blocks
	go func() { rf.lockChan <- lockName }()
	if LogEnabled{
		rf.Log(LockL, fmt.Sprintf("[%v][%d]lock:%s!\n", getCurTimeStamp(), rf.me, rf.lockName))
	}
}
// unlock releases the state mutex taken via lock(). The trace line is
// emitted before lockName is cleared so it names the section being exited.
func (rf *Raft) unlock(lockName string){
	//if(time.Since(rf.lockTick)>MaxLockDuration){
	//	rf.Log(DebugL,fmt.Sprintf("[%d]lock:%s too much time!\n",rf.me,rf.lockName))
	//	//rf.Kill()
	//}

	// FIX: guard the Log call with LogEnabled, matching every other call
	// site in the file; previously the fmt.Sprintf was evaluated on every
	// unlock even with logging disabled.
	if LogEnabled {
		rf.Log(LockL, fmt.Sprintf("[%d]unlock:%s\n", rf.me, rf.lockName))
	}
	if LockDetectEnabled {
		// publish the release event asynchronously so tracing never blocks
		go func() { rf.unlockChan <- lockName }()
	}
	rf.lockName = ""
	rf.mu.Unlock()

}


