// Package raft : raft 算法模块
// 尝试 raft 算法的实现，尝试替换 zookeeper
package raft

//
// raft 的基本接口
// 1. 创建一个Raft对象，通过调用StartRaftService的方法来启动服务
// rf = Make(...)
// 2. 调用Start()方法在一条日志上启动一致性
// rf.Start(command interface{}) (index, term, isleader)
// 3. 检查当前raft实例的状态
// rf.GetState() (term, isLeader)
// 4. 获取以已达成一致日志的channel
// ApplyMsg
//   each time a new entry is committed to the log, each Raft peer
//   should send an ApplyMsg to the service (or tester)
//   in the same server.
//

import (
	"bytes"
	"distributed_db/utils"
	"encoding/gob"
	"errors"
	"fmt"
	"math/rand/v2"
	"sync"
	"sync/atomic"
	"time"
)

// ApplyMsg is the message delivered on the apply channel each time a
// log entry is committed, so the service layer can apply it.
type ApplyMsg struct {
	CommandValid bool        // false for internal no-op entries (nil Command)
	Command      interface{} // the command payload carried by the log entry
	CommandIndex int         // log index of the committed entry
}

// LogEntry is the unit stored in Raft's log: the command itself plus
// the term in which it was appended by the leader.
type LogEntry struct {
	EntryTerm int         // term when the entry was created
	Command   interface{} // command for the state machine
	Valid     bool        // false for no-op entries (nil command); see Start
}

// ServerState identifies the role a Raft peer is currently playing.
type ServerState int

const (
	Follower ServerState = iota
	Candidate
	Leader
	Killed // terminal state set by Kill()
	Init   // constructed but StartRaftService not yet called
)

const (
	time_out_min        = 200 // election timeout lower bound, ms
	time_out_max        = 300 // election timeout upper bound, ms
	heart_beat_interval = 100 // leader heartbeat period, ms
)

// Event couples a ServerState with a condition variable so goroutines
// can block until the peer enters a particular role.
type Event struct {
	cond  *sync.Cond
	state ServerState
}

// NewEvent returns an Event starting in the Init state.
func NewEvent() *Event {
	return &Event{
		state: Init,
		cond:  sync.NewCond(&sync.Mutex{}),
	}
}

// NotifyAs stores the new state and wakes every waiter.
func (e *Event) NotifyAs(state ServerState) {
	e.cond.L.Lock()
	e.state = state
	e.cond.Broadcast()
	e.cond.L.Unlock()
}

// WaitAs blocks until the state becomes the requested one (or Killed,
// so shutdown always releases waiters).
func (e *Event) WaitAs(state ServerState) {
	e.cond.L.Lock()
	defer e.cond.L.Unlock()
	for {
		if e.state == state || e.state == Killed {
			return
		}
		e.cond.Wait()
	}
}

// GetState returns the current state under the lock.
func (e *Event) GetState() ServerState {
	e.cond.L.Lock()
	defer e.cond.L.Unlock()
	return e.state
}

// SetAs overwrites the state without waking any waiters.
func (e *Event) SetAs(state ServerState) {
	e.cond.L.Lock()
	defer e.cond.L.Unlock()
	e.state = state
}

// A Go object implementing a single Raft peer.
type Raft struct {
	mu        sync.Mutex         // protects the mutable fields below
	peers     []*utils.ClientEnd // RPC end points of all peers (including self)
	persister *Persister         // holder of this peer's persisted state
	me        int                // this peer's index into peers[]
	dead      int32              // set to 1 by Kill(); read via killed()
	//! state       ServerState
	state_event Event         // current role + cond var the role goroutines block on
	msg         chan ApplyMsg // channel on which committed entries are delivered
	editTime    time.Time // last time the election timer was reset (leader contact or granted vote)
	timeout     int       // randomized election timeout, ms

	// Persistent state (Figure 2); saved by persist(), restored by readPersist().
	VotedFor    int
	CurrentTerm int
	Log         []LogEntry
	// Volatile state.
	commitIndex int // highest log index known committed
	lastApplied int // highest log index applied to the state machine
	isRunning   []bool // per-follower flag: an AppendEntries backoff loop is active
	leaderIndex int    // index of the current leader, -1 when unknown

	nextIndex  []int // leader only: next log index to send to each follower
	matchIndex []int // leader only: highest log index known replicated on each follower
	// Look at the paper's Figure 2 for a description of what
	// state a Raft server must maintain.
	role sync.WaitGroup // joins the three role goroutines; waited on by Kill()
	// debug
	start_time time.Time // service start time, used by _since_start traces
}

// _since_start is a debug hook: when its body is uncommented it prints
// how many milliseconds after service start the given event happened.
// It is currently a no-op.
func (rf *Raft) _since_start(server int, info string) {
	// fmt.Printf("S%d run %s at %dms\n", server, info, time.Since(rf.start_time).Milliseconds())
}

// GetState reports this peer's current term and whether it believes it
// is the leader. The two values are read under separate locks, so they
// are not a single atomic snapshot.
func (rf *Raft) GetState() (int, bool) {
	rf.mu.Lock()
	currentTerm := rf.CurrentTerm
	rf.mu.Unlock()
	return currentTerm, rf.state_event.GetState() == Leader
}

// GetLeaderId returns this peer's view of the current leader's index
// (-1 when no leader is known).
func (rf *Raft) GetLeaderId() int {
	rf.mu.Lock()
	leader := rf.leaderIndex
	rf.mu.Unlock()
	return leader
}

// ShowState prints this peer's log length, for debugging.
func (rf *Raft) ShowState() {
	rf.mu.Lock()
	defer rf.mu.Unlock()
	fmt.Printf("S%d Log len:%d\n", rf.me, len(rf.Log))
}

// toFollower demotes this peer to Follower for the given term, clearing
// its notion of the current leader and of any vote cast, and persists
// the updated term/vote.
func (rf *Raft) toFollower(term int) {
	rf.state_event.NotifyAs(Follower)
	rf.mu.Lock()
	defer rf.mu.Unlock()
	rf.CurrentTerm = term
	rf.leaderIndex = -1
	rf.VotedFor = -1
	rf.persist()
}

// toCandidate switches this peer to Candidate: it votes for itself,
// bumps the term, forgets the current leader, and persists the change.
func (rf *Raft) toCandidate() {
	rf.state_event.NotifyAs(Candidate)
	rf.mu.Lock()
	defer rf.mu.Unlock()
	rf.VotedFor = rf.me
	rf.CurrentTerm++
	rf.leaderIndex = -1
	rf._since_start(rf.me, "time out")
	rf.persist()
}

// toLeader promotes this peer to Leader after winning an election:
// it (re)initializes the per-follower replication state (nextIndex,
// matchIndex, isRunning), records itself as leader, then appends a nil
// command via Start so the new term has an entry of its own to commit.
func (rf *Raft) toLeader() {
	// fmt.Print(time.Now())
	rf.state_event.NotifyAs(Leader)
	rf.mu.Lock()
	fmt.Printf("S%d win the election %d with lastTerm %d len %d\n", rf.me, rf.CurrentTerm, rf.Log[len(rf.Log)-1].EntryTerm, len(rf.Log))
	rf.VotedFor = -1
	// Per-follower replication bookkeeping, reset on every election win.
	rf.nextIndex = make([]int, len(rf.peers))
	rf.matchIndex = make([]int, len(rf.peers))
	rf.isRunning = make([]bool, len(rf.peers))
	rf.leaderIndex = rf.me
	for idx := range rf.nextIndex {
		rf.nextIndex[idx] = len(rf.Log) // optimistic: assume followers are up to date
		rf.matchIndex[idx] = 0
		rf.isRunning[idx] = false
	}
	// lge := LogEntry{rf.CurrentTerm, "nops"}
	// rf.Log = append(rf.Log, lge)
	rf.nextIndex[rf.me] = len(rf.Log)
	rf.matchIndex[rf.me] = len(rf.Log) - 1
	rf.persist()
	rf.mu.Unlock()
	// Append a no-op entry; Start must be called after releasing rf.mu
	// since it takes the lock itself.
	rf.Start(nil)
}

// persist snapshots the fields Figure 2 marks persistent (CurrentTerm,
// VotedFor, Log) into the Persister so they survive crash + restart.
// Caller must hold rf.mu. Encode errors are deliberately ignored: gob
// encoding of these concrete types into a bytes.Buffer cannot fail.
func (rf *Raft) persist() {
	var buf bytes.Buffer
	enc := gob.NewEncoder(&buf)
	_ = enc.Encode(rf.CurrentTerm)
	_ = enc.Encode(rf.VotedFor)
	_ = enc.Encode(rf.Log)
	rf.persister.SaveRaftState(buf.Bytes())
}

// readPersist restores CurrentTerm, VotedFor and Log from a persist()
// snapshot. With no snapshot (nil/empty data) — or when the snapshot
// fails to decode — it initializes a fresh bootstrap state: term 0,
// no vote, and a log holding only the sentinel entry at index 0.
//
// Fix: previously a decode error only printed a message and returned,
// leaving every field zero-valued (nil Log panics on first index access,
// VotedFor 0 silently counts as a vote for peer 0). Now the error path
// falls back to the same defaults as an empty snapshot.
func (rf *Raft) readPersist(data []byte) {
	var (
		currentTerm int
		votedFor    int
		entries     []LogEntry
		decoded     bool
	)
	if len(data) >= 1 {
		d := gob.NewDecoder(bytes.NewBuffer(data))
		if d.Decode(&currentTerm) == nil &&
			d.Decode(&votedFor) == nil &&
			d.Decode(&entries) == nil {
			decoded = true
		} else {
			println("decode error!")
		}
	}
	rf.mu.Lock()
	defer rf.mu.Unlock()
	if decoded {
		rf.CurrentTerm = currentTerm
		rf.VotedFor = votedFor
		rf.Log = entries
	} else {
		rf.CurrentTerm = 0
		rf.VotedFor = -1             // -1 means "voted for nobody"
		rf.Log = make([]LogEntry, 1) // index 0 sentinel; EntryTerm is zero-valued 0
	}
	rf.commitIndex = 0
}

// RequestVoteArgs are the arguments of the RequestVote RPC.
// Field names must start with capital letters so the RPC layer can
// encode them.
type RequestVoteArgs struct {
	Term         int // candidate's term
	CandidateId  int // index of the candidate requesting the vote
	LastLogIndex int // index of the candidate's last log entry
	LastLogTerm  int // term of the candidate's last log entry
}

// RequestVoteReply is the response to a RequestVote RPC.
// Field names must start with capital letters so the RPC layer can
// encode them.
type RequestVoteReply struct {
	Term         int  // responder's current term, so a stale candidate can update itself
	VotedGranted bool // true iff the vote was granted (name kept for wire compatibility)
}

// RequestVote is the RPC handler a candidate invokes to solicit this
// peer's vote. A peer may vote at most once per term.
//
// Scenario: in a running cluster a follower times out, bumps its term,
// and broadcasts vote requests; those may land on peers that have not
// timed out, on peers that also timed out, or arrive from several
// competing candidates at once.
//
// Rules:
//  1. sender's Term > ours: adopt the sender's term, revert to follower,
//     and become eligible to vote in that term.
//  2. sender's Term < ours: refuse; just report our term back.
//  3. sender's Term == ours:
//     3.1 we may already be leader: refuse.
//     3.2 not voted yet: grant iff the candidate's log is at least as
//     up-to-date as ours ((lastTerm, lastIndex) comparison).
//     3.3 already voted: only re-grant to the same candidate.
func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) error {
	// Reject RPCs until StartRaftService has run.
	if rf.state_event.GetState() == Init {
		return errors.New("Raft Service Not Started")
	}
	var ret RequestVoteReply
	ret.VotedGranted = false
	term, _ := rf.GetState()
	// If the candidate's term is newer, adopt it and step down first.
	if term < args.Term {
		rf.toFollower(args.Term)
	}
	// Re-read the state: toFollower above may have changed it.
	// NOTE(review): state can still change between this read and the
	// Lock below — confirm this window is benign.
	term, isleader := rf.GetState()
	rf.mu.Lock()
	// Term relations at this point:
	//   sender.Term > receiver.Term was normalized above;
	//   sender.Term < receiver.Term means the sender is stale: ignore;
	//   equal terms: vote only if we are not the leader ourselves.
	if term == args.Term && !isleader {
		// Grant only if we have not voted this term, or already voted
		// for this very candidate.
		if rf.VotedFor == -1 || rf.VotedFor == args.CandidateId {
			// Election restriction: compare (lastTerm, lastIndex); grant
			// only when the candidate's log is at least as up-to-date.
			if rf.Log[len(rf.Log)-1].EntryTerm < args.LastLogTerm ||
				(rf.Log[len(rf.Log)-1].EntryTerm == args.LastLogTerm &&
					len(rf.Log)-1 <= args.LastLogIndex) { // current one is older
				ret.VotedGranted = true
				rf.VotedFor = args.CandidateId
				// Granting a vote also resets the election timer.
				rf.editTime = time.Now()
				// fmt.Printf("Term%d len%d S%d vote for S%d %d\n", args.Term, len(rf.Log), rf.me, args.CandidateId, args.LastLogTerm)
			}
		}
	}
	rf.persist()
	rf.mu.Unlock()
	ret.Term = term
	*reply = ret
	return nil
}

// sendRequestVote issues the RequestVote RPC to the given peer and
// reports whether the RPC layer delivered it.
func (rf *Raft) sendRequestVote(server int, args *RequestVoteArgs, reply *RequestVoteReply) bool {
	return rf.peers[server].Call("Raft.RequestVote", args, reply)
}

// HandleRequestVote snapshots the candidate's state into a RequestVote
// RPC, sends it to server, and forwards any reply onto result.
func (rf *Raft) HandleRequestVote(server int, wg *sync.WaitGroup, result chan<- RequestVoteReply) {
	defer wg.Done()
	rf.mu.Lock()
	last := len(rf.Log) - 1
	args := RequestVoteArgs{
		Term:         rf.CurrentTerm,
		CandidateId:  rf.me,
		LastLogIndex: last,
		LastLogTerm:  rf.Log[last].EntryTerm,
	}
	rf.mu.Unlock()
	var reply RequestVoteReply
	if rf.sendRequestVote(server, &args, &reply) {
		result <- reply
	}
}

// AppendEntriesArgs carries a leader's heartbeat / log-replication request.
type AppendEntriesArgs struct {
	Term         int        // leader's term
	LeaderId     int        // so followers can record the current leader
	PrevLogIndex int        // index of the entry immediately preceding Entries
	PrevLogTerm  int        // term of the entry at PrevLogIndex
	Entries      []LogEntry // entries to append (empty for a pure heartbeat)
	LeaderCommit int        // leader's commitIndex
}

// AppendEntriesReply is the follower's response to AppendEntries.
type AppendEntriesReply struct {
	Term    int  // follower's term, so a stale leader can step down
	Success bool // true iff the follower matched PrevLogIndex/PrevLogTerm and appended
}

// AppendEntries is the RPC handler for a leader's heartbeat/replication.
// Rules:
//  1. sender's Term < ours: reject (stale leader).
//  2. sender's Term >= ours while we are not a Follower: only step down
//     and adopt the sender's term on this call; the log work is deferred
//     to the leader's next retry so role transitions stay clean.
//  3. otherwise (we are a Follower): reset the election timer, run the
//     log-consistency check, append entries, and advance commitIndex up
//     to LeaderCommit, delivering committed entries on rf.msg.
func (rf *Raft) AppendEntries(args *AppendEntriesArgs, reply *AppendEntriesReply) error {
	// Reject RPCs until StartRaftService has run.
	if rf.state_event.GetState() == Init {
		return errors.New("Raft Service Not Started")
	}
	var ret AppendEntriesReply
	ret.Term, _ = rf.GetState()
	// rf.ShowState()
	// fmt.Printf("%d recieve entries %d\n", rf.me, len(args.Entries))
	// Ignore stale terms.
	if args.Term < ret.Term {
		ret.Success = false
	} else if rf.state_event.GetState() != Follower {
		// Not a follower: switch roles only, skipping the log handling,
		// to avoid interleaving with the old role's unfinished work.
		rf.toFollower(args.Term)
		rf.mu.Lock()
		rf.leaderIndex = args.LeaderId
		rf.editTime = time.Now()
		rf.mu.Unlock()
	} else {
		// NOTE(review): when args.Term > ret.Term and we are already a
		// Follower, CurrentTerm is never raised to args.Term in this
		// branch — confirm whether that is intended.
		rf.mu.Lock()
		// **********************************************
		rf._since_start(rf.me, fmt.Sprintf("set by L%d interval %vms", args.LeaderId, time.Since(rf.editTime).Milliseconds()))
		rf.editTime = time.Now() // leader contact: reset election timer
		rf.leaderIndex = args.LeaderId
		// Consistency check: our log must contain PrevLogIndex with PrevLogTerm.
		if len(rf.Log)-1 >= args.PrevLogIndex && rf.Log[args.PrevLogIndex].EntryTerm == args.PrevLogTerm {
			// Drop any conflicting suffix after the matching prefix.
			rf.Log = rf.Log[:args.PrevLogIndex+1]
			// Append the leader's entries.
			// fmt.Printf("S%d append %d\n", rf.me, len(args.Entries))
			if len(args.Entries) > 0 {
				rf.Log = append(rf.Log, args.Entries...)
				// fmt.Printf("S%d append to %d\n", rf.me, len(rf.Log))
			}
			rf.persist()
			// Advance commitIndex and apply the newly committed entries.
			// NOTE(review): this sends on rf.msg while holding rf.mu; if
			// the consumer stalls, the whole peer blocks — verify sizing.
			if args.LeaderCommit > rf.commitIndex {
				i := rf.commitIndex + 1
				for ; i <= min(args.LeaderCommit, len(rf.Log)-1) && !rf.killed(); i++ {
					rf.msg <- ApplyMsg{rf.Log[i].Command != nil, rf.Log[i].Command, i}
				}
				// fmt.Printf("FS%d commit from %d to %d in at %d\n", rf.me, rf.commitIndex+1, i-1, rf.CurrentTerm)
				rf.commitIndex = min(args.LeaderCommit, len(rf.Log)-1)
				rf.lastApplied = rf.commitIndex
			}
			ret.Success = true
		}
		rf.mu.Unlock()
		// *****************************************
	}
	*reply = ret
	return nil
}

// sendAppendEntries issues the AppendEntries RPC to the given follower
// and reports whether the RPC layer delivered it.
func (rf *Raft) sendAppendEntries(follwerId int, args *AppendEntriesArgs, reply *AppendEntriesReply) bool {
	return rf.peers[follwerId].Call("Raft.AppendEntries", args, reply)
}

// generate_entries returns the slice of log entries to ship to a
// follower starting at index next, capped at `limit` entries per RPC,
// together with the index one past the last included entry (the
// follower's nextIndex after a successful append).
// Caller must hold rf.mu.
//
// Fix: the original guarded on `limit <= 0`, which is dead code since
// limit is the constant 50; the branch is removed and the cap hoisted
// to a named constant.
func (rf *Raft) generate_entries(next int) ([]LogEntry, int) {
	const limit = 50 // max entries per AppendEntries RPC, keeps messages small
	if next+limit >= len(rf.Log) {
		return rf.Log[next:], len(rf.Log)
	}
	return rf.Log[next : next+limit], next + limit
}

// HandleAppendEntries runs one replication exchange with a follower.
// It builds an AppendEntries RPC from nextIndex[server]; while the
// follower rejects the consistency check, nextIndex is walked backwards
// by `step` (clamped at 1, growing 4x per retry) and the RPC is
// retried. On success the follower's matchIndex/nextIndex are advanced
// and the reply is forwarded to result for commit counting.
func (rf *Raft) HandleAppendEntries(server int, wg *sync.WaitGroup, result chan<- AppendEntriesReply) {
	defer wg.Done()
	// Snapshot leader state into the RPC arguments.
	var args AppendEntriesArgs
	var reply AppendEntriesReply
	var length int
	rf.mu.Lock()
	// if rf.isRunning[server] {
	// 	rf.mu.Unlock()
	// 	return
	// }
	args.LeaderCommit = rf.commitIndex
	args.LeaderId = rf.me
	args.PrevLogIndex = rf.nextIndex[server] - 1
	args.PrevLogTerm = rf.Log[rf.nextIndex[server]-1].EntryTerm
	args.Term = rf.CurrentTerm
	args.Entries, length = rf.generate_entries(rf.nextIndex[server])
	rf.mu.Unlock()
	if len(args.Entries) != 0 {
		// fmt.Printf("leader%d %d sent to S%d, index %d\n", args.LeaderId, length, server, args.PrevLogIndex)
	}
	// Send and retry until accepted, the RPC fails, or we stand down.
	step := 1
	for !rf.killed() && rf.sendAppendEntries(server, &args, &reply) {
		if reply.Success {
			// Follower accepted: record its replication progress.
			rf.mu.Lock()
			rf.matchIndex[server] = length - 1
			rf.nextIndex[server] = length
			// fmt.Printf("S%d increase %d\n", server, len(args.Entries))
			rf.mu.Unlock()
			result <- reply
			break
		} else {
			// Rejected: stop retrying if terms moved on or we lost leadership.
			term, il := rf.GetState()
			if term != reply.Term || !il {
				break
			}
			rf.mu.Lock()
			if rf.nextIndex[server] == 1 {
				// Cannot back off past the log's sentinel entry.
				utils.LogC(utils.Red, "S%d error from %d\n", server, args.LeaderId)
				rf.mu.Unlock()
				break
			}
			if !rf.isRunning[server] {
				rf.isRunning[server] = true
				// LogC(Green, "S%d trace back in %d\n", server, args.LeaderId)
			}
			// Back nextIndex off by step (floor of 1), then quadruple step
			// so deeply diverged logs converge in few round trips.
			if rf.nextIndex[server] <= step {
				rf.nextIndex[server] = 1
			} else {
				rf.nextIndex[server] -= step
			}
			step *= 4
			args.PrevLogIndex = rf.nextIndex[server] - 1
			args.PrevLogTerm = rf.Log[rf.nextIndex[server]-1].EntryTerm
			args.Term = rf.CurrentTerm
			args.Entries, length = rf.generate_entries(rf.nextIndex[server])
			rf.mu.Unlock()
		}
	}
	rf.mu.Lock()
	rf.isRunning[server] = false
	rf.mu.Unlock()
}

// Start begins agreement on a new log entry.
// the service using Raft (e.g. a k/v server) wants to start
// agreement on the next command to be appended to Raft's log. if this
// server isn't the leader, returns false. otherwise start the
// agreement and return immediately. there is no guarantee that this
// command will ever be committed to the Raft log, since the leader
// may fail or lose an election. even if the Raft instance has been killed,
// this function should return gracefully.
//
// the first return value is the index that the command will appear at
// if it's ever committed. the second return value is the current
// term. the third return value is true if this server believes it is
// the leader.
func (rf *Raft) Start(command interface{}) (int, int, bool) {
	index := -1
	term := -1
	isLeader := true

	// Only a leader accepts new entries.
	isLeader = rf.state_event.GetState() == Leader
	if isLeader {
		// NOTE(review): leadership can be lost between the check above
		// and the Lock below; the entry would then be appended on a
		// peer that is no longer leader — confirm this is tolerated.
		rf.mu.Lock()
		// Valid is false for nil commands (leader no-op entries).
		lge := LogEntry{rf.CurrentTerm, command, command != nil}
		rf.Log = append(rf.Log, lge)
		rf.nextIndex[rf.me] = len(rf.Log)
		rf.matchIndex[rf.me] = len(rf.Log) - 1
		index = len(rf.Log) - 1
		term = rf.CurrentTerm
		rf.persist()
		rf.mu.Unlock()
		// fmt.Printf("S%d target index %d, value %d at %d\n", rf.me, index, command, term)
	}
	return index, term, isLeader
}

// Kill shuts this Raft peer down.
// the tester doesn't halt goroutines created by Raft after each test,
// but it does call the Kill() method. your code can use killed() to
// check whether Kill() has been called. the use of atomic avoids the
// need for a lock.
//
// the issue is that long-running goroutines use memory and may chew
// up CPU time, perhaps causing later tests to fail and generating
// confusing debug output. any goroutine with a long-running loop
// should call killed() to check whether it should stop.
func (rf *Raft) Kill() {
	// Order matters: set the dead flag first so woken goroutines
	// immediately observe killed() == true.
	atomic.StoreInt32(&rf.dead, 1)
	// Wake every goroutine blocked in WaitAs (Killed releases all waiters).
	rf.state_event.NotifyAs(Killed)
	// Join the three role goroutines started by StartRaftService.
	rf.role.Wait()
}

// killed reports whether Kill() has been invoked on this peer.
func (rf *Raft) killed() bool {
	return atomic.LoadInt32(&rf.dead) == 1
}

// StartRaftService boots the three role goroutines (follower timer,
// candidate election loop, leader heartbeat loop), restores state
// persisted before any crash, and enters the Follower role. Kill()
// joins the goroutines through rf.role.
func (rf *Raft) StartRaftService() {
	rf.start_time = time.Now()
	rf.role = sync.WaitGroup{}
	// follower

	rf.role.Add(3)
	// Follower: nothing to do except watch the election timer and
	// switch to Candidate when it expires.
	go func() {
		defer rf.role.Done()
		for !rf.killed() {
			// fmt.Printf("S%d wait as %d\n", me, Follower)
			rf.state_event.WaitAs(Follower)
			if rf.killed() {
				break
			}
			rf.mu.Lock()
			elapsed := time.Since(rf.editTime)
			rf.mu.Unlock()
			// NOTE(review): this loop re-checks without sleeping while in
			// the Follower state — confirm the CPU cost is acceptable.
			if elapsed.Milliseconds() > int64(rf.timeout) {
				// fmt.Printf("follower S%d:", rf.me)
				// fmt.Println(elapsed)
				rf.state_event.NotifyAs(Candidate)
			}
		}
	}()
	// Candidate: run one election round per timeout.
	go func() {
		defer rf.role.Done()
		for !rf.killed() {
			rf.state_event.WaitAs(Candidate)
			if rf.killed() {
				break
			}
			rf.mu.Lock()
			elapsed := time.Since(rf.editTime)
			rf.mu.Unlock()
			// redo election after elect time out
			if elapsed.Milliseconds() > int64(rf.timeout) {
				rf.toCandidate()
				resultChan := make(chan RequestVoteReply, len(rf.peers))
				cnt := 1 // starts at 1: toCandidate voted for ourselves
				var wg sync.WaitGroup
				// Fan out vote requests so replies arrive concurrently.
				for i := range rf.peers {
					if i == rf.me {
						continue
					}
					wg.Add(1)
					go rf.HandleRequestVote(i, &wg, resultChan)
				}
				// Close the channel once every request has finished.
				go func() {
					wg.Wait()
					close(resultChan)
				}()
				// Tally the replies.
				for reply := range resultChan {
					term, _ := rf.GetState()
					// A higher term in any reply demotes us immediately.
					if reply.Term > term {
						rf.toFollower(reply.Term)
						break
					}
					// fmt.Print(reply)
					// fmt.Printf(" cnt: %d, CurrentTerm: %d\n", cnt, rf.CurrentTerm)
					if rf.state_event.GetState() != Candidate {
						break
					}
					// Count granted votes; a strict majority wins.
					if reply.Term <= term && reply.VotedGranted {
						cnt++
						if cnt > len(rf.peers)/2 {
							rf.toLeader()
							break
						}
					}
				}
				rf.mu.Lock()
				rf.editTime = time.Now()
				rf.mu.Unlock()
			}

		}
	}()

	// leader heart beat
	go func() {
		defer rf.role.Done()
		for !rf.killed() {
			rf.state_event.WaitAs(Leader)
			if rf.killed() {
				break
			}
			// send heart beat
			// fmt.Printf("leader S%d:", rf.me)x
			var wg sync.WaitGroup
			resultChan := make(chan AppendEntriesReply, len(rf.peers))
			// Consume follower replies for this heartbeat round.
			go func() {
				for reply := range resultChan {
					term, _ := rf.GetState()
					// A higher term in any reply demotes us.
					if reply.Term > term {
						rf.toFollower(reply.Term)
						break
					}
					// Once a majority has replicated an entry, advance
					// commitIndex and apply entries to the state machine.
					if rf.state_event.GetState() == Leader && reply.Success {
						rf.mu.Lock()
						commit_index := rf.commitIndex
						cnt := 0
						var lge LogEntry
						// Walk commit_index forward; the loop stops at the
						// first entry whose term reaches CurrentTerm.
						// NOTE(review): this appears to enforce the rule that
						// only current-term entries commit by counting —
						// confirm the boundary behavior.
						for ; lge.EntryTerm < rf.CurrentTerm && len(rf.Log) > commit_index+1; commit_index++ {
							lge = rf.Log[commit_index+1]
						}
						if len(rf.Log) <= commit_index {
							rf.mu.Unlock()
							continue
						}
						// fmt.Printf("%dmatchIndex ", rf.me)
						for _, value := range rf.matchIndex {
							// fmt.Printf("S%d:%d ", idx, value)
							if commit_index <= value {
								cnt++
							}
						}
						// fmt.Println()
						// ! if we don't have the a log send when a leader select, we have to change here
						if cnt > len(rf.peers)/2 && lge.EntryTerm <= term {
							// NOTE(review): sends on rf.msg while holding rf.mu;
							// a stalled consumer blocks the leader — verify sizing.
							for i := rf.commitIndex + 1; i <= commit_index; i++ {
								rf.msg <- ApplyMsg{rf.Log[i].Command != nil, rf.Log[i].Command, i}
							}
							// fmt.Printf("S%d commit from %d to %d at %d\n", me, rf.commitIndex+1, commit_index, rf.CurrentTerm)
							rf.commitIndex = commit_index
							rf.lastApplied = commit_index
						}
						rf.mu.Unlock()
					}
				}
			}()
			// Fan out heartbeats / log entries to every follower.
			for i := range rf.peers {
				if i == rf.me {
					continue
				}
				wg.Add(1)
				go rf.HandleAppendEntries(i, &wg, resultChan)
				if rf.state_event.GetState() != Leader {
					break
				}
			}
			// Close the reply channel once this round's RPCs complete.
			go func() {
				wg.Wait()
				close(resultChan)
			}()
			time.Sleep(time.Duration(heart_beat_interval) * time.Millisecond)
		}
	}()

	// initialize from state persisted before a crash
	rf.readPersist(rf.persister.ReadRaftState())
	rf.toFollower(rf.CurrentTerm)
	rf.mu.Lock()
	rf.leaderIndex = -1
	rf.editTime = time.Now()
	rf.mu.Unlock()
}

// Make creates a Raft server. the ports of all the Raft servers
// (including this one) are in peers[]. this server's port is peers[me].
// all the servers' peers[] arrays have the same order. persister is a
// place for this server to save its persistent state, and also
// initially holds the most recent saved state, if any. applyCh is a
// channel on which the tester or service expects Raft to send ApplyMsg
// messages. Make() must return quickly, so it should start goroutines
// for any long-running work (done later by StartRaftService).
func Make(peers []*utils.ClientEnd, me int,
	persister *Persister, applyCh chan ApplyMsg) *Raft {
	rf := &Raft{
		peers:     peers,
		persister: persister,
		me:        me,
		msg:       applyCh,
	}

	// The peer starts in the Init state; RPC handlers reject requests
	// until StartRaftService runs.
	rf.state_event = *NewEvent()
	// Randomize the election timeout in [time_out_min, time_out_max) so
	// peers rarely time out simultaneously. rand.IntN draws uniformly,
	// avoiding the modulo form rand.Int()%n.
	rf.timeout = rand.IntN(time_out_max-time_out_min) + time_out_min
	// fmt.Printf("%d: %d\n", rf.me, rf.timeout)
	return rf
}
