package raft

// The file raftapi/raft.go defines the interface that raft must
// expose to servers (or the tester), but see comments below for each
// of these functions for more details.
//
// Make() creates a new raft peer that implements the raft interface.

import (
	"sort"
	"time"

	"6.5840/raftapi"
)

// Entry is a single replicated log entry: the client command together
// with the term in which the leader first received it.
type Entry struct {
	Term    int // term when the entry was created by a leader
	Command any // opaque client command delivered via ApplyMsg
}

// AppendEntriesArgs is the AppendEntries RPC request; with Entries nil
// it doubles as the heartbeat.
type AppendEntriesArgs struct {
	Term         int     // leader's current term
	LeaderId     int     // leader's peer index
	PrevLogIndex int     // absolute index of the entry immediately preceding Entries
	PrevLogTerm  int     // term of the entry at PrevLogIndex
	Entries      []Entry // entries to replicate; nil for a heartbeat
	LeaderCommit int     // leader's commitIndex
}

// appendEntriesMsg is a distinct type over rpcServiceBaseMsg (defined
// elsewhere in the package) used for AppendEntries-related service messages.
type appendEntriesMsg rpcServiceBaseMsg

// RejectAppendEntries carries the fast-backup hints a follower returns
// when the AppendEntries consistency check fails, letting the leader
// back up nextIndex by whole terms instead of one entry at a time.
type RejectAppendEntries struct {
	XTerm  int // term of the conflicting entry; -1 when the follower's log is too short
	XIndex int // absolute index of the follower's first entry with XTerm; -1 if none
	XLen   int // absolute index one past the follower's last entry
}

// AppendEntriesReply is the AppendEntries RPC response. When Success is
// false due to a log inconsistency, the embedded RejectAppendEntries
// fields tell the leader how far to back up nextIndex.
type AppendEntriesReply struct {
	Term    int  // follower's term, so a stale leader can update itself
	Success bool // true when PrevLogIndex/PrevLogTerm matched the follower's log
	RejectAppendEntries
}

// === util ===

// Returns the highest log index replicated on a majority of peers.
// Call with rf.mu held. With the match indices sorted ascending, the
// value at position len/2 is present on at least a majority of servers.
// The input slice is not modified.
func getLastCommitedIndex(target []int) int {
	sorted := append([]int(nil), target...)
	sort.Ints(sorted)
	return sorted[len(sorted)/2]
}

// firstIndexOfTerm returns the absolute index of the first log entry
// whose term equals term, or -1 if no such entry exists.
// Call with rf.mu held.
func (rf *Raft) firstIndexOfTerm(term int) int {
	// Terms are non-decreasing along the log, so a lower-bound binary
	// search finds the first position whose term is >= term.
	pos := sort.Search(len(rf.Log), func(i int) bool {
		return rf.Log[i].Term >= term
	})
	if pos < len(rf.Log) && rf.Log[pos].Term == term {
		return rf.relative2Abs(pos)
	}
	return -1
}

// lastIndexOfTerm returns the absolute index of the last log entry
// whose term equals term, or -1 if no such entry exists.
// Does not lock; call with rf.mu held.
func (rf *Raft) lastIndexOfTerm(term int) int {
	// Upper-bound search: first position whose term exceeds term; the
	// entry just before it is the only candidate.
	pos := sort.Search(len(rf.Log), func(i int) bool {
		return rf.Log[i].Term > term
	})
	last := pos - 1
	if last >= 0 && rf.Log[last].Term == term {
		return rf.relative2Abs(last)
	}
	return -1
}

// === commit & apply ===

// tryLeaderCommit advances the leader's commitIndex to the highest
// index replicated on a majority of peers, but only when the entry at
// that index belongs to the current term (the Raft Figure 8 safety
// rule), then wakes the apply service.
func (rf *Raft) tryLeaderCommit() {
	rf.mu.Lock()
	defer rf.mu.Unlock()
	candidate := getLastCommitedIndex(rf.matchIndex)
	if candidate <= rf.commitIndex {
		return
	}
	if rf.Log[rf.abs2Relative(candidate)].Term != rf.CurrentTerm {
		// Never commit entries from earlier terms by counting replicas.
		return
	}
	rf.commitIndex = candidate
	go rf.startApplyService(rf.commitIndex)
}

// startApplyService signals applyService with an absolute log index;
// 0 means "deliver the pending snapshot". May block until applyService
// receives the value.
func (rf *Raft) startApplyService(lastAppliedIndex int) {
	rf.startApplyChannel <- lastAppliedIndex
}

// applyService is the long-running goroutine that delivers ApplyMsgs to
// the service layer. It waits on startApplyChannel for a target
// absolute index (0 = deliver the pending snapshot), builds the batch
// of messages while holding rf.mu, then sends them on applyChannel with
// the lock released so a slow receiver cannot deadlock the peer.
func (rf *Raft) applyService() {
	for !rf.killed() {
		lastAppliedIndex := <-rf.startApplyChannel
		applyMsgs := []raftapi.ApplyMsg{}
		rf.mu.Lock()
		// Capture identity fields under the lock for logging afterwards.
		debug_me := rf.me
		debug_term := rf.CurrentTerm
		debug_status := rf.status
		if lastAppliedIndex == 0 {
			// Snapshot delivery: only if it actually advances lastApplied.
			if rf.lastIncludedIndex > rf.lastApplied {
				msg := raftapi.ApplyMsg{
					CommandValid:  false,
					SnapshotValid: true,
					Snapshot:      make([]byte, len(rf.snapshot)),
					SnapshotIndex: rf.lastIncludedIndex,
					SnapshotTerm:  rf.Log[0].Term,
				}
				// Copy the snapshot bytes so a later snapshot install
				// cannot race with the unlocked send below.
				copy(msg.Snapshot, rf.snapshot)
				applyMsgs = append(applyMsgs, msg)
				rf.lastApplied = max(rf.lastIncludedIndex, rf.lastApplied)
			}
		} else {
			// Between the signal and re-acquiring the lock, another
			// goroutine may have installed a snapshot; that could make us
			// apply out of order, so guard on the relative index.
			//---
			if i := rf.abs2Relative(rf.lastApplied + 1); i >= 1 {
				relativeLastAppliedIndex := rf.abs2Relative(lastAppliedIndex)
				for ; i <= relativeLastAppliedIndex; i++ {
					applyMsgs = append(applyMsgs, raftapi.ApplyMsg{CommandValid: true, Command: rf.Log[i].Command, CommandIndex: rf.relative2Abs(i)})
				}
				rf.lastApplied = max(lastAppliedIndex, rf.lastApplied)
			}
			//--- earlier variants kept for reference:
			// relativeLastAppliedIndex := rf.abs2Relative(lastAppliedIndex)
			// for i := max(rf.abs2Relative(rf.lastApplied+1), 1); i <= relativeLastAppliedIndex; i++ {
			// 	applyMsgs = append(applyMsgs, raftapi.ApplyMsg{CommandValid: true, Command: rf.Log[i].Command, CommandIndex: rf.relative2Abs(i)})
			// }
			// rf.lastApplied = max(lastAppliedIndex, rf.lastApplied)
			//---
			// relativeLastAppliedIndex := rf.abs2Relative(lastAppliedIndex)
			// for i := rf.abs2Relative(rf.lastApplied + 1); i <= relativeLastAppliedIndex; i++ {
			// 	applyMsgs = append(applyMsgs, raftapi.ApplyMsg{CommandValid: true, Command: rf.Log[i].Command, CommandIndex: rf.relative2Abs(i)})
			// }
			// rf.lastApplied = max(lastAppliedIndex, rf.lastApplied)
			//---
		}
		rf.mu.Unlock()
		// Deliver outside the lock; the receiver may block arbitrarily long.
		for i := range applyMsgs {
			if lastAppliedIndex == 0 {
				Debug(dLog, "S%d status %d,T%d is applying snapshot index %d,term %d", debug_me, debug_status, debug_term, applyMsgs[i].SnapshotIndex, applyMsgs[i].SnapshotTerm)
			} else {
				Debug(dLog, "S%d, status %d, in T%d is applying {Command %v, CommandIndex %d}", debug_me, debug_status, debug_term, applyMsgs[i].Command, applyMsgs[i].CommandIndex)
			}
			if rf.killed() {
				return
			}
			rf.applyChannel <- applyMsgs[i]
		}
	}
}

// === AppendEntries ===

// sendAppendEntries issues the Raft.AppendEntries RPC to the given
// peer, reporting whether the transport delivered a reply.
func (rf *Raft) sendAppendEntries(server int, args *AppendEntriesArgs, reply *AppendEntriesReply) bool {
	return rf.peers[server].Call("Raft.AppendEntries", args, reply)
}

// AppendEntries is the follower-side RPC handler covering heartbeats,
// log replication, and the consistency check; on rejection it fills the
// XTerm/XIndex/XLen fast-backup hints so the leader can back up quickly.
func (rf *Raft) AppendEntries(args *AppendEntriesArgs, reply *AppendEntriesReply) {
	needPersist := false
	unconsistent := false
	rf.mu.Lock()
	defer rf.mu.Unlock()
	// if args.Entries == nil {
	// 	Debug(dLog, "S%d T%d receive HB from S%d, prev log index %d term %d", rf.me, rf.CurrentTerm, args.LeaderId, args.PrevLogIndex, args.PrevLogTerm)
	// } else {
	// 	Debug(dLog, "S%d T%d receive AE from S%d, prev log index %d term %d", rf.me, rf.CurrentTerm, args.LeaderId, args.PrevLogIndex, args.PrevLogTerm)
	// }
	// Reject a stale leader outright.
	if args.Term < rf.CurrentTerm {
		reply.Term = rf.CurrentTerm
		reply.Success = false
		return
	}
	// A legitimate leader is alive: suppress the election timeout.
	rf.electionTimeout = false
	if args.Term > rf.CurrentTerm {
		rf.CurrentTerm = args.Term
		rf.becomeFollower()
		needPersist = true
	} else if rf.status == CANDIDATE {
		// Same term but a leader exists: abandon the candidacy.
		rf.becomeFollower()
		needPersist = true
	}

	// Inconsistency cases.
	if relativePrevLogIndex := rf.abs2Relative(args.PrevLogIndex); relativePrevLogIndex >= len(rf.Log) {
		// Our log is too short to contain PrevLogIndex at all.
		reply.XTerm = -1
		reply.XLen = rf.relative2Abs(len(rf.Log))
		unconsistent = true
	} else if relativePrevLogIndex > 0 && rf.Log[relativePrevLogIndex].Term != args.PrevLogTerm {
		// Term conflict at PrevLogIndex: report the conflicting term and
		// truncate our log back to just before it.
		reply.XTerm = rf.Log[relativePrevLogIndex].Term
		rf.Log = rf.Log[:relativePrevLogIndex]
		needPersist = true
		reply.XLen = rf.relative2Abs(len(rf.Log))
		unconsistent = true
	}
	if unconsistent {
		// Debug(dLog, "S%d T%d is unconsistent", rf.me, rf.CurrentTerm)
		reply.Term = rf.CurrentTerm
		reply.Success = false
		// For XTerm == -1 this search also yields -1, which the leader
		// interprets as "use XLen instead".
		reply.XIndex = rf.firstIndexOfTerm(reply.XTerm)
		if needPersist {
			rf.persist()
		}
		return
	}

	// Consistent case.
	// In absolute indices, a valid i (index into args.Entries) satisfies
	// args.PrevLogIndex+i+1 >= rf.lastIncludedIndex+1 and
	// 0 <= i < len(args.Entries); entries already covered by our
	// snapshot are skipped.
	entriesIdx := max(rf.lastIncludedIndex-args.PrevLogIndex, 0)
	relativeLogIndex := rf.abs2Relative(args.PrevLogIndex + entriesIdx + 1)
	for entriesIdx < len(args.Entries) {
		if relativeLogIndex == len(rf.Log) {
			// Ran off the end of our log: append the remainder.
			rf.Log = append(rf.Log, args.Entries[entriesIdx:]...)
			needPersist = true
			break
		}
		if rf.Log[relativeLogIndex].Term != args.Entries[entriesIdx].Term {
			// Conflict: drop our suffix and take the leader's entries.
			rf.Log = append(rf.Log[:relativeLogIndex], args.Entries[entriesIdx:]...)
			needPersist = true
			break
		}
		entriesIdx++
		relativeLogIndex++
	}

	// NOTE(review): commitIndex may run ahead of what this log actually
	// matches; applying is capped by lastConsistentIndex below — confirm
	// this invariant holds across all callers.
	rf.commitIndex = max(args.LeaderCommit, rf.commitIndex)
	rf.lastConsistentIndex = max(args.PrevLogIndex+len(args.Entries), rf.lastConsistentIndex)
	// Debug(dLog, "S%d T%d is consistent, commitIndex %d, lastConsistentIndex %d, lastApplied %d", rf.me, rf.CurrentTerm, rf.commitIndex, rf.lastConsistentIndex, rf.lastApplied)
	if toAppliedIndex := min(rf.lastConsistentIndex, rf.commitIndex); toAppliedIndex > rf.lastApplied {
		go rf.startApplyService(toAppliedIndex)
	}
	reply.Term = rf.CurrentTerm
	reply.Success = true
	if needPersist {
		rf.persist()
	}
}

// makeAppendEntriesArgs builds the AppendEntries request for server.
// Does not lock: the caller must hold rf.mu and must already have
// confirmed this peer is the leader. Contains a defensive fallback: if
// nextIndex has fallen behind the snapshot boundary, PrevLog points at
// the snapshot and no entries are attached.
func (rf *Raft) makeAppendEntriesArgs(server int, isEmptyEntries bool) *AppendEntriesArgs {
	res := new(AppendEntriesArgs)
	res.Term = rf.CurrentTerm
	res.LeaderId = rf.me
	// if rf.nextIndex[server] <= 0 || rf.nextIndex[server] > len(rf.Log) {
	// 	Debug(dLeader, "S%d ,status %d, T%d, find nextIndex S%d is %d, log is %v", rf.me, rf.status, rf.CurrentTerm, server, rf.nextIndex[server], rf.Log)
	// }
	isSnapshot := rf.nextIndex[server] <= rf.lastIncludedIndex
	if isSnapshot {
		// Entries before the snapshot are gone; Log[0] holds the
		// snapshot's last included term.
		res.PrevLogIndex = rf.lastIncludedIndex
		res.PrevLogTerm = rf.Log[0].Term
	} else {
		res.PrevLogIndex = rf.nextIndex[server] - 1
		res.PrevLogTerm = rf.Log[rf.abs2Relative(res.PrevLogIndex)].Term
	}
	if isEmptyEntries || isSnapshot {
		res.Entries = nil
	} else {
		res.Entries = make([]Entry, rf.relative2Abs(len(rf.Log))-rf.nextIndex[server])
		copy(res.Entries, rf.Log[rf.abs2Relative(rf.nextIndex[server]):]) // copy the entries: the RPC layer reads them after rf.mu is released, which would otherwise be a data race
	}
	res.LeaderCommit = rf.commitIndex
	return res
}

// appendEntriesOnceTo performs one AppendEntries round-trip to server.
// Locks internally (careful of deadlock: the RPC itself is issued with
// the lock released).
// Returns true when: the server replied and no inconsistency was found;
// or this peer is not (or no longer) the leader / had nothing to send,
// so the caller's job is done; or the work was handed off to
// InstallSnapshot.
// Returns false when the RPC failed or an inconsistency was detected.
func (rf *Raft) appendEntriesOnceTo(server int, isHeartbeat bool, isAdditional bool) bool {
	rf.mu.Lock()
	// Nothing to do: lost leadership, or (for non-heartbeat/additional
	// rounds) the follower is already fully caught up.
	if rf.status != LEADER ||
		((isAdditional || !isHeartbeat) && rf.nextIndex[server] == rf.relative2Abs(len(rf.Log))) {
		rf.mu.Unlock()
		return true
	}
	// Follower is behind the snapshot: replicate via InstallSnapshot instead.
	if !isHeartbeat && rf.nextIndex[server] <= rf.lastIncludedIndex {
		rf.mu.Unlock()
		go rf.installSnapshotTo(server)
		return true
	}
	args := rf.makeAppendEntriesArgs(server, isHeartbeat)
	// Debug(dLog, "S%d ,T%d, status %d, AE to S%d, len(entries)=%d, index range [%d, %d], LeaderCommit is %d", rf.me, rf.CurrentTerm, rf.status, server, len(args.Entries), args.PrevLogIndex+1, args.PrevLogIndex+len(args.Entries), args.LeaderCommit)
	rf.mu.Unlock()
	reply := &AppendEntriesReply{}
	ok := rf.sendAppendEntries(server, args, reply)
	if !ok {
		return false
	}
	return rf.handleAppendEntriesReply(server, args, reply)
}

// handleAppendEntriesReply processes an AppendEntries reply, updating
// nextIndex and matchIndex for server. Locks internally.
// Returns true when no inconsistency was found, or the reply is stale,
// or this leader's own term turned out to be obsolete (it steps down).
// Returns false when a log inconsistency was detected (nextIndex is
// backed up via the XTerm/XIndex/XLen hints and a follow-up probe is
// scheduled).
func (rf *Raft) handleAppendEntriesReply(server int, args *AppendEntriesArgs, reply *AppendEntriesReply) bool {
	rf.mu.Lock()
	defer rf.mu.Unlock()
	if rf.status != LEADER || args.Term < rf.CurrentTerm {
		// Stale reply from an earlier term or leadership stint: ignore it.
		return true
	}
	if reply.Success {
		rf.nextIndex[server] = max(rf.nextIndex[server], args.PrevLogIndex+len(args.Entries)+1)
		rf.matchIndex[server] = max(rf.matchIndex[server], rf.nextIndex[server]-1) // monotonically non-decreasing while this leader is alive
		go rf.tryLeaderCommit()
		if rf.nextIndex[server] < rf.relative2Abs(len(rf.Log)) {
			// New entries arrived meanwhile: keep replicating.
			// Debug(dLeader, "S%d T%d, go appendEntriesTo S%d", rf.me, rf.CurrentTerm, server)
			go rf.appendEntriesTo(server)
		}
		return true
	} else {
		// The leader's own term is obsolete: step down.
		if reply.Term > rf.CurrentTerm {
			rf.CurrentTerm = reply.Term
			rf.becomeFollower()
			rf.persist()
			return true
		}
		// Fast-backup over the inconsistency.
		if reply.XIndex == -1 {
			// Follower's log was too short: jump straight to its length.
			rf.nextIndex[server] = min(reply.XLen, rf.nextIndex[server])
			// Debug(dLeader, "S%d unconsistent case %d of S%d, T%d, args %v reply %v, set nextIndex %d", rf.me, 3, server, rf.CurrentTerm, args, reply, rf.nextIndex[server])
		} else {
			if lastIndexOfTerm := rf.lastIndexOfTerm(reply.XTerm); lastIndexOfTerm == -1 {
				// Leader has no entry of XTerm: skip the follower's whole term.
				rf.nextIndex[server] = min(reply.XIndex, rf.nextIndex[server])
				// Debug(dLeader, "S%d unconsistent case %d of S%d, T%d, args %v reply %v, set nextIndex %d", rf.me, 1, server, rf.CurrentTerm, args, reply, rf.nextIndex[server])
			} else {
				// Leader has XTerm: resume just past its last entry of that term.
				rf.nextIndex[server] = min(lastIndexOfTerm+1, rf.nextIndex[server])
				// rf.nextIndex[server] = min(reply.XIndex, lastIndexOfTerm+1)
				// Debug(dLeader, "S%d unconsistent case %d of S%d, T%d, args %v reply %v, set nextIndex %d", rf.me, 2, server, rf.CurrentTerm, args, reply, rf.nextIndex[server])
			}
		}
		if rf.nextIndex[server] <= rf.lastIncludedIndex {
			go rf.installSnapshotTo(server)
		} else {
			go rf.additionalHeartbeatTo(server, TIGHT_LOOP_INTERVAL)
		}
		return false
	}
}

// appendEntriesTo sends AppendEntries to the given server, retrying up
// to N_MAX_TRY times and returning early on success (or once this peer
// is killed). The per-server channel is a one-slot semaphore bounding
// concurrent senders to the same follower.
func (rf *Raft) appendEntriesTo(server int) {
	rf.appendEntriesChan[server] <- 0
	defer func() { <-rf.appendEntriesChan[server] }()
	for attempt := 0; attempt < N_MAX_TRY; attempt++ {
		if rf.killed() {
			return
		}
		// One round succeeded (or is moot): done.
		if rf.appendEntriesOnceTo(server, false, false) {
			return
		}
		// Pause briefly between attempts to avoid a tight retry loop.
		time.Sleep(time.Duration(TIGHT_LOOP_INTERVAL) * time.Millisecond)
	}
}

// === Heartbeat ===

// heartbeat broadcasts AppendEntries heartbeats to every other peer
// once per HEARTBEAT_INTERVAL until this peer is killed or discovers it
// is no longer the leader. The lock is released while the sends and the
// sleep happen, and re-acquired only for the loop's status check.
func (rf *Raft) heartbeat() {
	rf.mu.Lock()
	n_peers := len(rf.peers) // peer set is fixed; read once under the lock
	for !rf.killed() && rf.status == LEADER {
		// Debug(dInfo, "S%d starts sending heartbeat", rf.me)
		me := rf.me
		rf.mu.Unlock()
		// Debug(dLeader, "S%d heartbeat", me)
		for i := range n_peers {
			if i != me {
				// Fire-and-forget; each send locks independently.
				go rf.appendEntriesOnceTo(i, true, false)
			}
		}
		time.Sleep(time.Duration(HEARTBEAT_INTERVAL) * time.Millisecond)
		rf.mu.Lock() // re-acquire before checking status again
	}
	rf.mu.Unlock()
}

// heartbeatService loops for the life of this peer: it blocks until
// signaled on startHeartbeatChannel, then runs heartbeat() until
// leadership is lost, and waits for the next signal.
func (rf *Raft) heartbeatService() {
	for {
		if rf.killed() {
			return
		}
		<-rf.startHeartbeatChannel
		rf.heartbeat()
	}
}

// startHeartbeatService signals heartbeatService to begin a heartbeat
// round; may block until the service is ready to receive.
func (rf *Raft) startHeartbeatService(sign int) {
	rf.startHeartbeatChannel <- sign
}

// additionalHeartbeatTo waits sleepTime milliseconds and then sends one
// extra heartbeat to server — used to re-probe quickly after a log
// inconsistency instead of waiting for the next regular heartbeat tick.
func (rf *Raft) additionalHeartbeatTo(server int, sleepTime int) {
	delay := time.Duration(sleepTime) * time.Millisecond
	time.Sleep(delay)
	rf.appendEntriesOnceTo(server, true, true)
}
