package raft

import (
	"context"
	"time"
)

// runFollower runs the follower event loop. The election timer is created
// only at entry (there is no timer.Reset below), so returning is how the
// timeout gets refreshed — presumably the caller re-enters the appropriate
// state loop. It returns if
// 1) it receives a valid AppendEntries;
// 2) it grants vote to a candidate;
// 3) election timeouts.
func (rf *Raft) runFollower(ctx context.Context) {
	timer, timeout := newElectionTimer()
	defer timer.Stop()

	rf.debug("waiting for requests...")
	for {
		select {
		case e := <-rf.requestVoteCh:
			// Candidates from stale terms are rejected outright.
			if e.args.Term < rf.currentTerm {
				rf.denyRequestVote(e)
			} else {
				rf.checkTerm(e.args.Term)

				// only reset timeout after granting a candidate.
				// (the reset happens by returning into a fresh timer.)
				if granted, first := rf.tryGrantVote(e); granted && first {
					return
				}
			}

		case e := <-rf.appendEntriesCh:
			// Leaders from stale terms learn our newer term via the reply.
			if e.args.Term < rf.currentTerm {
				rf.denyAppendEntries(e)
			} else {
				rf.checkTerm(e.args.Term)

				// NOTE: follower's AppendEntries RPC handler should be idempotent.
				// since RPCs cancelled by the leader may still be delivered to followers.
				e.reply.Term = rf.currentTerm
				e.reply.ConflictTerm = -1
				e.reply.BacktrackIndex = -1
				// Consistency check: our log must contain an entry at
				// PrevLogIndex with PrevLogTerm.
				e.reply.Success = rf.checkLogMatched(e.args.PrevLogTerm, e.args.PrevLogIndex)

				if e.reply.Success {
					for _, log := range e.args.Entries {
						// IMPORTANT: only trim logs if current log does not match!
						if rf.getLogTerm(log.Index) != log.Term {
							rf.log.Trim(log.Index)
							rf.log.Append(log)
						}
					}

					rf.persist()
					rf.log.Commit(e.args.LeaderCommit)
				} else {
					rf.debug("fail consistency check: AppendEntries %s", e.args)

					// Report backtracking hints so the leader can skip whole
					// conflicting terms instead of decrementing one by one.
					e.reply.ConflictTerm = rf.getLogTerm(e.args.PrevLogIndex)
					if e.reply.ConflictTerm == -1 {
						// Our log does not reach PrevLogIndex; resume from our tail.
						e.reply.BacktrackIndex = rf.getLastIndex()
					} else {
						// Last index whose term precedes the conflicting term.
						e.reply.BacktrackIndex = rf.search(func(term int) bool {
							return term < e.reply.ConflictTerm
						})
					}
				}

				ack(e.done)
				// A valid AppendEntries resets the election timeout by
				// returning (and hence re-entering with a fresh timer).
				return
			}

		case <-timer.C:
			rf.debug("election timeout: %dms", timeout)
			rf.startElection()
			return

		case e := <-rf.clientGetStateCh:
			rf.replyGetState(e)
		case e := <-rf.clientStartCh:
			rf.replyStart(e)

		case <-ctx.Done():
			return
		}
	}
}

// Vote pairs a RequestVote reply with the index of the peer that produced
// it, so the candidate loop can attribute each grant/rejection per server.
type Vote struct {
	from  int // peer index this reply came from
	reply *RequestVoteReply
}

// collect is the per-peer vote collection worker: it sends RequestVote to
// `server` and forwards the first definitive reply (granted or rejected) to
// the candidate loop on `votes`. Transport-level failures are retried every
// HEARTBEAT_SPAN until a reply arrives or ctx is cancelled.
func (rf *Raft) collect(
	ctx context.Context, server int, term int, lastLog *LogEntry,
	votes chan<- Vote, exited chan<- bool) {
	defer rf.debug("server=%d. collect worker stopped.", server)
	defer ack(exited)

	// The arguments never change for the whole election; build them once.
	args := &RequestVoteArgs{
		Term:         term,
		CandidateID:  rf.me,
		LastLogTerm:  -1,
		LastLogIndex: -1,
	}

	if lastLog != nil {
		args.LastLogTerm = lastLog.Term
		args.LastLogIndex = lastLog.Index
	}

	type VoteEvent struct {
		ok    bool
		reply *RequestVoteReply
	}

	done := make(chan VoteEvent)
	send := func(ctx context.Context) {
		reply := &RequestVoteReply{}
		ok := rf.sendRequestVote(server, args, reply)

		select {
		case done <- VoteEvent{ok, reply}:
		case <-ctx.Done():
		}
	}

	go send(ctx)

	ticker := time.NewTicker(HEARTBEAT_SPAN * time.Millisecond)
	defer ticker.Stop()

	// BUG FIX: this used to be a single bare `select`, so the worker exited
	// after the FIRST event of any kind — a transport failure (ok=false) or
	// the first ticker tick ended the worker, and the reply of the retried
	// RPC launched on that tick was silently dropped (the `voted`/`failed`
	// flags were dead stores). Loop until one definitive reply is delivered.
	for {
		select {
		case e := <-done:
			if !e.ok {
				// Transport failure: stay alive and let the ticker retry.
				continue
			}

			rf.debug("new RequestVote reply from [%d/%d]", server, e.reply.Term)

			// An explicit reply — granted or rejected — is final for this
			// election: if the server rejects, it can never grant us later
			// in the same term. Deliver it once and stop.
			select {
			case votes <- Vote{server, e.reply}:
			case <-ctx.Done():
			}
			return

		case <-ticker.C:
			// No definitive response yet: have another try.
			go send(ctx)

		case <-ctx.Done():
			return
		}
	}
}

// runCandidate runs one election round and returns when it ends: the
// candidate wins (initLeader), observes a higher term or a current leader,
// times out (startElection), or ctx is cancelled.
func (rf *Raft) runCandidate(ctx context.Context) {
	timer, timeout := newElectionTimer()
	defer timer.Stop()

	// send RequestVote RPCs.
	votes := make(chan Vote)
	rf.debug("spawn vote collection workers")
	for i := 0; i < len(rf.peers); i++ {
		if i != rf.me {
			exited := make(chan bool)
			newCtx, cancel := context.WithCancel(ctx)
			// NOTE: defers in a loop fire at runCandidate exit, not per
			// iteration — intentional here: leaving this state cancels every
			// collect worker and waits for each one to acknowledge.
			defer func() {
				cancel()
				<-exited
			}()

			go rf.collect(newCtx, i, rf.currentTerm, rf.log.GetLastLog(), votes, exited)
		}
	}

	// self vote
	// Delivered through the same channel as peer votes so the tally below
	// stays uniform; sent from a goroutine since `votes` is unbuffered.
	voted := 0
	go func(ctx context.Context, term int) {
		select {
		case votes <- Vote{rf.me, &RequestVoteReply{Term: term, VoteGranted: true}}:
		case <-ctx.Done():
		}
	}(ctx, rf.currentTerm)

	// main event loop.
	for {
		select {
		case v := <-votes:
			// A reply carrying a higher term means we are stale: step down.
			if rf.checkTerm(v.reply.Term) {
				return
			}

			if v.reply.VoteGranted {
				voted += 1
				rf.debug("new vote from [%d/%d]. voted=%d", v.from, v.reply.Term, voted)

				if voted >= rf.majority {
					rf.initLeader()
					return
				}
			}

		case e := <-rf.requestVoteCh:
			if rf.checkTerm(e.args.Term) {
				// Higher term: step down; presumably replayRequestVote
				// re-delivers the request to the next state loop — verify.
				rf.replayRequestVote(e)
				return
			} else {
				// never vote others
				rf.denyRequestVote(e)
			}

		case e := <-rf.appendEntriesCh:
			// NOTE(review): `WithEq` suggests an AppendEntries from a leader
			// of the SAME term also dethrones this candidate (the election
			// is lost) — confirm against checkTermWithEq.
			if rf.checkTermWithEq(e.args.Term) {
				rf.replayAppendEntries(e)
				return
			} else {
				rf.denyAppendEntries(e)
			}

		case <-timer.C:
			rf.debug("election timeout: %dms", timeout)
			rf.startElection()
			return

		case e := <-rf.clientGetStateCh:
			rf.replyGetState(e)
		case e := <-rf.clientStartCh:
			rf.replyStart(e)

		case <-ctx.Done():
			return
		}
	}
}

// ReplicatedEvent carries the outcome of one AppendEntries RPC from a sync
// worker back to the leader's main loop.
type ReplicatedEvent struct {
	id           int                 // per-worker RPC sequence number, used to drop out-of-order replies
	server       int                 // follower this RPC was sent to
	oldLastIndex int                 // follower's presumed last matching index when the RPC was sent
	newLastIndex int                 // updated match index; filled in by the sync worker on success
	logs         LogSlice            // entries carried by the RPC
	reply        *AppendEntriesReply // the follower's reply
}

// doAppendEntries fires one AppendEntries RPC to `server` asynchronously.
// `id` is the worker's RPC sequence number and `lastIndex` the follower's
// presumed last matching log index; both are echoed back on `intercept` so
// the sync worker can discard stale or out-of-order replies.
func (rf *Raft) doAppendEntries(
	ctx context.Context,
	id, server, term, lastIndex int,
	logs LogSlice,
	intercept chan<- ReplicatedEvent,
) {
	// prepare AppendEntries RPC.
	args := &AppendEntriesArgs{
		Term:         term,
		LeaderID:     rf.me,
		LeaderCommit: -1,
		PrevLogTerm:  rf.getLogTerm(lastIndex),
		PrevLogIndex: lastIndex,
		Entries:      logs,
	}

	// send commitIndex as long as the follower catches up.
	if rf.getLogTerm(lastIndex) == term || lastIndex >= rf.getLastIndex() {
		args.LeaderCommit = rf.log.GetCommitIndex()
	}

	// start a new goroutine to handle the RPC.
	go func(ctx context.Context) {
		// Buffered so the RPC goroutine below can always deliver its result
		// and exit, even after we have given up waiting on it.
		done := make(chan bool, 1)
		reply := &AppendEntriesReply{}

		// BUG FIX: the RPC used to be the right-hand side of a select send
		// (`case done <- rf.sendAppendEntries(...)`). Go evaluates send
		// expressions upon ENTERING the select, before blocking, so the
		// blocking RPC ran to completion first and RPC_TIMEOUT never bounded
		// the call. Run the call in its own goroutine and race the result
		// against the timeout instead.
		go func() {
			done <- rf.sendAppendEntries(server, args, reply)
		}()

		select {
		case ok := <-done:
			if ok {
				select {
				case intercept <- ReplicatedEvent{
					id:           id,
					server:       server,
					oldLastIndex: lastIndex,
					newLastIndex: -1, // to be filled by sync worker
					logs:         args.Entries,
					reply:        reply,
				}:
				case <-ctx.Done():
				}
			}

		case <-time.After(RPC_TIMEOUT * time.Millisecond):
			// A late reply is dropped; the sync worker will retry.

		case <-ctx.Done():
		}
	}(ctx)
}

// synchronize is the per-follower replication worker for `server`: it sends
// heartbeats and log entries via doAppendEntries, advances or backtracks
// lastIndex (the follower's presumed last matching index) based on replies,
// and forwards every served reply to the leader loop on `replicated`.
func (rf *Raft) synchronize(
	ctx context.Context,
	server, term int,
	replicated chan<- ReplicatedEvent,
	exited chan<- bool,
) {
	defer rf.debug("server=%d. sync worker stopped.", server)
	defer ack(exited)

	rpcCount := 0   // monotonically increasing RPC id (see ReplicatedEvent.id)
	lastServed := 0 // highest RPC id whose reply has been processed
	lastIndex := rf.getLastIndex()
	lastActive := time.Now() // when the last RPC was sent; paces heartbeats

	// the first heartbeat contains at most one entry
	// to establish its leadership as fast as possible.
	hub := rf.log.Fetch(lastIndex+1, lastIndex+2, NonBlocking)

	intercept := make(chan ReplicatedEvent)
	send := func(ctx context.Context, logs LogSlice) {
		rpcCount += 1
		lastActive = time.Now()
		rf.doAppendEntries(ctx, rpcCount, server, term, lastIndex, logs, intercept)
	}

	for {
		// time left until the next heartbeat is due.
		timeout := HEARTBEAT_SPAN*time.Millisecond - time.Since(lastActive)

		// either
		// 1) send new logs;
		// 2) timeout, and then send heartbeat with no entry.
		// 3) context cancelled.
		select {
		case logs := <-hub:
			// close the fetch window until this RPC is answered
			// (a nil channel disables this case).
			hub = nil
			send(ctx, logs)

		case r := <-intercept:
			// to prevent out-of-order arrival and stale responses.
			if r.id <= lastServed || r.oldLastIndex != lastIndex {
				continue
			}
			lastServed = r.id

			if r.reply.Success {
				rf.debug("server=%d: lastIndex %d → %d", server, lastIndex, lastIndex+len(r.logs))
				lastIndex += len(r.logs)
				r.newLastIndex = lastIndex
			} else if lastIndex >= 0 {
				// fast last index backtracking
				// e.g.
				// 1111222333444: max { t < t0 }
				// 1111222345566: max { t <= t0 }
				//             ^ prev

				var newLastIndex int
				if r.reply.ConflictTerm != -1 {
					// try to jump to our last entry of the conflicting term.
					maxIndex := rf.search(func(term int) bool {
						return term <= r.reply.ConflictTerm
					})

					rf.debug("maxIndex=%d", maxIndex)
					if maxIndex != -1 && rf.getLogTerm(maxIndex) == r.reply.ConflictTerm {
						newLastIndex = maxIndex
					} else {
						// we don't have that term at all; fall back to the
						// follower's own hint.
						newLastIndex = r.reply.BacktrackIndex
					}
				} else {
					newLastIndex = r.reply.BacktrackIndex
				}

				// the guess must strictly decrease to guarantee progress.
				if newLastIndex >= lastIndex {
					newLastIndex = lastIndex - 1
				}
				if newLastIndex < -1 {
					rf.debug("warning: newLastIndex < -1")
					newLastIndex = -1
				}

				rf.debug("server=%d: lastIndex %d → %d, backtrack=%d",
					server, lastIndex, newLastIndex, r.reply.BacktrackIndex)
				lastIndex = newLastIndex
			} else {
				rf.debug("server %d: warning: lastIndex < 0 but RPC fails.", server)
			}

			// after AppendEntries completes, reopen fetching window.
			// it seems that we have to send all rest logs to speed up agreement.
			hub = rf.log.Fetch(lastIndex+1, -1)

			select {
			case replicated <- r:
			case <-ctx.Done():
			}

		case <-time.After(timeout):
			if hub == nil {
				// if last AppendEntries is in flight, but hasn't received a response,
				// we reopen the log window to resend this AppendEntries.
				hub = rf.log.Fetch(lastIndex+1, -1)

				// reserve some time for log.Fetch to respond.
				lastActive = lastActive.Add(FETCH_TIMEOUT * time.Millisecond)
			} else {
				// otherwise there's no new logs, just send a heartbeat.
				send(ctx, nil)
			}

		case <-ctx.Done():
			return
		}
	}
}

// runLeader keeps running until its leadership ends: a higher term shows up
// in a replication reply or an incoming RPC, or ctx is cancelled.
func (rf *Raft) runLeader(ctx context.Context) {
	// send heartbeat RPCs.
	rf.debug("spawn synchronization workers")
	replicated := make(chan ReplicatedEvent)
	for i := 0; i < len(rf.peers); i++ {
		if i != rf.me {
			exited := make(chan bool)
			newCtx, cancel := context.WithCancel(ctx)
			// NOTE: defers in a loop fire at runLeader exit, not per
			// iteration — intentional: stepping down cancels every sync
			// worker and waits for each one to acknowledge.
			defer func() {
				cancel()
				<-exited
			}()

			go rf.synchronize(newCtx, i, rf.currentTerm, replicated, exited)
		}
	}

	// main event loop.
	for {
		select {
		case r := <-replicated:
			// A follower with a higher term: step down immediately.
			if rf.checkTerm(r.reply.Term) {
				return
			}

			if r.reply.Success &&
				r.newLastIndex > rf.matchIndex[r.server] {
				rf.matchIndex[r.server] = r.newLastIndex
				min, major, _ := rf.getSortedMatchIndex()

				// only commit logs in current term.
				// (a Raft leader must not commit entries from previous terms
				// by counting replicas alone.)
				if rf.getLogTerm(major) == rf.currentTerm {
					rf.log.Commit(major)
				} else if min != -1 {
					// we know that logs have been fully replicated, just commit them.
					rf.log.Commit(min)
				}
			}

		case e := <-rf.requestVoteCh:
			if rf.checkTerm(e.args.Term) {
				// Higher-term candidate: step down; presumably the request
				// is replayed into the next state loop — verify.
				rf.replayRequestVote(e)
				return
			} else {
				// never vote others
				rf.denyRequestVote(e)
			}

		case e := <-rf.appendEntriesCh:
			// A strictly higher term is required here: two leaders cannot
			// share a term, so an equal-term AppendEntries is denied.
			if rf.checkTerm(e.args.Term) {
				rf.replayAppendEntries(e)
				return
			} else {
				rf.denyAppendEntries(e)
			}

		case e := <-rf.clientGetStateCh:
			rf.replyGetState(e)
		case e := <-rf.clientStartCh:
			rf.replyStart(e)

		case <-ctx.Done():
			return
		}
	}
}
