package raft

import (
	"bytes"

	"6.824/labgob"
)

// InstallSnapshotArgs is the argument struct for the InstallSnapshot RPC,
// sent by the leader to a follower that is too far behind to catch up
// through AppendEntries.
type InstallSnapshotArgs struct {
	Term              int    // leader's current term
	LeaderId          int    // so follower can identify the leader
	LastIncludedIndex int    // the snapshot replaces all entries up through and including this index
	LastIncludedTerm  int    // term of the entry at LastIncludedIndex
	Data              []byte // raw snapshot bytes (delivered whole; no chunking/offset here)
}

// InstallSnapshotReply is the reply struct for the InstallSnapshot RPC.
type InstallSnapshotReply struct {
	Term int // follower's currentTerm, so a stale leader can step down
}

// sendInstallSnapshot issues the InstallSnapshot RPC to the given peer and
// reports whether the RPC transport succeeded.
func (rf *Raft) sendInstallSnapshot(server int, args *InstallSnapshotArgs, reply *InstallSnapshotReply) bool {
	return rf.peers[server].Call("Raft.InstallSnapshot", args, reply)
}

// InstallSnapshot is the RPC handler run on a peer when the leader pushes a
// snapshot to it. It validates terms, records the leader's authority, and
// hands the snapshot to the service via applyCh.
func (rf *Raft) InstallSnapshot(args *InstallSnapshotArgs, reply *InstallSnapshotReply) {

	rf.mu.Lock()
	defer rf.mu.Unlock()

	// install snapshot rpc 1: reply immediately if term < currentTerm.
	if args.Term < rf.currentTerm {
		reply.Term = rf.currentTerm
		return
	}

	// A higher term converts us, but we must NOT return early: the leader
	// only checks reply.Term before advancing matchIndex, so bailing out
	// here would make it record a successful install that never happened.
	// (Assumes setNewTerm updates currentTerm and demotes to follower, as
	// its use in leaderSendInstallSnapshots implies.)
	if args.Term > rf.currentTerm {
		rf.setNewTerm(args.Term)
	}
	// Set the reply AFTER any term update so the leader sees our real term.
	reply.Term = rf.currentTerm

	if rf.state == Candidate {
		rf.state = Follower
	}

	// Stale snapshot: our own snapshot already covers this prefix.
	if args.LastIncludedIndex <= rf.log.LastIncludedIndex {
		return
	}

	rf.resetElectionTimer()

	msg := ApplyMsg{
		SnapshotValid: true,
		Snapshot:      args.Data,
		SnapshotTerm:  args.LastIncludedTerm,
		SnapshotIndex: args.LastIncludedIndex,
	}

	// Deliver asynchronously so we never block on applyCh while holding rf.mu.
	go func() {
		rf.applyCh <- msg
	}()
}

// leaderSendInstallSnapshots sends one InstallSnapshot RPC (args prepared by
// the caller) to serverId and, on success, advances that peer's match/next
// indexes past the snapshot. Called without rf.mu held; the RPC itself must
// not hold the lock.
func (rf *Raft) leaderSendInstallSnapshots(serverId int, args *InstallSnapshotArgs) {
	// reply is purely local; no lock is needed to create it, and the RPC
	// below must run unlocked anyway.
	reply := InstallSnapshotReply{}

	if ok := rf.sendInstallSnapshot(serverId, args, &reply); !ok {
		return
	}

	rf.mu.Lock()
	defer rf.mu.Unlock()

	// Drop stale responses: we may have lost leadership or moved to a new
	// term while the RPC was in flight.
	if rf.state != Leader || args.Term != rf.currentTerm {
		return
	}

	// A peer with a higher term means we are a stale leader: step down.
	if reply.Term > rf.currentTerm {
		rf.setNewTerm(reply.Term)
		return
	}

	// The peer now has everything up through the snapshot.
	rf.matchIndex[serverId] = args.LastIncludedIndex
	rf.nextIndex[serverId] = args.LastIncludedIndex + 1
}

//
// CondInstallSnapshot: a service wants to switch to snapshot. Only do so if
// Raft hasn't have more recent info since it communicated the snapshot on
// applyCh. Returns true iff the snapshot was adopted.
//
func (rf *Raft) CondInstallSnapshot(lastIncludedTerm int, lastIncludedIndex int, snapshot []byte) bool {

	// Your code here (2D).
	rf.mu.Lock()
	defer rf.mu.Unlock()

	// We have already committed past this snapshot; adopting it would move
	// state backwards.
	if lastIncludedIndex <= rf.commitIndex {
		return false
	}

	// Both return-true paths adopt the snapshot the same way; do it in one
	// place once the log has been trimmed below.
	defer func() {
		rf.log.LastIncludedIndex = lastIncludedIndex
		rf.log.LastIncludedTerm = lastIncludedTerm
		rf.snapshot = snapshot
		rf.commitIndex = lastIncludedIndex // important
		rf.lastApplied = lastIncludedIndex // important
		rf.persistStateAndSnapshot(snapshot)
	}()

	// installSnapshot RPC 6: if an existing entry has the same index and
	// term as the snapshot's last entry, retain the entries following it.
	// The retained suffix starts at lastIncludedIndex+1 — hence the +1, the
	// same trim formula Snapshot() uses — because the fresh dummy below
	// stands in for the entry at lastIncludedIndex itself.
	if lastIncludedIndex <= rf.log.lastLog().Index && rf.log.at(lastIncludedIndex).Term == lastIncludedTerm {
		rf.log.Entries = append([]Entry{{Term: 0, Command: nil, Index: 0}}, rf.log.Entries[lastIncludedIndex-rf.log.LastIncludedIndex+1:]...)
		return true
	}

	// installSnapshot RPC 7: otherwise discard the entire log.
	rf.log.Entries = []Entry{{Term: 0, Command: nil, Index: 0}}

	return true
}

// the service says it has created a snapshot that has
// all info up to and including index. this means the
// service no longer needs the log through (and including)
// that index. Raft should now trim its log as much as possible.
func (rf *Raft) Snapshot(index int, snapshot []byte) {
	// Your code here (2D).
	rf.mu.Lock()
	defer rf.mu.Unlock()

	// already snapshot or uncommit
	if index <= rf.log.LastIncludedIndex || index > rf.commitIndex {
		return
	}

	// Capture the term of the entry at index BEFORE trimming: the trim below
	// replaces that slot with a zero-valued dummy and moves LastIncludedIndex,
	// so reading rf.log.at(index).Term afterwards would yield the dummy's
	// term (0) instead of the real entry's term.
	lastIncludedTerm := rf.log.at(index).Term

	// Keep entries after index; the fresh dummy stands in for index itself.
	rf.log.Entries = append([]Entry{{Term: 0, Command: nil, Index: 0}}, rf.log.Entries[index-rf.log.LastIncludedIndex+1:]...)
	rf.log.LastIncludedIndex = index
	rf.log.LastIncludedTerm = lastIncludedTerm
	rf.snapshot = snapshot
	rf.persistStateAndSnapshot(snapshot)
}

// persistStateAndSnapshot atomically saves Raft's persistent state
// (currentTerm, votedFor, log) together with the given snapshot.
// Callers in this file invoke it while holding rf.mu.
func (rf *Raft) persistStateAndSnapshot(snapshot []byte) {
	var buf bytes.Buffer
	enc := labgob.NewEncoder(&buf)
	// Persistent state on all servers
	enc.Encode(rf.currentTerm)
	enc.Encode(rf.votedFor)
	enc.Encode(rf.log)

	rf.persister.SaveStateAndSnapshot(buf.Bytes(), snapshot)
}
