package cluster

import (
	"cluster-cache/store"
	"fmt"
	"github.com/hashicorp/raft"
	raftboltdb "github.com/hashicorp/raft-boltdb/v2"
	"log"
	"math"
	"strconv"
	"time"
)

// NoSnapshotConfig returns a default Raft configuration tuned to
// effectively disable snapshotting: the threshold is pushed to
// math.MaxInt32 and the interval to one year, so neither trigger
// fires in practice. Logging is reduced to errors only and the
// append batch size is raised to 1024 entries.
func NoSnapshotConfig() *raft.Config {
	cfg := raft.DefaultConfig()
	// Quiet the raft library's internal logger.
	cfg.LogLevel = "error"
	// Allow larger batches per AppendEntries RPC.
	cfg.MaxAppendEntries = 1024
	// Make both snapshot triggers unreachable in practice.
	cfg.SnapshotInterval = 365 * 24 * time.Hour
	cfg.SnapshotThreshold = math.MaxInt32
	return cfg
}

// NewRaft 多个raft组可以公用一个fsm
// NewRaft builds and starts a Raft instance whose local ID is the decimal
// form of bootID. Multiple raft groups may share a single fsm. The log
// store is backed by an mmap'd file at "<dir>-log" and the stable store by
// a BoltDB file at "<dir>-table"; snapshots are discarded (see
// NoSnapshotConfig). On any setup failure the process exits via log.Fatalf.
// It returns the running Raft node together with its log store.
func NewRaft(bootID int, fsm raft.FSM, dir string, trans raft.Transport) (*raft.Raft, *store.MMapLog) {
	// 1. Set up the Raft configuration (snapshots effectively disabled).
	config := NoSnapshotConfig()
	config.LocalID = raft.ServerID(strconv.Itoa(bootID))

	// 2. Create the snapshot store. Snapshots are discarded outright,
	// matching the no-snapshot configuration above.
	snapshots := raft.NewDiscardSnapshotStore()

	// 3. Create the log store and stable store.
	logStore := store.NewLog(fmt.Sprintf("%s-log", dir))
	stableStore, err := raftboltdb.NewBoltStore(fmt.Sprintf("%s-table", dir))
	if err != nil {
		// Fatalf exits the process; no return is reachable after it.
		log.Fatalf("stable store error: %s", err)
	}

	// 4. Start the Raft node itself.
	instance, err := raft.NewRaft(config, fsm, logStore, stableStore, snapshots, trans)
	if err != nil {
		log.Fatalf("new raft: %s", err)
	}
	return instance, logStore
}

// bootstrap seeds a brand-new cluster with this node as its sole member.
// It is a no-op unless both save and boot are true. After bootstrapping,
// a background goroutine waits for leadership and then joins the
// remaining peers listed in address.
func bootstrap(r *raft.Raft, localId string, transport *raft.NetworkTransport, boot bool, address []string, save bool) {
	if !save || !boot {
		return
	}
	log.Printf("bootstrap master %s", localId)
	self := raft.Server{
		ID:      raft.ServerID(localId),
		Address: transport.LocalAddr(),
	}
	r.BootstrapCluster(raft.Configuration{Servers: []raft.Server{self}})
	// Once this node becomes leader, pull the other peers in.
	go waitLeaderJoin(r, address, localId)
}

// waitLeaderJoin blocks until this node actually acquires leadership and
// then adds every other address in the cluster as a voter. Addresses are
// identified by their index in address (as a decimal string); the entry
// matching bootID — this node — is skipped. Any join failure aborts the
// process via log.Fatalf.
func waitLeaderJoin(r *raft.Raft, address []string, bootID string) {
	// LeaderCh delivers true on gaining leadership and false on losing
	// it; wait specifically for true rather than consuming the first
	// value blindly, which could be a leadership-loss notification.
	for isLeader := range r.LeaderCh() {
		if isLeader {
			break
		}
	}
	for i := range address {
		id := strconv.Itoa(i)
		if id == bootID {
			continue // don't join ourselves
		}
		if err := join(r, id, address); err != nil {
			// Fatalf exits the process; nothing after it runs.
			log.Fatalf("join cluster error: %v", err)
		}
	}
}

// join adds the node identified by followerID — a decimal index into
// adders — to the cluster as a voter. If a server with the same ID or
// address (but not both) is already configured, it is removed first; if
// both already match, the join is a no-op. Returns an error if the ID is
// not a valid index, or if any raft operation fails.
func join(r *raft.Raft, followerID string, adders []string) error {
	id, err := strconv.Atoi(followerID)
	if err != nil {
		// Was silently returning nil here, which reported success for
		// an unparseable ID; surface the problem to the caller instead.
		return fmt.Errorf("invalid follower id %q: %w", followerID, err)
	}
	if id < 0 || id >= len(adders) {
		return fmt.Errorf("follower id %d out of range (have %d addresses)", id, len(adders))
	}
	addr := adders[id]
	log.Printf("received join request for remote node %s at %s", followerID, addr)

	configFuture := r.GetConfiguration()
	if err = configFuture.Error(); err != nil {
		log.Printf("failed to get raft configuration: %v", err)
		return err
	}

	for _, srv := range configFuture.Configuration().Servers {
		// If a node already exists with either the joining node's ID or address,
		// that node may need to be removed from the config first.
		if srv.ID == raft.ServerID(followerID) || srv.Address == raft.ServerAddress(addr) {
			// However if *both* the ID and the address are the same, then nothing -- not even
			// a join operation -- is needed.
			if srv.Address == raft.ServerAddress(addr) && srv.ID == raft.ServerID(followerID) {
				log.Printf("node %s at %s already member of cluster, ignoring join request", followerID, addr)
				return nil
			}

			future := r.RemoveServer(srv.ID, 0, 0)
			if err = future.Error(); err != nil {
				log.Printf("error removing existing node %s at %s: %v", followerID, addr, err)
				return err
			}
		}
	}

	f := r.AddVoter(raft.ServerID(followerID), raft.ServerAddress(addr), 0, 0)
	if err = f.Error(); err != nil {
		return err
	}
	log.Printf("node %s at %s joined successfully", followerID, addr)
	return nil
}
