package http

import (
	"cluster-cache/cluster"
	"cluster-cache/common"
	"cluster-cache/exp"
	"cluster-cache/meta"
	"cluster-cache/model"
	"cluster-cache/rpc"
	"cluster-cache/store"
	"github.com/cespare/xxhash/v2"
	"github.com/goccy/go-json"
	"github.com/panjf2000/gnet/pkg/logging"
	"github.com/syndtr/goleveldb/leveldb"
	"net/url"
	"os"
	"strconv"
	"strings"
	"sync"
	"time"
)

// Router dispatches parsed HTTP requests to the key-value and sorted-set
// models and replicates every mutation through the raft FSM.
type Router struct {
	sync.Mutex                    // guards cluster (re)initialization in UpdateCluster
	config *cluster.Config        // persisted cluster membership/configuration
	kv     model.KeyValue         // string key/value model
	ss     model.SortSet          // sorted-set model

	fsm *cluster.SingleFsm // raft state machine; nil until the cluster is configured
}

const dataDir = "/tmp/data"           // root directory for raft/FSM state (bootId is appended)
const configPath = "/tmp/data/config" // persisted cluster-config file prefix (bootId is appended)
const masterId = 1                    // raft node id treated as the designated master
const LockTableSize = 1024 * 8        // shard count for the kv / sorted-set lock tables
const raftTimeout = 5 * time.Second   // per-write raft apply timeout
const levelDbSize = 3                 // number of legacy LevelDB shards imported at init
const DataDir = "/data/data"          // legacy LevelDB shard path prefix (shard index appended)
const BucketIndexSize = 1024 * 1024 // 80MB -- presumably the resulting index footprint; verify against store.NewBucketIndex

var okString = []byte("ok") // shared 200-response body
var bootId = ""             // instance id suffix; set once in NewRouter

// NewRouter builds the router for the instance identified by id: it creates
// the data directory, loads any persisted cluster configuration, and
// initializes the key-value and sorted-set models. If a cluster config was
// already saved on disk, the raft FSM is started immediately; otherwise the
// FSM stays nil until an /updateCluster request arrives.
// Panics on filesystem errors (startup-time only).
func NewRouter(id string) *Router {
	err := store.MakeDir(dataDir)
	if err != nil {
		panic(err)
	}
	bootId = id
	// Initialize the cluster configuration; checks whether cluster IPs were
	// already saved by a previous run.
	config := cluster.NewConfig(configPath + bootId)
	router := &Router{
		config: config,
	}
	// Initialize the key-value and sorted-set models and load data from disk.
	router.kv = exp.NewFastMap(LockTableSize, &common.CityHash{})
	router.ss = model.NewMultiSortSet(LockTableSize)
	if !config.NeedInit() {
		// A saved config exists: rebuild the FSM right away and mark the
		// router as started.
		mem := &store.MemoryStore{
			KV: router.kv, SS: router.ss,
			Indexes: store.NewBucketIndex(BucketIndexSize),
		}
		hosts, cid := config.GetCluster()
		router.fsm = cluster.NewSingleFsm(mem, hosts, cid, masterId, dataDir+bootId, config)
		router.fsm.GetLeader()
		router.fsm.Start = true
	}
	return router
}

// builderPool caches strings.Builder values for JSON response building.
// NOTE(review): the pool stores strings.Builder BY VALUE. Get's type
// assertion copies the builder, and a non-zero strings.Builder copied by
// value panics on its next write ("illegal use of non-zero Builder copied
// by value"); callers also never Reset it, so a reused builder would keep
// old content. Pool *strings.Builder and Reset after Get instead.
var builderPool = sync.Pool{New: func() interface{} {
	return strings.Builder{}
}}

var emptyStringArray = make([]string, 0)

// Publish routes a parsed request by its HTTP method: 'G' (GET) goes to the
// read handlers, 'P' (POST) to the write handlers. Any request that produced
// no output is answered with 400.
func (s *Router) Publish(codec *rpc.HttpCodec, body []byte) {
	method := codec.Parser.Method[0]
	if method == 'G' {
		s.Get(codec)
	} else if method == 'P' {
		s.Post(codec, body)
	}
	if codec.OutBuf.Len() == 0 {
		codec.Append400Text()
	}
}

// Get dispatches GET requests on the first letter of the path after '/':
// i=init, q=query, d=del, z=zrmv. Unknown paths produce no output and are
// turned into 400 by Publish.
func (s *Router) Get(codec *rpc.HttpCodec) {
	path := codec.Parser.Path
	if len(path) <= 2 {
		return
	}
	switch path[1] {
	case 'i': // /init
		s.Init(codec)
	case 'q': // /query
		s.Query(codec)
	case 'd': // /del
		s.Del(codec)
	case 'z': // /zrmv
		s.ZRmv(codec)
	}
}

// Post dispatches POST requests on path letters: u=updateCluster, a=add,
// l=list, b=batch, and 'z' followed by a=zadd or r=zrange. A body that fails
// to unmarshal is silently ignored (no output, so Publish answers 400).
func (s *Router) Post(codec *rpc.HttpCodec, body []byte) {
	path := codec.Parser.Path
	if len(path) <= 2 {
		return
	}

	switch path[1] {
	case 'u': // /updateCluster
		var config common.ClusterConfig
		if err := json.Unmarshal(body, &config); err == nil {
			s.UpdateCluster(codec, config)
		}
	case 'a': // /add
		var kv common.KeyValue
		if err := json.Unmarshal(body, &kv); err == nil {
			s.Add(codec, &kv)
		}
	case 'l': // /list
		var keys []string
		if err := json.Unmarshal(body, &keys); err == nil {
			s.List(codec, keys)
		}
	case 'b': // /batch
		var kvs common.KeyValues
		if err := json.Unmarshal(body, &kvs.Data); err == nil {
			s.Batch(codec, &kvs)
		}
	case 'z':
		if len(path) <= 3 {
			return
		}
		switch path[2] {
		case 'a': // /zadd
			var vs common.ValueScore
			if err := json.Unmarshal(body, &vs); err == nil {
				s.ZAdd(codec, &vs)
			}
		case 'r': // /zrange
			var rs common.RangeScore
			if err := json.Unmarshal(body, &rs); err == nil {
				s.ZRange(codec, &rs)
			}
		}
	}
}

// UpdateCluster initializes the raft cluster from the posted host list.
// Idempotent: if the cluster is already initialized it logs the current
// leader and replies ok. Legacy data is loaded asynchronously; fsm.Start
// flips to true only once loading completes.
// NOTE(review): *config.Index is dereferenced without a nil check — a body
// missing "index" would panic here; confirm callers always set it.
func (s *Router) UpdateCluster(codec *rpc.HttpCodec, config common.ClusterConfig) {
	s.Lock()
	defer s.Unlock()
	if !s.config.NeedInit() {
		leader, id := s.fsm.GetLeader()
		logging.Infof("already init cluster leader=%s,leaderId=%s, index=%d", leader, id, s.fsm.Raft.LastIndex())
		codec.Append200TextData(okString)
		return
	}
	logging.Infof("index=%d", *config.Index)
	if s.fsm == nil {
		// Reject a config containing any empty host entry.
		for i, host := range config.Hosts {
			logging.Infof("host%d=%s", i, host)
			if len(host) == 0 {
				return
			}
		}
		mem := &store.MemoryStore{
			KV: s.kv, SS: s.ss,
			Indexes: store.NewBucketIndex(BucketIndexSize),
		}
		s.fsm = cluster.NewSingleFsm(mem, config.Hosts, *config.Index-1, masterId, dataDir+bootId, s.config)
	}
	// Load the data asynchronously; Start is set to true when loading completes.
	go func() {
		s.fsm.GetLeader()
		if s.fsm.IsLeader() {
			// The leader is responsible for importing the legacy LevelDB shards.
			for i := 1; i <= levelDbSize; i++ {
				path := DataDir + strconv.Itoa(i)
				s.LoadingDataFromLevelDb(path)
			}
			// The leader notifies the other nodes that the cluster is ready.
			buf := meta.BuildClusterInitSuccess()
			futures := s.fsm.Notice(buf)
			for i := 0; i < len(futures); i++ {
				futures[i].WaitDone()
			}
		}
		logging.Infof("loading data from level db success")
		s.fsm.Start = true
	}()
	codec.Append200TextData(okString)
}

// LoadingDataFromLevelDb replays every key/value pair stored in the LevelDB
// at path through the raft log, waits for all writes to be applied, then
// closes and removes the database. Panics on any storage error — this runs
// only during one-time cluster initialization.
func (s *Router) LoadingDataFromLevelDb(path string) {
	logging.Infof("loading from %s", path)
	old, err := leveldb.OpenFile(path, nil)
	if err != nil {
		panic(err)
	}
	iter := old.NewIterator(nil, nil)
	cnt := 0
	futures := make([]rpc.CallFuture, 0)
	kv := &common.KeyValue{}
	for iter.Next() {
		cnt++
		kv.Key = string(iter.Key())
		kv.Value = string(iter.Value())
		// 8 bytes of framing plus the payload — matches the original sizing;
		// assumed sufficient for meta.EncodeAdd (TODO confirm).
		buf := make([]byte, 8+len(kv.Key)+len(kv.Value))
		n := meta.EncodeAdd(kv, buf)
		single := s.fsm.Write(buf[:n], raftTimeout)
		futures = append(futures, single...)
	}
	// goleveldb iterators must be released after use and the iteration error
	// checked; the previous code leaked the iterator and ignored errors.
	iter.Release()
	if err = iter.Error(); err != nil {
		panic(err)
	}
	for i := 0; i < len(futures); i++ {
		futures[i].WaitDone()
	}
	logging.Infof("loading from %s size=%d", path, cnt)
	if err = old.Close(); err != nil {
		panic(err)
	}
	if err = os.RemoveAll(path); err != nil {
		panic(err)
	}
}

// Init reports whether the node has finished cluster initialization:
// 200 "ok" once the FSM exists and has started, otherwise no output
// (Publish turns that into a 400).
func (s *Router) Init(codec *rpc.HttpCodec) {
	// Guard fsm == nil like every other handler: before UpdateCluster has
	// run, dereferencing s.fsm.Start would be a nil-pointer panic.
	if s.fsm != nil && s.fsm.Start {
		codec.Append200TextData(okString)
	}
}

// Query reads a single key. The key follows the "/query" prefix in the path
// (Path[6:]) and is URL-unescaped before lookup. Replies 404 when the key is
// absent, 200 with the raw value otherwise.
func (s *Router) Query(codec *rpc.HttpCodec) {
	if s.fsm == nil || !s.fsm.Start {
		return
	}
	// query
	key := common.GetKey(codec.Parser.Path[6:])
	if len(key) == 0 {
		return
	}
	ks, err := url.QueryUnescape(string(key))
	if err != nil {
		return
	}
	// NOTE(review): the hash is computed over the still-escaped key bytes,
	// while the lookup below uses the unescaped string — these differ for
	// keys containing %-escapes; confirm the write path keys LastIndex the
	// same way.
	hash := xxhash.Sum64(key)
	lastIndex := s.fsm.LastIndex(hash)
	// Wait until the partition has applied all known writes (read-your-writes).
	s.fsm.WaitApply(lastIndex)
	v := s.kv.Query(common.StringToBytes(ks))
	if v == nil {
		codec.Append404Text()
	} else {
		codec.Append200TextData(v)
	}
}

// Del removes a key via the raft log. The key follows the "/del" prefix in
// the path and is URL-unescaped before encoding. Replies 200 once every
// replication future has completed.
func (s *Router) Del(codec *rpc.HttpCodec) {
	if s.fsm == nil || !s.fsm.Start {
		return
	}
	rawKey := common.GetKey(codec.Parser.Path[4:])
	if len(rawKey) == 0 {
		return
	}
	unescaped, err := url.QueryUnescape(string(rawKey))
	if err != nil {
		return
	}
	n := meta.EncodeDel([]byte(unescaped), codec.FixBuf)
	futures := s.fsm.Write(codec.FixBuf[:n], raftTimeout)
	for i := range futures {
		futures[i].WaitDone()
	}
	codec.Append200Text()
}

// ZRmv removes a single member from a sorted set via the raft log. Key and
// member follow the "/zrmv" prefix in the path; both are URL-unescaped.
// Replies 200 once replication has completed.
func (s *Router) ZRmv(codec *rpc.HttpCodec) {
	if s.fsm == nil || !s.fsm.Start {
		return
	}
	rawKey, rawValue := common.GetKeyValue(codec.Parser.Path[5:])
	if len(rawKey) == 0 || len(rawValue) == 0 {
		return
	}
	setName, err := url.QueryUnescape(string(rawKey))
	if err != nil {
		return
	}
	member, err := url.QueryUnescape(string(rawValue))
	if err != nil {
		return
	}
	n := meta.EncodeZRmv([]byte(setName), []byte(member), codec.FixBuf)
	futures := s.fsm.Write(codec.FixBuf[:n], raftTimeout)
	for i := range futures {
		futures[i].WaitDone()
	}
	codec.Append200Text()
}

// Add stores a single key/value pair through the raft log. An empty key or
// value is rejected with 400; otherwise replies 200 once replication is done.
func (s *Router) Add(codec *rpc.HttpCodec, val *common.KeyValue) {
	if s.fsm == nil || !s.fsm.Start {
		return
	}
	if len(val.Key) == 0 || len(val.Value) == 0 {
		codec.Append400Text()
		return
	}
	n := meta.EncodeAdd(val, codec.FixBuf)
	futures := s.fsm.Write(codec.FixBuf[:n], raftTimeout)
	for i := range futures {
		futures[i].WaitDone()
	}
	codec.Append200Text()
}

// List answers a multi-get: for each requested key that exists, its value is
// included in the JSON response built by common.BuildKVJson. Replies 200
// with "[]" when none of the keys are present.
func (s *Router) List(codec *rpc.HttpCodec, val []string) {
	if s.fsm == nil || !s.fsm.Start {
		return
	}
	keys := make([][]byte, len(val))
	values := make([][]byte, len(val))
	size := 0
	for i, key := range val {
		keys[i] = common.StringToBytes(key)
	}
	for i := range keys {
		// Wait until each key's partition has applied all known writes so the
		// read observes the latest state (read-your-writes).
		hash := xxhash.Sum64(keys[i])
		s.fsm.WaitApply(s.fsm.LastIndex(hash))
		if v := s.kv.Query(keys[i]); v != nil {
			values[i] = v
			size++
		}
	}
	if size == 0 {
		codec.Append200JsonData([]byte("[]"))
		return
	}
	// Use a local builder. The previous code pulled a strings.Builder BY
	// VALUE out of builderPool: the type assertion copies the builder, and a
	// non-zero strings.Builder copied by value panics on the next write
	// ("illegal use of non-zero Builder copied by value"); it was also never
	// Reset, so a reused builder kept stale content.
	var builder strings.Builder
	common.BuildKVJson(&builder, keys, values, size)
	codec.Append200JsonData([]byte(builder.String()))
}

// Batch encodes every non-empty key/value pair from the request into one
// raft payload, replicates it, and replies 200 once applied. A batch with no
// valid pairs produces no output (Publish answers 400).
func (s *Router) Batch(codec *rpc.HttpCodec, val *common.KeyValues) {
	if s.fsm == nil || !s.fsm.Start {
		return
	}
	payload := make([]byte, 0)
	accepted := 0
	for _, item := range val.Data {
		if len(item.Key) == 0 || len(item.Value) == 0 {
			continue
		}
		scratch := make([]byte, len(item.Key)+len(item.Value)+16)
		n := meta.EncodeAddSingle(item.Key, item.Value, scratch)
		payload = append(payload, scratch[:n]...)
		accepted++
	}
	if accepted == 0 {
		return
	}
	futures := s.fsm.Write(payload, raftTimeout)
	for i := range futures {
		futures[i].WaitDone()
	}
	codec.Append200Text()
}

// ZAdd inserts a scored member into the sorted set named by the path key
// (after the "/zadd" prefix, URL-unescaped) and replicates the write via
// raft. Missing score or empty member produces no output.
func (s *Router) ZAdd(codec *rpc.HttpCodec, val *common.ValueScore) {
	if s.fsm == nil || !s.fsm.Start {
		return
	}
	rawKey := common.GetKey(codec.Parser.Path[5:])
	if len(rawKey) == 0 {
		return
	}
	setName, err := url.QueryUnescape(string(rawKey))
	if err != nil {
		return
	}
	if val.Score == nil || len(val.Value) == 0 {
		return
	}
	n := meta.EncodeZAdd([]byte(setName), val, codec.FixBuf)
	futures := s.fsm.Write(codec.FixBuf[:n], raftTimeout)
	for i := range futures {
		futures[i].WaitDone()
	}
	codec.Append200Text()
}

// ZRange returns the members of the sorted set named by the path key (after
// the "/zrange" prefix, URL-unescaped) whose scores lie in
// [MinScore, MaxScore], as JSON built by common.BuildVsJson. Replies 200
// with "[]" when the range is empty.
func (s *Router) ZRange(codec *rpc.HttpCodec, val *common.RangeScore) {
	if s.fsm == nil || !s.fsm.Start {
		return
	}
	key := common.GetKey(codec.Parser.Path[7:])
	if len(key) == 0 {
		return
	}
	ks, err := url.QueryUnescape(string(key))
	if err != nil {
		return
	}
	if val.MinScore == nil || val.MaxScore == nil {
		return
	}
	// Wait until the key's partition has applied all known writes so the
	// read observes the latest state (read-your-writes).
	hash := xxhash.Sum64(key)
	s.fsm.WaitApply(s.fsm.LastIndex(hash))
	min := strconv.FormatFloat(*val.MinScore, 'f', -1, 64)
	max := strconv.FormatFloat(*val.MaxScore, 'f', -1, 64)
	values, scores := s.ss.Range(ks, min, max)
	// len() on a nil slice is 0, so these checks subsume the old nil checks.
	if len(values) == 0 || len(scores) == 0 {
		codec.Append200JsonData([]byte("[]"))
		return
	}
	// Use a local builder. The previous code pulled a strings.Builder BY
	// VALUE out of builderPool: the type assertion copies the builder, and a
	// non-zero strings.Builder copied by value panics on the next write; it
	// was also never Reset, so a reused builder kept stale content.
	var builder strings.Builder
	common.BuildVsJson(&builder, values, scores)
	codec.Append200JsonData([]byte(builder.String()))
}
