package kvnode

import (
	//"errors"
	"fmt"
	"github.com/sniperHW/fly/core/compress"
	"github.com/sniperHW/fly/core/queue"
	"github.com/sniperHW/fly/core/raft"
	"github.com/sniperHW/fly/errcode"
	"github.com/sniperHW/fly/net"
	"github.com/sniperHW/fly/net/cs"
	"github.com/sniperHW/fly/net/pb"
	flyproto "github.com/sniperHW/fly/proto"
	"os"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"
)

/*
 * Predefined Error values. Their meaning can be inferred from the variable
 * name alone, so Desc is set to the empty string for all of them to save
 * bytes on the wire.
 */
var (
	Err_version_mismatch errcode.Error = errcode.New(errcode.Errcode_version_mismatch, "")
	Err_record_exist     errcode.Error = errcode.New(errcode.Errcode_record_exist, "")
	Err_record_notexist  errcode.Error = errcode.New(errcode.Errcode_record_notexist, "")
	Err_record_unchange  errcode.Error = errcode.New(errcode.Errcode_record_unchange, "")
	Err_cas_not_equal    errcode.Error = errcode.New(errcode.Errcode_cas_not_equal, "")
	Err_timeout          errcode.Error = errcode.New(errcode.Errcode_timeout, "")
)

// kvnode is a single node of the kv service. It owns the client
// connections, the per-shard raft-backed stores, and the shared
// multi-raft transport. Create with NewKvNode, bring online with Start,
// tear down with Stop.
type kvnode struct {
	muC     sync.Mutex                  // guards clients
	clients map[*net.Socket]*net.Socket // live client sessions, keyed by themselves (used as a set)

	muS    sync.RWMutex // guards stores and shards
	stores map[int]*kvstore

	db        dbbackendI   // persistent backend shared by all stores
	listener  *cs.Listener // client-facing listener, created in Start
	id        int          // this node's id within the raft cluster
	mutilRaft *raft.MutilRaft
	stopOnce  sync.Once // makes Stop idempotent
	startOnce sync.Once // makes Start idempotent

	shards []int // shard ids hosted by this node; set in initStores
}

// verifyLogin is the login-check hook handed to the client listener.
// Authentication is currently a no-op: every login request is accepted.
func verifyLogin(req *flyproto.LoginReq) bool {
	// Placeholder — always accept.
	return true
}

//临时使用
func (this *kvnode) getStoreByUnikey(unikey string) *kvstore {
	this.muS.RLock()
	defer this.muS.RUnlock()
	shard := this.shards[StringHash(unikey)%len(this.shards)]
	return this.stores[shard]
}

// initStores creates and starts one kvstore per entry in shards, each
// backed by its own raft node and event queue. peers maps node id to
// raft URL and is shared by every store's raft node. Called once from
// Start while holding no locks.
func (this *kvnode) initStores(peers map[int]string, shards []int) {
	this.muS.Lock()
	defer this.muS.Unlock()
	this.shards = shards
	for _, s := range shards {
		// Per-store priority queue that serializes all events for the store.
		mainQueue := applicationQueue{
			q: queue.NewPriorityQueue(2, GetConfig().MainQueueMaxSize),
		}

		// Raft instance id encodes both the node id and the shard:
		// (id<<16)+s — assumes shard ids fit in 16 bits; TODO confirm.
		rn, snapshotterReady := raft.NewRaftNode(this.mutilRaft, mainQueue, (this.id<<16)+s, peers, false, GetConfig().Log.LogDir)
		store := &kvstore{
			rn:                 rn,
			db:                 this.db,
			mainQueue:          mainQueue,
			raftID:             rn.ID(),
			keyvals:            map[string]*kv{},
			proposalCompressor: &compress.ZipCompressor{},
			snapCompressor:     &compress.ZipCompressor{},
			unCompressor:       &compress.ZipUnCompressor{},
			snapshotter:        <-snapshotterReady, // blocks until the raft node hands over its snapshotter
			kvnode:             this,
			shard:              s,
		}
		store.lru.init()
		store.lease = newLease(store)
		this.stores[s] = store
		// Start the store's event-processing loop.
		store.serve()
	}
}

// startListener begins accepting client connections. For each new
// session it attaches a *conn as user data, registers the session in
// this.clients, wires up codecs and callbacks, and starts receiving.
// Requests are dispatched to the store that owns the request's unikey;
// Ping/Cancel/ReloadTableConf are handled inline.
func (this *kvnode) startListener() {
	this.listener.Serve(func(session *net.Socket, compress bool) {
		go func() {

			// Per-session state: tracks commands awaiting a reply.
			session.SetUserData(
				&conn{
					session:    session,
					pendingCmd: map[int64]replyAble{},
				},
			)

			this.muC.Lock()
			this.clients[session] = session
			this.muC.Unlock()

			// A client that misses two ping intervals is considered dead.
			session.SetRecvTimeout(flyproto.PingTime * 2)
			session.SetSendQueueSize(10000)

			// Transport compression is enabled only when it is both
			// configured on and supported by the client.
			session.SetInBoundProcessor(cs.NewInboundProcessor(pb.GetNamespace("request"), compress))
			session.SetEncoder(cs.NewEncoder(pb.GetNamespace("response"), compress))
			session.SetCloseCallBack(func(session *net.Socket, reason error) {
				// Drop any pending commands, then deregister the session.
				if u := session.GetUserData(); nil != u {
					switch u.(type) {
					case *conn:
						u.(*conn).clear()
					}
				}
				this.muC.Lock()
				delete(this.clients, session)
				this.muC.Unlock()
			})

			session.BeginRecv(func(session *net.Socket, v interface{}) {
				c := session.GetUserData()
				if nil == c {
					return
				}
				msg := v.(*cs.Message)
				cmd := msg.GetCmd()
				switch cmd {
				case flyproto.CmdType_Ping:
					// Heartbeat: echo the current time back.
					session.Send(cs.NewMessage(cs.CommonHead{}, &flyproto.PingResp{
						Timestamp: time.Now().UnixNano(),
					}))
				case flyproto.CmdType_Cancel:
					// Client abandons the listed requests; forget them.
					req := msg.GetData().(*flyproto.Cancel)
					for _, v := range req.GetSeqs() {
						c.(*conn).removePendingCmdBySeqno(v)
					}
				case flyproto.CmdType_ReloadTableConf:
					// Intentionally ignored here — presumably handled
					// elsewhere or not yet implemented; TODO confirm.
				default:
					// Data command: route by unikey to the owning store.
					unikey := msg.GetHead().UniKey

					store := this.getStoreByUnikey(unikey)
					if nil == store {
						respHead := msg.GetHead()
						respHead.Err = errcode.New(errcode.Errcode_error, fmt.Sprintf("%s not in current server", unikey))
						resp := cs.NewMessageWithCmd(cmd, respHead, nil)
						session.Send(resp)
					} else {
						// addCliMessage returns non-nil when the store's
						// queue is full; tell the client to retry.
						if nil != store.addCliMessage(clientRequest{
							from: c.(*conn),
							msg:  msg,
						}) {
							respHead := msg.GetHead()
							GetSugar().Infof("reply retry")
							respHead.Err = errcode.New(errcode.Errcode_retry, "server is busy, please try again!")
							resp := cs.NewMessageWithCmd(cmd, respHead, nil)
							session.Send(resp)
						}
					}
				}
			})
		}()
	})
}

// waitCondition blocks the caller until fn returns true, polling every
// 100ms. The first check happens after the initial sleep, matching the
// previous goroutine+WaitGroup implementation (that indirection added
// nothing — the caller blocked for the same duration either way).
// There is no timeout: if fn never returns true this blocks forever.
func waitCondition(fn func() bool) {
	for {
		time.Sleep(time.Millisecond * 100)
		if fn() {
			return
		}
	}
}

// Stop shuts the node down in a strict order (idempotent via stopOnce):
// stop accepting connections, stop reading from existing ones, drain all
// in-flight replies, stop the db backend, close client connections, stop
// every store, and finally stop the raft transport. Blocks until the
// whole sequence completes.
func (this *kvnode) Stop() {
	this.stopOnce.Do(func() {
		// Close the listener first so no new connections arrive.
		this.listener.Close()
		// Shut down the read side of existing connections so no new
		// requests are accepted.
		this.muC.Lock()
		for _, v := range this.clients {
			v.ShutdownRead()
		}
		this.muC.Unlock()

		//GetSugar().Info("------------------1--------------------------")

		// Wait until every store has replied to all in-flight requests.
		waitCondition(func() bool {
			this.muS.RLock()
			defer this.muS.RUnlock()
			for _, v := range this.stores {
				if atomic.LoadInt32(&v.wait4ReplyCount) != 0 {
					return false
				}
			}
			return true
		})

		this.db.stop()

		//GetSugar().Info("------------------2--------------------------")

		// Close the remaining client connections and wait for the close
		// callbacks to deregister them all.
		this.muC.Lock()
		for _, v := range this.clients {
			v.Close(nil, time.Second*5)
		}
		this.muC.Unlock()
		waitCondition(func() bool {
			this.muC.Lock()
			defer this.muC.Unlock()
			return len(this.clients) == 0
		})

		//GetSugar().Info("------------------3--------------------------")

		// Ask every store to stop, then wait until they have all
		// removed themselves from the map.
		this.muS.RLock()
		for _, v := range this.stores {
			v.stop()
		}
		this.muS.RUnlock()

		//GetSugar().Info("------------------4--------------------------")

		waitCondition(func() bool {
			this.muS.RLock()
			defer this.muS.RUnlock()
			return len(this.stores) == 0
		})
		//GetSugar().Info("------------------5--------------------------")

		// All stores are gone; the raft transport can now be stopped.
		this.mutilRaft.Stop()

		//GetSugar().Info("------------------6--------------------------")

	})
}

// Start brings the node online exactly once (guarded by startOnce): it
// creates the log directory, starts the db backend, opens the client
// listener, parses the cluster string, launches the multi-raft
// transport and the per-shard stores, and finally begins serving client
// connections.
//
// cluster is a comma-separated list of "<id>@<url>" entries. Returns
// the first setup error encountered; a malformed cluster string panics
// instead of returning an error.
func (this *kvnode) Start(cluster *string, shards []int) error {
	var err error
	this.startOnce.Do(func() {

		config := GetConfig()

		if err = os.MkdirAll(config.Log.LogDir, os.ModePerm); nil != err {
			return
		}

		err = this.db.start()

		if nil != err {
			return
		}

		this.listener, err = cs.NewListener("tcp", fmt.Sprintf("%s:%d", config.ServiceHost, config.ServicePort), verifyLogin)

		if nil != err {
			return
		}

		clusterArray := strings.Split(*cluster, ",")

		peers := map[int]string{}

		var selfUrl string

		for _, v := range clusterArray {
			t := strings.Split(v, "@")
			if len(t) != 2 {
				panic("invalid peer")
			}
			i, err := strconv.Atoi(t[0])
			if nil != err {
				panic(err)
			}
			peers[i] = t[1]
			if i == this.id {
				selfUrl = t[1]
			}
		}

		// NOTE(review): if this.id does not appear in the cluster
		// string, selfUrl stays "" and is passed to Serve as-is —
		// confirm whether that should be an error instead.
		go this.mutilRaft.Serve(selfUrl)

		this.initStores(peers, shards)

		this.startListener()

		GetSugar().Infof("flyfish start:%s:%d", config.ServiceHost, config.ServicePort)
	})
	return err
}

// NewKvNode constructs an idle kvnode bound to the given node id and db
// backend. The listener and the per-shard stores are created later by
// Start; the returned node does nothing until Start is called.
func NewKvNode(id int, db dbbackendI) *kvnode {
	n := &kvnode{
		id:        id,
		db:        db,
		mutilRaft: raft.NewMutilRaft(),
	}
	n.clients = map[*net.Socket]*net.Socket{}
	n.stores = map[int]*kvstore{}
	return n
}
