package e3dds

import (
	"fmt"
	"math"
	"sync/atomic"

	"gitee.com/ameise84/e3dds/internal/fast_chan"
	"gitee.com/ameise84/e3dds/internal/log"
	"gitee.com/ameise84/e3dds/internal/message"
	"gitee.com/ameise84/e3lock"
	"gitee.com/ameise84/e3pool/go_pool"
	time "gitee.com/ameise84/e3time"
	"gitee.com/ameise84/e3utils/safe"
	"gitee.com/ameise84/e3utils/str_conv"
	"github.com/pkg/errors"
)

var (
	// _gCallTimeOut bounds how long postCall blocks waiting for a response
	// before giving up and reporting "call timeout".
	_gCallTimeOut = time.Duration(30) * time.Second
)

const (
	// timerLiveCheck is an engine-internal timer kind. It is deliberately
	// placed above math.MaxUint16 so it can never collide with the
	// uint16 timer kinds exposed to users via SetTimer/SetTick.
	timerLiveCheck = iota + math.MaxUint16 + 1
)

// newActorLocal builds a local actor with a mailbox of msgSize slots and
// attaches the goroutine runner that will later drive its message loop.
func newActorLocal(msgSize int32) *actorLocal {
	al := &actorLocal{
		timerMan:    newTimerManger(),
		msgQueue:    fast_chan.New[*message.Msg](msgSize),
		rspChan:     make(chan *message.Msg, 1), // one buffered slot so a single pending response never blocks the sender
		exitChan:    make(chan struct{}),
		timeOutCall: make(map[uint64]time.Time, 8),
	}
	al.runner = go_pool.NewGoRunner(al, "actor local loop")
	return al
}

// actorLocal is the in-process implementation of an actor: it owns a
// mailbox (msgQueue), a timer manager, and the bookkeeping for blocking
// rpc calls (waitRsp/waitCallId/rspChan). All rpc wait state is guarded
// by mu.
type actorLocal struct {
	message.Source                                // identity: service / session / clerk IDs
	timerMan     timerManager                     // per-actor timers; events delivered in loop()
	actor        Actor                            // user behavior (Load/Unload/OnTimer/OnRecvMsg)
	ctx          any                              // opaque user context (BindContext/GetContext)
	actorFactory *entity                          // owning factory; lifecycle callbacks and handler lookup
	runner       go_pool.GoRunner                 // goroutine that runs loop()
	msgQueue     fast_chan.IQueue[*message.Msg]   // inbound mailbox
	callId       atomic.Uint64                    // monotonically increasing id for outgoing messages
	waitRsp      bool                             // true while postCall is blocked waiting; guarded by mu
	waitCallId   uint64                           // id of the call postCall is waiting on; guarded by mu
	rspChan      chan *message.Msg                // hands the matched response to the waiting postCall (cap 1)
	exitChan     chan struct{}                    // signals loop() to exit during stop()
	alive        atomic.Bool                      // set by start(), cleared by stop()
	aliveTime    time.Time                        // last activity time, used by the timerLiveCheck timer
	mu           e3lock.SpinLock                  // protects waitRsp/waitCallId/running and mailbox admission
	running      bool                             // true once actor.Load succeeded; gates rpc message admission
	stopOnce     e3lock.Once                      // makes stop() idempotent per start cycle
	timeOutCall  map[uint64]time.Time             // call ids that timed out, to classify late responses
	rpcProxy     *message.Source                  // push target set via attachPushHandler (rpc path)
	netProxy     NetContext                       // direct network push target, preferred over rpcProxy
	isNetActor   bool                             // NOTE(review): not referenced in this chunk — set/read elsewhere?
}

// OnPanic logs a panic recovered by the goroutine runner, tagged with
// this actor's identity.
func (ts *actorLocal) OnPanic(err error) {
	logger := log.GetLogger()
	logger.Error("OnPanic").Object(ts).Err(err).Println()
}

// E3LogMarshall renders a human-readable identity for the logging
// framework: "actor local[<serviceID>:<clerkID>]".
func (ts *actorLocal) E3LogMarshall() string {
	const layout = "actor local[%s:%s]"
	return fmt.Sprintf(layout, ts.ServiceID(), ts.ClerkID())
}

// GetSource exposes this actor's identity (service/session/clerk).
func (ts *actorLocal) GetSource() *Source {
	return &ts.Source
}

// OnRecvNetMsg enqueues a message arriving from the network layer into
// the actor's mailbox. Admission is gated on the alive flag under the
// spin lock, so stop() cannot race a late enqueue past the flag check.
// NOTE(review): the rpc path (onRecvRpcMsg) gates on ts.running instead
// of ts.alive — confirm the asymmetry is intentional.
func (ts *actorLocal) OnRecvNetMsg(netMsg message.Message) error {
	msg := netMsg.(*message.Msg)
	ts.mu.Lock()
	defer ts.mu.Unlock()
	if !ts.alive.Load() {
		return ErrorActorIsNotAlive
	}
	msg.CheckStepTime("actor.Rpc.OnRecvMsg")
	err := ts.msgQueue.Write(msg)
	if err != nil {
		// The queue's own error is discarded; callers only need the
		// engine-level "queue full" classification.
		return ErrorMessageQueueIsFull
	}
	return nil
}

// BindNetContext attaches a live network connection; Push will prefer it
// over the rpc proxy until UnBindNetContext is called.
func (ts *actorLocal) BindNetContext(ctx NetContext) {
	ts.netProxy = ctx
}

// UnBindNetContext detaches the network connection; subsequent pushes
// fall back to the rpc proxy (if any).
func (ts *actorLocal) UnBindNetContext() {
	ts.netProxy = nil
}

// OnResponse routes an rpc response to the goroutine blocked in postCall.
// The send on rspChan (cap 1) cannot block: waitRsp is only true while
// postCall is selecting on the channel, and both sides manipulate
// waitRsp and the channel under the same spin lock.
func (ts *actorLocal) OnResponse(rsp message.Response) {
	msg := rsp.(*message.Msg)
	msg.CheckStepTime("actor.Rpc.OnResponse")
	ts.mu.Lock()
	defer ts.mu.Unlock()
	rspID := rsp.GetMsgID()
	if !ts.waitRsp {
		// No waiter: the call timed out earlier, or this response
		// matches no call at all — log which.
		ts.checkResponseFailedReason(rsp)
		return
	}
	if ts.waitCallId == rspID {
		// Hand the waiter its own pooled copy; the caller retains
		// ownership of (and will free) the original msg.
		newRsp := message.TakeMessage()
		newRsp.CloneData(msg)
		ts.rspChan <- newRsp
	} else {
		ts.checkResponseFailedReason(rsp)
	}
}

// GetActor returns the user behavior bound to this actor.
func (ts *actorLocal) GetActor() Actor {
	return ts.actor
}

// BindContext stores an opaque user context on the actor.
func (ts *actorLocal) BindContext(ctx any) {
	ts.ctx = ctx
}

// GetContext returns the context previously stored via BindContext.
func (ts *actorLocal) GetContext() any {
	return ts.ctx
}

// AttachServicePush asks the target actor to record this actor as its
// push proxy (the target handles attachPushHandler in doEnginMsg and
// copies the sender's source into rpcProxy).
func (ts *actorLocal) AttachServicePush(toSrvID, toCID string) {
	msg := ts.buildMsg(message.MsgEngine, toSrvID, "", toCID, attachPushHandler, nil)
	_gRouterProvider.post(msg)
}

// Push delivers a push message toward the client side.
// If a live network context is bound it is used directly; otherwise the
// message is routed to the rpc proxy recorded via attachPushHandler.
// When neither exists the push is silently dropped.
func (ts *actorLocal) Push(protoc string, body []byte) {
	if ts.netProxy != nil {
		ts.netProxy.OnPush(protoc, body)
		return
	}

	if ts.rpcProxy == nil {
		return
	}

	// Consistency: reuse buildMsg instead of hand-assembling the message
	// (the original duplicated buildMsg's kind/id/from/to/body sequence).
	msg := ts.buildMsg(message.MsgPush, ts.rpcProxy.ServiceID(), ts.rpcProxy.SessionID(), ts.rpcProxy.ClerkID(), protoc, body)
	msg.SetRspHandler(nil) // pushes never expect a response
	_gRouterProvider.post(msg)
}

// Cast sends a fire-and-forget message to a clerk resolved by service id;
// no response handler is attached.
func (ts *actorLocal) Cast(toSrvID, toCID string, protoc string, body []byte) {
	m := ts.buildMsg(message.MsgCast, toSrvID, "", toCID, protoc, body)
	m.SetRspHandler(nil)
	_gRouterProvider.post(m)
}

// Call issues a synchronous request to a clerk resolved by service id
// and blocks (via postCall) until a response or timeout.
func (ts *actorLocal) Call(toSrvID, toCID string, protoc string, body []byte, rsp message.Response) error {
	req := ts.buildMsg(message.MsgCall, toSrvID, "", toCID, protoc, body)
	return ts.postCall(req, rsp)
}

// CastFix sends a fire-and-forget message to an exact source
// (service + session + clerk).
func (ts *actorLocal) CastFix(to *message.Source, protoc string, body []byte) {
	msg := ts.buildMsg(message.MsgCast, to.ServiceID(), to.SessionID(), to.ClerkID(), protoc, body)
	// Consistency fix: Cast explicitly clears the response handler on the
	// pooled message; do the same here so a recycled message cannot carry
	// a stale handler from a previous use.
	msg.SetRspHandler(nil)
	_gRouterProvider.post(msg)
}

// CallFix issues a synchronous request to an exact source
// (service + session + clerk) and blocks until response or timeout.
func (ts *actorLocal) CallFix(to *message.Source, protoc string, body []byte, rsp message.Response) error {
	req := ts.buildMsg(message.MsgCall, to.ServiceID(), to.SessionID(), to.ClerkID(), protoc, body)
	return ts.postCall(req, rsp)
}

// Redirect re-posts an in-flight request to another clerk.
// NOTE(review): BuildForward presumably preserves the original sender so
// the eventual response routes back to the original caller — confirm
// against message.Msg.BuildForward.
func (ts *actorLocal) Redirect(toSrvID, toCID string, msg message.Request) {
	newMsg := msg.(*message.Msg).BuildForward(toSrvID, "", toCID)
	_gRouterProvider.post(newMsg)
}

// ForwardCall re-issues the payload of an incoming request as a fresh
// Call from THIS actor, so the response routes back here (unlike
// Redirect, which keeps the original caller). Blocks until response or
// timeout.
func (ts *actorLocal) ForwardCall(toSrvID, toCID string, msg message.Request, rsp message.Response) error {
	// Consistency: reuse buildMsg instead of duplicating its
	// kind/id/from/to/body assembly inline (the sequence was identical).
	newMsg := ts.buildMsg(message.MsgCall, toSrvID, "", toCID, msg.GetProtocName(), msg.GetBody())
	return ts.postCall(newMsg, rsp)
}

// SetTimer arms a one-shot timer of (kind, id) firing after dur.
func (ts *actorLocal) SetTimer(kind uint16, id uint32, dur time.Duration, ctx TimerContext) {
	ts.timerMan.SetTimer(timerKind32(kind), id, dur, ctx)
}

// SetTimerWithFirst arms a one-shot timer firing at the absolute time fireAt.
func (ts *actorLocal) SetTimerWithFirst(kind uint16, id uint32, fireAt time.Time, ctx TimerContext) {
	ts.timerMan.SetTimerWithFirst(timerKind32(kind), id, fireAt, ctx)
}

// SetTick arms a repeating timer firing every dur, repeat times.
func (ts *actorLocal) SetTick(kind uint16, id uint32, dur time.Duration, repeat int, ctx TimerContext) {
	ts.timerMan.SetTick(timerKind32(kind), id, dur, repeat, ctx)
}

// SetTickWithFirst arms a repeating timer whose first firing is at the
// absolute time fireAt, then every dur, repeat times.
func (ts *actorLocal) SetTickWithFirst(kind uint16, id uint32, fireAt time.Time, dur time.Duration, repeat int, ctx TimerContext) {
	ts.timerMan.SetTickWithFirst(timerKind32(kind), id, fireAt, dur, repeat, ctx)
}

// KillTimer cancels the timer identified by (kind, id).
func (ts *actorLocal) KillTimer(kind uint16, id uint32) {
	ts.timerMan.KillTimer(timerKind32(kind), id)
}

// start transitions the actor to alive and launches its message loop.
// It returns false only when the bootstrap engine message cannot be
// queued; note it also returns true when the CAS fails because the actor
// was already alive — "already running" is treated as success.
func (ts *actorLocal) start(stayTime time.Duration) bool {
	if ts.alive.CompareAndSwap(false, true) {
		// Seed the mailbox with actorOnRunning so the loop runs
		// actor.Load (see doEnginMsg) before any user traffic.
		msg := message.TakeMessage()
		msg.SetKind(message.MsgEngine)
		msg.SetMsg(actorOnRunning, nil)
		msg.SetToActor(ts.ServiceID(), "", ts.ClerkID())
		if err := ts.msgQueue.Write(msg); err != nil {
			// Bootstrap failed: roll back the alive flag.
			ts.alive.Store(false)
			return false
		}
		ts.stopOnce.Reset()
		_ = ts.runner.AsyncRun(ts.loop)
		if stayTime > 0 {
			// Arm the idle-liveness check slightly past the stay time so
			// aliveTime comparisons in doTimer have a margin.
			ts.timerMan.SetTimer(timerLiveCheck, 0, stayTime+time.Second, nil)
		}
	}
	return true
}

// stop shuts the actor down exactly once per start cycle:
// stop admission -> exit the loop -> drain fired timers -> unload the
// behavior -> (optionally) repost pending work -> notify the factory.
// isMove indicates the actor is being migrated rather than destroyed.
func (ts *actorLocal) stop(isMove bool) {
	ts.stopOnce.Do(func() {
		// Gate off new rpc admissions first (onRecvRpcMsg checks running).
		ts.mu.Lock()
		ts.running = false
		ts.mu.Unlock()
		ts.timerMan.KillTimer(timerLiveCheck, 0) //关闭活跃检查定时器 (kill the liveness-check timer)
		// Unblock loop(); it exits on exitChan, then Wait() joins it.
		ts.exitChan <- struct{}{}
		ts.runner.Wait()
		ts.mu.Lock()
		ts.alive.Store(false)
		ts.mu.Unlock()

	loopTimer:
		for { //执行已触发的定时器,可能会有call发生 (run already-fired timers; they may issue calls)
			select {
			case evt := <-ts.timerMan.wrap.C():
				ts.doTimer(evt)
			default:
				break loopTimer
			}
		}
		ts.actor.Unload(isMove)            //通知Behavior持久化数据 (tell the behavior to persist its data)
		if !ts.actorFactory.isNetService { //net不需要重新投递 (net actors do not repost)
			if isMove {
				switch ts.actorFactory.BLMode {
				case BLModeHash:
					// Hash-balanced actors announce the move so the new
					// location can pick the clerk up.
					moveMsg := message.TakeMessage()
					moveMsg.SetKind(message.MsgEngine)
					moveMsg.SetMsg(actorMoving, nil)
					moveMsg.SetToActor(ts.ServiceID(), "", ts.ClerkID())
					_gRouterProvider.post(moveMsg)
				default:
				}
			}
			ts.msgQueue.Read(_gRouterProvider.post) //重新投递未处理消息 (repost unprocessed messages)
		}
		ts.rpcProxy = nil
		ts.actorFactory.onActorStopped(ts.ClerkID())
	})
}

// init binds the actor to its factory and stamps its identity from the
// factory's service/session plus the given clerk id.
func (ts *actorLocal) init(fac *entity, cid string) {
	ts.actorFactory = fac
	ts.Source.Service = fac.ServiceID
	ts.Source.Session = fac.SessionID
	ts.Source.Clerk = cid
}

// onRecvRpcMsg handles a message arriving from the rpc router.
// Responses bypass the mailbox and are delivered straight to the waiter;
// requests are enqueued only while the actor is running, otherwise they
// are returned to the router for redelivery (postUndo).
func (ts *actorLocal) onRecvRpcMsg(msg *message.Msg) {
	if msg.IsResponse() {
		ts.OnResponse(msg)
		// OnResponse clones what it needs; the original goes back to the pool.
		message.FreeMessage(msg)
		return
	}

	ts.mu.Lock()
	defer ts.mu.Unlock()
	if !ts.running {
		// Not (yet/anymore) running: hand the message back to the router
		// instead of dropping it.
		_gRouterProvider.postUndo(msg)
		return
	}

	msg.CheckStepTime("actor.Rpc.OnRecvMsg")
	err := ts.msgQueue.Write(msg)
	if err != nil {
		err = ErrorMessageQueueIsFull
		doMsgPostErr(msg, err)
	}
}

// buildMsg assembles a pooled message of the given kind, addressed from
// this actor to (srvID, ssid, cid), carrying protoc/body as payload.
func (ts *actorLocal) buildMsg(callKind message.Kind, srvID, ssid, cid string, protoc string, body []byte) *message.Msg {
	m := message.TakeMessage()
	m.SetKind(callKind)
	m.SetID(ts.callId.Add(1)) // per-actor monotonically increasing id
	m.SetFromActor(ts.ServiceID(), ts.SessionID(), ts.ClerkID())
	m.SetToActor(srvID, ssid, cid)
	m.SetMsg(protoc, body)
	return m
}

// postCall posts a Call-type message and blocks until a matching
// response arrives on rspChan or _gCallTimeOut elapses. On timeout the
// call id is remembered in timeOutCall so a late response can be
// classified as "slow" rather than "unknown".
func (ts *actorLocal) postCall(msg *message.Msg, rsp message.Response) error {
	msg.SetRspHandler(ts)
	ts.mu.Lock()
	ts.waitRsp = true
	ts.waitCallId = msg.GetMsgID()
	ts.mu.Unlock()

	reqJson := msg.ToJson() //放在这里是因为Post以后msg有管辖权问题.有可能已经被丢弃了 (snapshot before post: ownership transfers on post and msg may already be freed)
	initTime := time.Now()
	_gRouterProvider.post(msg)
	timeout := time.After(_gCallTimeOut)
	select {
	case rspMsg := <-ts.rspChan:
		ts.mu.Lock()
		defer ts.mu.Unlock()
		ts.waitRsp = false
		return ts.doResponse(rspMsg, rsp)
	case tt := <-timeout:
		ts.mu.Lock()
		defer ts.mu.Unlock()
		ts.waitRsp = false
		// Double-check under the lock: the response may have raced in
		// just before waitRsp was cleared — prefer it over a timeout.
		select {
		case rspMsg := <-ts.rspChan:
			return ts.doResponse(rspMsg, rsp)
		default:
			ts.timeOutCall[ts.waitCallId] = tt.Time
			// BUG fix: the log chain was missing the terminating
			// Println(), so the timeout warning was never emitted
			// (every other log call in this file ends with Println()).
			log.GetLogger().Warn("call timeout").Str("call", reqJson).Float64("cost", 2, tt.Sub(initTime).Seconds()).Println()
			return errors.New("call timeout")
		}
	}
}

// doResponse consumes a pooled response clone: on an error-kind response
// the body becomes the returned error; otherwise the payload is copied
// into the caller-provided rsp. The clone is always returned to the pool.
func (ts *actorLocal) doResponse(msg *message.Msg, rsp message.Response) error {
	defer message.FreeMessage(msg)
	msg.CheckStepTime("actor.Rpc.doResponse")
	if msg.GetKind() == message.MsgRspError {
		// Remote side reported failure; surface its body as the error text.
		return errors.New(str_conv.ToString(msg.GetBody()))
	}
	rsp.(*message.Msg).CloneData(msg)
	return nil
}

// loop is the actor's single goroutine: it multiplexes the exit signal,
// fired timers, and mailbox wakeups until stop() signals exitChan.
func (ts *actorLocal) loop(...any) {
	for {
		select {
		case <-ts.exitChan: // idiom fix: `_ = <-ch` is redundant (staticcheck S1005)
			return
		case evt := <-ts.timerMan.wrap.C():
			ts.doTimer(evt)
		case <-ts.msgQueue.Wait():
			ts.msgQueue.Read(ts.doMsg)
		}
	}
}

// doTimer dispatches one fired timer event on the actor goroutine.
func (ts *actorLocal) doTimer(evt time.Trigger) {
	ctx := evt.Timer().Context().(*timerEvent)
	switch ctx.Kind {
	case timerLiveCheck:
		// Engine liveness check: stop the actor if it has been idle past
		// the configured stay time (aliveTime is refreshed in doMsg).
		ts.mu.Lock()
		defer ts.mu.Unlock()
		diff := ts.aliveTime.Add(ts.actorFactory.opts.stayTime).Sub(evt.Time)
		if diff < 0 {
			ts._stop()
		}
	default:
		// User timer: prefer a per-kind handler registered on the
		// factory, else fall back to the behavior's OnTimer. Either one
		// returning false stops the actor.
		doOK := false
		timerKind := uint16(ctx.Kind)
		if h, ok := ts.actorFactory.findTimerHandler(timerKind); ok {
			safe.Func(ts, "do timer by handler", func() {
				doOK = h(ts, timerKind, ctx.ID, evt.Time, ctx.Ctx)
			})
		} else {
			safe.Func(ts, "do timer by default", func() {
				doOK = ts.actor.OnTimer(ts, timerKind, ctx.ID, evt.Time, ctx.Ctx)
			})
		}
		if !doOK {
			ts._stop()
		}
	}
}

// doMsg processes one mailbox message on the actor goroutine.
// Engine messages are handled internally; pushes are relayed toward the
// client; net/call/cast traffic refreshes aliveTime and is dispatched to
// a registered protocol handler or the behavior's OnRecvMsg. A false
// return from either handler stops the actor.
func (ts *actorLocal) doMsg(msg *message.Msg) {
	defer message.FreeMessage(msg)
	switch msg.GetKind() {
	case message.MsgEngine:
		ts.doEnginMsg(msg)
		return
	case message.MsgNet, message.MsgCall, message.MsgCast:
		// Real traffic: refresh the idle clock used by timerLiveCheck.
		ts.aliveTime = time.Now()
	case message.MsgPush:
		// Relay toward the client (net context or rpc proxy).
		ts.Push(msg.GetProtocName(), msg.GetBody())
		return
	default:
	}
	doOK := false
	if h, ok := ts.actorFactory.findProtocolHandler(msg.GetProtocName()); ok {
		safe.Func(ts, "do msg by handler", func() {
			doOK = h(ts, msg)
		})
	} else {
		safe.Func(ts, "do msg by recv", func() {
			doOK = ts.actor.OnRecvMsg(ts, msg)
		})
	}
	if !doOK {
		ts._stop()
	}
}

// doEnginMsg handles engine-internal control messages:
// actorOnRunning loads the behavior and marks the actor running,
// actorStop requests shutdown, and attachPushHandler records the sender
// as this actor's push proxy.
func (ts *actorLocal) doEnginMsg(msg *message.Msg) {
	msgName := msg.GetProtocName()
	switch msgName {
	case actorStart, actorMoving:
		//logger.TracePrintf("doEnginMsg:%v", msgName)
	case actorOnRunning:
		t1 := time.Now()
		err := ts.actor.Load(ts.ClerkID(), ts)
		d := time.Now().Sub(t1)
		if err != nil {
			log.GetLogger().Error("start actor failed").Object(ts).Err(err).Println()
			ts._stop()
			// BUG fix: previously fell through and still set running=true,
			// letting a failed actor accept rpc traffic while stopping.
			return
		}
		if d.Milliseconds() > 500 {
			log.GetLogger().Warn("start actor slow").Object(ts).Float64("cost sec", 2, d.Seconds()).Println()
		}
		ts.mu.Lock()
		ts.running = true // admit rpc traffic only after a successful Load
		ts.mu.Unlock()
	case actorStop:
		ts._stop()
	case attachPushHandler:
		// Remember the sender as the push relay target (see Push).
		ts.rpcProxy = msg.GetFromActor().Copy()
	default:
	}
}

// _stop requests shutdown via the factory, which owns actor lifecycle
// and will eventually invoke stop() on this actor.
func (ts *actorLocal) _stop() {
	ts.actorFactory.stopActor(ts.ClerkID())
}

// checkResponseFailedReason classifies a response that found no waiter:
// a late reply to a call that already timed out (logged as "slow") versus
// a reply matching no pending call at all (logged as an error).
// Caller must hold ts.mu (timeOutCall is not otherwise synchronized).
func (ts *actorLocal) checkResponseFailedReason(rsp message.Response) {
	id := rsp.GetMsgID()
	tt, timedOut := ts.timeOutCall[id]
	if !timedOut {
		log.GetLogger().Error("response not call").Object(rsp).Println()
		return
	}
	delete(ts.timeOutCall, id)
	log.GetLogger().Warn("response slow").Object(rsp).Str("time out at", tt.Format(time.LayoutSec)).Println()
}
