/*
 *  Copyright (C) 2025 ameise <ameise.wang@gmail.com> - All Rights Reserved
 *
 *  This file is part of e3net.
 *
 *  e3net is free software: you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation, either version 3 of the License, or
 *  (at your option) any later version.
 *
 *  e3net is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with e3net. If not, see <https://www.gnu.org/licenses/>.
 */

package e3net

import (
	"fmt"
	"net"
	"runtime"
	"sync/atomic"
	"time"

	"gitee.com/ameise84/e3ds/deque"
	"gitee.com/ameise84/e3lock"
	"gitee.com/ameise84/e3net/internal/packet"
	"gitee.com/ameise84/e3pool/go_pool"
	"gitee.com/ameise84/e3pool/obj_pool"
	"gitee.com/ameise84/e3utils/bytes_buffer"
	"github.com/pkg/errors"
)

// _gConnInstId hands out process-wide unique instance ids for stdTcpConn objects.
var _gConnInstId atomic.Uint32

// connFactory is implemented by the owner of live connections;
// handleCloseConn is invoked (from a worker goroutine, see Close)
// after a connection has fully shut down so the owner can recycle it.
type connFactory interface {
	handleCloseConn(*stdTcpConn)
}

// newStdTcpConnPool builds a recycling pool of *stdTcpConn objects.
// All pooled connections share the pool's buffer pool and packet
// handler; each one carries its own send/recv goroutine runners and a
// process-unique instance id drawn from _gConnInstId.
func newStdTcpConnPool(opts *tcpConnOptions) *stdTcpConnPool {
	p := &stdTcpConnPool{
		bufferPool: newBufferPool(int(opts.packetSize)),
		packHandle: opts.packHandle,
		gc:         deque.NewDeque[*stdTcpConn](int(opts.connHoldSize)),
	}
	p.pool = obj_pool.NewPool[*stdTcpConn](func() *stdTcpConn {
		conn := &stdTcpConn{
			instId:      _gConnInstId.Add(1),
			builder:     p,
			packMsgBuff: p.bufferPool.Get(),
		}
		// Send worker: one goroutine, non-blocking queue of 256 tasks.
		conn.sendWorker = go_pool.NewGoRunner(conn, "tcp conn send", go_pool.DefaultOptions().SetSimCount(1).SetBlock(false).SetSize(256))
		// Recv worker: one goroutine running the read loop.
		conn.recvWorker = go_pool.NewGoRunner(conn, "tcp conn recv", go_pool.DefaultOptions().SetSimCount(1))
		conn.isSendWake.Store(false)
		return conn
	})
	return p
}

// stdTcpConnPool recycles stdTcpConn objects. Closed connections are
// parked on the gc deque (guarded by gcLock) for a cool-down period
// before returning to the object pool; lastGC throttles how often the
// queue is swept (see checkGC).
type stdTcpConnPool struct {
	pool       obj_pool.Pool[*stdTcpConn]
	bufferPool *bufferPool
	packHandle packet.PackHandler
	gcLock     e3lock.SpinLock
	gc         deque.IDeque[*stdTcpConn]
	lastGC     atomic.Int64 // unix seconds of the last GC sweep
}

// take returns a connection object for reuse: first it tries to
// reclaim an expired one from the GC queue, otherwise it asks the
// object pool (which constructs a fresh one on demand).
func (ts *stdTcpConnPool) take() (c *stdTcpConn) {
	c = ts.checkGC(true)
	if c == nil {
		c = ts.pool.Get()
	}
	// BUG FIX: inActive() nils c.builder before the connection reaches
	// the GC queue / object pool, and active() never restores it, so a
	// recycled connection would nil-deref on its next Send*/loopRead.
	// Re-attach the pool here (a no-op for freshly constructed conns).
	c.builder = ts
	return
}

// free parks a closed connection on the GC queue so it cools down for
// invalidTime seconds before being pooled again (see checkGC).
func (ts *stdTcpConnPool) free(c *stdTcpConn) {
	ts.gcLock.Lock()
	err := ts.gc.PushBack(c)
	ts.gcLock.Unlock()
	if err != nil {
		// The queue is full: sweep expired entries, then retry once.
		ts.checkGC(false)
		ts.gcLock.Lock()
		err = ts.gc.PushBack(c)
		ts.gcLock.Unlock()
		if err != nil {
			// BUG FIX: previously the connection was silently dropped
			// here, losing its pooled packMsgBuff and workers; hand it
			// straight back to the object pool instead (it just skips
			// the cool-down period).
			ts.pool.Put(c)
		}
	}
}

// checkGC sweeps connections that have sat on the GC queue longer
// than invalidTime seconds and returns them to the object pool. The
// sweep runs at most once per gcTime seconds (throttled via lastGC).
// When takeOne is true, the first expired connection is handed back
// to the caller for immediate reuse instead of being pooled.
// NOTE(review): a connection reclaimed here had its builder field
// nil'ed in inActive(); confirm it is re-assigned before the
// connection is used again.
func (ts *stdTcpConnPool) checkGC(takeOne bool) (c *stdTcpConn) {
	now := time.Now().Unix()
	if now-ts.lastGC.Load() > gcTime {
		ts.lastGC.Store(now)
		ts.gcLock.Lock()
		defer ts.gcLock.Unlock()
		for {
			tmpConn, err := ts.gc.PopFront()
			if err != nil {
				break // queue drained
			}
			if now-tmpConn.invalidTime > invalidTime {
				if takeOne && c == nil {
					c = tmpConn
				} else {
					ts.pool.Put(tmpConn)
				}
			} else {
				// Entries are in push order, so the first young one
				// means the rest are young too: put it back and stop.
				_ = ts.gc.PushFront(tmpConn)
				break
			}
		}
	}
	return
}

// stdTcpConn is a pooled TCP connection: it owns one send worker and
// one recv worker goroutine runner, per-direction traffic counters,
// and a reusable packet-assembly buffer. Instances are recycled via
// stdTcpConnPool; active()/inActive() reset per-session state.
type stdTcpConn struct {
	instId      uint32 // process-unique instance id, assigned at construction
	id          uint64 // session id, rebuilt on every active()
	builder     *stdTcpConnPool
	factory     connFactory
	tag         Tag
	fd          net.Conn
	isConnected bool        // whether currently connected
	ctx         ConnContext // application-layer extension data
	readTime    time.Duration
	writeTime   time.Duration
	invalidTime int64 // unix seconds when the conn was deactivated (GC cool-down start)
	closeOnce   e3lock.Once
	closeLock   e3lock.SpinLock
	sendWorker  go_pool.GoRunner
	recvWorker  go_pool.GoRunner
	isSendWake  atomic.Bool
	packMsgBuff *bytes_buffer.ShiftBuffer // scratch buffer reused by handleUnPacketMsg
	sendCount   atomic.Uint64
	sendSize    atomic.Uint64
	recvCount   atomic.Uint64
	recvSize    atomic.Uint64
}

// E3LogMarshall renders the connection for structured logging:
// local/remote addresses, tag and instance id.
func (ts *stdTcpConn) E3LogMarshall() string {
	return fmt.Sprintf("std tcp conn[<%s> <%s>][%v %d]", ts.LocalAddr(), ts.RemoteAddr(), ts.tag, ts.instId)
}

// OnPanic logs panics recovered by the go_pool workers that run this
// connection's tasks.
func (ts *stdTcpConn) OnPanic(err error) {
	_gLogger.Error("OnPanic").Object(ts).Err(err).Println()
}

// ID returns the current session id (rebuilt on every active()).
func (ts *stdTcpConn) ID() uint64 {
	return ts.id
}

// Tag returns the tag assigned by the factory in active().
func (ts *stdTcpConn) Tag() Tag {
	return ts.tag
}

// SetContext attaches application-layer data to the connection. The
// write is guarded by closeLock and only happens while the connection
// is live, so a racing Close cannot leave state on a recycled object.
func (ts *stdTcpConn) SetContext(ctx ConnContext) {
	ts.closeLock.Lock()
	defer ts.closeLock.Unlock()
	if !ts.isConnected {
		return
	}
	ts.ctx = ctx
}

// Context returns the application-layer data set via SetContext.
// NOTE(review): read without closeLock, unlike SetContext — confirm
// callers tolerate a racing update.
func (ts *stdTcpConn) Context() ConnContext {
	return ts.ctx
}

// LocalAddr returns the local endpoint address. Requires an attached
// socket (fd is nil after inActive).
func (ts *stdTcpConn) LocalAddr() string {
	return ts.fd.LocalAddr().String()
}

// RemoteAddr returns the peer's endpoint address. Requires an
// attached socket (fd is nil after inActive).
func (ts *stdTcpConn) RemoteAddr() string {
	return ts.fd.RemoteAddr().String()
}

// SendSync packs msg and writes it on the connection's send worker,
// blocking until the write completes. Returns ErrorConnClosed if the
// connection is no longer live, or any pack/write error.
func (ts *stdTcpConn) SendSync(msg []byte) error {
	ts.closeLock.Lock()
	defer ts.closeLock.Unlock()
	if !ts.isConnected {
		return ErrorConnClosed
	}
	bf := ts.builder.bufferPool.Get()
	bf.Clean()
	wf := bf.GetTailEmptyBytes()
	wn, err := ts.builder.packHandle.Pack(msg, wf)
	if err != nil {
		// BUG FIX: the scratch buffer previously leaked out of the
		// buffer pool on the pack-error path.
		ts.builder.bufferPool.Put(bf)
		return err
	}
	bf.AddLen(wn)
	// doSendSync returns bf to the pool after the worker has run it.
	_, err = ts.sendWorker.SyncRun(ts.doSendSync, bf)
	return err
}

// SendAsync packs msg and queues it on the send worker without
// waiting for the write. Returns ErrorConnClosed if the connection is
// no longer live, a pack error, or the worker's enqueue error.
func (ts *stdTcpConn) SendAsync(msg []byte) (err error) {
	ts.closeLock.Lock()
	defer ts.closeLock.Unlock()

	if !ts.isConnected {
		return ErrorConnClosed
	}

	bf := ts.builder.bufferPool.Get()
	bf.Clean()
	wf := bf.GetTailEmptyBytes()
	wn, err := ts.builder.packHandle.Pack(msg, wf)
	if err != nil {
		// BUG FIX: the scratch buffer previously leaked out of the
		// buffer pool on the pack-error path.
		ts.builder.bufferPool.Put(bf)
		return err
	}
	bf.AddLen(wn)
	err = ts.sendWorker.AsyncRun(ts.doSendAsync, bf)
	if err != nil {
		// BUG FIX: the task was never enqueued (queue full /
		// non-blocking), so doSendAsync will not release bf — reclaim
		// it here; previously it leaked.
		ts.builder.bufferPool.Put(bf)
	}
	return err
}

// IsConnected reports whether the connection is currently live,
// reading the flag under closeLock so it is consistent with Close.
func (ts *stdTcpConn) IsConnected() (connected bool) {
	ts.closeLock.Lock()
	connected = ts.isConnected
	ts.closeLock.Unlock()
	return
}

// Close shuts the connection down exactly once (guarded by closeOnce):
// it drains in-flight sends (sendWorker.Wait), marks the connection
// closed, closes the socket — which unblocks the read loop — then,
// from a fresh worker goroutine, waits for the recv worker to exit
// and hands the connection back to its factory for recycling. The
// recv wait must not run inline because Close is also called from the
// read loop itself (loopRead). Always returns nil.
func (ts *stdTcpConn) Close() (err error) {
	ts.closeOnce.Do(func() {
		ts.closeLock.Lock()
		defer ts.closeLock.Unlock()
		if ts.isConnected {
			ts.sendWorker.Wait()
			ts.isConnected = false
			if ts.fd != nil {
				_ = ts.fd.Close()
			}
			go_pool.NewGoWorkerDo(ts, "tcp conn close", func(...any) {
				ts.recvWorker.Wait()
				_gLogger.Trace("close").Object(ts).Str("traffic", ts.TrafficData()).Println()
				ts.factory.handleCloseConn(ts)
			})
		}
	})
	return
}

// TrafficData formats the cumulative send/recv packet counts and byte
// sizes as a JSON-like string for logging.
func (ts *stdTcpConn) TrafficData() string {
	return fmt.Sprintf("{send:{count:%v,size:%v},recv:{count:%v,size:%v}}", ts.sendCount.Load(), ts.sendSize.Load(), ts.recvCount.Load(), ts.recvSize.Load())
}

// active wires a (new or recycled) connection to a freshly accepted
// socket and resets its per-session state and traffic counters.
// readTime/writeTime are per-operation deadlines in seconds (0 = no
// deadline).
func (ts *stdTcpConn) active(f connFactory, tag Tag, fd net.Conn, readTime int64, writeTime int64) {
	// Session id: process-unique instance id in the high 32 bits, a
	// per-reuse generation counter in the low 32 bits.
	// BUG FIX: the previous code OR'ed instId into the low bits
	// without shifting, so the two fields collided and ids were not
	// unique across reuse; the counter is now also masked after the
	// increment so it cannot spill into the high half.
	ts.id = (uint64(ts.instId) << 32) | ((ts.id + 1) & 0xFFFFFFFF)
	ts.factory = f
	ts.tag = tag
	ts.fd = fd
	ts.readTime = time.Duration(readTime) * time.Second
	ts.writeTime = time.Duration(writeTime) * time.Second
	ts.packMsgBuff.Clean()
	ts.sendCount.Store(0)
	ts.sendSize.Store(0)
	ts.recvCount.Store(0)
	ts.recvSize.Store(0)
	ts.closeOnce.Reset()
	// NOTE(review): ts.ctx from a previous session is not cleared
	// here; confirm it is always replaced (SetContext) before OnRecv
	// can fire on the new session.
}

// inActive strips the connection of per-session references after
// close and stamps invalidTime, which checkGC uses as the start of
// the cool-down period. The detached socket is returned to the caller.
// NOTE(review): builder is nil'ed here but active() does not restore
// it; verify the pool/factory re-assigns it before the connection is
// reused, otherwise the next Send*/loopRead will nil-deref.
func (ts *stdTcpConn) inActive() (fd net.Conn) {
	ts.factory = nil
	ts.builder = nil
	ts.invalidTime = time.Now().Unix()
	fd = ts.fd
	ts.fd = nil
	return
}

// closeAndWaitRecv closes the connection and, when the close
// succeeded, blocks until the recv worker has fully exited.
func (ts *stdTcpConn) closeAndWaitRecv() error {
	if err := ts.Close(); err != nil {
		return err
	}
	ts.recvWorker.Wait()
	return nil
}

// loopRead is the recv worker's main loop: it reads from the socket
// into a local shift buffer (4x the pool's packet size) and feeds the
// accumulated bytes through handleUnPacketMsg, until a deadline,
// read, or unpack/handler error stops it; it then closes the
// connection.
func (ts *stdTcpConn) loopRead(...any) {
	var err error
	var n int
	rdBuffer := bytes_buffer.NewShiftBuffer(ts.builder.bufferPool.size*4, 0, false)
	for {
		// Re-arm the read deadline each pass so an idle peer
		// eventually errors out.
		if ts.readTime > 0 {
			readOverTime := time.Now().Add(ts.readTime)
			err = ts.fd.SetReadDeadline(readOverTime)
			if err != nil {
				break
			}
		}
		bf := rdBuffer.GetTailEmptyBytes()
		n, err = ts.fd.Read(bf)
		if err != nil {
			break
		}
		if n <= 0 {
			// Nothing read; yield rather than spin hot.
			runtime.Gosched()
			continue
		}
		ts.recvSize.Add(uint64(n))
		rdBuffer.AddLen(n)
		err = ts.handleUnPacketMsg(&rdBuffer)
		if err != nil {
			break
		}
	}

	if isCaredError(err) { // excludes errors from a normal close
		_gLogger.Error("handle recv").Object(ts).Err(err).Println()
	} else if rdBuffer.GetDataSize() != 0 {
		// Normal close, but a partial packet was still buffered.
		_gLogger.Error("handle recv  data!=0").Object(ts).Err(err).Println()
	}
	_ = ts.Close()
}

// handleUnPacketMsg repeatedly unpacks complete messages from read,
// delivers each to the application via ctx.OnRecv, and sends any
// reply asynchronously. Returns nil when the buffer holds no complete
// packet, otherwise the first unpack/handler/send error.
func (ts *stdTcpConn) handleUnPacketMsg(read bytes_buffer.Reader) error {
	// Scratch area for the unpacked message; reused across iterations
	// because OnRecv is synchronous.
	w := ts.packMsgBuff.GetTailEmptyBytes()

	for {
		r, _ := read.Peek()
		if len(r) == 0 {
			return nil
		}

		rn, msg, err := ts.builder.packHandle.UnPack(r, w)
		if msg == nil || err != nil {
			// nil msg with nil err means an incomplete packet: wait
			// for more bytes.
			return err
		}

		_, _ = read.FetchLen(rn)
		ts.recvCount.Add(1)
		rsp, err := ts.ctx.OnRecv(ts, msg)
		if len(rsp) > 0 {
			if sendErr := ts.SendAsync(rsp); sendErr != nil {
				if err == nil {
					// BUG FIX: errors.WithMessage(nil, …) returns nil,
					// so a send failure after a successful OnRecv was
					// silently discarded; surface it directly instead.
					err = sendErr
				} else {
					err = errors.WithMessage(err, sendErr.Error())
				}
			}
		}
		if err != nil {
			return err
		}
	}
}

// run marks the connection live and starts the read loop on the recv
// worker; returns false (reverting the flag) if the worker rejects
// the task.
// NOTE(review): isConnected is written here without closeLock — this
// looks safe only if run executes before the connection is visible to
// other goroutines; confirm.
func (ts *stdTcpConn) run() bool {
	ts.isConnected = true
	if err := ts.recvWorker.AsyncRun(ts.loopRead); err != nil {
		ts.isConnected = false
		return false
	}
	return true
}

// doSendAsync is the send-worker task behind SendAsync: it flushes
// the packed buffer to the socket and always returns the buffer to
// the pool. Failures are only logged, since no caller is waiting.
func (ts *stdTcpConn) doSendAsync(args ...any) {
	bf := args[0].(*bytes_buffer.ShiftBuffer)
	defer func() {
		ts.builder.bufferPool.Put(bf)
	}()
	payload, _ := bf.Fetch()
	if err := ts.doSend(payload); err != nil {
		_gLogger.Warn("doSendAsync").Object(ts).Err(err).Println()
	}
}

// doSendSync is the send-worker task behind SendSync: it flushes the
// packed buffer, always returns it to the pool, logs any failure, and
// reports the error back to the synchronous caller.
func (ts *stdTcpConn) doSendSync(args ...any) (any, error) {
	bf := args[0].(*bytes_buffer.ShiftBuffer)
	defer func() {
		ts.builder.bufferPool.Put(bf)
	}()
	payload, _ := bf.Fetch()
	if err := ts.doSend(payload); err != nil {
		_gLogger.Warn("doSendSync").Object(ts).Err(err).Println()
		return nil, err
	}
	return nil, nil
}

// doSend writes msg to the socket, honoring the configured write
// deadline and looping until every byte is flushed. On a write error
// the connection is closed from a separate worker goroutine — Close
// waits on the send worker (ts.sendWorker.Wait), so calling it inline
// from this send-worker task would deadlock — and the error is
// returned. On success the traffic counters are updated.
func (ts *stdTcpConn) doSend(msg []byte) error {
	fd := ts.fd
	if ts.writeTime > 0 {
		if err := fd.SetWriteDeadline(time.Now().Add(ts.writeTime)); err != nil {
			return err
		}
	}

	total := len(msg)
	sent := 0
	for {
		n, err := fd.Write(msg[sent:])
		if err != nil {
			go_pool.NewGoWorkerDo(ts, "tcp conn send close", func(...any) {
				_ = ts.Close()
			})
			return err
		}
		sent += n
		if sent == total {
			break
		}
	}
	ts.sendCount.Add(1)
	ts.sendSize.Add(uint64(total))
	return nil
}
