package poolserver

import (
	"errors"
	"net"
	"sync"
	"time"

	collector "csudata.com/zqpool/src/collector"
	"go.uber.org/zap"
)

/* Each frontend connection owns one
CachedPrepare map:
	key:   the prepare (statement) name
	value: PrepareData, a struct with:
		PrepareRequest []byte — the raw prepare payload sent to the backend
		BackendMap — a map keyed by PgBackendConn ID, value PrepareInBackend
			PrepareInBackend is a struct:
				PrepareId int32
				BackConn *PgBackendConn
				ReConnCnt int32
*/

// Buffer sizing limits. (Names keep this file's existing ALL_CAPS style
// because other files may reference them.)
const DEFAULT_BUF_SIZE int32 = 65536                // default per-connection receive buffer size in bytes
const MAX_ERROR_MSG_SIZE int32 = 512                // cap on error-message payload size
const MAX_PREPARE_STMT_SIZE = 65536 * 2 /* maximum prepared-statement SQL length; anything longer is rejected with an error */

// g_next_pid — presumably the next fake backend PID handed out to
// frontend connections; confirm against the connection-accept path.
var g_next_pid uint32 = 1000

// type PgFrontConn struct {
// 	Conn           net.Conn
// 	Pid            uint32
// 	BackendKeyData uint32
// }

// PrepareIdData pairs a backend-side prepared-statement id with the raw
// prepare request bytes that created it.
type PrepareIdData struct {
	PrepareId      int32  // id of the prepared statement on the backend
	PrepareRequest []byte // raw prepare request payload
}

// PrepareInBackend records where a frontend prepared statement lives on a
// particular backend connection.
type PrepareInBackend struct {
	PrepareId int32          // id of the statement on the backend
	BackConn  *PgBackendConn // the backend connection the statement was prepared on
	ReConnCnt int32          // the backend connection's reconnect count at the time the prepare completed; the connection increments its own count on every reconnect, so a mismatch with this value reveals that the connection has since been re-established (and the prepare is gone)
}

// PrepareData describes one named prepared statement from a frontend and
// tracks every backend connection it has been prepared on.
type PrepareData struct {
	PrepareRequest []byte                       /* the prepare payload sent to the backend */
	BackendMap     map[uint32]*PrepareInBackend /* key is PgBackendConn.Id */
	RwState        int8                         /* whether the statement is a read-only query (1) or a modifying statement (2) */
}

// CachedPrepare records the prepare data received from one frontend
// connection, keyed by prepared-statement name. Call Init before use.
type CachedPrepare struct {
	mp map[string]*PrepareData // prepare name -> prepare data
}

// CuPre ("Cumulative Prepare") buffers one Parse (P) message until the
// client's Sync (S) message arrives; see ConnContext.cupreMap.
type CuPre struct {
	backendPrepareId   int32  // id assigned for the statement on the backend
	prepareRequestData []byte // buffered raw prepare request
	prepareRequestLen  int32  // number of valid bytes in prepareRequestData
	RwState            int8 /* whether the statement is a read-only query (1) or a modifying statement (2) */
}

// ConnContext holds all per-client-connection state: message buffers,
// cached prepared statements, transaction state, and the backend
// connection currently serving this client.
type ConnContext struct {
	msgList  [][]byte // frontend messages of the current batch
	msgCount int32    // number of valid entries in msgList
	recvBuf  []byte   // receive buffer for frontend data
	sendBuf  []byte   // send buffer (recvBufSize + 1024 bytes, see Init)
	sendLen  int32    // number of valid bytes in sendBuf
	//recvLen  int32
	//leftLen  int32
	recvBufSize int32 // capacity of recvBuf in bytes

	cachedPrepare CachedPrepare
	transState    byte /* session transaction status; Init sets 'I' (presumably idle, per PostgreSQL ReadyForQuery convention — confirm) */
	pool          *Pool

	isInUnnamedPrepared bool
	unnamePrepareData   []byte

	/* A client request may carry several P (Parse) messages before a single
	S (Sync) message, so P messages are accumulated in cupreMap
	("Cumulative Prepare") until the Sync arrives. */
	cupreMap map[string]*CuPre

	//prepareRequestData []byte
	//prepareRequestLen int32
	//prepareName string
	//backendPrepareId int32

	Pid   uint32 // presumably the PID reported to the frontend in BackendKeyData — confirm
	BeKey uint32 // presumably the cancel key reported to the frontend — confirm

	cliConn net.Conn // the frontend (client) connection

	//isGetBackend  bool
	pBackConn     *PgBackendConn // backend connection bound to this client
	pSendBackConn *PgBackendConn
	Lock          sync.Mutex // guards pBackConn fields during reconnects (see TryRepairConn)
}

// DeletePrepare unregisters this prepared statement's id from the backend
// connection it was created on.
func (p *PrepareInBackend) DeletePrepare() {
	p.BackConn.DeletePrepareId(p.PrepareId)
}

// Init allocates the underlying name -> PrepareData map. It must be
// called before any other CachedPrepare method.
func (p *CachedPrepare) Init() {
	p.mp = map[string]*PrepareData{}
}

// Get returns the cached prepare data registered under prepareName.
// The boolean reports whether an entry exists.
func (p *CachedPrepare) Get(prepareName string) (*PrepareData, bool) {
	data, found := p.mp[prepareName]
	return data, found
}

// AddPrepare registers (or overwrites) prepareData under prepareName.
func (p *CachedPrepare) AddPrepare(prepareName string, prepareData *PrepareData) {
	p.mp[prepareName] = prepareData
}

// DeletePrepare removes the named prepared statement from the cache,
// first deallocating it on every backend connection it was prepared on.
// Unknown names are ignored.
func (p *CachedPrepare) DeletePrepare(prepareName string) {
	if data, found := p.mp[prepareName]; found {
		for _, backendEntry := range data.BackendMap {
			backendEntry.DeletePrepare()
		}
		delete(p.mp, prepareName)
	}
}

// Discard deallocates every cached prepared statement on every backend
// connection and drops the map. The CachedPrepare is unusable afterwards
// until Init is called again.
func (p *CachedPrepare) Discard() {
	for _, data := range p.mp {
		for _, backendEntry := range data.BackendMap {
			backendEntry.DeletePrepare()
		}
	}
	p.mp = nil
}

//var g_backend_pool = make(map[string]*Pool, 1)

//var g_pools_version = make(map[string]string, 1)

// appendString copies str into buf as a NUL-terminated C-style string and
// returns the total number of bytes written (string bytes plus the
// terminating zero byte).
//
// If str (plus its terminator) does not fit, str is truncated so the
// terminating NUL always fits; an empty buf writes nothing and returns 0.
// BUG FIX: the previous version indexed buf[len(buf)] — a panic — whenever
// len(str) >= len(buf), because copy stops at the end of buf.
func appendString(buf []byte, str string) int32 {
	if len(buf) == 0 {
		return 0
	}
	if len(str) >= len(buf) {
		str = str[:len(buf)-1] // leave room for the terminating NUL
	}
	n := copy(buf, str)
	buf[n] = 0
	return int32(n + 1)
}

// BeReconnect restores a usable backend connection for this context.
// It first tries to repair (reconnect) the current backend connection in
// place; if that fails it frees the connection and allocates a fresh one
// from the pool with the same read/write role. Returns an error only when
// no replacement connection could be obtained.
func (ctx *ConnContext) BeReconnect() error {
	var rw_state int8
	//portal = ctx.pBackConn.ConnPortal
	rw_state = ctx.pBackConn.RwState

	if ctx.TryRepairConn() {
		return nil
	} else {
		// Unrepairable: release the connection, then take a new one from
		// the pool, preserving the read/write role.
		ctx.FreeBeConn()
		if !ctx.AllocBeConn(rw_state) {
			return errors.New("can not get connection from backend")
		}
		return nil
	}
}

// TryRepairConn tries to re-establish the current backend connection in
// place, retrying up to pool.BeRetryCnt times with pool.BeRetryInterval
// seconds between attempts. On success ctx.pBackConn keeps its identity
// but gets a fresh socket, Pid and BeKey, and its ReConnCnt is bumped;
// true is returned. If the portal is not BE_ONLINE, or every retry fails
// (the portal is then marked BE_BAD_NO_FREE), false is returned.
func (ctx *ConnContext) TryRepairConn() bool {
	/* Try to repair the connection; return true on success, false otherwise. */

	var portal string
	var err error
	var retryCnt int32 = 0
	var beState int8
	var bekey uint32
	var pid uint32
	var conn net.Conn

	pool := ctx.pool
	ctx.pBackConn.Conn.Close()
	portal = ctx.pBackConn.ConnPortal
	for {

		pool.Lock.RLock()
		bePool := pool.BePools[portal]
		// NOTE(review): if portal is absent from pool.BePools, bePool is nil
		// and the Lock below panics — confirm the portal always exists here.

		bePool.Lock.Lock()
		beState = bePool.State
		bePool.Lock.Unlock()
		if beState != BE_ONLINE { // the portal is already bad (or in some other non-online state): unrepairable
			pool.Lock.RUnlock()
			return false
		}

		zap.S().Infof("Reconnect: %s", portal)
		// NOTE: connectBackend returns the error as its SECOND value.
		conn, err, pid, bekey = connectBackend(portal, ctx.pool.BeUser, ctx.pool.BePasswd, ctx.pool.BeDbName)
		if err == nil {
			ctx.Lock.Lock()
			ctx.pBackConn.Conn = conn
			ctx.pBackConn.Pid = pid
			ctx.pBackConn.BeKey = bekey
			ctx.pBackConn.ConnUnixTime = time.Now().Unix()
			ctx.pBackConn.ReConnCnt++ // lets cached prepares detect that the connection was re-established
			ctx.Lock.Unlock()
			pool.Lock.RUnlock()
			return true
		}
		zap.S().Infof("Could not connect(%s): %s", portal, err.Error())
		retryCnt++
		if retryCnt > pool.BeRetryCnt { // still unreachable after all retries: consider this portal dead
			// Mark the portal bad; the caller will allocate a new connection elsewhere.
			bePool.Lock.Lock()
			bePool.State = BE_BAD_NO_FREE
			bePool.Lock.Unlock()
			pool.Lock.RUnlock()
			return false
		}
		pool.Lock.RUnlock() // release the pool lock before sleeping
		time.Sleep(time.Duration(pool.BeRetryInterval) * time.Second)
	}
}

// Init prepares a ConnContext for serving the given client connection.
// msgBufSize is the receive-buffer size in bytes; 0 selects
// DEFAULT_BUF_SIZE. The send buffer gets 1024 extra bytes of headroom.
func (ctx *ConnContext) Init(cliConn net.Conn, msgBufSize int) {
	if msgBufSize == 0 {
		msgBufSize = int(DEFAULT_BUF_SIZE)
	}

	ctx.cliConn = cliConn
	ctx.recvBufSize = int32(msgBufSize)
	ctx.recvBuf = make([]byte, msgBufSize)
	ctx.sendBuf = make([]byte, msgBufSize+1024)
	ctx.msgList = make([][]byte, 512)

	ctx.pBackConn = nil
	ctx.isInUnnamedPrepared = false
	ctx.transState = 'I' // session starts outside any transaction

	ctx.cupreMap = map[string]*CuPre{}
}

// noused is a no-op identity helper, used to silence "declared and not
// used" complaints; it returns its argument unchanged.
func noused(value int32) int32 {
	return value
}

// CollectBackendConnections publishes each pool's configured backend
// connection count to Prometheus: one read-write portal contributes
// BeRwConnCount and each of the remaining (read-only) portals contributes
// BeRdConnCount.
func CollectBackendConnections() {
	var pool *Pool
	var bePoolCnt int
	var statCnt float64
	PoolsLock.RLock()
	for _, pool = range g_allpools {
		bePoolCnt = len(pool.BePools)
		statCnt = float64((bePoolCnt-1)*int(pool.BeRdConnCount) + int(pool.BeRwConnCount))
		collector.UpdateBackendConnections(pool.ID, statCnt)
	}
	PoolsLock.RUnlock()
}

// CollectActiveBackendConnections publishes each pool's number of
// in-use backend connections to Prometheus. A portal's active count is
// total connections minus those sitting in its free channel.
func CollectActiveBackendConnections() {
	var pool *Pool
	var bePool *BePool
	var activeBackendConnNum float64 = 0

	PoolsLock.RLock()
	for _, pool = range g_allpools {
		pool.Lock.RLock()
		for _, bePool = range pool.BePools {
			activeBackendConnNum += float64(len(bePool.BeConns) - len(bePool.FreeChan))
		}
		collector.UpdateAcitveBackendConnections(pool.ID, activeBackendConnNum)
		activeBackendConnNum = 0 // reset the accumulator for the next pool
		pool.Lock.RUnlock()
	}
	PoolsLock.RUnlock()
}

// StartServer builds every pool's backend connections, starts the
// background maintenance goroutines and the Prometheus collectors, then
// accepts client connections on listenAddr forever. It returns only on a
// fatal setup error (pool build, address resolve, or listen failure).
func StartServer(listenAddr string) {
	var pool *Pool
	PoolsLock.RLock()
	for _, pool = range g_allpools {
		for portal, bePool := range pool.BePools {
			err := BuildBePoolConn(pool, portal, bePool)
			if err != nil {
				PoolsLock.RUnlock()
				// BUG FIX: the old format string had one verb for two args,
				// producing a garbled log line (caught by go vet).
				zap.S().Errorf("Build pool backend connect failed(%s): %s", portal, err.Error())
				return
			}
			bePool.State = BE_ONLINE
		}
	}
	PoolsLock.RUnlock()

	go freshPools()
	go BePoolRecoveryPending()
	go checkPoolsHealth()

	zap.S().Infof("Starting server on %s ...", listenAddr)
	// BUG FIX: the resolve error was silently discarded, which let
	// ListenTCP run with a nil address and fail with a confusing message.
	addr, err := net.ResolveTCPAddr("tcp", listenAddr)
	if err != nil {
		zap.S().Errorf("Could not resolve %s: %s", listenAddr, err.Error())
		return
	}

	listener, err := net.ListenTCP("tcp", addr)
	if err != nil {
		zap.S().Errorf("Could not listen on %s: %s", listenAddr, err.Error())
		return
	}

	// Prometheus: periodically publish backend connection statistics.
	ticker := time.NewTicker(10 * time.Millisecond)
	go func() {
		for range ticker.C {
			CollectBackendConnections()
			CollectActiveBackendConnections()
		}
	}()

	for {
		con, err := listener.Accept()
		if err != nil {
			zap.S().Error("Failed to accepting a connection: ", err)
			// BUG FIX: previously a nil connection was handed to
			// handleConnection after an Accept error.
			continue
		}
		go handleConnection(con)
	}
}
