package msgQueueTCP

import (
	"fmt"
	. "gitee.com/kingzyt/common/countPool"
	"gitee.com/kingzyt/common/log"
	. "gitee.com/kingzyt/common/tcpShell"
	. "gitee.com/kingzyt/common/util"
	"math/rand"
	"net"
	"sync"
	"sync/atomic"
	"time"
)

// Interval between reaper scans that release long-closed (overdue) mqs.
const mq_overdue_check_time_interval = time.Minute * 5

// Hub lifecycle states, stored atomically in MsgQueueTCPHub.status.
const (
	status_hub_quited int32 = iota
	status_hub_running
)

// UsingMqtcp pairs an in-use message queue with the remote address it serves.
type UsingMqtcp struct {
	mqtcp *MsgQueueTCP // queue currently bound to raddr
	raddr string       // remote peer address ("ip:port")
}

// HubNewParams configures NewHub.
type HubNewParams struct {
	CloseOverdueChecker bool   // true disables the background overdue-mq reaper
	DialerHint          string // just for dialer when mq used as listener
}

// MsgQueueTCPHub owns a pool of MsgQueueTCPs plus the listeners/dialers that
// feed them, recycling closed mqs and reaping ones closed for too long.
type MsgQueueTCPHub struct {
	name        string
	dialerHint  string // passed to onStopped for listener-side mqs; fixed after NewHub
	nextMqId    int    // monotonically increasing suffix for new mq names
	newMQParams NewParams
	mqtcps      map[string]*MsgQueueTCP // mqName->mq, all mq, using or unused
	mqtcpsMutex sync.Mutex

	/*
	   Example: for a client-side mqhub, callers do not need to know which client
	   connections exist — they just handle each mq's messages individually —
	   whereas s/lm/mgr hubs need to know the connection layout.
	   Tracking the in-use mqs requires a list that every mq open/close passes
	   through under a lock. A client hub can have a large number of mqs that
	   open and close frequently, so that lock (plus maintaining the list, full
	   traversals, and element insert/delete) would seriously hurt throughput
	   for no benefit. s/lm/mgr hubs only have a few long-lived mqs with rare
	   open/close, so the cost there is negligible.
	*/
	needUsingMqList  bool
	usingMqtcps      []UsingMqtcp
	usingMqtcpsMutex sync.RWMutex

	status   int32     // status_hub_running / status_hub_quited, accessed atomically
	quitSign chan byte // signals overdueMQCheck to stop

	listenerQuiters      map[string]chan byte // listenPort -> quit channel for that listener
	listenerQuitersMutex sync.Mutex
}

// GetName returns the hub's configured name.
func (self *MsgQueueTCPHub) GetName() string {
	hubName := self.name
	return hubName
}

// NewHub builds a running hub named name; mqs it creates use newMQParams.
// Unless newHubParams.CloseOverdueChecker is set, a background goroutine is
// started that periodically releases mqs which stayed closed too long.
func NewHub(name string, newMQParams NewParams, newHubParams HubNewParams) *MsgQueueTCPHub {
	hub := &MsgQueueTCPHub{
		name:        name,
		dialerHint:  newHubParams.DialerHint,
		newMQParams: newMQParams,
		mqtcps:      make(map[string]*MsgQueueTCP),

		needUsingMqList: true,
		usingMqtcps:     make([]UsingMqtcp, 0),

		status:   status_hub_running,
		quitSign: make(chan byte, 1),

		listenerQuiters: make(map[string]chan byte),
	}

	if newHubParams.CloseOverdueChecker {
		return hub
	}

	go hub.overdueMQCheck()
	return hub
}

// config func, use just after NewHub
// DisableUsingMqList turns off tracking of in-use mqs (see the rationale on
// MsgQueueTCPHub). Afterwards the Get*/Choose*/Iterator* accessors panic.
// NOTE(review): not lock-protected — must be called before the hub is shared
// with other goroutines, as the name "use just after NewHub" implies.
func (self *MsgQueueTCPHub) DisableUsingMqList() {
	self.needUsingMqList = false
	self.usingMqtcps = nil
}

/*
The hub does not delete an mq immediately after it closes (and even when it
does, only after a fairly long delay). Because this family of functions runs
under the lock, the worst case is that the returned mq is closed and dropped
from the hub's list right after it is returned; reference counting keeps the
object alive, and the sender merely observes "closed" and gives up. There is
no thread-safety problem: the stored mq pointer is set to nil on outseat,
triggering release, or is released directly on delete.
*/
// GetMQTCP returns the in-use mq currently bound to raddr, if any.
func (self *MsgQueueTCPHub) GetMQTCP(raddr string) (mqtcp *MsgQueueTCP, ok bool) {
	if !self.needUsingMqList {
		panic(fmt.Errorf("%s, needUsingMqList is off", self.name))
	}

	self.usingMqtcpsMutex.RLock()
	defer self.usingMqtcpsMutex.RUnlock()

	for _, using := range self.usingMqtcps {
		if using.raddr == raddr {
			return using.mqtcp, true
		}
	}

	return
}
// GetMQTCPRemoteAddr performs the reverse lookup of GetMQTCP: the remote
// address an in-use mq is bound to.
func (self *MsgQueueTCPHub) GetMQTCPRemoteAddr(mqtcp *MsgQueueTCP) (raddr string, ok bool) {
	if !self.needUsingMqList {
		panic(fmt.Errorf("%s, needUsingMqList is off", self.name))
	}

	self.usingMqtcpsMutex.RLock()
	defer self.usingMqtcpsMutex.RUnlock()

	for _, using := range self.usingMqtcps {
		if using.mqtcp == mqtcp {
			return using.raddr, true
		}
	}

	return
}
// MQUsingCount returns how many mqs are currently in use.
func (self *MsgQueueTCPHub) MQUsingCount() int {
	if !self.needUsingMqList {
		panic(fmt.Errorf("%s, needUsingMqList is off", self.name))
	}

	self.usingMqtcpsMutex.RLock()
	usingCount := len(self.usingMqtcps)
	self.usingMqtcpsMutex.RUnlock()

	return usingCount
}
// ChooseMQTCPByRand picks a uniformly random in-use mq, or nil when none.
func (self *MsgQueueTCPHub) ChooseMQTCPByRand() *MsgQueueTCP {
	if !self.needUsingMqList {
		panic(fmt.Errorf("%s, needUsingMqList is off", self.name))
	}

	self.usingMqtcpsMutex.RLock()
	defer self.usingMqtcpsMutex.RUnlock()

	n := len(self.usingMqtcps)
	if n == 0 {
		return nil
	}
	return self.usingMqtcps[rand.Int31n(int32(n))].mqtcp
}
// ChooseMQTCPByAddr deterministically maps addr onto one of the in-use mqs,
// either by the port's last digit (byPort) or by hashing the IP, so that the
// same address keeps landing on the same mq. Returns nil when the list is
// empty or addr does not resolve.
func (self *MsgQueueTCPHub) ChooseMQTCPByAddr(addr string, byPort bool) *MsgQueueTCP {
	if !self.needUsingMqList {
		panic(fmt.Errorf("%s, needUsingMqList is off", self.name))
	}

	self.usingMqtcpsMutex.RLock()
	defer self.usingMqtcpsMutex.RUnlock()

	// ip, a.b.c.d;  idx = (a*65536+b*256+c)%smqCount
	count := len(self.usingMqtcps)
	if count == 0 {
		return nil
	}

	tcpaddr, err := net.ResolveTCPAddr("tcp", addr)
	if err != nil {
		return nil
	}
	var idx int
	if byPort {
		// last digit of the port, scaled into [0, count)
		idx = int((float32(tcpaddr.Port%10) / float32(10)) * float32(count))
	} else {
		/*
			Nearby regions tend to share leading IP address parts; combining
			three octets as X*65536+Y*256+Z yields a value that is mostly the
			same for players in the same area, so they land in the same L/G.
			This is meant to improve friend interaction: friends are usually in
			the same city/company/neighborhood and thus geographically close.
			NOTE(review): the code actually hashes the LAST three bytes (B.C.D
			of A.B.C.D), not A.B.C as the formula above says — kept as-is so the
			address->mq mapping stays stable across services.
		*/
		ipbytes := []byte(tcpaddr.IP)
		iplen := len(ipbytes)
		if iplen < 3 {
			// Addresses like ":8080" resolve with a nil IP; indexing ipbytes
			// below would panic with index out of range. Fall back to mq 0.
			idx = 0
		} else {
			idx = (int(ipbytes[iplen-3])*65536 + int(ipbytes[iplen-2])*256 + int(ipbytes[iplen-1])) % count
		}
	}

	return self.usingMqtcps[idx].mqtcp
}
// ChooseMQTCPByIdx returns the in-use mq at position idx, or nil when idx is
// out of range.
func (self *MsgQueueTCPHub) ChooseMQTCPByIdx(idx int) *MsgQueueTCP {
	if !self.needUsingMqList {
		panic(fmt.Errorf("%s, needUsingMqList is off", self.name))
	}

	self.usingMqtcpsMutex.RLock()
	defer self.usingMqtcpsMutex.RUnlock()

	if 0 <= idx && idx < len(self.usingMqtcps) {
		return self.usingMqtcps[idx].mqtcp
	}
	return nil
}

// GetRWRate sums read/write rates and byte counters across all in-use mqs.
// Returns zeros when the in-use list is disabled (no panic: this is called
// unconditionally, e.g. from stats reporting).
func (self *MsgQueueTCPHub) GetRWRate() (r int32, w int32, rb int64, wb int64) {
	if !self.needUsingMqList {
		// here will be called at any condition, so no panic
		return
	}

	self.usingMqtcpsMutex.RLock()
	defer self.usingMqtcpsMutex.RUnlock()

	for _, using := range self.usingMqtcps {
		rRate, wRate, rBytes, wBytes := using.mqtcp.GetRWRate()
		r += rRate
		w += wRate
		rb += rBytes
		wb += wBytes
	}
	return
}
// GetRWWaitCnt sums pending read/write counts across all in-use mqs.
// Returns zeros when the in-use list is disabled (no panic, same as GetRWRate).
func (self *MsgQueueTCPHub) GetRWWaitCnt() (r int32, w int32) {
	if !self.needUsingMqList {
		// here will be called at any condition, so no panic
		return
	}

	self.usingMqtcpsMutex.RLock()
	defer self.usingMqtcpsMutex.RUnlock()

	for _, using := range self.usingMqtcps {
		rWaitCnt, wWaitCnt := using.mqtcp.GetRWWaitCnt()
		r += rWaitCnt
		w += wWaitCnt
	}
	return
}

// useMQTCP records mqtcp as the in-use queue for raddr, replacing any entry
// already bound to that address. No-op when the in-use list is disabled.
func (self *MsgQueueTCPHub) useMQTCP(raddr string, mqtcp *MsgQueueTCP) {
	if !self.needUsingMqList {
		// here will be called at any condition, so no panic
		//panic(fmt.Errorf("%s, needUsingMqList is off", self.name))
		return
	}

	self.usingMqtcpsMutex.Lock()
	defer self.usingMqtcpsMutex.Unlock()

	// Rebind in place if this address is already tracked.
	for i := range self.usingMqtcps {
		if self.usingMqtcps[i].raddr == raddr {
			self.usingMqtcps[i].mqtcp = mqtcp
			return
		}
	}

	self.usingMqtcps = append(self.usingMqtcps, UsingMqtcp{mqtcp: mqtcp, raddr: raddr})
}
// unuseMQTCP drops every in-use entry referring to mqtcp. No-op when the
// in-use list is disabled.
//
// Uses a typed in-place filter instead of the reflection-based
// SliceMultiDelByReflect: no interface boxing per element, order preserved,
// backing array reused.
func (self *MsgQueueTCPHub) unuseMQTCP(mqtcp *MsgQueueTCP) {
	if !self.needUsingMqList {
		// here will be called at any condition, so no panic
		//panic(fmt.Errorf("%s, needUsingMqList is off", self.name))
		return
	}

	self.usingMqtcpsMutex.Lock()
	defer self.usingMqtcpsMutex.Unlock()

	kept := self.usingMqtcps[:0]
	for _, using := range self.usingMqtcps {
		if using.mqtcp != mqtcp {
			kept = append(kept, using)
		}
	}
	// Zero the freed tail so removed entries stop pinning their mq objects.
	for i := len(kept); i < len(self.usingMqtcps); i++ {
		self.usingMqtcps[i] = UsingMqtcp{}
	}
	self.usingMqtcps = kept
}

// IteratorMQTCPsAddr calls do with the remote address of every in-use mq.
// Because RLock appears re-entrant (it actually just rlocks again), do may
// take the read lock itself, but it must never take the write lock.
func (self *MsgQueueTCPHub) IteratorMQTCPsAddr(do func(addr string)) {
	if !self.needUsingMqList {
		panic(fmt.Errorf("%s, needUsingMqList is off", self.name))
	}

	if do == nil {
		return
	}

	self.usingMqtcpsMutex.RLock()
	defer self.usingMqtcpsMutex.RUnlock()

	for _, using := range self.usingMqtcps {
		do(using.raddr)
	}
}

// IteratorMQTCPs calls do with every in-use mq.
// Do not take any lock inside the callback — it runs under the read lock and
// could deadlock.
func (self *MsgQueueTCPHub) IteratorMQTCPs(do func(mqtcp *MsgQueueTCP)) {
	if !self.needUsingMqList {
		panic(fmt.Errorf("%s, needUsingMqList is off", self.name))
	}

	if do == nil {
		return
	}

	self.usingMqtcpsMutex.RLock()
	defer self.usingMqtcpsMutex.RUnlock()

	for _, using := range self.usingMqtcps {
		do(using.mqtcp)
	}
}

// getMsgQueueTCP hands out a closed (recyclable) mq from the pool, or creates
// and registers a fresh one. Callers are responsible for holding mqtcpsMutex.
func (self *MsgQueueTCPHub) getMsgQueueTCP() *MsgQueueTCP {
	// Prefer recycling an already-closed mq before allocating a new one.
	for _, mq := range self.mqtcps {
		if mq.Closed() {
			log.Info(self.name, log.I, "mqtcp[%s] reused", mq.GetName())
			return mq
		}
	}

	self.nextMqId++
	newMQName := fmt.Sprintf("%s_mq_%d", self.name, self.nextMqId)
	newMQ := New(newMQName, self.newMQParams)
	self.mqtcps[newMQName] = newMQ
	return newMQ
}

// overdueMQCheck prevents dead connections from holding resources forever
// while still giving briefly dropped connections a chance to recover: an mq
// that stays closed past its grace period (default 10 minutes) is released.
// Runs until quitSign fires.
func (self *MsgQueueTCPHub) overdueMQCheck() {
	checker := time.NewTicker(mq_overdue_check_time_interval)
	defer checker.Stop()

	for {
		select {
		case <-self.quitSign:
			return
		case <-checker.C:
			self.releaseOverdueMQs()
		}
	}
}

// releaseOverdueMQs quits and removes every overdue mq. Removing and handing
// out closed mqs share mqtcpsMutex, so an expired mq can never be re-issued.
func (self *MsgQueueTCPHub) releaseOverdueMQs() {
	self.mqtcpsMutex.Lock()
	defer self.mqtcpsMutex.Unlock()

	for name, mq := range self.mqtcps {
		if mq.Overdue() {
			mq.Quit()
			delete(self.mqtcps, name)
		}
	}
}

// StartListener opens a TCP listener on listenPort and binds every accepted
// connection to an mq from the pool. onDialAccept fires once the mq is open,
// onStopped when it stops (receiving dialerHint); validIPs restricts peers.
// Returns false if the hub has quit or a listener already exists on that port.
func (self *MsgQueueTCPHub) StartListener(
	listenPort string,
	onDialAccept func(mqtcp *MsgQueueTCP, raddr string),
	// procReceiveMsg do in one goroutine, and msg is get from bufferPool, can do big work in this func
	procReceiveMsg func(mqtcp *MsgQueueTCP, marks []uint32, msg []byte),
	onStopped func(mqtcp *MsgQueueTCP, dialerHint string),
	validIPs []string,
) bool {
	// check outside the lock for an early exit
	if atomic.LoadInt32(&self.status) == status_hub_quited {
		return false
	}

	self.listenerQuitersMutex.Lock()
	defer self.listenerQuitersMutex.Unlock()

	if self.listenerQuiters[listenPort] != nil {
		log.Error(self.name, "listener(:%s) is existed!", listenPort)
		return false
	}

	listenerQuiter, ok := StartTCPListener(self.name+"_tcp", listenPort, GlobalTcpPool,
		func(conn *net.TCPConn) {
			// Guarantees that an mq taken from the pool and opened here is not
			// handed to any other goroutine afterwards.
			self.mqtcpsMutex.Lock()
			defer self.mqtcpsMutex.Unlock()

			// re-check under the lock
			if atomic.LoadInt32(&self.status) == status_hub_quited {
				conn.Close()
				GlobalTcpPool.ReturnOne() // pair to tcpShell.go, GetOne()
				return
			}

			mqtcp := self.getMsgQueueTCP()
			if !mqtcp.Open(conn, procReceiveMsg,
				func(raddr string) {
					self.useMQTCP(raddr, mqtcp)
					if onDialAccept != nil {
						onDialAccept(mqtcp, raddr)
					}
				},
				func(mqtcp *MsgQueueTCP) {
					GlobalTcpPool.ReturnOne() // pair to tcpShell.go, GetOne()
					if onStopped != nil {
						onStopped(mqtcp, self.dialerHint) // dialerHint will never change after hubnew
					}
					self.unuseMQTCP(mqtcp)
				},
			) {
				// Open failed: return the pool token ourselves since the
				// stopped callback will never run.
				GlobalTcpPool.ReturnOne()
			}
		},
		validIPs)

	if ok {
		self.listenerQuiters[listenPort] = listenerQuiter
	}

	return ok
}
// StartDialer dials raddr and binds the resulting connection to an mq from
// the pool. Returns (nil, false) if the hub has quit, the dial fails, or the
// mq cannot be opened.
func (self *MsgQueueTCPHub) StartDialer(
	// Mapping remote IPs onto local ones must happen at the caller, not here:
	// the address data source itself must already use the local IP, otherwise
	// looking up an mq by IP later fails because the IPs differ.
	raddr string,
	// procReceiveMsg do in one goroutine, and msg is get from bufferPool, can do big work in this func
	procReceiveMsg func(mqtcp *MsgQueueTCP, marks []uint32, msg []byte),
	onStopped func(mqtcp *MsgQueueTCP),
) (mqtcp *MsgQueueTCP, ok bool) {
	// check outside the lock for an early exit
	if atomic.LoadInt32(&self.status) == status_hub_quited {
		return
	}

	conn := StartTCPDialer(self.name+"_tcp", raddr, Tcp_dial_time_out)
	if conn == nil {
		return
	}

	// Guarantees that an mq taken from the pool and opened here is not handed
	// to any other goroutine afterwards.
	self.mqtcpsMutex.Lock()
	defer self.mqtcpsMutex.Unlock()

	// re-check under the lock
	if atomic.LoadInt32(&self.status) == status_hub_quited {
		conn.Close()
		return
	}

	mqtcp = self.getMsgQueueTCP()
	ok = mqtcp.Open(conn, procReceiveMsg,
		func(raddr string) {
			self.useMQTCP(raddr, mqtcp)
		},
		func(mqtcp *MsgQueueTCP) {
			if onStopped != nil {
				onStopped(mqtcp)
			}
			self.unuseMQTCP(mqtcp)
		})

	if !ok {
		return nil, false
	}
	return mqtcp, true
}

// Quit shuts the hub down exactly once: flips status (CAS guards re-entry),
// signals every listener to stop, quits every pooled mq, and wakes the
// overdue checker. Holding mqtcpsMutex here pairs with the status re-checks
// in StartListener/StartDialer so no mq can be opened after the flip.
func (self *MsgQueueTCPHub) Quit() {
	self.mqtcpsMutex.Lock()
	defer self.mqtcpsMutex.Unlock()

	if !atomic.CompareAndSwapInt32(&self.status, status_hub_running, status_hub_quited) {
		return
	}

	self.listenerQuitersMutex.Lock()
	defer self.listenerQuitersMutex.Unlock()

	// NOTE(review): this send blocks if the quiter channel is unbuffered and
	// its listener goroutine has already exited — verify StartTCPListener's
	// channel buffering in tcpShell.
	for _, listenerQuiter := range self.listenerQuiters {
		listenerQuiter <- 1
		close(listenerQuiter)
	}

	for _, mqtcp := range self.mqtcps {
		mqtcp.Quit()
	}

	// quitSign has capacity 1, so this never blocks even if overdueMQCheck
	// was never started (CloseOverdueChecker).
	self.quitSign <- 1
	close(self.quitSign)
}

// AllClosed reports whether the hub is fully idle: no mq is in use and every
// pooled mq is closed.
func (self *MsgQueueTCPHub) AllClosed() bool {
	self.usingMqtcpsMutex.RLock()
	usingCount := len(self.usingMqtcps)
	self.usingMqtcpsMutex.RUnlock()
	if usingCount > 0 {
		return false
	}

	self.mqtcpsMutex.Lock()
	defer self.mqtcpsMutex.Unlock()

	for _, mq := range self.mqtcps {
		if !mq.Closed() {
			return false
		}
	}

	return true
}
