package comet

import (
	"context"
	log "github.com/golang/glog"
	"im_backend/api/protocol"
	"im_backend/internal/comet/conf"
	"im_backend/pkg/bufio"
	"im_backend/pkg/bytes"
	xtime "im_backend/pkg/time"
	"io"
	"net"
	"strings"
	"time"
)

const (
	// maxInt32 caps the round-robin counter used in acceptTCP; the counter
	// wraps back to 0 when it reaches this value so it stays within int32 range.
	maxInt32 = 1<<31 - 1
)

// InitTCP resolves and listens on every address in addrs, then starts
// `accept` concurrent accept-loop goroutines per listener. It returns the
// first resolve/listen error encountered; already-started listeners keep
// running in that case.
func InitTCP(server *Server, addrs []string, accept int) (err error) {
	for _, bind := range addrs {
		// bind has the form ":3101"
		var addr *net.TCPAddr
		if addr, err = net.ResolveTCPAddr("tcp", bind); err != nil {
			log.Errorf("net.ResolveTCPAddr(tcp, %s) error(%v)", bind, err)
			return
		}
		var listener *net.TCPListener
		if listener, err = net.ListenTCP("tcp", addr); err != nil {
			log.Errorf("net.ListenTCP(tcp, %s) error(%v)", bind, err)
			return
		}
		log.Infof("start tcp listen: %s", bind)
		// One accept goroutine per configured slot (typically one per CPU),
		// all accepting on the same listener.
		for i := 0; i < accept; i++ {
			go acceptTCP(server, listener)
		}
	}
	return
}

// acceptTCP accepts connections on the listener and serves requests
// for each incoming connection. It blocks; the caller invokes it in a
// go statement. r is a per-loop round-robin counter handed to serveTCP
// to spread connections across the server's timer/reader/writer pools.
func acceptTCP(server *Server, listener *net.TCPListener) {
	var (
		conn *net.TCPConn
		err  error
		r    int
	)
	for {
		// Blocks until a connection arrives; a closed listener makes
		// AcceptTCP return an error, which ends this goroutine.
		if conn, err = listener.AcceptTCP(); err != nil {
			log.Errorf("listener.Accept(\"%s\") error(%v)", listener.Addr().String(), err)
			return
		}
		// Apply per-connection socket options from the server config.
		// BUG FIX: a failure here used to `return`, which killed this whole
		// accept loop and leaked the accepted conn. It only affects this one
		// connection, so close it and keep accepting.
		if err = setConnConfig(server, conn); err != nil {
			log.Errorf("error setting TCP connection config error(%v)", err)
			conn.Close()
			continue
		}
		// One goroutine per incoming connection; which CPU it lands on is
		// up to the Go scheduler.
		go serveTCP(server, conn, r)
		// r: round-robin selector for timer/reader/writer pools.
		// TODO: if every accept goroutine's r advances in lockstep, pool
		// lock contention remains — consider a shared atomic counter.
		if r++; r == maxInt32 {
			r = 0
		}
	}
}

// setConnConfig applies the server's TCP socket options (keep-alive flag,
// receive and send buffer sizes) to a freshly accepted connection. The
// first failing setter is logged and its error returned.
func setConnConfig(server *Server, conn *net.TCPConn) (err error) {
	tcpConf := server.c.TCP
	// Keep-alive off by default: the kernel's ~2h TCP idle teardown is long
	// enough that extra keep-alive probes add little here.
	if err = conn.SetKeepAlive(tcpConf.KeepAlive); err != nil {
		log.Errorf("conn.SetKeepAlive() error(%v)", err)
		return
	}
	// Receive buffer (4KB by default). Deliberately small: a comet holds
	// many connections that each move little data, so small buffers save
	// a lot of memory in aggregate.
	if err = conn.SetReadBuffer(tcpConf.Rcvbuf); err != nil {
		log.Errorf("conn.SetReadBuffer() error(%v)", err)
		return
	}
	// Send buffer, same default size and rationale.
	if err = conn.SetWriteBuffer(tcpConf.Sndbuf); err != nil {
		log.Errorf("conn.SetWriteBuffer() error(%v)", err)
		return
	}
	return nil
}

// serveTCP drives one client connection end to end: auth handshake via
// Logic, registration into a bucket, spawning the write-side dispatch
// goroutine, and the read loop that consumes client protos until the
// connection dies. r selects round-robin timer/reader/writer pools.
func serveTCP(server *Server, conn *net.TCPConn, r int) {
	// Different connections may still land on the same timer/reader/writer;
	// round-robin only reduces lock contention, it cannot eliminate it.
	var (
		tr    = server.round.Timer(r)
		rp    = server.round.Reader(r)
		wp    = server.round.Writer(r)
		lAddr = conn.LocalAddr().String()
		rAddr = conn.RemoteAddr().String()
	)
	if conf.Conf.Debug {
		log.Infof("start tcp serve \"%s\" with \"%s\"", lAddr, rAddr)
	}

	var (
		err     error
		rid     string
		accepts []int32
		hb      time.Duration
		p       *protocol.Proto
		b       *Bucket
		trd     *xtime.TimerData
		lastHb  = time.Now()
		rb      = rp.Get()
		wb      = wp.Get()
		ch      = NewChannel(server.c.Protocol.CliProto, server.c.Protocol.SvrProto)
		rr      = &ch.Reader
		wr      = &ch.Writer
	)
	ch.Reader.ResetBuffer(conn, rb.Bytes())
	ch.Writer.ResetBuffer(conn, wb.Bytes())
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// Arm the handshake deadline in the timer wheel: if the auth proto does
	// not complete within 20s the connection is closed by the timer callback.
	trd = tr.Add(20*time.Second, func() {
		conn.Close()
		log.Errorf("key: %s remoteIP: %s tcp handshake timeout", ch.Key, conn.RemoteAddr().String())
	})
	ch.IP, _, _ = net.SplitHostPort(conn.RemoteAddr().String())
	if p, err = ch.CliProto.Set(); err == nil { // p is the ring slot the auth request is read into
		// 1. ask Logic for Mid/Key/room/accepts/heartbeat for this connection
		// 2. reply to the client over this TCP connection
		if ch.Mid, ch.Key, rid, accepts, hb, err = server.authTCP(ctx, rr, wr, p); err == nil {
			// Subscribe this channel to the operations it wants to receive.
			ch.Watch(accepts...)
			b = server.Bucket(ch.Key)
			// The bucket keeps maps rid --> *Room and Key --> *Channel;
			// a Room links all of its channels in a doubly linked list
			// (head insertion).
			err = b.Put(rid, ch)
			if conf.Conf.Debug {
				log.Infof("tcp connected key:%s mid:%d proto:%+v", ch.Key, ch.Mid, p)
			}
		}
	}
	// Handshake failed: release every resource taken above and bail out.
	if err != nil {
		conn.Close()
		rp.Put(rb)
		wp.Put(wb)
		tr.Del(trd)
		log.Errorf("key: %s handshake failed error(%v)", ch.Key, err)
		return
	}
	trd.Key = ch.Key
	// Re-arm the timer as the heartbeat deadline; missing it closes the conn.
	// TODO(review): the deadline is a fixed 5 minutes while Logic returned
	// hb — confirm whether hb is meant to be used here.
	tr.Set(trd, 5*time.Minute)
	// BUG FIX: this is informational, not an error, and %d printed the
	// Duration as raw nanoseconds — use Infof with %v.
	log.Infof("timeout reset to: %v", hb)

	// Write side runs in its own goroutine; this goroutine is the read side.
	go server.dispatchTCP(conn, wr, wp, wb, ch)
	serverHeartbeat := server.RandServerHeartbeat() // 10min ~ 30min random
	for {
		if p, err = ch.CliProto.Set(); err != nil {
			break
		}
		if err = p.ReadTCP(rr); err != nil {
			break
		}
		if p.Op == protocol.OpHeartbeat {
			// Client heartbeat: push the conn deadline forward and queue a reply.
			tr.Set(trd, 5*time.Minute)
			p.Op = protocol.OpHeartbeatReply
			p.Body = nil
			// If more than serverHeartbeat has passed since the last renewal,
			// refresh the session's liveness upstream (e.g. in redis).
			if now := time.Now(); now.Sub(lastHb) > serverHeartbeat {
				if err1 := server.Heartbeat(ctx, ch.Mid, ch.Key); err1 == nil {
					lastHb = now
				}
			}
			if conf.Conf.Debug {
				log.Infof("tcp heartbeat receive key:%s, mid:%d", ch.Key, ch.Mid)
			}
		} else {
			// Any other op is handed to the business operation handler.
			if err = server.Operate(ctx, p, ch, b); err != nil {
				break
			}
		}

		// Publish the proto to the ring and wake the dispatch goroutine.
		ch.CliProto.SetAdv()
		ch.Signal()
	}
	// EOF / "closed" errors are the normal disconnect path; don't log those.
	if err != nil && err != io.EOF && !strings.Contains(err.Error(), "closed") {
		log.Errorf("key: %s server tcp failed error(%v)", ch.Key, err)
	}
	// Teardown: unregister, cancel the timer, return the read buffer, close
	// conn and channel (the dispatch goroutine returns the write buffer).
	b.Del(ch)
	tr.Del(trd)
	rp.Put(rb)
	conn.Close()
	ch.Close()
	if err = server.Disconnect(ctx, ch.Mid, ch.Key); err != nil {
		log.Errorf("key: %s mid: %d operator do disconnect error(%v)", ch.Key, ch.Mid, err)
	}
	if conf.Conf.Debug {
		log.Infof("tcp disconnected key: %s mid: %d", ch.Key, ch.Mid)
	}
}

// dispatchTCP is the write side of one connection. It blocks on ch.Ready()
// and, per wakeup, either exits (ProtoFinish), drains the client proto ring
// (ProtoReady), or writes a server-pushed proto directly, flushing once per
// wakeup. It blocks; serveTCP invokes it in a go statement.
func (s *Server) dispatchTCP(conn *net.TCPConn, wr *bufio.Writer, wp *bytes.Pool, wb *bytes.Buffer, ch *Channel) {
	var (
		err    error
		finish bool
		online int32
	)
	if conf.Conf.Debug {
		log.Infof("key: %s start dispatch tcp goroutine", ch.Key)
	}
	for {
		var p = ch.Ready() // code blocks here if no signal arrives
		if conf.Conf.Debug {
			log.Infof("key:%s dispatch msg:%v", ch.Key, *p)
		}
		// Sentinel protos are compared by pointer identity.
		switch p {
		case protocol.ProtoFinish:
			// Reader side is shutting the channel down; exit cleanly.
			if conf.Conf.Debug {
				log.Infof("key: %s wakeup exit dispatch goroutine", ch.Key)
			}
			finish = true
			goto failed
		case protocol.ProtoReady:
			// fetch message from svrbox(client send)
			// Drain every proto the reader queued in the CliProto ring.
			for {
				if p, err = ch.CliProto.Get(); err != nil {
					break // ring empty — wait for the next signal
				}
				if p.Op == protocol.OpHeartbeatReply {
					// Heartbeat replies carry the room's online count when known.
					if ch.Room != nil {
						online = ch.Room.OnlineNum()
					}
					if err = p.WriteTCPHeart(wr, online); err != nil {
						goto failed
					}
				} else {
					if err = p.WriteTCP(wr); err != nil {
						goto failed
					}
				}
				p.Body = nil // avoid memory leak
				ch.CliProto.GetAdv()
			}
		default:
			// server send
			// A real proto pushed by the server: write it straight out.
			if err = p.WriteTCP(wr); err != nil {
				goto failed
			}
			if conf.Conf.Debug {
				log.Infof("tcp sent a message key:%s mid:%d proto:%+v", ch.Key, ch.Mid, p)
			}
		}
		// only hungry flush response
		if err = wr.Flush(); err != nil {
			break
		}
	}
failed:
	if err != nil {
		log.Errorf("key: %s dispatch tcp error(%v)", ch.Key, err)
	}
	// Close the conn (unblocks the reader) and return the write buffer.
	conn.Close()
	wp.Put(wb)
	// must ensure all channel message discard, for reader won't blocking Signal
	for !finish {
		finish = ch.Ready() == protocol.ProtoFinish
	}
	if conf.Conf.Debug {
		log.Infof("key: %s dispatch goroutine exit", ch.Key)
	}
}

// authTCP performs the handshake with a client: it reads protos until an
// OpAuth arrives, asks Logic (s.Connect) to authenticate the connection,
// then replies with OpAuthReply and flushes the writer. It returns the
// member id, session key, room id, watched operations, and heartbeat
// interval supplied by Logic.
// NOTE(review): the original comment claimed "use rsa & aes", but no crypto
// is visible here — confirm against the protocol layer.
func (s *Server) authTCP(ctx context.Context, rr *bufio.Reader, wr *bufio.Writer, p *protocol.Proto) (
	mid int64, key, rid string, accepts []int32, hb time.Duration, err error) {
	// Discard (but log) any proto that is not the auth request.
	for {
		if err = p.ReadTCP(rr); err != nil {
			return
		}
		if p.Op == protocol.OpAuth {
			break
		}
		log.Errorf("tcp request operation(%d) not auth", p.Op)
	}
	if mid, key, rid, accepts, hb, err = s.Connect(ctx, p, ""); err != nil {
		log.Errorf("authTCP.Connect(key:%v).err(%v)", key, err)
		return
	}
	// BUG FIX: removed a leftover ">>>> DEBUG <<<<" log that dumped the raw
	// auth body — it could leak credentials/tokens into the logs.
	p.Op = protocol.OpAuthReply
	p.Body = nil
	if err = p.WriteTCP(wr); err != nil {
		log.Errorf("authTCP.WriteTCP(key:%v).err(%v)", key, err)
		return
	}
	err = wr.Flush()
	return
}
