package server

import (
	"epg/conf"
	"epg/jmconf"
	"epg/privilege"
	"epg/stats"
	"epg/utils"
	"fmt"
	"net"
	"os"
	"os/signal"
	"path/filepath"
	"sync"
	"syscall"
	"time"

	"github.com/zeast/logs"
)

//server signal and wait group
var (
	// wg tracks in-flight client sessions (see onConn); shutdown drains it.
	wg  sync.WaitGroup
	// die is closed on SIGINT/SIGTERM (see handleSignal) to signal shutdown.
	die = make(chan struct{})
	// halt       sync.RWMutex //the global lock for program
	// NOTE(review): listenHost is not referenced in this chunk — Run listens
	// on jmconf.Cfg.Listen instead. Verify it is used elsewhere before removing.
	listenHost = ":6603"
)

//SlowQuery
var (
	// Server  *server
	// SlowLog is the global slow-query logger, initialized in New.
	SlowLog *slowLog
)

//Server the server. It owns the active configuration; the embedded
//mutex guards cfg against concurrent swaps during reload (parseConfig).
type Server struct {
	sync.Mutex
	// cfg is the currently active configuration; the previous one is
	// closed when a new config is swapped in.
	cfg *conf.Config
}

//New builds a Server: loads the configuration, initializes the auth
//checker, the per-user connection counter manager, and the slow-query
//log. Any initialization failure is fatal and terminates the process.
func New() *Server {
	s := new(Server)
	logs.Debug("开始初始化配置文件")
	if err := s.parseConfig(); err != nil {
		logs.Fatalf("Parse Config err: %v", err)
	}
	logs.Debug("配置文件初始化成功")

	//Checker: validates client credentials against the auth DSN.
	if err := privilege.InitAuthChecker(s.cfg.Proxy.AuthDSN); err != nil {
		logs.Fatal(err)
	}

	//set counter; the checker notifies the manager on user-list changes.
	InitCountMgr(privilege.AuthChecker.UserList(), s.cfg.Proxy.Backlog)
	privilege.AuthChecker.SetNofity(CountMgr)

	//slowquery log. logs.Fatal for consistency with the other fatal
	//init paths above (was fmt.Println + os.Exit(-1)).
	logFile := filepath.Join(jmconf.Cfg.LogPath, "slowquery.log")
	sl, err := newSlowLog(s.cfg.Proxy.SlowQuery, logFile)
	if err != nil {
		logs.Fatal(err)
	}
	SlowLog = sl

	return s
}

//parseConfig builds a fresh conf.Config from the distributed config,
//registers the node-change callbacks, runs it, and installs it as the
//server's active configuration, closing the previous one if present.
func (s *Server) parseConfig() error {
	fresh := conf.NewConfig(&jmconf.Cfg.DisCfg)

	//callbacks must be registered before the config is run.
	fresh.RegistNodesHandle(jmconf.OPAdd, NodeMgr.AddNode)
	fresh.RegistNodesHandle(jmconf.OPDelete, NodeMgr.RemoveNode)
	fresh.RegistNodesHandle(jmconf.OPModify, NodeMgr.ModifyNode)
	if err := fresh.Run(); err != nil {
		return err
	}

	//swap in the new config under the lock, releasing the old one.
	s.Lock()
	defer s.Unlock()
	if old := s.cfg; old != nil {
		logs.Debug("close old config ...")
		old.Close()
	}
	s.cfg = fresh
	return nil
}

//Run resolves and listens on the configured TCP address, then accepts
//client connections until the die channel is closed. It only returns
//on a listen/resolve error; on the shutdown path it blocks forever —
//handleSignal terminates the process after draining the wait group.
func (s *Server) Run() error {
	addr, err := net.ResolveTCPAddr("tcp", jmconf.Cfg.Listen)
	if err != nil {
		return err
	}
	listen, err := net.ListenTCP("tcp4", addr)
	if err != nil {
		return err
	}

	logs.Infof("Server Running on addr: %v", addr.String())

	//handle sys signal and periodic stats in the background
	go s.handleSignal()
	go s.statsTask()

	//main loop accept connection
LOOP:
	for {
		conn, err := listen.AcceptTCP()
		if err != nil {
			// During shutdown the listener may be closed and Accept
			// returns a permanent error; stop instead of spinning in a
			// hot log-and-continue loop.
			select {
			case <-die:
				break LOOP
			default:
			}
			logs.Error(err)
			continue
		}
		select {
		case <-die:
			// Shutdown raced with this accept: close the connection so
			// the fd is not leaked, then stop listening.
			conn.Close()
			listen.Close()
			break LOOP
		default:
		}
		go s.onConn(conn)
	}
	// Block forever; handleSignal exits the process once all clients
	// have finished. (Replaces the previous 1-second time.After busy
	// loop, which allocated a fresh timer every iteration.)
	select {}
}

//onConn configures a freshly accepted TCP connection and runs a client
//session on it. Each session registers with the global wait group so
//shutdown can drain in-flight clients.
func (s *Server) onConn(conn *net.TCPConn) {
	defer utils.PrintPanicStack()
	// NOTE(review): wg.Add runs inside this goroutine, so a shutdown's
	// wg.Wait may race past a connection that was accepted but not yet
	// counted. Consider moving wg.Add(1) to the caller, before the
	// `go s.onConn(conn)` statement.
	wg.Add(1)
	defer wg.Done()

	//set conn socket options; return values deliberately ignored
	//(best-effort tuning).
	conn.SetReadBuffer(rcvBuffer)
	conn.SetWriteBuffer(sndBuffer)
	conn.SetKeepAlive(true)

	//new Client session; Run blocks until the client disconnects.
	client := newClient(conn)
	client.Run()
}

//statsTask reports the current per-user connection counts to the stats
//backend every 30 seconds until the server shuts down.
func (s *Server) statsTask() {
	defer utils.PrintPanicStack()
	// One reusable ticker instead of allocating a fresh time.After
	// timer on every loop iteration.
	ticker := time.NewTicker(30 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-die:
			return
		case <-ticker.C:
			for user, counter := range CountMgr.GetCounters() {
				if user == "" || counter.Size() == 0 {
					continue
				}
				//stats: gauge of the user's current connection count
				stats.Stater.Gauge(fmt.Sprintf("users.%s.cur", user), uint64(counter.Size()))
			}
		}
	}
}

//handleSignal blocks on SIGHUP/SIGINT/SIGTERM. SIGHUP reloads the
//auth checker and the configuration; SIGINT/SIGTERM close the die
//channel, wait for in-flight clients, and terminate the process.
func (s *Server) handleSignal() {
	defer utils.PrintPanicStack()
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)

	for sig := range sigCh {
		switch sig {
		case syscall.SIGHUP:
			logs.Infof("Got signal [%v] to reload.", sig)
			privilege.AuthChecker.Reload()
			//reset the log level
			if err := jmconf.ReloadConfig(); err != nil {
				logs.Error(err)
				continue
			}
			//logs.SetLvl(jmconf.Cfg.LogLvl)
			logs.Info("EPGProxy Server reload all!")
		case syscall.SIGINT, syscall.SIGTERM:
			logs.Warnf("Got signal [%v] to exit.", sig)
			close(die)
			logs.Info("Waiting for client close, please wait...")
			logs.Info("EPGProxy Server will shutdown, byebye!")
			//wait for all in-flight client handlers to finish
			wg.Wait()
			os.Exit(0)
		}
	}
}
