package mytest

import (
	//"bufio"
	//"fmt"
	kcp "github.com/xtaci/kcp-go"
	"github.com/xtaci/smux"
	"io"
	"log"
	"net"
	"os"
	"os/signal"
	"syscall"
	"time"
)

// Service bundles the proxy configuration with the shutdown signal used to
// stop its accept loop.
type Service struct {
	channel chan bool // closed (via safeStop) to make startListen return
	config  Config
}

// NewService returns a Service carrying config together with a fresh,
// open shutdown channel.
func NewService(config Config) *Service {
	return &Service{
		channel: make(chan bool),
		config:  config,
	}
}

// checkError is a no-op for a nil error; otherwise it logs the error and
// terminates the process.
func checkError(err error) {
	if err == nil {
		return
	}
	log.Printf("%+v\n", err)
	os.Exit(-1)
}

// serviceObj holds the service created by the most recent Start call;
// safeStop closes its channel to stop the listener.
var serviceObj *Service

// Start configures a proxy that listens on ipPort and tunnels to the KCP
// endpoint at remoteAddr, records it in serviceObj, and launches the accept
// loop on a background goroutine.
func Start(ipPort string, remoteAddr string) {
	config := Config{
		LocalAddr:  ipPort,
		RemoteAddr: remoteAddr,
		SockBuf:    65535,
		AutoExpire: 0,
		SnmpLog:    "",
		SnmpPeriod: 60,
	}

	serviceObj = NewService(config)

	go startListen(serviceObj)
}

// Stop asynchronously shuts down the listener started by Start by closing
// the service's shutdown channel on a background goroutine.
func Stop() {
	log.Println("Stop Listener")
	go safeStop()

}

// safeStop closes the current service's shutdown channel, which makes
// startListen return. The recover guards against a panic from a double
// close or a nil serviceObj (Stop called before Start).
func safeStop() {
	defer func() {
		if err := recover(); err != nil {
			// fixed typo: was "work faled:"
			log.Println("work failed:", err)
		}
	}()
	close(serviceObj.channel)
}

// muxSession pairs a pooled smux session with the time after which it
// should be rotated out (only enforced when config.AutoExpire > 0).
type muxSession struct {
	session *smux.Session
	ttl     time.Time
}

//var muxes []muxSession
//var listener *net.TCPListener

// startListen accepts TCP connections on config.LocalAddr and forwards each
// one through a small pool of smux sessions carried over KCP to
// config.RemoteAddr. It returns when service.channel is closed (see Stop)
// or on a fatal, non-timeout accept error. Startup failures
// (resolve/listen/first dial) terminate the process via checkError.
func startListen(service *Service) {
	config := service.config

	addr, err := net.ResolveTCPAddr("tcp", config.LocalAddr)
	checkError(err)
	listener, err := net.ListenTCP("tcp", addr)
	checkError(err)

	// "None" block crypt: the passphrase is accepted but traffic is not
	// actually encrypted.
	pass := "hello"
	var block kcp.BlockCrypt
	block, _ = kcp.NewNoneBlockCrypt([]byte(pass))

	log.Println("listening on:", listener.Addr())
	log.Println("remoteAddr:", config.RemoteAddr)

	smuxConfig := smux.DefaultConfig()
	smuxConfig.MaxReceiveBuffer = config.SockBuf

	// createConn dials a KCP connection tuned for low latency and wraps it
	// in an smux client session. Errors are returned (not fatal) so waitConn
	// can retry reconnections — the original exited the process on a dial
	// failure and unconditionally returned a nil error from smux.Client.
	createConn := func() (*smux.Session, error) {
		log.Println("CreateConn")

		kcpconn, err := kcp.DialWithOptions(config.RemoteAddr, block, 10, 3)
		if err != nil {
			return nil, err
		}
		kcpconn.SetStreamMode(true)
		// "fast2" profile: nodelay on, 20ms internal clock, fast resend
		// after 2 duplicate ACKs, congestion control disabled.
		kcpconn.SetNoDelay(1, 20, 2, 1)
		kcpconn.SetWindowSize(128, 512)
		kcpconn.SetMtu(1350)
		kcpconn.SetACKNoDelay(true)
		kcpconn.SetKeepAlive(10)
		kcpconn.SetDSCP(0)
		kcpconn.SetReadBuffer(config.SockBuf)
		kcpconn.SetWriteBuffer(config.SockBuf)
		session, err := smux.Client(kcpconn, smuxConfig)
		if err != nil {
			kcpconn.Close()
			return nil, err
		}
		return session, nil
	}

	// waitConn blocks until a session can be established, retrying once per
	// second.
	waitConn := func() *smux.Session {
		for {
			if session, err := createConn(); err == nil {
				return session
			}
			time.Sleep(time.Second)
		}
	}

	numConn := 1
	muxes := make([]muxSession, numConn)

	for k := range muxes {
		sess, err := createConn()
		checkError(err)
		muxes[k].session = sess
		muxes[k].ttl = time.Now().Add(time.Duration(0) * time.Second)
	}

	chScavenger := make(chan *smux.Session, 128)
	go scavenger(chScavenger)
	//go snmpLogger(config.SnmpLog, config.SnmpPeriod)

	rr := 0
	// Buffered per signal.Notify's requirement (go vet flags an unbuffered
	// channel). NOTE(review): the channel is never drained, so registering
	// merely suppresses default SIGINT/SIGTERM handling while this runs.
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)

	defer func() {
		log.Println("Finish ProxyServer")
		listener.Close()
		// Close every pooled session, not just index 0.
		for k := range muxes {
			muxes[k].session.Close()
		}
	}()

	for {
		// Non-blocking shutdown check; close(service.channel) makes this
		// receive succeed immediately.
		select {
		case <-service.channel:
			log.Println("stop listening on", listener.Addr())
			return
		default:
		}

		// Short accept deadline so the shutdown check above runs regularly.
		listener.SetDeadline(time.Now().Add(time.Second))
		p1, err := listener.AcceptTCP()
		if err != nil {
			log.Println(err)
			if opErr, ok := err.(*net.OpError); ok && opErr.Timeout() {
				continue
			}
			break
		}

		if err := p1.SetReadBuffer(config.SockBuf); err != nil {
			log.Println("TCP SetReadBuffer:", err)
		}

		if err := p1.SetWriteBuffer(config.SockBuf); err != nil {
			log.Println("TCP SetWriteBuffer:", err)
		}

		idx := rr % numConn

		// Reconnect a dead or expired session before handing out streams.
		// NOTE(review): per the original author's comment, reconnecting
		// mid-flight drops any application-level packets queued on the old
		// session — the scavenger only closes it once its streams drain.
		if muxes[idx].session.IsClosed() || (config.AutoExpire > 0 && time.Now().After(muxes[idx].ttl)) {
			chScavenger <- muxes[idx].session
			muxes[idx].session = waitConn()
			muxes[idx].ttl = time.Now().Add(time.Duration(config.AutoExpire) * time.Second)
		}

		go handleClient(muxes[idx].session, p1)
		rr++
	}
}

// handleClient bridges one accepted TCP connection (p1) with a fresh smux
// stream opened on sess, copying bytes in both directions until either
// direction finishes, then closes both ends.
func handleClient(sess *smux.Session, p1 io.ReadWriteCloser) {
	stream, err := sess.OpenStream()
	if err != nil {
		return
	}

	log.Println("stream opened")
	defer log.Println("stream closed")
	defer p1.Close()
	defer stream.Close()

	// Each copier signals completion on a shared buffered channel so
	// neither goroutine blocks on send.
	done := make(chan struct{}, 2)
	go func() { io.Copy(p1, stream); done <- struct{}{} }()
	go func() { io.Copy(stream, p1); done <- struct{}{} }()

	// Return as soon as either direction terminates; the deferred closes
	// unblock the other copier.
	<-done
}

// scavengeSession records a retired session together with the time it was
// handed to the scavenger, so it can be force-closed after maxScavengeTTL.
type scavengeSession struct {
	session *smux.Session
	ttl     time.Time
}

const (
	// maxScavengeTTL is the longest a retired session is kept around
	// waiting for its streams to drain before the scavenger closes it.
	maxScavengeTTL = 10 * time.Minute
)

// scavenger tracks retired smux sessions received on ch. Every 30 seconds
// it closes any tracked session that has no open streams, is already
// closed, or has been held longer than maxScavengeTTL; the rest remain
// tracked. It runs forever.
func scavenger(ch chan *smux.Session) {
	ticker := time.NewTicker(30 * time.Second)
	defer ticker.Stop()
	var tracked []scavengeSession
	for {
		select {
		case sess := <-ch:
			tracked = append(tracked, scavengeSession{session: sess, ttl: time.Now()})
		case <-ticker.C:
			// Filter in place, keeping only sessions still worth waiting on.
			kept := tracked[:0]
			for _, s := range tracked {
				dead := s.session.NumStreams() == 0 ||
					s.session.IsClosed() ||
					time.Since(s.ttl) > maxScavengeTTL
				if dead {
					log.Println("session scavenged")
					s.session.Close()
				} else {
					kept = append(kept, s)
				}
			}
			tracked = kept
		}
	}
}

/*
func main() {
	Start("127.0.0.1:9091", "127.0.0.1:9092")
	reader := bufio.NewReader(os.Stdin)
	reader.ReadString('\n')
}
*/
