package bak

import (
	"net/http"
	"strings"
	"strconv"
	"gateway/conf"
	"gateway/lib"
	"github.com/funlake/gopkg/utils"
	"github.com/funlake/gopkg/utils/log"
	"github.com/funlake/gopkg/jobworker"
	"github.com/valyala/fasthttp"
	"os"
	"time"
	"errors"
	"bytes"
	//"golang.org/x/net/lex/httplex"
	"github.com/funlake/gopkg/breaker"
	"bufio"
	"fmt"
	"github.com/json-iterator/go"
)
var (
	// dispatcher is the shared non-blocking worker-pool dispatcher,
	// (re)initialised by NewRequest.
	dispatcher *jobworker.NonBlockingDispatcher
	// globalTransport is the single upstream http.Transport shared by all
	// proxy jobs so idle connections can be pooled per host.
	globalTransport = &http.Transport{
		DisableKeepAlives : false,
		MaxIdleConnsPerHost : conf.GetConfig().GetEnvInt("connection_per_host"),
		// Do not set the timeouts below too short, or too many connections
		// pile up in CLOSE_WAIT: this side tears down while in CLOSE_WAIT
		// without ever sending the final FIN to the peer, leaving the
		// connection stuck in CLOSE_WAIT.
		//TLSHandshakeTimeout:time.Millisecond * 100,
		//DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
		//
		//	return net.DialTimeout(network,addr,time.Duration(time.Second * 3))
		//},
	}
	// fasthttpClient is not referenced by any code in view.
	// NOTE(review): confirm it is used elsewhere before removing.
	fasthttpClient = & fasthttp.Client{}
	// maxWorker and queueSize are populated from the environment in
	// NewRequest (defaults 150 / 500).
	maxWorker int
	queueSize int
)

// Request executes proxied upstream HTTP requests for the gateway.
type Request struct {
	// timeout is read from the "upstream_timeout_second" config entry.
	// It is only stored here; the code that consumed it for fast-return
	// timeouts is currently commented out in Do.
	timeout int
}
// response carries the outcome of one proxied request across the internal
// result channel used by Do.
type response struct{
	code int // upstream HTTP status code, or a gateway conf.REQUEST_* error code
	body string // response body (empty on error)
	err  error // non-nil when the request failed or the breaker rejected it
	dur  time.Duration // upstream round-trip duration (0 when not measured)
}
// circuitbreaker holds per-service breaker settings decoded from the cached
// JSON blob, which uses single-letter keys (t/r/w/m) to stay compact.
// Units are defined by the breaker package — presumably seconds for
// Timeout/Window and percent for Rate; confirm against breaker.Breaker.
type circuitbreaker struct{
	Timeout int `json:"t"` // breaker timeout
	Rate int `json:"r"` // failure-rate threshold before tripping
	Window int `json:"w"` // sliding-window size
	Min int `json:"m"` // minimum request count before the breaker may trip
}
// NewRequest builds the proxy request executor. It sizes and starts the
// shared job dispatcher from the environment (GATEWAY_WORKER_SIZE and
// GATEWAY_JOBQUEUE_SIZE, defaulting to 150 workers and a 500-slot queue)
// and loads the upstream timeout from config.
func NewRequest() *Request {
	// Parse a positive integer from the environment, falling back when the
	// variable is unset, non-numeric, or zero (Atoi yields 0 on error).
	envInt := func(name string, fallback int) int {
		if v, _ := strconv.Atoi(os.Getenv(name)); v != 0 {
			return v
		}
		return fallback
	}
	maxWorker = envInt("GATEWAY_WORKER_SIZE", 150)
	queueSize = envInt("GATEWAY_JOBQUEUE_SIZE", 500)
	dispatcher = jobworker.NewNonBlockingDispather(maxWorker, queueSize)
	log.Success("Worker number:%d,Queue size:%d", maxWorker, queueSize)
	return &Request{
		timeout: conf.GetConfig().GetEnvInt("upstream_timeout_second"),
	}
}
// ProxyHttpRequest executes the proxied request and maps the result into the
// (ok, status code, message/body, duration) tuple expected by the caller.
//
// On a transport/breaker error it returns the error text; on a non-200
// upstream status it returns the message configured for that code under the
// "status" config section (empty when no mapping exists); on success it
// returns the upstream body.
func (this *Request)ProxyHttpRequest(proxy lib.PassProxy,ctx *fasthttp.RequestCtx) (flag bool,code int,msg string,dur time.Duration){
	r := this.Do(proxy, ctx)
	if r.err != nil {
		return false, r.code, r.err.Error(), r.dur
	}
	if r.code != 200 {
		// Look up the human-readable message for this status code; the
		// lookup error is deliberately ignored — "" is the fallback.
		sv, _ := conf.GetConfig().Get("status").GetString(strconv.Itoa(r.code), "")
		// Fix: this branch previously returned dur=0; report the real
		// upstream duration so non-200 responses are timed consistently
		// with the error and success paths.
		return false, r.code, sv, r.dur
	}
	return true, 200, r.body, r.dur
}
//func (this *Request) Do(passproxy *lib.PassProxy,ctx *fasthttp.RequestCtx) (resp jobworker.HttpProxyJobResponse) {
// Do dispatches the proxied request through the worker pool, guarded by a
// per-route circuit breaker, and blocks until exactly one response value
// arrives on the internal result channel — whether from the worker, the
// queue-full fast path, or a breaker rejection.
func (this *Request) Do(passproxy lib.PassProxy,ctx *fasthttp.RequestCtx) (resp response) {
	serviceUrl := passproxy.Scheme+"://"+passproxy.Vhost+passproxy.Domain+"/"+passproxy.Service
	qs := passproxy.QueryString
	// Carry the original query string (everything after "?") over to the
	// upstream URL.
	if strings.Contains(qs,"?") {
		qss := strings.Split(qs, "?")
		if len(qss) > 1 {
			serviceUrl = serviceUrl + "?" + qss[1]
			//u, err := url.ParseQuery(qss[1])
			//if err != nil {
			//	//fmt.Println(err)
			//	log.Warning("%s",err.Error())
			//}
			//q := req.URL.Query()
			//for k, v := range u {
			//	q.Add(k, v[0])
			//}
			//req.URL.RawQuery = q.Encode()
		}
	}

	req, _ := http.NewRequest(passproxy.Method, serviceUrl , strings.NewReader(utils.ByteToStr(passproxy.PostData)))
	//req := fasthttp.AcquireRequest()
	// Copy the incoming headers onto the upstream request.
	passproxy.Header.VisitAll(func(key, value []byte) {
		val :=  utils.ByteToStr(value)
		// Validate that the header value is legal; an illegal value would
		// make the upstream request panic.
		if ValidHeaderFieldValue(val){
			req.Header.Set(utils.ByteToStr(key),val)
		}
	})
	req.Header.Set("Proxy-Service",serviceUrl)
	// The http package adds its own compression request header; by clearing
	// it here, the proxied response body would otherwise come back compressed.
	req.Header.Set("Accept-Encoding","")
	cipher := os.Getenv("GATEWAY_CIPHER")
	if cipher == ""{
		cipher = "g-w2&d0yx1j7"
	}
	req.Header.Set("Etccb-Gwcipher",cipher)

	job := jobworker.NewHttpProxyJob(globalTransport,req,maxWorker,passproxy.Mapkey)
	//job := worker.httpProxyJob{req,make(chan worker.RequestResponse),passproxy.Mapkey}
	now := time.Now()
	brek := breaker.NewBreaker(passproxy.Mapkey)
	cbsetting := getCircuitBreaker(passproxy.Mapkey)
	brek.SetTimemout(cbsetting.Timeout)
	brek.SetRate(cbsetting.Rate)
	brek.SetWindow(cbsetting.Window)
	brek.SetMin(cbsetting.Min)
	// rt is unbuffered: the function returns on the first value received.
	rt   := make(chan response)
	// First (at most) 20 bytes of the POST body, used only for logging.
	postshow := passproxy.PostData[:min(len(passproxy.PostData),20)]
	//var r   jobworker.HttpProxyJobResponse
	// brek.Run takes three callbacks: the work function, a success hook,
	// and a failure hook (run=true means the work ran but timed out,
	// run=false means the breaker is open and the work never ran).
	// NOTE(review): exact callback semantics come from the breaker package —
	// confirm there.
	brek.Run(func() {
		if !dispatcher.Put(job) {
			rt <- response{503,"",errors.New("服务繁忙,请求队列已满"),0}
			return
		}
		// Blocks until the worker finishes the upstream round trip.
		r := <- job.GetResChan()
		if r.Error == nil {
			buf := new(bytes.Buffer)
			buf.ReadFrom(r.Response.Body)
			utils.WrapGo(func() {
				select {
				case rt <- response{r.Response.StatusCode, buf.String(), nil, r.Dur}:
					for hk, hv := range r.Response.Header {
						ctx.Response.Header.Set(hk, strings.Join(hv, ";"))
					}
				// If nobody receives within 10ms the caller has already
				// been answered (e.g. by the breaker); just log and drop.
				case <- time.After(time.Millisecond * 10):
					log.Warning("%s,%s",readHeader(ctx.Request.Header.String()+fmt.Sprintf("Response-Time: %s",time.Since(now))+" Proxy-Upstream: "+passproxy.Mapkey),postshow)
					//log.Info("%s -> 超时请求时间消耗 : %s", job.Id(), time.Since(now))
				}
				// Close the body so the transport can reuse the connection
				// (also the CLOSE_WAIT fix described at the bottom).
				if r.Error == nil {
					r.Response.Body.Close()
				}
			}, "response_set")
		}else{
			rt <- response{conf.REQUEST_TIMEOUT,"",r.Error,0}
		}
		//}, "http-request-release")
	}, func() {
		//rt <- r
		log.Success("%s,%s",readHeader(ctx.Request.Header.String()+fmt.Sprintf("Response-Time: %s",time.Since(now))+" Proxy-Upstream: "+passproxy.Mapkey),postshow)
		//log.Info("%s -> 请求时间消耗 : %s",job.Id(),time.Since(now))
		//job.Release()
	}, func(run bool) {
		if run {
			rt <- response{conf.REQUEST_TIMEOUT,"",errors.New("[cb]服务超时"),0}
		}else{
			rt <- response{conf.REQUEST_SERVICE_TEMPORARY_DOWN,"",errors.New("[cb]服务当前不可用,请稍后重试"),0}
		}
	})
	return <-rt
	//if (dispatcher.Put(job)){
	//	//rctx,requestCancel := context.WithCancel(context.Background())
	//	//defer requestCancel()
	//	//job.Q.WithContext(rctx)
	//	select {
	//	//fast-return path
	//	case <-time.After(time.Second * time.Duration(this.timeout)):
	//		//can we stop the request immediately?
	//		utils.WrapGo(func() {
	//			//Release the worker after a timeout, otherwise job.R is never consumed
	//			//and under high concurrency (with every request exceeding the timeout)
	//			//the pool fills with workers stuck on unconsumed job.R, starving later requests.
	//			r := <-job.GetResChan()
	//			//This is the real culprit of CLOSE_WAIT: tearing down TCP takes 4 steps, ESTABLISHED -> receive FIN (step 1) -> send ACK (step 2); normally we then send our own FIN and wait for the peer's ACK
	//			//but without calling Close the FIN is never sent and the state stays in CLOSE_WAIT; only restarting the service resets it
	//			//over time this uselessly ties up a large number of TCP connections and ports
	//			if  r.Error == nil {
	//				r.response.Body.Close()
	//			}
	//			log.Info("%s -> 请求时间消耗 : %s",job.Id(),time.Since(now))
	//			job.Release()
	//		},"http-request-timeout")
	//		return jobworker.HttpProxyJobResponse{nil, errors.New(fmt.Sprintf("超时%d秒", this.timeout)), 0}
	//	case r := <-job.GetResChan():
	//		log.Info("%s -> 请求时间消耗 : %s",job.Id(),time.Since(now))
	//		job.Release()
	//		return r
	//	}
	//} else{
	//	return jobworker.HttpProxyJobResponse{nil, errors.New("队列已满"),0}
	//}
}

// PassHeaders copies every header from the upstream *http.Response onto the
// outgoing fasthttp response, joining multi-valued headers with ";".
func (this *Request) PassHeaders(resp *http.Response,ctx *fasthttp.RequestCtx){
	for name, values := range resp.Header {
		joined := strings.Join(values, ";")
		ctx.Response.Header.Set(name, joined)
	}
}

// ValidHeaderFieldValue reports whether every byte of v may appear in an
// HTTP header value as this gateway defines it: control characters, DEL,
// and ';' are rejected unless they are linear whitespace (space or tab).
func ValidHeaderFieldValue(v string) bool {
	for _, b := range []byte(v) {
		if isCTL(b) && !isLWS(b) {
			return false
		}
	}
	return true
}

// isCTL reports whether b is a control character, DEL (0x7f), or ';'.
// The semicolon is treated as invalid here because this gateway joins
// multi-valued headers with ';' (see PassHeaders).
func isCTL(b byte) bool {
	switch {
	case b == ';':
		return true
	case b == 0x7f: // DEL is a CTL
		return true
	default:
		return b < ' '
	}
}

// isLWS reports whether b is linear whitespace (space or horizontal tab).
func isLWS(b byte) bool {
	return b == ' ' || b == '\t'
}
// readHeader splits a raw header blob into its individual lines
// (CR/LF line endings are stripped by the scanner).
//
// Bug fix: the previous version appended scanner.Bytes() directly, but
// bufio.Scanner documents that the slice returned by Bytes may be
// overwritten by the next call to Scan — so accumulated lines could end up
// aliasing stale buffer contents. Text() stores an independent copy of
// each line instead.
func readHeader(header string) [][]byte {
	scanner := bufio.NewScanner(strings.NewReader(header))
	var lines [][]byte
	for scanner.Scan() {
		lines = append(lines, []byte(scanner.Text()))
	}
	return lines
}

// min returns the smaller of x and y.
func min(x, y int) int {
	if y < x {
		return y
	}
	return x
}

// getCircuitBreaker loads the per-service circuit breaker settings from the
// cache, falling back to defaults (Timeout 3600, Rate 50, Window 30, Min 5;
// units per the breaker package) when the setting is missing or malformed.
func getCircuitBreaker(service string) *circuitbreaker{
	rcb := &circuitbreaker{
		Timeout: 3600,
		Rate: 50,
		Window: 30,
		Min: 5,
	}
	cache := lib.NewCache()
	cb, err := cache.GetCircuitBreakerSetting(service)
	if err != nil {
		// No setting stored for this service — use the defaults.
		//log.Error(err.Error()+service)
		return rcb
	}
	// Fix: the unmarshal error was previously ignored, so malformed JSON
	// could leave rcb partially overwritten. Decode into a copy of the
	// defaults (preserving the merge-over-defaults behavior for fields the
	// JSON omits) and only adopt it on success.
	merged := *rcb
	if err := jsoniter.ConfigCompatibleWithStandardLibrary.Unmarshal([]byte(cb), &merged); err != nil {
		return rcb
	}
	return &merged
}