package tl_serv

import (
	"context"
	"fmt"
	"io"
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/peer"
	pb_stream "io.plus/test_loss/pkg/stream" // import path as specified by option go_package in the proto file
	"io.plus/test_loss/pkg/tl_model"
)

// server implements the StreamService gRPC service.
// Incoming job-count updates are buffered on jobInfos and persisted
// asynchronously by the JobCountSaveWorker goroutine.
type server struct {
	pb_stream.UnimplementedStreamServiceServer
	// jobInfos buffers job-count updates awaiting batched persistence.
	// StreamCounts sends into it; JobCountSaveWorker drains it.
	jobInfos chan *tl_model.JobInfo
}

// JobCountSaveWorker drains s.jobInfos and persists job counts in batches.
// Updates for the same job key are coalesced in an in-memory map (last
// write wins) and flushed either once per flushInterval or after maxSquash
// received updates, whichever comes first. Each loop iteration is wrapped
// in a recover so a panic inside persistence cannot kill the worker.
// It never returns and is intended to be started once as a goroutine.
func (s *server) JobCountSaveWorker() {
	log.Printf("start JobCountSaveWorker")

	// Create the buffer only if nobody did it for us, so an initializer in
	// Run is not clobbered. NOTE(review): if RPC handlers can run before
	// this goroutine is scheduled, the channel should be created in Run
	// before Serve starts — a handler sending on a nil channel blocks forever.
	if s.jobInfos == nil {
		s.jobInfos = make(chan *tl_model.JobInfo, 10000)
	}

	// Flush after this many received updates, even between ticks.
	const maxSquash = 500
	squash := 0

	// Flush the coalesced counts once per second.
	flushInterval := 1 * time.Second
	ticker := time.NewTicker(flushInterval)
	defer ticker.Stop()

	jobCounts := make(map[string]int64)

	saveJobCounts := func() {
		for jobname, count := range jobCounts {
			err := (&tl_model.JobInfo{JobName: jobname, JobCount: count}).Save()
			if err != nil {
				log.Printf("save job info err: %v", err)
			}
		}
		jobCounts = make(map[string]int64)
		// The batch was persisted — restart the update counter. The
		// original never reset it on ticker flushes, so the batch-size
		// accounting drifted across tick-driven flushes.
		squash = 0
	}

	for {
		func() {
			// Recover so a panic (e.g. inside Save) aborts only this
			// iteration instead of the whole worker goroutine.
			defer func() {
				if r := recover(); r != nil {
					log.Printf("JobCountSaveWorker Recovered from panic: %v", r)
				}
			}()

			select {
			case jobinfo := <-s.jobInfos:
				log.Printf("out queue job info: %v  %v", jobinfo.JobName, jobinfo.JobCount)
				// Last write wins: a newer count replaces an older one for
				// the same job key until the next flush.
				jobCounts[jobinfo.JobName] = jobinfo.JobCount

				// Increment first, then compare: the original compared
				// before incrementing (`squash < max_squash`), which made
				// the flush fire on the 501st update instead of the 500th.
				squash++
				if squash >= maxSquash {
					saveJobCounts()
				}
			case <-ticker.C:
				// Nothing buffered since the last flush — skip the write.
				if len(jobCounts) == 0 {
					return
				}
				log.Printf("ticker flush job counts: %v", jobCounts)
				saveJobCounts()
			}
		}()
	}
}

// StreamCounts handles a bidirectional stream of job-count updates.
// It resolves the client host once per stream, and for every request
// builds a job key of the form "<host>_<jobid>", enqueues the count on
// s.jobInfos for batched persistence, and echoes the received count back
// to the client. It returns nil when the client closes the stream
// (io.EOF) and a non-nil error on any receive/send or peer-resolution
// failure.
func (s *server) StreamCounts(stream pb_stream.StreamService_StreamCountsServer) error {
	// Last line of defense: log instead of crashing the server if this
	// handler panics. The gRPC layer sees a nil error in that case.
	defer func() {
		if r := recover(); r != nil {
			log.Printf("StreamCounts Recovered from panic: %v", r)
		}
	}()

	// The peer is fixed for the lifetime of the stream, so resolve the
	// client address once instead of on every message (the original did
	// this inside the receive loop).
	peerInfo, ok := peer.FromContext(stream.Context())
	if !ok {
		// Was log.Panicf, which the recover above turned into a silent
		// nil return; surface a real error to the gRPC layer instead.
		log.Printf("stream peer.FromContext err")
		return fmt.Errorf("stream peer.FromContext err")
	}

	clientAddr := peerInfo.Addr.String()
	host, _, err := net.SplitHostPort(clientAddr)
	if err != nil {
		log.Printf("SplitHostPort err: %v", err)
		return fmt.Errorf("SplitHostPort err: %w", err)
	}

	log.Printf("client ip: %v", host)

	for {
		req, err := stream.Recv()
		if err == io.EOF {
			// Client finished sending; end the stream normally.
			return nil
		}
		if err != nil {
			log.Printf("Recv err: %v", err)
			return err
		}

		last_count := req.GetCount()
		job_key := fmt.Sprintf("%v_%v", host, req.Jobid)

		// Hand the update to JobCountSaveWorker; persistence is batched
		// there instead of hitting storage once per message.
		s.jobInfos <- &tl_model.JobInfo{JobName: job_key, JobCount: last_count}
		log.Printf("in queue job info: %v  %v", job_key, last_count)
		// Logged synchronously; the original spawned a goroutine whose
		// only job was this one log line.
		log.Printf("Received job count: %v  %v", job_key, last_count)

		// Echo the last received count back to the client.
		if err := stream.Send(&pb_stream.CountResponse{Result: last_count}); err != nil {
			return err
		}
	}
}

// JobMaxCount returns the last persisted count for the calling client's
// job. The record is looked up under the key "<host>_<jobid>", where host
// comes from the connection's peer address. Internal failures panic and
// are converted by the deferred recover into a response whose ErrInfo
// field carries the failure text (return_err stays nil in that case, so
// callers must check ErrInfo).
func (s *server) JobMaxCount(ctx context.Context, req *pb_stream.JobMaxCountRequest) (resp *pb_stream.JobMaxCountResponse, return_err error) {
	// Convert any panic below into an ErrInfo response instead of letting
	// it escape into the gRPC layer.
	defer func() {
		if r := recover(); r != nil {
			log.Printf("JobMaxCount Recovered from panic: %v", r)
			resp = &pb_stream.JobMaxCountResponse{ErrInfo: fmt.Sprintf("%v", r)}
		}
	}()

	peerInfo, ok := peer.FromContext(ctx)
	if !ok {
		// Recovered by the deferred handler; the client receives ErrInfo.
		log.Panicf("peer.FromContext err")
	}

	clientAddr := peerInfo.Addr.String()
	host, _, err := net.SplitHostPort(clientAddr)
	if err != nil {
		log.Panicf("SplitHostPort err: %v", err)
	}

	job_key := fmt.Sprintf("%v_%v", host, req.Jobid)
	job, err := tl_model.GetJobInfo(job_key)
	if err != nil {
		log.Panicf("GetJobInfo err: %v", err)
	}

	// Bug fix: the original re-tested the peer `ok` flag here, which is
	// always true at this point (a leftover from a map-lookup version).
	// Guard the actual failure mode instead: a nil job record means no
	// count has been recorded yet, so report 0.
	var count int64
	if job != nil {
		count = job.JobCount
	}

	resp = &pb_stream.JobMaxCountResponse{Maxcount: count}
	// Logged synchronously; the original spawned a goroutine for this.
	log.Printf("return Jobid: %v, Maxcount: %v", job_key, count)
	return
}

// Run listens on the given TCP address and serves the StreamService gRPC
// server until Serve returns; listen/serve failures call log.Fatalf and
// terminate the process. The batching persistence worker is started in
// the background before serving begins.
func Run(addr string) {
	lis, err := net.Listen("tcp", addr)
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	s := grpc.NewServer()
	// Create the job-update buffer up front so RPC handlers can never
	// observe a nil channel: sending on a nil channel blocks forever,
	// which would silently hang a handler if the worker goroutine had
	// not been scheduled yet.
	_serv := &server{jobInfos: make(chan *tl_model.JobInfo, 10000)}
	go _serv.JobCountSaveWorker()
	pb_stream.RegisterStreamServiceServer(s, _serv)
	if err := s.Serve(lis); err != nil {
		log.Fatalf("failed to serve: %v", err)
	}
}
