package main

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"math/rand"
	"net/http"
	"strconv"
	"sync"
	"time"

	"github.com/sirupsen/logrus"
)

// SnailJobRequest is the envelope sent to the snail-job server: a request
// id used to correlate the reply, plus an opaque argument payload.
type SnailJobRequest struct {
	ReqID int64       `json:"reqId"`
	Args  interface{} `json:"args"`
}

// NettyResult is the server's reply envelope; ReqID echoes the request id
// so callers can verify the response matches their request.
type NettyResult struct {
	ReqID  int64       `json:"reqId"`
	Status StatusEnum  `json:"status"`
	Data   interface{} `json:"data"`
}

// StatusEnum is the success/failure flag used in both directions of the
// client/server protocol.
type StatusEnum int

const (
	STATUS_FAILED  StatusEnum = 0
	STATUS_SUCCESS StatusEnum = 1
)

// key is a private context-key type so values this package stores in a
// context.Context cannot collide with keys from other packages.
type key string

const (
	// EXECUTOR_MANAGER_KEY is the request-context key under which RunServer
	// stores the shared *ExecutorManager for the HTTP handlers.
	EXECUTOR_MANAGER_KEY key = "executor_manager"
)

// GenerateHostId returns a host identifier of the form "go-" followed by
// random decimal digits, length characters in total. Lengths of 3 or less
// (including negative values) yield just the "go-" prefix, matching the
// previous behaviour. A preallocated byte buffer replaces the original
// quadratic string concatenation in the loop.
func GenerateHostId(length int) string {
	capacity := length
	if capacity < 3 {
		capacity = 3
	}
	buf := make([]byte, 0, capacity)
	buf = append(buf, "go-"...)
	for i := 0; i < length-3; i++ {
		buf = append(buf, strconv.Itoa(rand.Intn(10))...)
	}
	return string(buf)
}

// Server endpoint, advertised client identity, and logging knobs.
// NOTE(review): these are hard-coded demo values (including namespace and
// group); consider sourcing them from configuration or the environment.
const (
	SNAIL_SERVER_HOST = "127.0.0.1"
	SNAIL_SERVER_PORT = "1788"
	SNAIL_HOST_IP     = "127.0.0.1"
	SNAIL_HOST_PORT   = "1789"
	SNAIL_NAMESPACE   = "764d604ec6fc45f68cd92514c40e9e1a"
	SNAIL_GROUP_NAME  = "snail_job_demo_group"

	SNAIL_LOG_LOCAL_FILENAME     = "snail_job.log"
	// Remote-log batching: flush after this many buffered records, or when
	// the interval elapses. The interval constant is unit-less here —
	// presumably seconds; confirm against the consumer, which is not
	// visible in this file.
	SNAIL_LOG_REMOTE_BUFFER_SIZE = 10
	SNAIL_LOG_REMOTE_INTERVAL    = 10
)

// HEADERS carries the identification headers attached to every outbound
// request to the snail-job server (host identity, namespace, group, auth
// token). host-id is randomized once at process start-up.
// NOTE(review): the token is hard-coded in source; it should come from
// configuration or the environment rather than being committed.
var HEADERS = map[string]string{
	"Content-Type": "application/json",
	"host-id":      GenerateHostId(20),
	"host-ip":      SNAIL_HOST_IP,
	"version":      "1.0.0",
	"host-port":    SNAIL_HOST_PORT,
	"namespace":    SNAIL_NAMESPACE,
	"group-name":   SNAIL_GROUP_NAME,
	"token":        "SJ_Wyz3dmsdbDOkDujOTSSoBjGQP1BMsVnj",
}

// SendHeartbeat loops forever, POSTing a heartbeat to the server's /beat
// endpoint every 30 seconds. It never returns; run it in its own goroutine.
// Fixes over the original: the response body is now closed (it leaked a
// connection per beat), the client has a timeout so a hung server cannot
// stall the loop, and a marshal failure no longer busy-loops without
// sleeping (the old code hit `continue` before any time.Sleep).
func SendHeartbeat() {
	const interval = 30 * time.Second
	for {
		sendHeartbeatOnce()
		time.Sleep(interval)
	}
}

// sendHeartbeatOnce performs a single heartbeat round-trip and logs the
// outcome; every failure is logged and swallowed so the loop keeps running.
func sendHeartbeatOnce() {
	request := SnailJobRequest{ReqID: GenerateReqID()}

	requestBody, err := json.Marshal(request)
	if err != nil {
		LocalLog.Printf("Failed to marshal request: %v", err)
		return
	}

	url := fmt.Sprintf("http://%s:%s/%s", SNAIL_SERVER_HOST, SNAIL_SERVER_PORT, "beat")
	req, err := http.NewRequest("POST", url, bytes.NewBuffer(requestBody))
	if err != nil {
		LocalLog.Printf("Failed to build heartbeat request: %v", err)
		return
	}
	for key, value := range HEADERS {
		req.Header.Set(key, value)
	}

	// Bounded client: without a timeout a dead server blocks this goroutine
	// indefinitely.
	client := &http.Client{Timeout: 10 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		LocalLog.Printf("Failed to send heartbeat: %v", err)
		return
	}
	defer resp.Body.Close() // previously leaked; also required for connection reuse

	var serverResponse NettyResult
	if err := json.NewDecoder(resp.Body).Decode(&serverResponse); err != nil {
		LocalLog.Printf("Failed to decode server response: %v", err)
		return
	}

	// The server must echo our request id; anything else is a protocol error.
	if request.ReqID != serverResponse.ReqID {
		LocalLog.Println("reqId 不一致的!")
		return
	}

	if serverResponse.Status == STATUS_SUCCESS {
		LocalLog.Printf("发送心跳成功: reqId=%d", request.ReqID)
	} else {
		LocalLog.Printf("发送心跳失败: %s", serverResponse.Data)
	}
}

// SendToServer POSTs a single-element args payload to the given server URI
// and logs the outcome. jobName is used only in log messages (e.g. "结果上报").
// Fixes over the original: the response body is now closed (it leaked a
// connection per call), the client has a timeout, and the previously
// ignored http.NewRequest error is handled.
func SendToServer(uri string, payload interface{}, jobName string) {
	request := SnailJobRequest{
		ReqID: GenerateReqID(),
		Args:  []interface{}{payload},
	}

	requestBody, err := json.Marshal(request)
	if err != nil {
		LocalLog.Printf("Failed to marshal request: %v", err)
		return
	}

	url := fmt.Sprintf("http://%s:%s/%s", SNAIL_SERVER_HOST, SNAIL_SERVER_PORT, uri)
	req, err := http.NewRequest("POST", url, bytes.NewBuffer(requestBody))
	if err != nil {
		LocalLog.Printf("Failed to build request: %v", err)
		return
	}
	for key, value := range HEADERS {
		req.Header.Set(key, value)
	}

	client := &http.Client{Timeout: 10 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		LocalLog.Printf("%s失败: %v", jobName, err)
		return
	}
	defer resp.Body.Close() // previously leaked

	var serverResponse NettyResult
	if err := json.NewDecoder(resp.Body).Decode(&serverResponse); err != nil {
		LocalLog.Printf("Failed to decode server response: %v", err)
		return
	}

	// The server must echo our request id; anything else is a protocol error.
	if request.ReqID != serverResponse.ReqID {
		LocalLog.Println("reqId 不一致的!")
		return
	}

	if serverResponse.Status == STATUS_SUCCESS {
		LocalLog.Printf("%s成功: reqId=%d", jobName, request.ReqID)
	} else {
		LocalLog.Printf("%s失败: %s", jobName, serverResponse.Data)
	}
}

// SendDispatchResult reports a job execution result back to the server's
// dispatch-result endpoint.
func SendDispatchResult(payload interface{}) {
	SendToServer("report/dispatch/result", payload, "结果上报")
}

// SendBatchLogReport uploads a batch of buffered log records to the server.
func SendBatchLogReport(payload []JobLogTask) {
	SendToServer("batch/server/report/log", payload, "日志批量上报")
}

// reqIDMu/lastReqID guarantee strictly increasing request ids even when
// several goroutines (heartbeat, result reporting, log upload) generate
// ids within the same millisecond.
var (
	reqIDMu   sync.Mutex
	lastReqID int64
)

// GenerateReqID returns a unique, strictly increasing request id based on
// the current Unix-millisecond timestamp. The previous implementation
// returned the raw timestamp, so two calls in the same millisecond produced
// identical ids and could break response correlation.
func GenerateReqID() int64 {
	reqIDMu.Lock()
	defer reqIDMu.Unlock()
	now := time.Now().UnixMilli()
	if now <= lastReqID {
		now = lastReqID + 1
	}
	lastReqID = now
	return now
}

// SnailLogContext carries the job identifiers that remote log records are
// tagged with (see SnailHttpLogHandler.Transform).
type SnailLogContext struct {
	JobID       int
	TaskID      int
	TaskBatchID int
}

// LocalLog writes to the local process log only; RemoteLog additionally
// fans records out to the snail-job server via the hook installed in
// ConfigLoggers.
var (
	LocalLog  = logrus.New()
	RemoteLog = logrus.New()
)

// ConfigLoggers wires up the remote logger: debug level, plus the hook
// that forwards records to the snail-job server.
func ConfigLoggers() {
	RemoteLog.SetLevel(logrus.DebugLevel)
	RemoteLog.AddHook(&MyHook{})
}

// MyHook is a logrus hook intended to ship RemoteLog records to the
// snail-job server.
type MyHook struct{}

// Levels reports that this hook fires for every log level.
func (h *MyHook) Levels() []logrus.Level {
	return logrus.AllLevels
}

// Fire is invoked by logrus once per record.
// TODO: implement forwarding of the record to the remote server here.
func (h *MyHook) Fire(entry *logrus.Entry) error {
	return nil
}

// DispatchJobRequest is the payload the server POSTs to /job/dispatch/v1
// to ask this client to run a job. Args is modeled as a slice, but only
// Args[0] is consumed (see ExecutorManager.Execute). Pointer fields are
// optional in the wire format; ArgsStr in particular may be absent.
// The `description` tag text is server-facing metadata and is left as-is.
type DispatchJobRequest struct {
	ReqID int64 `json:"reqId"`
	Args  []struct {
		NamespaceId         string          `json:"namespaceId" description:"namespaceId 不能为空"`
		JobId               int             `json:"jobId" description:"jobId 不能为空"`
		TaskBatchId         int             `json:"taskBatchId" description:"taskBatchId 不能为空"`
		TaskId              int             `json:"taskId" description:"taskId 不能为空"`
		TaskType            JobTaskTypeEnum `json:"taskType" description:"taskType 不能为空"`
		GroupName           string          `json:"groupName" description:"group 不能为空"`
		ParallelNum         int             `json:"parallelNum" description:"parallelNum 不能为空"`
		ExecutorType        int             `json:"executorType" description:"executorType 不能为空"`
		ExecutorInfo        string          `json:"executorInfo" description:"executorInfo 不能为空"`
		ExecutorTimeout     int             `json:"executorTimeout" description:"executorTimeout 不能为空"`
		ArgsStr             *string         `json:"argsStr,omitempty"`
		ShardingTotal       *int            `json:"shardingTotal,omitempty"`
		ShardingIndex       *int            `json:"shardingIndex,omitempty"`
		WorkflowTaskBatchId *int            `json:"workflowTaskBatchId,omitempty"`
		WorkflowNodeId      *int            `json:"workflowNodeId,omitempty"`
		RetryCount          *int            `json:"retryCount,omitempty"`
		RetryScene          *int            `json:"retryScene,omitempty" description:"重试场景 auto、manual"`
		IsRetry             bool            `json:"isRetry" description:"是否是重试流量"`
	} `json:"args"`
}

// StopJobRequest is the payload for /job/stop/v1; only Args[0].TaskBatchID
// is used (see ExecutorManager.Stop).
type StopJobRequest struct {
	ReqID int64 `json:"reqId"`
	Args  []struct {
		TaskBatchID int `json:"taskBatchId"`
	} `json:"args"`
}

// ExecuteResult is what an executor function returns: the protocol-level
// success flag plus a free-form message reported back to the server.
type ExecuteResult struct {
	Success StatusEnum `json:"success"`
	Message string     `json:"message"`
}

// JobTaskBatchStatusEnum mirrors the server-side task-batch status codes.
// Values start at 1 (WAITING) and increase in declaration order.
type JobTaskBatchStatusEnum int

const (
	BATCH_STATUS_WAITING JobTaskBatchStatusEnum = iota + 1
	BATCH_STATUS_RUNNING
	BATCH_STATUS_SUCCESS
	BATCH_STATUS_FAIL
	BATCH_STATUS_STOP
	BATCH_STATUS_CANCEL
)

// JobTaskTypeEnum mirrors the server-side task routing type. Values start
// at 1 (CLUSTER) and increase in declaration order.
type JobTaskTypeEnum int

const (
	// Previously this group was declared without a type, so the constants
	// were untyped ints rather than JobTaskTypeEnum. Typing the first
	// member makes the whole iota group JobTaskTypeEnum, matching the
	// fields (e.g. DispatchJobResult.TaskType) they are meant to populate.
	TASK_TYPE_CLUSTER JobTaskTypeEnum = iota + 1
	TASK_TYPE_BROADCAST
	TASK_TYPE_SHARDING
)

// DispatchJobResult is the DTO reported back to the server after a job
// runs (see SendDispatchResult). Most fields are copied verbatim from the
// originating dispatch request; TaskStatus and ExecuteResult carry the
// actual outcome.
type DispatchJobResult struct {
	JobId               int                    `json:"jobId"`
	TaskBatchId         int                    `json:"taskBatchId"`
	WorkflowTaskBatchId *int                   `json:"workflowTaskBatchId,omitempty"`
	WorkflowNodeId      *int                   `json:"workflowNodeId,omitempty"`
	TaskId              int                    `json:"taskId"`
	TaskType            JobTaskTypeEnum        `json:"taskType"`
	GroupName           string                 `json:"groupName"`
	TaskStatus          JobTaskBatchStatusEnum `json:"taskStatus"`
	ExecuteResult       ExecuteResult          `json:"executeResult"`
	RetryScene          *int                   `json:"retryScene,omitempty"`
	IsRetry             bool                   `json:"isRetry"`
}

// BuildDispatchJobResult combines the first dispatch argument with the
// executor's result into the DTO reported back to the server. The batch
// status is SUCCESS exactly when the executor reported success, and FAIL
// otherwise. Callers must guarantee that Args is non-empty.
func BuildDispatchJobResult(dispatchJobRequest DispatchJobRequest, executeResult ExecuteResult) DispatchJobResult {
	arg := dispatchJobRequest.Args[0]

	status := BATCH_STATUS_FAIL
	if executeResult.Success == STATUS_SUCCESS {
		status = BATCH_STATUS_SUCCESS
	}

	result := DispatchJobResult{
		JobId:         arg.JobId,
		TaskBatchId:   arg.TaskBatchId,
		TaskId:        arg.TaskId,
		TaskType:      arg.TaskType,
		GroupName:     arg.GroupName,
		TaskStatus:    status,
		ExecuteResult: executeResult,
		IsRetry:       arg.IsRetry,
	}
	// Optional fields are shared pointers copied straight from the request.
	result.WorkflowTaskBatchId = arg.WorkflowTaskBatchId
	result.WorkflowNodeId = arg.WorkflowNodeId
	result.RetryScene = arg.RetryScene
	return result
}

// ExecutorManager holds the registry of named executor functions and
// per-task-batch pools consulted by Stop. lock guards both maps.
// NOTE(review): nothing visible in this file ever inserts into
// executorPools, so Stop's lookup always misses — confirm whether pool
// registration was meant to happen in Execute.
type ExecutorManager struct {
	executors     map[string]func(string) ExecuteResult
	executorPools map[int]*sync.Pool
	lock          sync.Mutex
}

// NewExecutorManager returns a manager with empty registries, ready for
// Register calls.
func NewExecutorManager() *ExecutorManager {
	mgr := &ExecutorManager{}
	mgr.executors = map[string]func(string) ExecuteResult{}
	mgr.executorPools = map[int]*sync.Pool{}
	return mgr
}

// Register adds a named executor to the registry. Registering the same
// name twice is a programming error and panics.
func (m *ExecutorManager) Register(name string, executor func(string) ExecuteResult) {
	m.lock.Lock()
	defer m.lock.Unlock()

	if _, dup := m.executors[name]; dup {
		panic(fmt.Sprintf("Executor [%s] already registered", name))
	}
	m.executors[name] = executor
	LocalLog.Info(fmt.Sprintf("Registered executor: %s", name))
}

// Execute looks up the executor named by the dispatch request, runs it,
// and reports the outcome to the server. A panic inside the executor is
// recovered and logged remotely; in that case the zero ExecuteResult is
// returned and no result is reported.
//
// Fixes over the original: error paths were labeled STATUS_SUCCESS, the
// optional ArgsStr pointer was dereferenced without a nil check (panicking
// on jobs dispatched without arguments), the executors map was read
// without the lock, and an "executor not found" failure was never reported
// to the server.
func (m *ExecutorManager) Execute(req DispatchJobRequest) ExecuteResult {
	defer func() {
		if err := recover(); err != nil {
			RemoteLog.Error(err)
		}
	}()

	if len(req.Args) == 0 {
		return ExecuteResult{Success: STATUS_FAILED, Message: "args cannot be empty"}
	}

	args := req.Args[0]

	m.lock.Lock()
	executor, exists := m.executors[args.ExecutorInfo]
	m.lock.Unlock()
	if !exists {
		// Report the failure so the server does not wait for a result that
		// will never arrive.
		result := ExecuteResult{Success: STATUS_FAILED, Message: fmt.Sprintf("Executor not found: %s", args.ExecutorInfo)}
		SendDispatchResult(BuildDispatchJobResult(req, result))
		return result
	}

	LocalLog.Info(fmt.Sprintf("Executing with executor: %s", args.ExecutorInfo))

	// NOTE(review): the original built a context here and discarded it, so
	// executors could never observe the job's SnailLogContext (see the
	// FIXME in TestJobExecutor). Propagating it requires an executor
	// signature that accepts a context; the dead statement was removed and
	// the public API left unchanged.

	// ArgsStr is optional in the wire format; treat absence as "".
	argsStr := ""
	if args.ArgsStr != nil {
		argsStr = *args.ArgsStr
	}

	result := executor(argsStr)
	SendDispatchResult(BuildDispatchJobResult(req, result))
	return result
}

// Stop handles a stop request for a task batch: if a pool is registered
// for the batch, it is removed from the registry.
// NOTE(review): sync.Pool.Put(nil) is a documented no-op, so the Put below
// (kept from the original) has no effect; actual cancellation of a running
// executor is not implemented.
func (m *ExecutorManager) Stop(req StopJobRequest) {
	if len(req.Args) == 0 {
		LocalLog.Info("args cannot be empty")
		return
	}

	batchID := req.Args[0].TaskBatchID

	m.lock.Lock()
	defer m.lock.Unlock()

	pool, found := m.executorPools[batchID]
	if !found {
		return
	}
	pool.Put(nil)
	delete(m.executorPools, batchID)
	LocalLog.Info(fmt.Sprintf("Stopped task batch: %d", batchID))
}

// HandleDispatch accepts a job dispatch request, acknowledges it
// immediately, and runs the job asynchronously; the real outcome is
// reported later via SendDispatchResult.
//
// Fix: the original called LocalLog.Fatal on a malformed request body,
// which terminates the entire process; a bad request now gets a 400.
func HandleDispatch(w http.ResponseWriter, r *http.Request) {
	var req DispatchJobRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		LocalLog.Printf("Failed to decode dispatch request: %v", err)
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	LocalLog.Info(fmt.Sprintf("Received job dispatch request: reqId=%d", req.ReqID))

	// Guard the type assertion: a missing manager previously panicked.
	manager, ok := r.Context().Value(EXECUTOR_MANAGER_KEY).(*ExecutorManager)
	if !ok {
		LocalLog.Printf("executor manager missing from request context")
		http.Error(w, "server misconfigured", http.StatusInternalServerError)
		return
	}

	// Run asynchronously; the HTTP reply only acknowledges receipt.
	go manager.Execute(req)

	if err := json.NewEncoder(w).Encode(NettyResult{
		Status: STATUS_SUCCESS,
		ReqID:  req.ReqID,
		Data:   true,
	}); err != nil {
		LocalLog.Printf("Failed to encode dispatch response: %v", err)
	}
}

// HandleStop accepts a stop request for a task batch, forwards it to the
// executor manager synchronously, and acknowledges it.
//
// Fix: the original called LocalLog.Fatal on a malformed request body,
// which terminates the entire process; a bad request now gets a 400.
func HandleStop(w http.ResponseWriter, r *http.Request) {
	var req StopJobRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		LocalLog.Printf("Failed to decode stop request: %v", err)
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	LocalLog.Info(fmt.Sprintf("Received job stop request: reqId=%d", req.ReqID))

	// Guard the type assertion: a missing manager previously panicked.
	manager, ok := r.Context().Value(EXECUTOR_MANAGER_KEY).(*ExecutorManager)
	if !ok {
		LocalLog.Printf("executor manager missing from request context")
		http.Error(w, "server misconfigured", http.StatusInternalServerError)
		return
	}
	manager.Stop(req)

	if err := json.NewEncoder(w).Encode(NettyResult{
		Status: STATUS_SUCCESS,
		ReqID:  req.ReqID,
		Data:   true,
	}); err != nil {
		LocalLog.Printf("Failed to encode stop response: %v", err)
	}
}

// LoggingMiddleware logs every request, enforces the shared token, and
// restricts the API to POST before delegating to next.
func LoggingMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		start := time.Now()
		LocalLog.Printf("Started %s %s", r.Method, r.RequestURI)
		LocalLog.Printf("Request Headers: %v", r.Header)

		// Reject callers without the expected token. The original answered
		// with 203 Non-Authoritative Information and a "Method Not Allowed"
		// body — a success-class code with a mislabeled message; 401
		// Unauthorized is the correct response for a failed credential check.
		if r.Header.Get("Sj-Token") != HEADERS["token"] {
			http.Error(w, "Unauthorized", http.StatusUnauthorized)
			return
		}

		// The snail-job endpoints are POST-only.
		if r.Method != http.MethodPost {
			http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
			return
		}

		next.ServeHTTP(w, r)

		LocalLog.Printf("Completed %s in %v", r.RequestURI, time.Since(start))
	})
}

// RunServer starts the HTTP endpoint that receives dispatch and stop
// requests from the snail-job server. It blocks until the listener fails,
// at which point the process exits via LocalLog.Fatal.
//
// Fixes over the original: the listen address was a hard-coded ":1789"
// that could silently drift from the SNAIL_HOST_PORT advertised in
// HEADERS, and the default server had no header-read timeout.
func RunServer(manager *ExecutorManager) {
	mux := http.NewServeMux()

	// withManager injects the shared ExecutorManager into each request's
	// context so the handlers can retrieve it.
	withManager := func(h http.HandlerFunc) http.HandlerFunc {
		return func(w http.ResponseWriter, r *http.Request) {
			ctx := context.WithValue(r.Context(), EXECUTOR_MANAGER_KEY, manager)
			h(w, r.WithContext(ctx))
		}
	}

	mux.HandleFunc("/job/dispatch/v1", withManager(HandleDispatch))
	mux.HandleFunc("/job/stop/v1", withManager(HandleStop))

	server := &http.Server{
		Addr:              ":" + SNAIL_HOST_PORT,
		Handler:           LoggingMiddleware(mux),
		ReadHeaderTimeout: 10 * time.Second,
	}

	LocalLog.Info("Starting server")
	if err := server.ListenAndServe(); err != nil {
		LocalLog.Fatal(err)
	}
}

// LogContextKey is the canonical key under which SetContext stores a
// SnailLogContext inside a context.Context.
var LogContextKey = &ContextKey{name: "SnailLogContext"}

// ContextKey is a pointer-identity context key carrying a readable name
// for debugging output.
type ContextKey struct {
	name string
}

// String makes the key self-describing when printed.
func (k *ContextKey) String() string {
	return fmt.Sprintf("snailjob context value %s", k.name)
}

// SnailHttpLogHandler buffers remote log records and ships them to the
// server in batches, flushing when the buffer reaches capacity or when the
// interval timer fires. mu guards buffer draining and (partially) the
// timer; see StartTimer for an open concurrency concern.
type SnailHttpLogHandler struct {
	mu       sync.Mutex
	capacity int           // flush threshold; also the channel's capacity
	interval time.Duration // periodic flush interval
	buffer   chan *JobLogTask
	timer    *time.Timer
}

// TaskLogFieldDTO is a single name/value pair inside a JobLogTask.
type TaskLogFieldDTO struct {
	Name  string `json:"name"`
	Value string `json:"value"`
}

// JobLogTask is the log-record structure reported to the server.
type JobLogTask struct {
	LogType     string            `json:"logType"`
	NamespaceID string            `json:"namespaceId"`
	GroupName   string            `json:"groupName"`
	RealTime    int64             `json:"realTime"`
	FieldList   []TaskLogFieldDTO `json:"fieldList"`
	JobID       int               `json:"jobId"`
	TaskBatchID int               `json:"taskBatchId"`
	TaskID      int               `json:"taskId"`
}

func FormatExcInfo(err error) string {
	if err == nil {
		return ""
	}
	return fmt.Sprintf("%s", err)
}

// Transform converts an internal LogRecord into the JobLogTask DTO the
// server expects, tagging it with the current SnailLogContext identifiers.
func (h *SnailHttpLogHandler) Transform(record *LogRecord) *JobLogTask {
	location := fmt.Sprintf("%s:%s:%d", record.module, record.funcName, record.lineno)

	fields := []TaskLogFieldDTO{
		{Name: "time_stamp", Value: fmt.Sprintf("%d", record.timeStamp.UnixMilli())},
		{Name: "level", Value: record.level},
		{Name: "thread", Value: record.thread},
		{Name: "message", Value: record.message},
		{Name: "location", Value: location},
		{Name: "throwable", Value: FormatExcInfo(record.excInfo)},
		{Name: "host", Value: SNAIL_HOST_IP},
		{Name: "port", Value: SNAIL_HOST_PORT},
	}

	ctx := GetContext()

	task := &JobLogTask{
		LogType:     "JOB",
		NamespaceID: SNAIL_NAMESPACE,
		GroupName:   SNAIL_GROUP_NAME,
		RealTime:    time.Now().UnixMilli(),
		FieldList:   fields,
		JobID:       ctx.JobID,
		TaskBatchID: ctx.TaskBatchID,
		TaskID:      ctx.TaskID,
	}
	return task
}

// Emit buffers one record for batched upload. The first buffered record
// arms the interval timer; reaching capacity triggers an immediate flush.
//
// Fix: the original Emit called Flush() while already holding h.mu; Go
// mutexes are not reentrant, so every capacity-triggered flush deadlocked.
// Draining now goes through flushLocked, which assumes the lock is held.
func (h *SnailHttpLogHandler) Emit(record *LogRecord) {
	dto := h.Transform(record)

	h.mu.Lock()
	defer h.mu.Unlock()

	if len(h.buffer) == 0 {
		h.StartTimer()
	}

	// Defensive: never block on a full channel while holding the lock —
	// Flush could then never acquire it and Emit would wedge permanently.
	if len(h.buffer) == cap(h.buffer) {
		h.flushLocked()
	}
	h.buffer <- dto

	if len(h.buffer) >= h.capacity {
		h.flushLocked()
	}
}

// Flush drains the buffer and sends any pending records. Safe to call
// from the timer goroutine or any other caller not holding h.mu.
func (h *SnailHttpLogHandler) Flush() {
	h.mu.Lock()
	defer h.mu.Unlock()
	h.flushLocked()
}

// flushLocked drains the buffer and sends the batch; callers must hold h.mu.
func (h *SnailHttpLogHandler) flushLocked() {
	var items []*JobLogTask
	for len(h.buffer) > 0 {
		items = append(items, <-h.buffer)
	}
	if len(items) > 0 {
		h.Send(items)
	}
}

// Send serializes a batch of log records for upload.
// TODO: actually POST the batch to the server using a sliding window
// (buffer-size plus time-window based):
//  1. assemble the JobLogTask batch
//  2. http.Post (e.g. via SendBatchLogReport)
func (h *SnailHttpLogHandler) Send(items []*JobLogTask) {
	data, err := json.Marshal(items)
	if err != nil {
		LocalLog.Printf("Failed to marshal log data: %v", err)
		return
	}
	fmt.Printf("Sending logs to server: %s\n", data)
}

// StartTimer (re)arms the flush timer: after h.interval elapses the buffer
// is flushed and the timer re-arms itself, yielding a periodic flush.
// NOTE(review): Emit calls this while holding h.mu, but the timer callback
// re-invokes it from the timer goroutine with no lock, so access to
// h.timer is a data race — confirm and guard h.timer (without re-locking
// inside the Emit path) if the handler is used concurrently.
func (h *SnailHttpLogHandler) StartTimer() {
	if h.timer != nil {
		h.timer.Stop()
	}
	h.timer = time.AfterFunc(h.interval, func() {
		h.Flush()
		h.StartTimer()
	})
}

// NewSnailHttpHandler builds a batching log handler that flushes once
// `capacity` records are buffered or `interval` has elapsed.
func NewSnailHttpHandler(capacity int, interval time.Duration) *SnailHttpLogHandler {
	h := &SnailHttpLogHandler{capacity: capacity, interval: interval}
	h.buffer = make(chan *JobLogTask, capacity)
	return h
}

// LogRecord is the internal representation of one log event before it is
// transformed into a JobLogTask by SnailHttpLogHandler.Transform.
// TODO: Go has no thread names; consider writing a goroutine id into
// `thread` instead.
type LogRecord struct {
	timeStamp time.Time
	level     string // log level name, e.g. "INFO"
	thread    string // thread/goroutine label (see TODO above)
	message   string
	module    string // module/function/line are combined into "location"
	funcName  string
	lineno    int
	excInfo   error // optional; rendered into the "throwable" field
}

// SetContext returns a child context carrying the given SnailLogContext.
// It exists to pass per-goroutine job metadata: after a dispatch request
// is accepted, the executor function runs in its own goroutine, and
// real-time log reporting needs the taskId/jobId identifiers from there.
func SetContext(ctx context.Context, data SnailLogContext) context.Context {
	child := context.WithValue(ctx, LogContextKey, data)
	return child
}

// GetContext extracts the SnailLogContext from the given context, or the
// zero value when absent. For backward compatibility it may still be
// called with no arguments, in which case — exactly as in the original
// code — it inspects a fresh context.Background() and therefore always
// yields the zero value; callers that hold a real context should pass it.
//
// BUG(original): the parameterless form can never observe values stored
// with SetContext (that is the FIXME noted in TestJobExecutor). The
// comma-ok assertion also replaces a bare type assertion that would panic
// if a foreign value were ever stored under LogContextKey.
func GetContext(ctxs ...context.Context) SnailLogContext {
	ctx := context.Background()
	if len(ctxs) > 0 && ctxs[0] != nil {
		ctx = ctxs[0]
	}
	if value, ok := ctx.Value(LogContextKey).(SnailLogContext); ok {
		return value
	}
	return SnailLogContext{}
}

// TestJobExecutor is a sample executor: it logs locally, emits ten
// remote-log lines (these appear in the server's task-batch log), and
// reports success.
func TestJobExecutor(argsStr string) ExecuteResult {
	LocalLog.Info("Executing exampleExecutor with args: " + argsStr)

	ctx := GetContext()
	// FIXME: 获取不到实际参数 — the context comes back empty because
	// GetContext cannot see values set on the dispatching goroutine.
	fmt.Printf("%v", ctx)

	for round := 0; round < 10; round++ {
		RemoteLog.Info("这是一个循环体", round)
	}

	return ExecuteResult{Success: STATUS_SUCCESS, Message: "Execution successful"}
}

// TestJobExecutorFailed is a sample executor that deliberately panics, to
// exercise the recover-and-report-remotely path in ExecutorManager.Execute.
func TestJobExecutorFailed(argsStr string) ExecuteResult {
	panic("这是故意抛出的异常")
}

// main boots the client: configure the loggers, start the heartbeat loop,
// register the demo executors, then serve the dispatch/stop endpoints.
func main() {
	ConfigLoggers()

	// Heartbeat runs for the lifetime of the process.
	go SendHeartbeat()

	mgr := NewExecutorManager()

	// Demo executors.
	mgr.Register("testJobExecutor", TestJobExecutor)
	mgr.Register("testJobExecutorFailed", TestJobExecutorFailed)

	// Blocks until the listener fails.
	RunServer(mgr)
}
