package nats

// NATS-based implementation; requires a running NATS server.
import (
	"context"
	"encoding/json"
	"fmt"
	"github.com/ThreeDotsLabs/watermill"
	"github.com/ThreeDotsLabs/watermill/message"
	"github.com/ThreeDotsLabs/watermill/message/router/middleware"
	"github.com/go-redis/redis"
	"github.com/nats-io/nats.go"
	"os"
	"proxy_resource_test/bootstrap/models"
	"proxy_resource_test/config"
	"proxy_resource_test/dao"
	"proxy_resource_test/services/mill"
	"proxy_resource_test/services/pool_ip/rsp"
	"proxy_resource_test/utils/logging"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"
)

const (
	ConfigKey          = "task:config:pool"       // Redis hash key holding a task's configuration
	ConfigChannel      = "task:config:pool:chg"   // pub/sub channel announcing configuration changes
	ControlChannel     = "task:control:pool"      // pub/sub channel for control commands
	StatusHashKey      = "task:stat:pool"         // Redis hash key holding a task's status
	TaskCounter        = "task:stat:pool:count"   // cluster-wide attempt counter for a task
	TaskSuccessCounter = "task:stat:pool:success" // success counter for a task
	TaskErrCounter     = "task:stat:pool:error"   // failure counter for a task
	TaskIpRange        = "task:stat:range"        // IP zset key; members kept for at most 30 minutes

	// Control commands carried as the message payload on control subjects.
	CmdStart string = "start"
	CmdPause string = "pause"
	CmdStop  string = "stop"
)

// TaskConfig aliases the model type so this package can use the short name.
type TaskConfig = models.TaskConfig

// taskRunner holds the cancel function used to stop a running task's goroutines.
type taskRunner struct {
	cancel context.CancelFunc
}

// ManagerStruct tracks running tasks and the JetStream subscription that
// delivers control commands to this instance.
type ManagerStruct struct {
	mu       sync.Mutex             // guards tasks
	tasks    map[string]*taskRunner // taskID -> runner for tasks running in this process
	js       nats.JetStreamContext
	subjBase string // control subject prefix; commands arrive on subjBase.<taskID>
	consumer string // per-instance durable consumer name
}

// Manager is the package-wide singleton, created at package init time.
var Manager = NewManager()

// NewManager connects to NATS, creates a JetStream context, and subscribes to
// all task-control subjects ("task.control.<taskID>") with a per-instance
// durable consumer so each instance independently replays the full control
// history. On connection failure it logs and returns a manager without a
// JetStream context rather than panicking.
func NewManager() *ManagerStruct {
	// Each instance uses a unique durable name so it consumes the message
	// history independently of other instances.
	hostname, _ := os.Hostname()
	consumerName := fmt.Sprintf("ctrl-%s-%d", hostname, time.Now().UnixNano())

	m := &ManagerStruct{
		tasks:    make(map[string]*taskRunner),
		subjBase: "task.control", // commands are published to task.control.<taskID>
		consumer: consumerName,
	}

	// Connect to NATS and build the JetStream context. Returning early on
	// error avoids calling methods on a nil *nats.Conn / JetStreamContext,
	// which would panic (the original code only logged and carried on).
	nc, err := nats.Connect(config.AppConfig.NatsUrl)
	if err != nil {
		logging.Error("NATS connect error: %v", err)
		return m
	}
	js, err := nc.JetStream()
	if err != nil {
		logging.Error("JetStream context error: %v", err)
		return m
	}
	m.js = js

	// The stream must already exist (created manually or at bootstrap), e.g.:
	//    streamName := "TASK_CONTROL_STREAM"
	//    js.AddStream(&nats.StreamConfig{
	//        Name:     streamName,
	//        Subjects: []string{m.subjBase + ".*"},
	//    })

	// Subscribe to every control subject, replay all available messages,
	// and acknowledge manually in handleControl.
	_, err = js.Subscribe(m.subjBase+".*", m.handleControl,
		nats.Durable(m.consumer),
		nats.ManualAck(),
		nats.DeliverAll(),
	)
	if err != nil {
		logging.Error("JetStream subscribe error: %v", err)
	}

	return m
}

// handleControl dispatches one control message.
// Subject layout: "task.control.<taskID>"; the payload is a Cmd* command.
// Every message is acknowledged, including malformed ones.
func (m *ManagerStruct) handleControl(msg *nats.Msg) {
	defer msg.Ack()

	segments := strings.Split(msg.Subject, ".")
	if len(segments) != 3 {
		return // unexpected subject shape: ack and drop
	}
	id := segments[2]

	switch string(msg.Data) {
	case CmdStart:
		m.Start(id)
	case CmdPause, CmdStop:
		m.Stop(id)
	}
}

// Start launches the worker goroutines for taskID unless the task has already
// completed (per its Redis status) or is currently running in this process.
func (m *ManagerStruct) Start(taskID string) {
	m.mu.Lock()
	defer m.mu.Unlock()

	// A task marked "completed" in Redis must never be restarted.
	key := fmt.Sprintf("%s:%s", StatusHashKey, taskID)
	if dao.GetRedis().HGet(key, "status").Val() == "completed" {
		logging.Info("task %s already completed, skip start", taskID)
		return
	}
	if _, ok := m.tasks[taskID]; ok {
		logging.Info("task %s already running", taskID)
		return
	}

	// Register a cancel func so Stop can terminate the runner later, then
	// spawn the work loop and the 30-minute zset cleaner.
	ctx, cancel := context.WithCancel(context.Background())
	m.tasks[taskID] = &taskRunner{cancel: cancel}
	go runTask(ctx, taskID)
	go cleanMoreThanThirty(taskID)
	logging.Info("task %s started", taskID)
}

// Stop cancels the running task identified by taskID and removes it from the
// registry; an unknown taskID is logged and ignored.
func (m *ManagerStruct) Stop(taskID string) {
	m.mu.Lock()
	defer m.mu.Unlock()

	runner, ok := m.tasks[taskID]
	if !ok {
		logging.Info("task %s not found", taskID)
		return
	}
	runner.cancel()
	delete(m.tasks, taskID)
	logging.Info("task %s stopped", taskID)
}

// initOrFetchConfig loads the task configuration hash ("task:config:pool:<taskID>")
// from Redis and converts it into a TaskConfig.
//
// Returns an error when Redis fails or the hash does not exist. Numeric
// fields are parsed best-effort: a malformed value degrades to 0 but is
// logged for diagnosis (previously the parse errors were silently dropped).
//
// NOTE(review): runTask reads cfg.OperateName, but only Operate is populated
// here — confirm which field the "operate" hash entry should map to.
func initOrFetchConfig(taskID string) (*TaskConfig, error) {
	key := fmt.Sprintf("%s:%s", ConfigKey, taskID)
	exists, err := dao.GetRedis().Exists(key).Result()
	if err != nil {
		// Fixed misleading message: this is a Redis EXISTS check, not a file read.
		logging.Error("check config key exists err:", err)
		return nil, err
	}
	if exists == 0 {
		return nil, fmt.Errorf("config not exists")
	}
	m, err := dao.GetRedis().HGetAll(key).Result()
	if err != nil {
		return nil, err
	}
	total, err := strconv.Atoi(m["total"])
	if err != nil {
		logging.Error("parse total err:", err)
	}
	sem, err := strconv.Atoi(m["concurrency"])
	if err != nil {
		logging.Error("parse concurrency err:", err)
	}
	return &TaskConfig{
		Operate:     m["operate"],
		Total:       int32(total),
		Concurrency: sem,
		TargetURL:   m["target_url"],
		UserTpl:     m["user_tpl"],
		User:        m["user"],
		Password:    m["password"],
		ProxyAddr:   m["proxy_addr"],
	}, nil
}

// runTask is the main work loop for a single task: it loads the task's config
// from Redis, marks the task "running", then issues proxy requests with
// bounded concurrency until the context is cancelled or the total limit is
// reached, updating Redis counters and publishing IP results along the way.
func runTask(ctx context.Context, taskID string) {
	// Load configuration; without it the task cannot run.
	cfg, err := initOrFetchConfig(taskID)
	if err != nil {
		logging.Error("load config error:", err)
		return
	}
	// Update status to running.
	statusKey := fmt.Sprintf("%s:%s", StatusHashKey, taskID)
	dao.GetRedis().HSet(statusKey, "status", "running")

	// Concurrency control: at most cfg.Concurrency requests in flight.
	sem := make(chan struct{}, cfg.Concurrency)
	var wg sync.WaitGroup

	// Count of successful requests completed by this process only.
	var executed int32
	// Request template, copied by value into every worker goroutine.
	// NOTE(review): OperateName is read here but initOrFetchConfig only
	// populates Operate — confirm which field should carry the value.
	tRsp := rsp.TaskRspGather{
		ProxyAddr:   cfg.ProxyAddr,
		UserTpl:     cfg.UserTpl,
		User:        cfg.User,
		Password:    cfg.Password,
		TargetStr:   cfg.TargetURL,
		OperateName: cfg.OperateName,
	}
	for {
		select {
		case <-ctx.Done():
			// Stop signal received: drain in-flight workers, persist state.
			wg.Wait()
			dao.GetRedis().HSet(statusKey, "status", "stopped")
			return
		default:
		}
		// Cluster-wide attempt counter, shared across instances via Redis.
		cur, err := dao.GetRedis().Incr(fmt.Sprintf("%s:%s", TaskCounter, taskID)).Result()
		if err != nil {
			logging.Error("incr count error:", err)
			continue
		}
		nowNums := atomic.LoadInt32(&executed)
		// Enforce the configured total; hard cap of 10,000,000 local successes.
		if cfg.Total > 0 && cur > int64(cfg.Total) || nowNums >= 10000000 {
			logging.Info("reached total limit, stopping")
			wg.Wait()
			dao.GetRedis().HSet(fmt.Sprintf("%s:%s", StatusHashKey, taskID), "status", "completed")
			return
		}
		wg.Add(1)
		sem <- struct{}{}
		go func(task rsp.TaskRspGather) {
			defer wg.Done()
			defer func() { <-sem }()
			err, TimeSince, infoIp := task.PerformRequest("us")
			if err != nil {
				_ = dao.GetRedis().IncrBy(fmt.Sprintf("%s:%s", TaskErrCounter, taskID), 1).Err()
				fmt.Println("err", err.Error())
				return
			}
			_ = dao.GetRedis().IncrBy(fmt.Sprintf("%s:%s", TaskSuccessCounter, taskID), 1).Err()
			infoIp.CreatedAt = time.Now().Unix()
			infoIp.Operator = taskID
			infoIp.Delay = TimeSince
			data, err := json.Marshal(infoIp)
			if err != nil {
				fmt.Println("JSON 序列化失败:", err)
				return
			}
			atomic.AddInt32(&executed, 1) // bump the local success count
			// Record the IP in a zset scored by unix timestamp; kept for 30
			// minutes for statistics, pruned by cleanMoreThanThirty.
			dao.GetRedis().ZAdd(fmt.Sprintf("%s:%s", TaskIpRange, taskID), redis.Z{
				Score:  float64(time.Now().Unix()),
				Member: infoIp.IP,
			})

			msgID := watermill.NewUUID()
			msg := message.NewMessage(msgID, data)
			middleware.SetCorrelationID(msgID, msg)
			if err := mill.PubSub.Publish(mill.IpInfoRegion, msg); err != nil {
				return
			}
			fmt.Println("now-nums", atomic.LoadInt32(&executed), "sem", cfg.Concurrency)
		}(tRsp)
	}
}

// cleanMoreThanThirty periodically prunes the task's IP zset, removing every
// member whose score (a unix timestamp) is older than 30 minutes.
// NOTE(review): this loop has no stop condition, so the goroutine outlives
// Stop() — consider plumbing a context through; confirm with the task owners.
func cleanMoreThanThirty(taskID string) {
	zsetKey := fmt.Sprintf("%s:%s", TaskIpRange, taskID)
	ticker := time.NewTicker(time.Minute)
	defer ticker.Stop()
	for {
		now := <-ticker.C
		cutoff := strconv.FormatInt(now.Unix()-30*60, 10)
		if err := dao.GetRedis().ZRemRangeByScore(zsetKey, "0", cutoff).Err(); err != nil {
			logging.Error("clean expired zset error:", err, "key:", zsetKey)
		}
	}
}
