package middleware_mgr

import (
	"context"
	"database/sql"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"os/exec"
	"path/filepath"
	"pipeline/cache"
	"pipeline/config"
	"pipeline/schema"
	"strings"
	"sync/atomic"
	"time"

	"github.com/IBM/sarama"
	"github.com/sirupsen/logrus"
	etcd "go.etcd.io/etcd/clientv3"
)

// Size of the Python worker pool created at startup; tune per CPU core count.
// NOTE(review): not referenced in this chunk — confirm it is used elsewhere.
const pythonPoolSize = 10

var (
	etcdClient   *etcd.Client   // shared etcd client, set by initEtcd
	mysqlDB      *sql.DB        // MySQL connection pool, set by initMySQL
	configCache  *cache.Cache   // topic -> {Table, ScriptPath}, populated by handleConfigEvent
	consumers    *cache.Cache   // topic -> *Consumer, managed by Start/StopConsumer
	tomlConfData *config.Config // local TOML configuration, captured by initEtcd
	id           string         // unique instance id: "<localIP>-<pid>"
)

// Consumer wraps a sarama consumer group for a single topic together with
// the generated Python script that transforms its messages.
type Consumer struct {
	sarama.ConsumerGroup
	topic      string                // Kafka topic this consumer reads
	scriptPath string                // path of the generated Python handler script
	cancel     context.CancelFunc    // stops the Run loop and the stats reporter
	config     schema.ConsumerConfig // batching parameters (BatchSize / BatchTimeout)
	stats      schema.TopicStatus    // NOTE(review): not referenced in this chunk — confirm usage
	// total messages seen; updated atomically from handler goroutines
	messageCount int64
}

// consumerGroupHandler implements sarama.ConsumerGroupHandler, buffering
// messages per claim so they can be processed and stored in batches.
type consumerGroupHandler struct {
	consumer *Consumer
	buffer   []*sarama.ConsumerMessage // pending messages awaiting a batch flush
}

// init creates the process-wide caches before any other package code runs.
func init() {
	configCache = cache.NewCache()
	consumers = cache.NewCache()
}

// InitMiddleware wires up the external dependencies: etcd first, then MySQL
// (whose DSN is read from etcd). Call once at startup before launching
// ConfigWatcher / HealthKeeper.
//
// Fixes vs. original: the final `return err` returned a stale variable;
// errors are now wrapped with %w so callers can use errors.Is/As.
func InitMiddleware(conf *config.Config) error {
	if err := initEtcd(conf); err != nil {
		logrus.Error("init etcd fail: ", err)
		return fmt.Errorf("init etcd: %w", err)
	}
	// MySQL init depends on etcd: the DSN lives under /config/global/mysql.
	if err := initMySQL(); err != nil {
		logrus.Error("init mysql fail: ", err)
		return fmt.Errorf("init mysql: %w", err)
	}
	return nil
}

// initEtcd stores conf in package state, derives this instance's id
// ("<localIP>-<pid>"), connects to the etcd cluster, and registers an
// initial "/health/<localIP>" key under a fresh 30-second lease.
func initEtcd(conf *config.Config) error {
	tomlConfData = conf
	id = fmt.Sprintf("%s-%d", tomlConfData.LocalIP, os.Getpid())
	cli, err := etcd.New(etcd.Config{
		Endpoints:   tomlConfData.EtcdEndpoints,
		DialTimeout: tomlConfData.EtcdDialTimeout,
	})

	if err != nil {
		logrus.Error(err.Error())
		return err
	}
	etcdClient = cli

	// Create a 30s lease so the health key auto-expires if this process dies.
	resp, err := etcdClient.Grant(context.Background(), 30)
	if err != nil {
		logrus.Error(err.Error(), "etcd grant fail")
		return err
	}
	leaseID := resp.ID
	// Seed the health-check key.
	// NOTE(review): HealthKeeper later grants its OWN lease for this key and
	// this lease is never renewed — confirm the intended ownership.
	_, err = etcdClient.Put(context.Background(),
		fmt.Sprintf("/health/%s", tomlConfData.LocalIP),
		"alive",
		etcd.WithLease(leaseID))

	return err
}

// initMySQL reads the MySQL config from etcd (/config/global/mysql), opens a
// pooled connection and verifies it with a Ping.
//
// Fixes vs. original: a missing etcd key made resp.Kvs[0] panic; now it is
// reported as an error.
// NOTE(review): sql.Open("mysql", ...) requires the driver to be registered
// via a blank import elsewhere in the program — confirm.
func initMySQL() error {
	resp, err := etcdClient.Get(context.Background(), "/config/global/mysql")
	if err != nil {
		return err
	}
	if len(resp.Kvs) == 0 {
		return fmt.Errorf("mysql config not found in etcd at /config/global/mysql")
	}
	var mysqlConf schema.MysqlConfig
	if err := json.Unmarshal(resp.Kvs[0].Value, &mysqlConf); err != nil {
		return err
	}
	dsn := fmt.Sprintf("%s:%s@tcp(%s)/%s?parseTime=true",
		mysqlConf.User, mysqlConf.Password, mysqlConf.Host, mysqlConf.Database)
	db, err := sql.Open("mysql", dsn)
	if err != nil {
		logrus.Error(err.Error())
		return err
	}

	// Pool sizing: modest limits; recycle connections every 5 minutes.
	db.SetMaxOpenConns(20)
	db.SetMaxIdleConns(5)
	db.SetConnMaxLifetime(5 * time.Minute)
	mysqlDB = db
	return db.Ping()
}

// ConfigWatcher blocks forever, watching the /config/topics/ prefix in etcd
// and dispatching every event to handleConfigEvent. Run it in its own
// goroutine after InitMiddleware has succeeded.
func ConfigWatcher() {
	watchChan := etcdClient.Watch(context.Background(), "/config/topics/", etcd.WithPrefix())
	for resp := range watchChan {
		for _, ev := range resp.Events {
			handleConfigEvent(ev)
		}
	}
}

// handleConfigEvent reacts to a single etcd event under /config/topics/:
// PUT regenerates the topic's Python script, caches its table/script mapping
// and (re)applies the desired consumer state; DELETE tears the topic down.
//
// Fixes vs. original: the local variable `config` shadowed the imported
// pipeline/config package (renamed to topicConf), and parse/generate failures
// now log at error level like the rest of the file.
func handleConfigEvent(ev *etcd.Event) {
	topic := strings.TrimPrefix(string(ev.Kv.Key), "/config/topics/")

	switch ev.Type {
	case etcd.EventTypePut:
		var topicConf schema.TopicConfig
		if err := json.Unmarshal(ev.Kv.Value, &topicConf); err != nil {
			logrus.Errorf("解析配置失败: %s: %v", topic, err)
			return
		}

		// Generate the per-topic Python handler script.
		scriptPath := filepath.Join(tomlConfData.PythonScriptDir, fmt.Sprintf("%s.py", topic))
		if err := generatePythonScript(topicConf.Function, scriptPath); err != nil {
			logrus.Errorf("生成Python脚本失败: %s: %v", topic, err)
			return
		}
		// This anonymous struct type must stay identical to the assertions in
		// getScriptPath / saveToMySQL / saveBatchToMySQL.
		configCache.Store(topic, struct {
			Table      string
			ScriptPath string
		}{
			Table:      topicConf.Table,
			ScriptPath: scriptPath,
		})

		ManageConsumer(topic, topicConf.Status)
	case etcd.EventTypeDelete:
		configCache.Delete(topic)
		StopConsumer(topic)
	}
}

// generatePythonScript renders the configured Python handler template with
// the user-supplied code spliced in, then writes it to path (mode 0644).
func generatePythonScript(code, path string) error {
	script := replaceUserCode(tomlConfData.PythonHandler.Code, code)
	return os.WriteFile(path, []byte(script), 0644)
}

// replaceUserCode splices newCode into original between the `USER_CODE = """`
// opening marker and the next closing `"""`, surrounding it with newlines so
// the generated Python stays well-formed. The process is terminated
// (logrus.Fatal) if either marker is missing from the template.
func replaceUserCode(original, newCode string) string {
	const (
		startMarker = "USER_CODE = \"\"\""
		endMarker   = "\"\"\""
	)

	// Locate the opening marker; a malformed template is unrecoverable.
	markerPos := strings.Index(original, startMarker)
	if markerPos == -1 {
		logrus.Fatal("找不到USER_CODE起始标记")
	}
	bodyStart := markerPos + len(startMarker)

	// The closing triple-quote is the first one after the opening marker.
	rel := strings.Index(original[bodyStart:], endMarker)
	if rel == -1 {
		logrus.Fatal("找不到USER_CODE结束标记")
	}
	bodyEnd := bodyStart + rel

	return original[:bodyStart] + "\n" + newCode + "\n" + original[bodyEnd:]
}

// ManageConsumer applies the desired state for topic: "running" starts a
// consumer, "stopped" stops it, anything else is ignored.
func ManageConsumer(topic, status string) {
	logrus.Info("ManageConsumer. topic:", topic, ",status:", status)
	if status == "running" {
		StartConsumer(topic)
	} else if status == "stopped" {
		StopConsumer(topic)
	}
}

// getScriptPath returns the generated Python script path cached for topic,
// or "" when no config is cached.
//
// Fixes vs. original: the Load result was asserted unconditionally, so a
// cache miss (nil value) panicked on the type assertion.
func getScriptPath(topic string) string {
	val, ok := configCache.Load(topic)
	if !ok {
		return ""
	}
	entry, ok := val.(struct{ Table, ScriptPath string })
	if !ok {
		return ""
	}
	return entry.ScriptPath
}

// getGlobalKafkaConfig fetches and decodes /config/global/kafka from etcd.
// On any failure it logs and returns the zero value.
//
// Fixes vs. original: the Get error was silently dropped, and a missing key
// made resp.Kvs[0] panic.
func getGlobalKafkaConfig() schema.KafkaConfig {
	var result schema.KafkaConfig

	resp, err := etcdClient.Get(context.Background(), "/config/global/kafka")
	if err != nil {
		logrus.Error(err.Error())
		return result
	}
	if len(resp.Kvs) == 0 {
		logrus.Error("kafka config not found in etcd at /config/global/kafka")
		return result
	}
	if err := json.Unmarshal(resp.Kvs[0].Value, &result); err != nil {
		logrus.Error(err.Error())
		return result
	}

	return result
}

// getKafkaBrokers splits the comma-separated bootstrap servers from the
// global Kafka config into individual broker addresses.
func getKafkaBrokers() []string {
	kafkaConf := getGlobalKafkaConfig()
	servers := string(kafkaConf.BootstrapServers)
	return strings.Split(servers, ",")
}

// handler builds a fresh consumer-group handler bound to this consumer.
func (c *Consumer) handler() sarama.ConsumerGroupHandler {
	h := &consumerGroupHandler{consumer: c}
	return h
}

// Setup runs once when a consumer-group session begins, before any
// ConsumeClaim goroutines are started.
func (h *consumerGroupHandler) Setup(s sarama.ConsumerGroupSession) error {
	logrus.Infof("消费者组会话启动，MemberID: %s", s.MemberID())
	return nil
}

// Cleanup runs once when a consumer-group session ends (rebalance/shutdown),
// after all ConsumeClaim goroutines have returned.
func (h *consumerGroupHandler) Cleanup(s sarama.ConsumerGroupSession) error {
	logrus.Info("消费者组会话结束")
	return nil
}

// ConsumeClaim is the core consume loop for one partition claim: it buffers
// incoming messages and flushes a batch when the buffer reaches BatchSize
// or when BatchTimeout elapses, whichever comes first.
func (h *consumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSession,
	claim sarama.ConsumerGroupClaim) error {
	ticker := time.NewTicker(h.consumer.config.BatchTimeout)
	defer ticker.Stop()

	for {
		select {
		case msg, ok := <-claim.Messages():
			if !ok {
				// Channel closed (rebalance or shutdown): flush any leftovers.
				if len(h.buffer) > 0 {
					h.processBatch(session, h.buffer)
				}
				return nil
			}
			h.buffer = append(h.buffer, msg)
			// Size-triggered flush; reuse the backing array after flushing.
			if len(h.buffer) >= h.consumer.config.BatchSize {
				h.processBatch(session, h.buffer)
				h.buffer = h.buffer[:0]
			}
		case <-ticker.C:
			// Time-triggered flush of a partial batch.
			if len(h.buffer) > 0 {
				h.processBatch(session, h.buffer)
				h.buffer = h.buffer[:0]
			}
		}
	}
	// Superseded single-message implementation, kept for reference:
	/*
		for message := range claim.Messages() {
			h.consumer.processMessage(message.Value)
			session.MarkMessage(message, "") // mark message as processed
		}
		return nil
	*/
}

// processBatch pushes one buffered batch through the pipeline: transform via
// the topic's Python script, bulk-insert into MySQL, then mark offsets.
// On any failure it returns WITHOUT marking, so the batch will be redelivered
// after the next rebalance/restart (at-least-once semantics).
func (h *consumerGroupHandler) processBatch(session sarama.ConsumerGroupSession, batch []*sarama.ConsumerMessage) {
	atomic.AddInt64(&h.consumer.messageCount, int64(len(batch)))

	// Collect raw payloads for the batch call.
	var batchData [][]byte
	for _, msg := range batch {
		batchData = append(batchData, msg.Value)
	}

	// Transform the whole batch in a single python3 invocation.
	results, err := execPythonBatch(h.consumer.scriptPath, batchData)
	if err != nil {
		logrus.Errorf("批量处理失败[%s]: %v", h.consumer.topic, err)
		return
	}

	// Persist all rows in one multi-row INSERT.
	if err := saveBatchToMySQL(h.consumer.topic, results); err != nil {
		logrus.Errorf("批量存储失败[%s]: %v", h.consumer.topic, err)
		return
	}

	// Mark every message; sarama commits the highest marked offset.
	for _, msg := range batch {
		session.MarkMessage(msg, "")
	}
}

// execPythonBatch runs scriptPath with python3, feeding batch as one JSON
// array on stdin and decoding a schema.PythonResponse from stdout. The
// response must be successful and contain exactly one result per input.
//
// Fixes vs. original: StdinPipe/StdoutPipe errors were ignored, io.ReadAll's
// error was dropped, and `defer cmd.Wait()` discarded the exit status; all
// three are now checked.
func execPythonBatch(scriptPath string, batch [][]byte) ([]map[string]interface{}, error) {
	cmd := exec.Command("python3", scriptPath)
	cmd.Stderr = os.Stderr

	stdin, err := cmd.StdinPipe()
	if err != nil {
		return nil, fmt.Errorf("stdin pipe: %w", err)
	}
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return nil, fmt.Errorf("stdout pipe: %w", err)
	}

	if err := cmd.Start(); err != nil {
		return nil, err
	}

	// Feed the whole batch as a single JSON array; closing stdin signals EOF.
	go func() {
		defer stdin.Close()
		dataArray := make([]json.RawMessage, 0, len(batch))
		for _, d := range batch {
			dataArray = append(dataArray, json.RawMessage(d))
		}
		if err := json.NewEncoder(stdin).Encode(dataArray); err != nil {
			logrus.Error("encode err:", err)
		}
	}()

	// Read all output before Wait (Wait closes the pipes).
	rawOutput, readErr := io.ReadAll(stdout)
	waitErr := cmd.Wait()
	if readErr != nil {
		return nil, fmt.Errorf("读取Python输出失败: %w", readErr)
	}
	if waitErr != nil {
		return nil, fmt.Errorf("python3进程退出异常: %w", waitErr)
	}

	logrus.Debugf("Python原始输出: %s", string(rawOutput))
	var response schema.PythonResponse
	if err := json.Unmarshal(rawOutput, &response); err != nil {
		return nil, fmt.Errorf("JSON解析失败: %v 原始内容: %s", err, string(rawOutput))
	}

	if !response.Success {
		return nil, fmt.Errorf("Python处理错误[%s]: %s", response.Type, response.Error)
	}

	// Exactly one result per input message, in order.
	if len(response.Data) != len(batch) {
		return nil, fmt.Errorf("结果数量不匹配 预期:%d 实际:%d",
			len(batch), len(response.Data))
	}

	return response.Data, nil
}

// saveBatchToMySQL bulk-inserts the processed rows for topic into its
// configured table with one multi-row INSERT IGNORE. The column set comes
// from the first row's keys (Go map iteration order is random, so the column
// order varies between calls but is consistent within one statement); rows
// missing a key insert NULL for that column.
//
// Fixes vs. original: an empty first row made strings.Repeat(..., -1) panic;
// such batches are now skipped.
// NOTE(review): the table and column names are interpolated into the SQL
// text — they come from topic config and Python output; confirm both are
// trusted, otherwise this is an injection surface.
func saveBatchToMySQL(topic string, batch []map[string]interface{}) error {
	val, exists := configCache.Load(topic)
	if !exists || len(batch) == 0 {
		return nil
	}
	config := val.(struct{ Table, ScriptPath string })

	columns := make([]string, 0, len(batch[0]))
	for k := range batch[0] {
		columns = append(columns, k)
	}
	// Nothing sensible to insert for a zero-column row (and Repeat would panic).
	if len(columns) == 0 {
		return nil
	}

	rowPlaceholder := "(" + strings.Repeat("?,", len(columns)-1) + "?)"
	values := make([]interface{}, 0, len(batch)*len(columns))
	placeholders := make([]string, 0, len(batch))
	for _, data := range batch {
		for _, col := range columns {
			values = append(values, data[col])
		}
		placeholders = append(placeholders, rowPlaceholder)
	}

	query := fmt.Sprintf("INSERT IGNORE INTO %s (%s) VALUES %s",
		config.Table,
		strings.Join(columns, ","),
		strings.Join(placeholders, ","),
	)

	_, err := mysqlDB.Exec(query, values...)
	return err
}

// processMessage handles a single raw Kafka payload: run it through the
// topic's Python script, then persist the result to MySQL. The message
// counter is bumped regardless of outcome.
func (c *Consumer) processMessage(data []byte) error {
	atomic.AddInt64(&c.messageCount, 1)

	// Transform via the generated Python handler.
	result, err := execPython(c.scriptPath, data)
	if err != nil {
		logrus.Errorf("处理失败[%s]: %v, data: %s", c.topic, err, string(data))
		return err
	}

	// Persist the transformed row.
	if err = saveToMySQL(c.topic, result); err != nil {
		logrus.Errorf("存储失败[%s]: %v, data: %s", c.topic, err, string(data))
		return err
	}

	return nil
}

// StartStatsReporter publishes this consumer's status to etcd under
// /status/topics/<topic>/consumers/<id> every 30 seconds until ctx is
// cancelled.
//
// Fixes vs. original: ctx was accepted but never observed, so the goroutine
// (and its ticker) leaked after StopConsumer; Marshal/Put errors were
// silently dropped and are now logged.
func (c *Consumer) StartStatsReporter(ctx context.Context) {
	ticker := time.NewTicker(30 * time.Second)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			topicStatus := &schema.TopicStatus{
				ID:            id,
				Host:          tomlConfData.LocalIP,
				Topic:         c.topic,
				ConsumerCount: atomic.LoadInt64(&c.messageCount),
				LatestOffset:  getLatestOffset(c.topic),
				LastUpdate:    time.Now().Unix(),
			}
			data, err := json.Marshal(topicStatus)
			if err != nil {
				logrus.Errorf("序列化状态失败[%s]: %v", c.topic, err)
				continue
			}
			if _, err := etcdClient.Put(context.Background(),
				fmt.Sprintf("/status/topics/%s/consumers/%s", c.topic, id),
				string(data),
			); err != nil {
				logrus.Errorf("状态上报失败[%s]: %v", c.topic, err)
			}
		}
	}
}

func execPython(scriptPath string, input []byte) (map[string]interface{}, error) {
	cmd := exec.Command("python3", scriptPath)
	cmd.Stderr = os.Stderr

	stdin, _ := cmd.StdinPipe()
	go func() {
		defer stdin.Close()
		stdin.Write(input)
	}()

	output, err := cmd.Output()
	if err != nil {
		return nil, fmt.Errorf("执行错误: %v", err)
	}

	var response struct {
		Success bool                   `json:"success"`
		Data    map[string]interface{} `json:"data"`
		Error   string                 `json:"error"`
	}
	if err := json.Unmarshal(output, &response); err != nil {
		return nil, fmt.Errorf("解析结果失败: %v", err)
	}

	if !response.Success {
		return nil, fmt.Errorf("处理错误: %s", response.Error)
	}

	return response.Data, nil
}

// saveToMySQL inserts a single processed row for topic into its configured
// table.
//
// Fixes vs. original: an empty row produced the invalid statement
// "INSERT INTO t () VALUES ()"; it is now a no-op.
// NOTE(review): table/column names are interpolated into the SQL text —
// confirm they come only from trusted config / script output.
func saveToMySQL(topic string, data map[string]interface{}) error {
	val, exists := configCache.Load(topic)
	if !exists {
		return fmt.Errorf("配置不存在: %s", topic)
	}
	config := val.(struct{ Table, ScriptPath string })

	if len(data) == 0 {
		return nil // nothing to insert
	}

	columns := make([]string, 0, len(data))
	values := make([]interface{}, 0, len(data))
	placeholders := make([]string, 0, len(data))

	for k, v := range data {
		columns = append(columns, k)
		values = append(values, v)
		placeholders = append(placeholders, "?")
	}

	query := fmt.Sprintf("INSERT INTO %s (%s) VALUES (%s)",
		config.Table,
		strings.Join(columns, ","),
		strings.Join(placeholders, ","),
	)

	_, err := mysqlDB.Exec(query, values...)
	if err != nil {
		logrus.Error("mysql exec fail. ", err.Error())
	}

	return err
}

// StartConsumer creates and launches a Kafka consumer group for topic; it is
// a no-op when a consumer for the topic already exists.
//
// Fixes vs. original: the context's cancel func was leaked when
// NewConsumerGroup failed (now called on that path), the failure is logged
// at error level, and a stray misplaced comment was removed.
func StartConsumer(topic string) {
	if _, exists := consumers.Load(topic); exists {
		return
	}

	ctx, cancel := context.WithCancel(context.Background())
	consumer := &Consumer{
		topic:      topic,
		scriptPath: getScriptPath(topic),
		cancel:     cancel,
		config: schema.ConsumerConfig{
			BatchSize:    500,             // messages per processing/insert batch
			BatchTimeout: 1 * time.Second, // max time a partial batch may wait
		},
	}

	// Kafka connection parameters come from the global etcd config.
	brokers := getKafkaBrokers()
	kfkConf := getGlobalKafkaConfig()

	config := sarama.NewConfig()
	config.Consumer.Return.Errors = true
	config.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{
		sarama.NewBalanceStrategyRange(),
	}
	config.Consumer.Group.Rebalance.Timeout = 60 * time.Second
	// "latest" starts at the newest offset; anything else starts from oldest.
	if kfkConf.AutoOffsetReset == "latest" {
		config.Consumer.Offsets.Initial = sarama.OffsetNewest
	} else {
		config.Consumer.Offsets.Initial = sarama.OffsetOldest
	}

	// Throughput tuning: large fetches, bounded wait/processing times.
	config.Consumer.Fetch.Default = 10 * 1024 * 1024
	config.Consumer.Fetch.Max = 50 * 1024 * 1024
	config.Consumer.Fetch.Min = 1 * 1024 * 1024
	config.Consumer.MaxWaitTime = 1 * time.Second
	config.Consumer.MaxProcessingTime = 30 * time.Second
	config.Consumer.Retry.Backoff = 500 * time.Millisecond

	consumerGroup, err := sarama.NewConsumerGroup(brokers, kfkConf.GroupId, config)
	if err != nil {
		cancel() // release the context: nothing was started
		logrus.Errorf("创建消费者失败: %s: %v", topic, err)
		return
	}
	consumer.ConsumerGroup = consumerGroup
	consumers.Store(topic, consumer)

	// Consume in the background until StopConsumer cancels ctx.
	go consumer.Run(ctx)
	logrus.Printf("消费者已启动: %s", topic)
}

// Run consumes c.topic until ctx is cancelled, re-entering Consume after
// transient errors with a 5-second backoff (Consume returns on every
// rebalance). The stats reporter runs alongside for the same lifetime, and
// the consumer group is closed on exit.
func (c *Consumer) Run(ctx context.Context) {
	defer c.Close()
	logrus.Info("Start consuming topic:", c.topic)
	go c.StartStatsReporter(ctx)

	for {
		if ctx.Err() != nil {
			logrus.Info("接收到停止信号，终止消费")
			return
		}
		if err := c.Consume(ctx, []string{c.topic}, c.handler()); err != nil {
			logrus.Errorf("消费错误: %v", err)
			time.Sleep(5 * time.Second)
		}
	}
}

// Close shuts down the underlying consumer group, logging (instead of
// silently dropping) any close error.
func (c *Consumer) Close() {
	if err := c.ConsumerGroup.Close(); err != nil {
		logrus.Error("close consumer group fail. ", err.Error())
	}
}

// StopConsumer cancels and closes the consumer for topic, if one is running.
//
// Fixes vs. original: the context is cancelled BEFORE closing the group, so
// the Run loop stops instead of retrying Consume against an already-closed
// group (which logged spurious errors with 5s sleeps until cancel landed).
func StopConsumer(topic string) {
	if val, exists := consumers.LoadAndDelete(topic); exists {
		consumer := val.(*Consumer)
		consumer.cancel()
		consumer.Close()
		logrus.Printf("消费者已停止: %s", topic)
	}
}

// HealthKeeper keeps this instance's /health/<ip> key in etcd fresh: every
// 10 seconds it re-publishes the health status under a 30-second lease and
// renews that lease. It never returns; a failed initial Grant is fatal.
func HealthKeeper() {
	ticker := time.NewTicker(10 * time.Second)
	defer ticker.Stop()

	// One lease for the lifetime of this keeper; renewed each tick below.
	leaseResp, err := etcdClient.Grant(context.Background(), 30)
	if err != nil {
		logrus.Fatal("创建租约失败:", err)
	}

	// Baseline health payload; Status/LastUpdate are refreshed per tick.
	status := schema.HealthStatus{
		ID:      id,
		Status:  "running",
		Host:    tomlConfData.LocalIP,
		Version: "1.0.0", // version number
	}

	for range ticker.C {
		status.LastUpdate = time.Now().Unix()
		data, _ := json.Marshal(status)

		// Publish under the lease so the key expires if this process dies.
		_, putErr := etcdClient.Put(context.Background(),
			fmt.Sprintf("/health/%s", tomlConfData.LocalIP),
			string(data),
			etcd.WithLease(leaseResp.ID),
		)
		if putErr != nil {
			logrus.Printf("健康状态上报失败: %v", putErr)
			status.Status = "error"
		} else {
			status.Status = "running"
		}

		// Renew the lease; with a 10s tick and 30s TTL one miss is tolerated.
		if _, err := etcdClient.KeepAliveOnce(context.Background(), leaseResp.ID); err != nil {
			logrus.Printf("租约续期失败: %v", err)
		}
	}
}

// getLatestOffset sums the newest offsets across all partitions of topic,
// returning -1 when a Kafka client cannot be created or partitions cannot be
// listed. Per-partition offset errors are skipped silently.
func getLatestOffset(topic string) int64 {
	client, err := sarama.NewClient(getKafkaBrokers(), sarama.NewConfig())
	if err != nil {
		logrus.Printf("创建Kafka客户端失败: %v", err)
		return -1
	}
	defer client.Close()

	partitions, err := client.Partitions(topic)
	if err != nil {
		logrus.Printf("获取分区失败: %s: %v", topic, err)
		return -1
	}

	total := int64(0)
	for _, p := range partitions {
		if offset, offErr := client.GetOffset(topic, p, sarama.OffsetNewest); offErr == nil {
			total += offset
		}
	}
	return total
}

// Stop releases the shared etcd and MySQL handles. Safe to call even if
// initialization only partially completed (nil handles are skipped, where
// the original would have panicked).
func Stop() {
	if etcdClient != nil {
		etcdClient.Close()
	}
	if mysqlDB != nil {
		mysqlDB.Close()
	}
}
