package com.innodealing.service;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.stereotype.Component;
import org.springframework.stereotype.Service;

import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

/**
 * Kafka test-message producer.
 *
 * <p>Core purpose: batch-produce messages and insert "error" messages (whose
 * payload contains a keyword that downstream consumers treat as a business
 * error) at caller-specified positions, to exercise precise-filtering logic.
 *
 * <p>Not thread-safe in any special way beyond {@link KafkaTemplate}'s own
 * guarantees; sending is synchronous ({@code Future.get()}) to keep ordering
 * deterministic for tests.
 */
@Service
public class KafkaProducerService {

    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    // DateTimeFormatter is immutable and thread-safe, so a single shared
    // instance is fine.
    private static final DateTimeFormatter TIME_FORMATTER = DateTimeFormatter.ofPattern("HH:mm:ss.SSS");

    /**
     * Sends {@code totalCount} messages to the given topic, inserting error
     * messages at the requested positions. Each send blocks until Kafka
     * acknowledges it, and a small delay is added between sends to keep
     * message order deterministic.
     *
     * @param topic          target topic name
     * @param partition      partition number, or {@code null} to let Kafka
     *                       assign the partition
     * @param totalCount     total number of messages to send
     * @param errorPositions zero-based indices at which to send error
     *                       messages; {@code null} or empty means no errors
     * @param messagePrefix  prefix used for both keys and payloads
     * @return send results for the messages that were sent successfully
     *         (failed sends are logged and skipped)
     */
    public List<SendResult<String, String>> sendBatchWithErrors(String topic, Integer partition,
                                                                int totalCount, List<Integer> errorPositions,
                                                                String messagePrefix) {
        // Copy into a HashSet once: List.contains is O(n) per message, which
        // made the loop O(n*m). Also tolerate a null errorPositions argument.
        Set<Integer> errorIndexes = (errorPositions == null)
                ? Collections.emptySet()
                : new HashSet<>(errorPositions);

        List<SendResult<String, String>> results = new ArrayList<>(Math.max(totalCount, 0));

        System.out.println("\n=== 开始批量发送消息 ===");
        System.out.println("主题: " + topic);
        System.out.println("分区: " + (partition != null ? partition : "自动分配"));
        System.out.println("总数量: " + totalCount);
        System.out.println("错误位置: " + errorPositions);
        System.out.println("时间: " + LocalDateTime.now().format(TIME_FORMATTER));

        for (int i = 0; i < totalCount; i++) {
            String key = messagePrefix + "-" + i;
            String value;

            // Insert an error payload at the requested positions, otherwise a
            // normal payload.
            if (errorIndexes.contains(i)) {
                value = createErrorMessage(messagePrefix, i);
                System.out.println("发送错误消息[" + i + "]: " + value);
            } else {
                value = createNormalMessage(messagePrefix, i);
                System.out.println("发送正常消息[" + i + "]: " + value);
            }

            try {
                SendResult<String, String> result;
                if (partition != null) {
                    // Send to the explicitly requested partition.
                    result = kafkaTemplate.send(topic, partition, key, value).get();
                } else {
                    // Let Kafka pick the partition from the key.
                    result = kafkaTemplate.send(topic, key, value).get();
                }
                results.add(result);

                // Small delay to keep message ordering deterministic.
                Thread.sleep(10);

            } catch (InterruptedException e) {
                // Restore the interrupt flag (never swallow an interruption)
                // and abort the remainder of the batch.
                Thread.currentThread().interrupt();
                System.err.println("发送消息失败[" + i + "]: " + e.getMessage());
                break;
            } catch (Exception e) {
                // Best-effort batch: log the failure and continue with the
                // next message so one bad send doesn't abort the test run.
                System.err.println("发送消息失败[" + i + "]: " + e.getMessage());
            }
        }

        System.out.println("=== 批量发送完成 ===\n");
        return results;
    }

    /**
     * Builds a normal (non-error) message payload.
     */
    private String createNormalMessage(String prefix, int index) {
        return String.format("%s-normal-message-%d-at-%s", 
                           prefix, index, LocalDateTime.now().format(TIME_FORMATTER));
    }

    /**
     * Builds an error message payload. The embedded "business-error" token is
     * the keyword consumers key off to trigger their failure path.
     */
    private String createErrorMessage(String prefix, int index) {
        return String.format("%s-error-message-%d-business-error-at-%s", 
                           prefix, index, LocalDateTime.now().format(TIME_FORMATTER));
    }

    /**
     * Prints per-partition statistics (message count and offset range) for a
     * list of send results.
     *
     * @param results send results to summarize; an empty list prints a
     *                "no statistics" notice
     */
    public void printSendStatistics(List<SendResult<String, String>> results) {
        if (results.isEmpty()) {
            System.out.println("无发送统计信息");
            return;
        }

        System.out.println("\n === 发送统计信息 ===");
        System.out.println("总发送数量: " + results.size());

        // Group results by partition, then report count and min/max offset
        // per partition.
        results.stream()
                .collect(Collectors.groupingBy(
                    result -> result.getRecordMetadata().partition()))
                .forEach((partition, partitionResults) -> {
                    long minOffset = partitionResults.stream()
                            .mapToLong(r -> r.getRecordMetadata().offset())
                            .min().orElse(0);
                    long maxOffset = partitionResults.stream()
                            .mapToLong(r -> r.getRecordMetadata().offset())
                            .max().orElse(0);
                    System.out.println("分区 " + partition + ": " + partitionResults.size() + 
                                     " 条消息, offset范围: " + minOffset + "-" + maxOffset);
                });

        System.out.println("=== 统计信息结束 ===\n");
    }
}
