package com.example.demo.service;

import com.example.demo.model.KafkaExportConfig;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.regex.Pattern;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.jdbc.core.BatchPreparedStatementSetter;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.kafka.config.TopicBuilder;
import org.springframework.kafka.core.KafkaAdmin;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.stereotype.Service;

/**
 * Exports tabular data (rows as {@code Map<column, value>}) to external sinks:
 * a Kafka topic (one JSON message per row, with per-message retry) or a MySQL
 * table (single JDBC batch insert).
 *
 * <p>Not thread-safe beyond what the injected Spring singletons guarantee;
 * each export call is independent and blocking.
 */
@Service
@Slf4j
public class DataExportService {
    @Autowired
    private JdbcTemplate jdbcTemplate;

    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    @Autowired
    private ObjectMapper objectMapper;

    @Autowired
    private KafkaAdmin kafkaAdmin;

    /** Maximum attempts for topic creation and for each individual message send. */
    private static final int MAX_RETRIES = 3;
    /** Base back-off between retries; multiplied by the attempt number (linear back-off). */
    private static final long RETRY_DELAY = 1000L; // 1 second
    /** How long to block waiting for a single Kafka send acknowledgement. */
    private static final long SEND_TIMEOUT_SECONDS = 5L;
    /**
     * Whitelist for SQL identifiers (table/column names). Identifiers cannot be
     * bound as '?' parameters, so this is the injection defense for the
     * string-built INSERT statement in {@link #exportToMysql}.
     */
    private static final Pattern SQL_IDENTIFIER = Pattern.compile("[A-Za-z_][A-Za-z0-9_]*");

    /**
     * Ensures the given topic exists, creating it (1 partition, 1 replica) when missing.
     *
     * <p>Retries up to {@link #MAX_RETRIES} times with linear back-off. Failure is
     * logged but deliberately NOT propagated: the export proceeds and will either
     * rely on broker-side auto-creation or fail later at send time.
     *
     * @param topicName name of the Kafka topic to create or verify
     */
    private void ensureTopicExists(String topicName) {
        for (int attempt = 1; attempt <= MAX_RETRIES; attempt++) {
            try {
                NewTopic newTopic = TopicBuilder.name(topicName)
                    .partitions(1)
                    .replicas(1)
                    .build();
                kafkaAdmin.createOrModifyTopics(newTopic);
                log.info("主题 {} 创建成功或已存在", topicName);
                return;
            } catch (Exception e) {
                log.warn("创建主题 {} 第 {} 次尝试失败: {}", topicName, attempt, e.getMessage());
                // Give up immediately if we are interrupted while backing off.
                if (attempt < MAX_RETRIES && !sleepBeforeRetry(attempt)) {
                    return;
                }
            }
        }
        log.error("创建主题 {} 失败，已重试 {} 次", topicName, MAX_RETRIES);
    }

    /**
     * Sleeps for {@code RETRY_DELAY * attempt} milliseconds before the next retry.
     *
     * @param attempt 1-based attempt number, used to scale the back-off
     * @return {@code true} if the sleep completed; {@code false} if the thread was
     *         interrupted (the interrupt flag is restored before returning)
     */
    private boolean sleepBeforeRetry(int attempt) {
        try {
            Thread.sleep(RETRY_DELAY * attempt);
            return true;
        } catch (InterruptedException ie) {
            Thread.currentThread().interrupt();
            return false;
        }
    }

    /**
     * Exports each row as a JSON message to the configured Kafka topic.
     *
     * <p>Sends are synchronous (each waits up to {@link #SEND_TIMEOUT_SECONDS}
     * seconds for the broker ack) and retried up to {@link #MAX_RETRIES} times
     * with linear back-off. If the thread is interrupted during a back-off, the
     * whole export stops and the count so far is returned. Note that every
     * message uses the same key from the config, so all rows land on one
     * partition when a key is set.
     *
     * @param data        rows to export; may be empty (returns 0)
     * @param kafkaConfig target topic and optional message key
     * @return number of messages successfully acknowledged by the broker
     * @throws IllegalArgumentException if the configured topic is null or blank
     */
    public int exportToKafka(List<Map<String, String>> data, KafkaExportConfig kafkaConfig) {
        log.debug("开始准备导出数据到Kafka, 配置信息: {}", kafkaConfig);

        // Validate configuration before touching the broker.
        if (kafkaConfig.getTopic() == null || kafkaConfig.getTopic().trim().isEmpty()) {
            throw new IllegalArgumentException("Topic不能为空");
        }
        // Nothing to send — avoid topic creation and the NPE on a null list.
        if (data == null || data.isEmpty()) {
            log.info("导出数据为空, topic: {}", kafkaConfig.getTopic());
            return 0;
        }

        ensureTopicExists(kafkaConfig.getTopic());

        int successCount = 0;
        int totalCount = data.size();

        log.info("开始导出数据到Kafka, topic: {}, 总数据量: {}", kafkaConfig.getTopic(), totalCount);

        export:
        for (int i = 0; i < totalCount; i++) {
            int retryCount = 0;
            boolean success = false;

            while (!success && retryCount < MAX_RETRIES) {
                try {
                    Map<String, String> row = data.get(i);
                    log.debug("正在处理第 {} 条数据: {}", i + 1, row);
                    String json = objectMapper.writeValueAsString(row);

                    CompletableFuture<SendResult<String, String>> future = kafkaTemplate.send(
                        kafkaConfig.getTopic(),
                        kafkaConfig.getKey(),
                        json
                    );

                    // Block until the broker acknowledges (or the timeout fires).
                    SendResult<String, String> result = future.get(SEND_TIMEOUT_SECONDS, TimeUnit.SECONDS);
                    RecordMetadata metadata = result.getRecordMetadata();
                    log.debug("消息发送成功 - topic: {}, partition: {}, offset: {}",
                        metadata.topic(), metadata.partition(), metadata.offset());

                    success = true;
                    successCount++;

                } catch (Exception e) {
                    retryCount++;
                    log.error("处理第 {} 条数据时发生错误，重试次数 {}/{}", i + 1, retryCount, MAX_RETRIES, e);
                    // Bug fix: the old code only broke out of the retry loop on
                    // interruption and kept sending the remaining records. An
                    // interrupted export must stop entirely.
                    if (retryCount < MAX_RETRIES && !sleepBeforeRetry(retryCount)) {
                        break export;
                    }
                }
            }

            // Coarse progress indicator every 100 records.
            if (i > 0 && i % 100 == 0) {
                log.info("已发送: {}/{}", i, totalCount);
            }
        }

        log.info("数据导出完成, 成功: {}, 总数: {}", successCount, totalCount);
        return successCount;
    }

    /**
     * Inserts all rows into the given MySQL table with a single JDBC batch.
     *
     * <p>The column list is fixed from the FIRST row's key set; values for every
     * row are bound by column name so rows backed by unordered maps (plain
     * {@code HashMap}) still line up with the placeholders. Rows missing a
     * column are bound as SQL NULL. The table and column names are validated
     * against a strict identifier whitelist because identifiers cannot be sent
     * as '?' parameters.
     *
     * @param data  rows to insert; a null or empty list is a no-op
     * @param table target table name (validated, not escaped)
     * @throws IllegalArgumentException if the table or any column name is not a
     *                                  plain SQL identifier
     */
    public void exportToMysql(List<Map<String, String>> data, String table) {
        // Empty input used to throw IndexOutOfBoundsException on data.get(0).
        if (data == null || data.isEmpty()) {
            log.warn("导出数据为空, 跳过写入表 {}", table);
            return;
        }

        validateIdentifier(table);
        // Pin the column order once so every batch row binds in the same order.
        List<String> columns = new ArrayList<>(data.get(0).keySet());
        for (String column : columns) {
            validateIdentifier(column);
        }

        String columnList = String.join(",", columns);
        String placeholders = String.join(",", Collections.nCopies(columns.size(), "?"));
        String sql = String.format("INSERT INTO %s (%s) VALUES (%s)", table, columnList, placeholders);

        // Values go through PreparedStatement parameters — only the validated
        // identifiers are interpolated into the SQL text.
        jdbcTemplate.batchUpdate(sql, new BatchPreparedStatementSetter() {
            @Override
            public void setValues(PreparedStatement ps, int i) throws SQLException {
                Map<String, String> row = data.get(i);
                int index = 1;
                for (String column : columns) {
                    // Bind by name, not by the row's own iteration order.
                    ps.setString(index++, row.get(column));
                }
            }

            @Override
            public int getBatchSize() {
                return data.size();
            }
        });
    }

    /**
     * Rejects any string that is not a bare SQL identifier (letters, digits,
     * underscore; must not start with a digit).
     *
     * @param identifier candidate table or column name
     * @throws IllegalArgumentException if the identifier is null or malformed
     */
    private void validateIdentifier(String identifier) {
        if (identifier == null || !SQL_IDENTIFIER.matcher(identifier).matches()) {
            throw new IllegalArgumentException("非法的SQL标识符: " + identifier);
        }
    }

    /** Demonstrates the three log levels used by this service. */
    public void someMethod() {
        log.info("这是一条信息日志");
        log.debug("这是一条调试日志");
        log.error("这是一条错误日志", new Exception("发生错误"));
    }
}