package com.example.teemor.es_demo.service;

import com.example.teemor.es_demo.config.LogManagementProperties;
import com.example.teemor.es_demo.entity.ServiceLog;
import com.example.teemor.es_demo.entity.UserBehavior;
import com.example.teemor.es_demo.repository.clickhouse.LogRepository;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Service;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

/**
 * Kafka log consumer service.
 *
 * <p>Consumes batched log messages from Kafka, buffers them in memory, and
 * periodically (or when the configured batch size is reached) writes them to
 * ClickHouse in bulk. Two independent pipelines are maintained: service logs
 * and user-behavior logs.
 *
 * <p>Thread-safety: buffers are {@link ConcurrentLinkedQueue}s drained with
 * {@code poll()}, and counters are {@link AtomicInteger}s, so listener threads
 * and the flush scheduler may run concurrently without external locking.
 */
@Slf4j
@Service
@RequiredArgsConstructor
public class LogConsumerService {

    private final LogRepository logRepository;
    private final LogManagementProperties properties;
    private final ObjectMapper objectMapper;

    // Buffer of service logs awaiting bulk insert; drained by flushServiceLogsToClickHouse().
    private final ConcurrentLinkedQueue<ServiceLog> serviceLogBuffer = new ConcurrentLinkedQueue<>();
    // Approximate element count of serviceLogBuffer (ConcurrentLinkedQueue.size() is O(n),
    // so a separate counter is kept for the cheap batch-size check).
    private final AtomicInteger serviceLogCount = new AtomicInteger(0);

    // Buffer of user-behavior logs awaiting bulk insert; drained by flushUserBehaviorsToClickHouse().
    private final ConcurrentLinkedQueue<UserBehavior> userBehaviorBuffer = new ConcurrentLinkedQueue<>();
    // Approximate element count of userBehaviorBuffer (same rationale as serviceLogCount).
    private final AtomicInteger userBehaviorCount = new AtomicInteger(0);

    // One thread per flush pipeline (service logs + user behaviors).
    private final ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(2);

    /**
     * Schedules the periodic flush tasks for both pipelines at the configured
     * batch timeout interval.
     *
     * <p>NOTE(review): this method carries no {@code @PostConstruct} (or
     * {@code InitializingBean}) hook — confirm it is invoked explicitly by
     * external configuration, otherwise the timed flushes never run and data
     * only leaves the buffers via the batch-size trigger.
     */
    public void init() {
        // Periodic flush of service logs to ClickHouse.
        scheduler.scheduleAtFixedRate(
            this::flushServiceLogsToClickHouse,
            properties.getClickhouse().getBatch().getTimeout(),
            properties.getClickhouse().getBatch().getTimeout(),
            TimeUnit.MILLISECONDS
        );

        // Periodic flush of user-behavior logs to ClickHouse.
        scheduler.scheduleAtFixedRate(
            this::flushUserBehaviorsToClickHouse,
            properties.getClickhouse().getBatch().getTimeout(),
            properties.getClickhouse().getBatch().getTimeout(),
            TimeUnit.MILLISECONDS
        );

        log.info("日志消费者服务初始化完成");
    }

    /**
     * Consumes one Kafka record containing a JSON array of service logs,
     * appends the entries to the buffer, and triggers an early flush when the
     * configured batch size is reached.
     *
     * @param record Kafka record whose value is a JSON-serialized {@code List<ServiceLog>}
     * @param ack    manual acknowledgment handle (ack-mode MANUAL is assumed from usage)
     */
    @KafkaListener(topics = "${app.log-management.kafka.topics.service-log}",
                   containerFactory = "kafkaListenerContainerFactory")
    public void consumeServiceLogs(ConsumerRecord<String, String> record, Acknowledgment ack) {
        try {
            String message = record.value();
            List<ServiceLog> logs = objectMapper.readValue(message, new TypeReference<List<ServiceLog>>() {});

            // Stage the batch in the buffer and bump the size counter.
            serviceLogBuffer.addAll(logs);
            serviceLogCount.addAndGet(logs.size());

            log.debug("接收到服务日志批次，数量: {}", logs.size());

            // Early flush once the configured batch size is reached.
            if (serviceLogCount.get() >= properties.getClickhouse().getBatch().getSize()) {
                flushServiceLogsToClickHouse();
            }

            // Manually acknowledge the record.
            ack.acknowledge();

        } catch (Exception e) {
            log.error("处理服务日志消息失败: {}", record.value(), e);
            // Deliberately acknowledged to avoid a poison-pill redelivery loop;
            // the malformed message is dropped. Consider routing to a dead-letter topic.
            ack.acknowledge();
        }
    }

    /**
     * Consumes one Kafka record containing a JSON array of user-behavior logs,
     * appends the entries to the buffer, and triggers an early flush when the
     * configured batch size is reached.
     *
     * @param record Kafka record whose value is a JSON-serialized {@code List<UserBehavior>}
     * @param ack    manual acknowledgment handle
     */
    @KafkaListener(topics = "${app.log-management.kafka.topics.user-behavior}",
                   containerFactory = "kafkaListenerContainerFactory")
    public void consumeUserBehaviors(ConsumerRecord<String, String> record, Acknowledgment ack) {
        try {
            String message = record.value();
            List<UserBehavior> behaviors = objectMapper.readValue(message, new TypeReference<List<UserBehavior>>() {});

            // Stage the batch in the buffer and bump the size counter.
            userBehaviorBuffer.addAll(behaviors);
            userBehaviorCount.addAndGet(behaviors.size());

            log.debug("接收到用户行为日志批次，数量: {}", behaviors.size());

            // Early flush once the configured batch size is reached.
            if (userBehaviorCount.get() >= properties.getClickhouse().getBatch().getSize()) {
                flushUserBehaviorsToClickHouse();
            }

            // Manually acknowledge the record.
            ack.acknowledge();

        } catch (Exception e) {
            log.error("处理用户行为日志消息失败: {}", record.value(), e);
            // Deliberately acknowledged to avoid a poison-pill redelivery loop;
            // the malformed message is dropped. Consider routing to a dead-letter topic.
            ack.acknowledge();
        }
    }

    /**
     * Drains the service-log buffer and bulk-inserts the drained entries into
     * ClickHouse. Safe to call concurrently from the scheduler and listener
     * threads: each drained entry is owned by exactly one caller via poll().
     */
    private void flushServiceLogsToClickHouse() {
        if (serviceLogBuffer.isEmpty()) {
            return;
        }

        List<ServiceLog> logs = new ArrayList<>();
        ServiceLog serviceLog;
        while ((serviceLog = serviceLogBuffer.poll()) != null) {
            logs.add(serviceLog);
        }

        if (logs.isEmpty()) {
            return;
        }

        // Decrement by the number of entries actually drained instead of set(0):
        // a plain reset would discard counts for entries enqueued concurrently
        // during the flush, and the old code never reset the counter on insert
        // failure, leaving it inflated so every batch-size check re-triggered a flush.
        serviceLogCount.addAndGet(-logs.size());

        try {
            // Deduplicate before insert. NOTE(review): distinct() relies on
            // ServiceLog#equals/hashCode — the claimed traceId+timestamp keying
            // holds only if the entity implements them that way; confirm.
            List<ServiceLog> uniqueLogs = removeDuplicateServiceLogs(logs);

            // Bulk insert into ClickHouse.
            logRepository.batchInsertServiceLogs(uniqueLogs);

            log.debug("成功写入服务日志到ClickHouse，数量: {}", uniqueLogs.size());

        } catch (Exception e) {
            // Drained entries are dropped on failure — a data-loss risk.
            // Consider a bounded retry or spooling failed batches to disk.
            log.error("写入服务日志到ClickHouse失败", e);
        }
    }

    /**
     * Drains the user-behavior buffer and bulk-inserts the drained entries into
     * ClickHouse. Same concurrency and counter semantics as
     * {@code flushServiceLogsToClickHouse}.
     */
    private void flushUserBehaviorsToClickHouse() {
        if (userBehaviorBuffer.isEmpty()) {
            return;
        }

        List<UserBehavior> behaviors = new ArrayList<>();
        UserBehavior behavior;
        while ((behavior = userBehaviorBuffer.poll()) != null) {
            behaviors.add(behavior);
        }

        if (behaviors.isEmpty()) {
            return;
        }

        // Same counter fix as the service-log pipeline: decrement by the drained
        // amount (not set(0)) so concurrent enqueues and failed inserts cannot
        // corrupt the batch-size accounting.
        userBehaviorCount.addAndGet(-behaviors.size());

        try {
            // Deduplicate before insert. NOTE(review): distinct() relies on
            // UserBehavior#equals/hashCode — the claimed userId+sessionId+timestamp
            // keying holds only if the entity implements them that way; confirm.
            List<UserBehavior> uniqueBehaviors = removeDuplicateUserBehaviors(behaviors);

            // Bulk insert into ClickHouse.
            logRepository.batchInsertUserBehaviors(uniqueBehaviors);

            log.debug("成功写入用户行为日志到ClickHouse，数量: {}", uniqueBehaviors.size());

        } catch (Exception e) {
            // Drained entries are dropped on failure — a data-loss risk.
            // Consider a bounded retry or spooling failed batches to disk.
            log.error("写入用户行为日志到ClickHouse失败", e);
        }
    }

    /**
     * Removes duplicate service logs via equals/hashCode-based distinct().
     * A more targeted key (e.g. traceId + timestamp) could replace this if the
     * entity's equality contract is broader than desired.
     */
    private List<ServiceLog> removeDuplicateServiceLogs(List<ServiceLog> logs) {
        return logs.stream()
                .distinct()
                .toList();
    }

    /**
     * Removes duplicate user-behavior logs via equals/hashCode-based distinct().
     */
    private List<UserBehavior> removeDuplicateUserBehaviors(List<UserBehavior> behaviors) {
        return behaviors.stream()
                .distinct()
                .toList();
    }

    /**
     * Flushes any buffered entries and shuts the scheduler down, waiting up to
     * five seconds before forcing termination.
     *
     * <p>NOTE(review): not annotated {@code @PreDestroy} (nor via
     * {@code DisposableBean}) — confirm it is wired up externally, otherwise
     * buffered logs are lost on shutdown.
     */
    public void destroy() {
        // Flush whatever is still buffered.
        flushServiceLogsToClickHouse();
        flushUserBehaviorsToClickHouse();

        // Orderly shutdown; escalate to shutdownNow() after the grace period.
        scheduler.shutdown();
        try {
            if (!scheduler.awaitTermination(5, TimeUnit.SECONDS)) {
                scheduler.shutdownNow();
            }
        } catch (InterruptedException e) {
            scheduler.shutdownNow();
            // Restore the interrupt flag for callers further up the stack.
            Thread.currentThread().interrupt();
        }

        log.info("日志消费者服务已销毁");
    }
}