package com.xy.kafka.service;

import jakarta.annotation.PostConstruct;
import jakarta.annotation.PreDestroy;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;

import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.Collectors;

/**
 * Local (non-Spark) streaming analytics service.
 *
 * <p>Despite its name, this service deliberately does not start a Spark Streaming job.
 * It maintains in-memory word counts, aggregate statistics and a bounded FIFO of recent
 * messages, fed by mock data seeded at construction time and by
 * {@link #processMessage(String)} for records received from Kafka.
 *
 * <p>Thread-safety: {@code totalMessages} is an {@link AtomicLong} and the maps are
 * {@link ConcurrentHashMap}s because {@code processMessage} may be invoked from Kafka
 * listener threads while web controllers read the getters; {@code recentMessages} is a
 * plain {@link LinkedList} and is always accessed under {@code synchronized (recentMessages)}.
 */
@Service
public class SparkStreamingService {

    /** Upper bound on the number of messages retained for the "recent messages" view. */
    private static final int MAX_RECENT_MESSAGES = 100;

    // Injected Kafka configuration; kept for future use, unused in local mode.
    private final String bootstrapServers;
    private final String topic;
    /** word -> occurrence count; updated atomically via {@code merge}. */
    private final Map<String, Long> wordCountMap;
    /** Latest aggregate snapshot, stored under the key "current". */
    private final Map<String, Map<String, Object>> aggregatedStats;
    /** Total messages seen (mock + live); atomic because listener threads increment it. */
    private final AtomicLong totalMessages;
    private final long processingStartTime;
    /** Bounded FIFO of recent messages; guard every access with its own monitor. */
    private final LinkedList<MessageRecord> recentMessages;

    /**
     * Creates the service and seeds it with mock data so the UI has content on startup.
     *
     * @param bootstrapServers Kafka bootstrap servers (injected; unused by local mode)
     * @param topic            Kafka topic name (injected; unused by local mode)
     */
    @Autowired
    public SparkStreamingService(@Value("${spring.kafka.bootstrap-servers}") String bootstrapServers,
                                @Value("${kafka.topic:tc-topic}") String topic) {
        this.bootstrapServers = bootstrapServers;
        this.topic = topic;
        this.wordCountMap = new ConcurrentHashMap<>();
        this.aggregatedStats = new ConcurrentHashMap<>();
        this.totalMessages = new AtomicLong();
        this.processingStartTime = System.currentTimeMillis();
        this.recentMessages = new LinkedList<>();

        // Local processing mode: no Spark Streaming context is created.
        System.out.println("使用本地消息处理模式，无需Spark Streaming");

        // Seed demo data so charts and tables are populated immediately after startup.
        addMockData();
        addExtraMockData();
    }

    /**
     * Records a single message: appends it to the bounded recent-messages list,
     * increments the total counter and updates per-word frequencies.
     *
     * <p>This is the single shared ingestion path for both mock and live messages.
     *
     * @param message raw message text; tokenized on whitespace
     */
    private void recordMessage(String message) {
        synchronized (recentMessages) {
            // Evict the oldest entry to keep the list bounded.
            if (recentMessages.size() >= MAX_RECENT_MESSAGES) {
                recentMessages.removeFirst();
            }
            recentMessages.addLast(new MessageRecord(message));
        }
        totalMessages.incrementAndGet();

        // Naive whitespace tokenization; merge() is atomic on ConcurrentHashMap,
        // unlike the get-then-put pattern, so concurrent increments are never lost.
        for (String word : message.split("\\s+")) {
            if (!word.isEmpty()) {
                wordCountMap.merge(word, 1L, Long::sum);
            }
        }
    }

    /** Seeds a handful of demo sentences so the system has data right after startup. */
    private void addMockData() {
        String[] mockMessages = {
            "Spark Streaming is awesome",
            "Kafka is a distributed streaming platform",
            "Data visualization with ECharts",
            "Spring Boot makes development easy",
            "Real time analytics with Spark",
            "Big data processing with Spark",
            "Kafka provides high throughput",
            "Data science is changing the world",
            "Streaming data processing is important",
            "Visualization helps understand data"
        };
        for (String msg : mockMessages) {
            recordMessage(msg);
        }
        updateAggregatedStats();
    }

    /** Seeds additional bulk and high-frequency demo data for richer charts/tables. */
    private void addExtraMockData() {
        // Bulk messages to fill the table view.
        for (int i = 0; i < 20; i++) {
            recordMessage("Mock message " + i + ": 大数据 实时处理 数据分析 可视化 流计算 Spark Kafka SpringBoot Vue");
        }
        // Repeated high-frequency words to make the word-count chart look meaningful.
        repeatMockMessage(10, "Spark Spark Spark Kafka Kafka Data Data Data Visualization");
        repeatMockMessage(8, "Real-time Real-time Processing Processing Streaming Streaming");
        repeatMockMessage(5, "BigData BigData Analytics Analytics Chart Chart Dashboard Dashboard");
        updateAggregatedStats();
    }

    /** Records the same mock message {@code times} times. */
    private void repeatMockMessage(int times, String message) {
        for (int i = 0; i < times; i++) {
            recordMessage(message);
        }
    }

    /** Lifecycle hook; local mode needs no Spark Streaming startup, only logs readiness. */
    @PostConstruct
    public void startStreaming() {
        System.out.println("本地消息处理模式已启动，使用模拟数据和手动消息处理");
    }

    /** Recomputes the aggregate snapshot stored under the "current" key. */
    private void updateAggregatedStats() {
        long total = totalMessages.get();
        double elapsedMinutes = (System.currentTimeMillis() - processingStartTime) / (1000.0 * 60);
        // Guard against division by zero right after startup.
        double messagesPerMinute = elapsedMinutes > 0 ? total / elapsedMinutes : 0;

        Map<String, Object> stats = new HashMap<>();
        stats.put("totalMessages", total);
        stats.put("messagesPerMinute", String.format("%.2f", messagesPerMinute));
        stats.put("uniqueWords", wordCountMap.size());
        stats.put("topWords", getTopWords(5));
        stats.put("lastUpdated", new Date().toString());

        aggregatedStats.put("current", stats);
    }

    /**
     * Returns the {@code limit} most frequent words as {@code {word, count}} maps,
     * sorted by descending count.
     */
    private List<Map<String, Object>> getTopWords(int limit) {
        return wordCountMap.entrySet().stream()
                .sorted(Map.Entry.<String, Long>comparingByValue().reversed())
                .limit(limit)
                .map(entry -> {
                    Map<String, Object> wordStat = new HashMap<>();
                    wordStat.put("word", entry.getKey());
                    wordStat.put("count", entry.getValue());
                    return wordStat;
                })
                .toList();
    }

    /** Immutable record of one received message and its arrival time (epoch millis). */
    private static class MessageRecord {
        private final String message;
        private final long timestamp;

        public MessageRecord(String message) {
            this.message = message;
            this.timestamp = System.currentTimeMillis();
        }

        public String getMessage() {
            return message;
        }

        public long getTimestamp() {
            return timestamp;
        }
    }

    /**
     * Returns the latest aggregate snapshot, or an empty map before the first update.
     * A defensive copy is returned so callers cannot mutate internal state.
     */
    public Map<String, Object> getStreamingStats() {
        Map<String, Object> current = aggregatedStats.get("current");
        return current != null ? new HashMap<>(current) : new HashMap<>();
    }

    /**
     * Health check; the local demo mode has no external dependencies, so it is
     * always considered healthy.
     */
    public boolean isHealthy() {
        return true;
    }

    /**
     * Returns live numeric statistics for the controller layer
     * (totalMessages, uniqueWords, wordCount, messagesPerMinute).
     * Computed from the current counters on every call — no caching.
     */
    public Map<String, Long> getStats() {
        Map<String, Long> result = new HashMap<>();
        result.put("totalMessages", totalMessages.get());
        result.put("uniqueWords", (long) wordCountMap.size());

        // Total number of word occurrences across all messages.
        long totalWords = wordCountMap.values().stream().mapToLong(Long::longValue).sum();
        result.put("wordCount", totalWords);

        // Rough throughput estimate since service start.
        double elapsedMinutes = (System.currentTimeMillis() - processingStartTime) / (1000.0 * 60);
        long messagesPerMinute = elapsedMinutes > 0 ? (long) (totalMessages.get() / elapsedMinutes) : 0;
        result.put("messagesPerMinute", messagesPerMinute);

        return result;
    }

    /**
     * Returns up to the 100 most frequent words as {@code {word, count}} maps,
     * sorted by descending count. Computed fresh on every call.
     */
    public List<Map<String, Object>> getWordCounts() {
        return wordCountMap.entrySet().stream()
                .sorted(Map.Entry.<String, Long>comparingByValue().reversed())
                .limit(100)
                .map(entry -> {
                    Map<String, Object> wordInfo = new HashMap<>();
                    wordInfo.put("word", entry.getKey());
                    wordInfo.put("count", entry.getValue());
                    return wordInfo;
                })
                .collect(Collectors.toList());
    }

    /**
     * Returns the retained recent messages (oldest first) as
     * {@code {message, timestamp}} maps. Snapshot is taken under the list's monitor.
     */
    public List<Map<String, Object>> getRecentMessages() {
        synchronized (recentMessages) {
            return recentMessages.stream()
                    .map(msg -> {
                        Map<String, Object> messageInfo = new HashMap<>();
                        messageInfo.put("message", msg.getMessage());
                        messageInfo.put("timestamp", msg.getTimestamp());
                        return messageInfo;
                    })
                    .collect(Collectors.toList());
        }
    }

    /**
     * Processes a single message received from Kafka: records it and refreshes
     * the aggregate statistics snapshot.
     *
     * @param message message payload
     */
    public void processMessage(String message) {
        recordMessage(message);
        updateAggregatedStats();
    }

    /** Lifecycle hook: releases in-memory state on shutdown. */
    @PreDestroy
    public void stopStreaming() {
        wordCountMap.clear();
        synchronized (recentMessages) {
            recentMessages.clear();
        }
        System.out.println("本地消息处理服务资源已清理");
    }
}