package com.datagateway.component;

import com.datagateway.model.ProcessedData;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

/**
 * Multi-datasource router.
 *
 * <p>Routes processed data to a target Kafka cluster or Hive instance
 * according to the routing rules implemented by {@link MultiDataSourceManager}.
 * Routing decisions are cached (bounded, see {@link #MAX_CACHE_SIZE}) and basic
 * success/failure statistics are maintained. All mutable state uses concurrent
 * types, so this component is safe for concurrent use.
 *
 * @author Data Gateway Team
 * @version 1.0.0
 */
@Component
public class MultiDataSourceRouter {

    private static final Logger logger = LoggerFactory.getLogger(MultiDataSourceRouter.class);

    /**
     * Upper bound on cached routing decisions. Cache keys embed the
     * per-record data ID, so without a bound the cache grows with every
     * record routed; once the bound is reached the cache is cleared.
     */
    private static final int MAX_CACHE_SIZE = 10_000;

    /** Fallback table name used when none can be derived from the data. */
    private static final String DEFAULT_TABLE_NAME = "default_table";

    @Autowired
    private MultiDataSourceManager multiDataSourceManager;

    @Autowired
    private AlertManager alertManager;

    @Autowired
    private SystemMonitor systemMonitor;

    // Routing statistics (thread-safe counters).
    private final AtomicLong totalRouteRequests = new AtomicLong(0);
    private final AtomicLong successfulRoutes = new AtomicLong(0);
    private final AtomicLong failedRoutes = new AtomicLong(0);

    /**
     * Route decision cache, keyed by {@code "<type>:<topic-or-table>:<dataId>"}.
     * Bounded via {@link #cacheRoute(String, String)}.
     */
    private final ConcurrentHashMap<String, String> routeCache = new ConcurrentHashMap<>();

    /**
     * Routes a single record to a Kafka cluster.
     *
     * @param data processed data to route
     * @return target Kafka cluster ID; the default cluster on routing failure
     */
    public String routeToKafka(ProcessedData data) {
        try {
            totalRouteRequests.incrementAndGet();

            String topic = data.getSourceTopic();
            String cacheKey = "kafka:" + topic + ":" + data.getId();

            // Fast path: reuse a previously computed routing decision.
            String cachedCluster = routeCache.get(cacheKey);
            if (cachedCluster != null) {
                // Count cache hits as successes so success/failure rates stay
                // consistent with totalRouteRequests.
                successfulRoutes.incrementAndGet();
                logger.debug("使用缓存的路由结果: {} -> {}", topic, cachedCluster);
                return cachedCluster;
            }

            // Compute and cache the routing decision.
            String clusterId = multiDataSourceManager.routeToKafkaCluster(topic, data);
            cacheRoute(cacheKey, clusterId);

            successfulRoutes.incrementAndGet();
            logger.debug("数据路由到Kafka集群: {} -> {}", topic, clusterId);
            systemMonitor.recordRouteEvent("kafka", topic, clusterId);
            return clusterId;

        } catch (Exception e) {
            failedRoutes.incrementAndGet();
            logger.error("Kafka路由失败: {}", data.getSourceTopic(), e);
            alertManager.sendSystemErrorAlert("Kafka路由失败", e.getMessage());

            // Fall back to the default cluster so callers always get a target.
            return multiDataSourceManager.getStatistics().getDefaultKafkaCluster();
        }
    }

    /**
     * Routes a single record to a Hive instance.
     *
     * @param data processed data to route
     * @return target Hive instance ID; the default instance on routing failure
     */
    public String routeToHive(ProcessedData data) {
        try {
            totalRouteRequests.incrementAndGet();

            // Derive the target table name from the data (simplified heuristic).
            String tableName = extractTableName(data);
            String cacheKey = "hive:" + tableName + ":" + data.getId();

            // Fast path: reuse a previously computed routing decision.
            String cachedInstance = routeCache.get(cacheKey);
            if (cachedInstance != null) {
                // Count cache hits as successes so success/failure rates stay
                // consistent with totalRouteRequests.
                successfulRoutes.incrementAndGet();
                logger.debug("使用缓存的路由结果: {} -> {}", tableName, cachedInstance);
                return cachedInstance;
            }

            // Compute and cache the routing decision.
            String instanceId = multiDataSourceManager.routeToHiveInstance(tableName, data);
            cacheRoute(cacheKey, instanceId);

            successfulRoutes.incrementAndGet();
            logger.debug("数据路由到Hive实例: {} -> {}", tableName, instanceId);
            systemMonitor.recordRouteEvent("hive", tableName, instanceId);
            return instanceId;

        } catch (Exception e) {
            failedRoutes.incrementAndGet();
            logger.error("Hive路由失败: {}", data.getId(), e);
            alertManager.sendSystemErrorAlert("Hive路由失败", e.getMessage());

            // Fall back to the default instance so callers always get a target.
            return multiDataSourceManager.getStatistics().getDefaultHiveInstance();
        }
    }

    /**
     * Routes a batch of records to Kafka clusters.
     *
     * @param dataList records to route
     * @return map of data ID to target cluster ID (entries routed so far on failure)
     */
    public Map<String, String> batchRouteToKafka(List<ProcessedData> dataList) {
        Map<String, String> routeResults = new ConcurrentHashMap<>();
        try {
            for (ProcessedData data : dataList) {
                routeResults.put(data.getId(), routeToKafka(data));
            }
            logger.info("批量Kafka路由完成: {} 条数据", dataList.size());
        } catch (Exception e) {
            // Per-record failures are already handled in routeToKafka; this
            // guards against batch-level problems (e.g. a null list).
            logger.error("批量Kafka路由失败", e);
            alertManager.sendSystemErrorAlert("批量Kafka路由失败", e.getMessage());
        }
        return routeResults;
    }

    /**
     * Routes a batch of records to Hive instances.
     *
     * @param dataList records to route
     * @return map of data ID to target instance ID (entries routed so far on failure)
     */
    public Map<String, String> batchRouteToHive(List<ProcessedData> dataList) {
        Map<String, String> routeResults = new ConcurrentHashMap<>();
        try {
            for (ProcessedData data : dataList) {
                routeResults.put(data.getId(), routeToHive(data));
            }
            logger.info("批量Hive路由完成: {} 条数据", dataList.size());
        } catch (Exception e) {
            // Per-record failures are already handled in routeToHive; this
            // guards against batch-level problems (e.g. a null list).
            logger.error("批量Hive路由失败", e);
            alertManager.sendSystemErrorAlert("批量Hive路由失败", e.getMessage());
        }
        return routeResults;
    }

    /**
     * Extracts the Hive table name from the data.
     *
     * <p>Prefers an explicit {@code "tableName"} entry in the transformed
     * data; otherwise falls back to {@value #DEFAULT_TABLE_NAME}. (The
     * previous original-data heuristic was a no-op — every path through it
     * returned the default — so it has been removed.)
     *
     * @param data processed data
     * @return table name, never null
     */
    private String extractTableName(ProcessedData data) {
        try {
            Map<String, Object> transformedData = data.getTransformedData();
            if (transformedData != null) {
                // Null-safe lookup: a present key with a null value must not NPE.
                Object tableName = transformedData.get("tableName");
                if (tableName != null) {
                    return tableName.toString();
                }
            }
            return DEFAULT_TABLE_NAME;
        } catch (Exception e) {
            logger.warn("提取表名失败，使用默认表名: {}", e.getMessage());
            return DEFAULT_TABLE_NAME;
        }
    }

    /**
     * Caches a routing decision, clearing the cache first when it has grown
     * past {@link #MAX_CACHE_SIZE} to prevent unbounded memory growth.
     *
     * @param cacheKey cache key ({@code "<type>:<topic-or-table>:<dataId>"})
     * @param target   resolved target ID (must be non-null)
     */
    private void cacheRoute(String cacheKey, String target) {
        if (routeCache.size() >= MAX_CACHE_SIZE) {
            routeCache.clear();
            logger.warn("路由缓存达到上限({})，已清空", MAX_CACHE_SIZE);
        }
        routeCache.put(cacheKey, target);
    }

    /**
     * Clears the route cache.
     */
    public void clearRouteCache() {
        try {
            int cacheSize = routeCache.size();
            routeCache.clear();
            logger.info("路由缓存已清理，清理条目数: {}", cacheSize);
        } catch (Exception e) {
            logger.error("清理路由缓存失败", e);
        }
    }

    /**
     * Returns a snapshot of the routing statistics.
     *
     * @return current routing statistics
     */
    public RouteStatistics getRouteStatistics() {
        return new RouteStatistics(
            totalRouteRequests.get(),
            successfulRoutes.get(),
            failedRoutes.get(),
            routeCache.size(),
            System.currentTimeMillis()
        );
    }

    /**
     * Resets all routing statistics counters to zero.
     */
    public void resetRouteStatistics() {
        totalRouteRequests.set(0);
        successfulRoutes.set(0);
        failedRoutes.set(0);
        logger.info("路由统计信息已重置");
    }

    /**
     * Returns a defensive copy of the route cache.
     *
     * @return snapshot copy of the current route cache
     */
    public Map<String, String> getRouteCache() {
        return new ConcurrentHashMap<>(routeCache);
    }

    /**
     * Immutable snapshot of routing statistics.
     */
    public static class RouteStatistics {
        private final long totalRouteRequests;
        private final long successfulRoutes;
        private final long failedRoutes;
        private final int cacheSize;
        private final long timestamp;

        public RouteStatistics(long totalRouteRequests, long successfulRoutes, long failedRoutes,
                             int cacheSize, long timestamp) {
            this.totalRouteRequests = totalRouteRequests;
            this.successfulRoutes = successfulRoutes;
            this.failedRoutes = failedRoutes;
            this.cacheSize = cacheSize;
            this.timestamp = timestamp;
        }

        public long getTotalRouteRequests() { return totalRouteRequests; }
        public long getSuccessfulRoutes() { return successfulRoutes; }
        public long getFailedRoutes() { return failedRoutes; }
        public int getCacheSize() { return cacheSize; }
        public long getTimestamp() { return timestamp; }

        /** Success rate in percent; 0 when no requests have been recorded. */
        public double getSuccessRate() {
            return totalRouteRequests > 0 ? (double) successfulRoutes / totalRouteRequests * 100 : 0;
        }

        /** Failure rate in percent; 0 when no requests have been recorded. */
        public double getFailureRate() {
            return totalRouteRequests > 0 ? (double) failedRoutes / totalRouteRequests * 100 : 0;
        }
    }
}
