package com.flink.hbase.windowingoptimization.shuffle;

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.functions.Partitioner;
import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.api.common.functions.RichFlatMapFunction;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.flink.streaming.api.functions.source.SourceFunction;
import org.apache.flink.streaming.api.operators.StreamingRuntimeContext;
import org.apache.flink.util.Collector;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicLong;

/**
 * Shuffle分区优化和反压处理教程
 * 
 * 核心内容：
 * 1. 随机分区性能优化
 * 2. 反压检测和处理
 * 3. 动态负载均衡
 * 4. 网络吞吐量优化
 * 5. 缓冲区管理
 * 6. 分区策略选择
 * 
 * 适用场景：
 * - 大规模数据处理
 * - 高并发实时计算
 * - 负载不均衡处理
 * - 网络瓶颈优化
 * 
 * @author Flink Developer
 */
public class ShuffleOptimization {
    
    private static final Logger logger = LoggerFactory.getLogger(ShuffleOptimization.class);
    
    /**
     * Event model flowing through the pipeline.
     *
     * Plain mutable POJO (public fields + no-arg constructor) so Flink can
     * serialize it efficiently.
     */
    public static class StreamEvent {
        public String eventId;
        public String key;
        public String data;
        public long timestamp;
        public int size;             // payload size in bytes
        public int processingTime;   // expected processing cost in milliseconds
        public String sourcePartition;
        public int priority;

        /** No-arg constructor required for POJO serialization. */
        public StreamEvent() {}

        /** Fully-initializing constructor used by the synthetic source. */
        public StreamEvent(String eventId, String key, String data, long timestamp, 
                          int size, int processingTime, String sourcePartition, int priority) {
            this.eventId = eventId;
            this.key = key;
            this.data = data;
            this.timestamp = timestamp;
            this.size = size;
            this.processingTime = processingTime;
            this.sourcePartition = sourcePartition;
            this.priority = priority;
        }

        @Override
        public String toString() {
            String header = "StreamEvent{eventId='%s', key='%s', timestamp=%d, " +
                    "size=%d, processingTime=%d, sourcePartition='%s', priority=%d}";
            return String.format(header,
                    eventId, key, timestamp, size, processingTime, sourcePartition, priority);
        }
    }
    
    /**
     * Per-partition load statistics.
     *
     * Accumulates event count, byte volume and processing time and derives a
     * composite load metric used by the partitioner and the load balancer.
     * Not synchronized: each instance is updated from a single operator thread.
     */
    public static class LoadStats {
        public int partitionId;
        public long eventCount;
        public long totalSize;
        public long totalProcessingTime;
        public double avgProcessingTime;
        public long lastUpdateTime;         // wall-clock time of the last update

        public LoadStats(int partitionId) {
            this.partitionId = partitionId;
            this.eventCount = 0;
            this.totalSize = 0;
            this.totalProcessingTime = 0;
            this.avgProcessingTime = 0;
            this.lastUpdateTime = System.currentTimeMillis();
        }

        /** Folds one event into the running statistics. */
        public void addEvent(StreamEvent event) {
            eventCount++;
            totalSize += event.size;
            totalProcessingTime += event.processingTime;
            avgProcessingTime = (double) totalProcessingTime / eventCount;
            lastUpdateTime = System.currentTimeMillis();
        }

        /**
         * Composite load metric: eventCount x avgProcessingTime x avg event size.
         *
         * BUGFIX: average event size is now computed in floating point. The
         * previous expression (totalSize / Math.max(eventCount, 1)) used long
         * integer division, which truncates and could understate the load of
         * partitions receiving many small events.
         */
        public double getLoad() {
            double avgSize = eventCount == 0 ? 0.0 : (double) totalSize / eventCount;
            return eventCount * avgProcessingTime * avgSize;
        }
        
        @Override
        public String toString() {
            return String.format("LoadStats{partitionId=%d, eventCount=%d, " +
                    "totalSize=%d, avgProcessingTime=%.2f, load=%.2f}",
                    partitionId, eventCount, totalSize, avgProcessingTime, getLoad());
        }
    }
    
    /**
     * Load-aware partitioner.
     *
     * While the observed load is balanced it routes by key hash (preserving key
     * affinity); once the load spread exceeds {@code loadThreshold} it routes
     * new records to the least-loaded partition.
     *
     * NOTE(review): each parallel sender serializes its own copy of this
     * partitioner, so the statistics are per-sender, not global — confirm this
     * is the intended scope.
     */
    public static class SmartRandomPartitioner implements Partitioner<String> {
        
        private final Map<Integer, LoadStats> partitionStats = new ConcurrentHashMap<>();
        private final double loadThreshold = 0.8; // max tolerated relative load spread
        
        @Override
        public int partition(String key, int numPartitions) {
            // Lazily create a stats slot for every target partition.
            for (int i = 0; i < numPartitions; i++) {
                partitionStats.computeIfAbsent(i, LoadStats::new);
            }
            
            // While load is balanced, stick to hash partitioning for key affinity.
            if (isLoadBalanced(numPartitions)) {
                // BUGFIX: Math.abs(key.hashCode()) % n returns a NEGATIVE index
                // when hashCode() is Integer.MIN_VALUE (abs overflows).
                // Math.floorMod is always in [0, numPartitions).
                return Math.floorMod(key.hashCode(), numPartitions);
            }
            
            // Otherwise route to the partition with the lowest observed load.
            return findBestPartition(numPartitions);
        }
        
        /** Returns the partition id with the minimum composite load. */
        private int findBestPartition(int numPartitions) {
            double minLoad = Double.MAX_VALUE;
            int bestPartition = 0;
            
            for (int i = 0; i < numPartitions; i++) {
                LoadStats stats = partitionStats.get(i);
                double load = stats.getLoad();
                
                if (load < minLoad) {
                    minLoad = load;
                    bestPartition = i;
                }
            }
            
            return bestPartition;
        }
        
        /** True when the relative spread between min and max load is under the threshold. */
        private boolean isLoadBalanced(int numPartitions) {
            if (partitionStats.size() < numPartitions) return true;
            
            double maxLoad = 0;
            double minLoad = Double.MAX_VALUE;
            
            for (LoadStats stats : partitionStats.values()) {
                double load = stats.getLoad();
                maxLoad = Math.max(maxLoad, load);
                minLoad = Math.min(minLoad, load);
            }
            
            // A spread below the threshold (relative to the minimum) counts as balanced.
            return minLoad > 0 && (maxLoad - minLoad) / minLoad < loadThreshold;
        }
        
        /** Feedback hook: records an event delivered to the given partition. */
        public void updatePartitionStats(int partitionId, StreamEvent event) {
            LoadStats stats = partitionStats.get(partitionId);
            if (stats != null) {
                stats.addEvent(event);
            }
        }
    }
    
    /**
     * Backpressure detector.
     *
     * Periodically measures per-subtask throughput and warns when it drops
     * below {@code BACKPRESSURE_THRESHOLD} of the smoothed historical level.
     *
     * BUGFIX: the original kept its counters in keyed {@code ValueState}, but
     * this function is applied to NON-keyed streams (after partitionCustom /
     * plain map), where {@code getRuntimeContext().getState(...)} fails at
     * runtime. Plain per-subtask fields are used instead; note they are not
     * checkpointed, which is acceptable for monitoring counters.
     */
    public static class BackpressureDetector extends RichMapFunction<StreamEvent, StreamEvent> {
        
        private static final long DETECTION_INTERVAL = 5000; // check every 5 seconds
        private static final double BACKPRESSURE_THRESHOLD = 0.8; // warn below 80% of historical rate
        
        // Per-subtask measurement window (not checkpointed).
        private transient long lastSnapshotCount;    // processedCount at the last check
        private transient long lastCheckTime;        // wall-clock time of the last check
        private transient double smoothedThroughput; // exponentially smoothed events/sec
        
        private int subtaskIndex;
        private long processedCount = 0;
        
        @Override
        public void open(Configuration parameters) throws Exception {
            super.open(parameters);
            lastSnapshotCount = 0;
            lastCheckTime = System.currentTimeMillis();
            smoothedThroughput = 0.0;
            subtaskIndex = getRuntimeContext().getIndexOfThisSubtask();
        }
        
        @Override
        public StreamEvent map(StreamEvent event) throws Exception {
            processedCount++;
            
            long currentTime = System.currentTimeMillis();
            
            // Periodically re-measure throughput and look for backpressure.
            if (currentTime - lastCheckTime > DETECTION_INTERVAL) {
                detectBackpressure(currentTime, lastCheckTime);
                lastCheckTime = currentTime;
            }
            
            // Simulate the event's processing latency.
            if (event.processingTime > 0) {
                Thread.sleep(event.processingTime);
            }
            
            return event;
        }
        
        /** Computes current throughput and compares it against the smoothed history. */
        private void detectBackpressure(long currentTime, long lastCheckTime) {
            double timeSeconds = (currentTime - lastCheckTime) / 1000.0;
            double throughput = (processedCount - lastSnapshotCount) / timeSeconds;
            
            // Backpressure: current throughput is significantly below the historical level.
            if (smoothedThroughput > 0) {
                double throughputRatio = throughput / smoothedThroughput;
                
                if (throughputRatio < BACKPRESSURE_THRESHOLD) {
                    // BUGFIX: SLF4J only substitutes "{}" placeholders; "{:.2f}"
                    // was printed literally and the arguments were dropped.
                    // Numeric values are pre-formatted instead.
                    logger.warn("检测到反压: 子任务={}, 当前吞吐量={}/s, 历史吞吐量={}/s, 比例={}%",
                               subtaskIndex,
                               String.format("%.2f", throughput),
                               String.format("%.2f", smoothedThroughput),
                               String.format("%.2f", throughputRatio * 100));
                    
                    // Hook for alerting / mitigation strategies.
                    handleBackpressure(throughputRatio);
                }
            }
            
            // Advance the measurement window.
            lastSnapshotCount = processedCount;
            
            // Exponential moving average (70% history, 30% current).
            smoothedThroughput = smoothedThroughput > 0
                ? (smoothedThroughput * 0.7 + throughput * 0.3) : throughput;
        }
        
        /** Escalates logging by backpressure severity. */
        private void handleBackpressure(double throughputRatio) {
            if (throughputRatio < 0.5) {
                logger.error("严重反压，建议增加并行度或优化处理逻辑");
            } else if (throughputRatio < 0.7) {
                logger.warn("中度反压，考虑优化数据分区策略");
            }
        }
    }
    
    /**
     * Per-subtask load monitor.
     *
     * Accumulates {@link LoadStats} for the local subtask, periodically compares
     * loads across the entries it has observed, and logs rebalancing hints when
     * a partition deviates more than 30% from the mean. Records pass through
     * unchanged.
     *
     * NOTE(review): {@code globalLoadStats} lives in one parallel instance, so
     * despite the name it normally only sees this subtask's own entry — confirm
     * whether truly global stats were intended.
     */
    public static class DynamicLoadBalancer extends ProcessFunction<StreamEvent, StreamEvent> {
        
        private static final long REBALANCE_INTERVAL = 10000; // check every 10 seconds
        
        private transient Map<Integer, LoadStats> globalLoadStats;
        private transient long lastRebalanceTime;
        
        @Override
        public void open(Configuration parameters) throws Exception {
            super.open(parameters);
            globalLoadStats = new ConcurrentHashMap<>();
            lastRebalanceTime = System.currentTimeMillis();
        }
        
        @Override
        public void processElement(StreamEvent event, Context context, 
                                 Collector<StreamEvent> collector) throws Exception {
            
            int subtaskIndex = ((StreamingRuntimeContext) getRuntimeContext()).getIndexOfThisSubtask();
            
            // Fold this event into the local subtask's statistics.
            LoadStats stats = globalLoadStats.computeIfAbsent(subtaskIndex, LoadStats::new);
            stats.addEvent(event);
            
            // Periodically check whether the observed load is skewed.
            long currentTime = System.currentTimeMillis();
            if (currentTime - lastRebalanceTime > REBALANCE_INTERVAL) {
                checkRebalance();
                lastRebalanceTime = currentTime;
            }
            
            collector.collect(event);
        }
        
        /** Compares each partition's load against the mean and logs outliers. */
        private void checkRebalance() {
            if (globalLoadStats.size() < 2) return;
            
            double totalLoad = globalLoadStats.values().stream()
                .mapToDouble(LoadStats::getLoad)
                .sum();
            
            double avgLoad = totalLoad / globalLoadStats.size();
            
            // BUGFIX: SLF4J only substitutes "{}" placeholders; "{:.2f}" was
            // printed literally and the arguments were dropped.
            logger.info("负载均衡检查: 平均负载={}", String.format("%.2f", avgLoad));
            
            if (avgLoad <= 0) {
                return; // avoid division by zero in the deviation calculation
            }
            
            // Flag partitions deviating more than 30% from the mean.
            for (LoadStats stats : globalLoadStats.values()) {
                double deviation = Math.abs(stats.getLoad() - avgLoad) / avgLoad;
                
                if (deviation > 0.3) {
                    logger.warn("检测到负载不均衡: 分区={}, 负载={}, 平均负载={}, 偏差={}%",
                               stats.partitionId,
                               String.format("%.2f", stats.getLoad()),
                               String.format("%.2f", avgLoad),
                               String.format("%.2f", deviation * 100));
                    
                    // Hook for an actual rebalancing strategy.
                    suggestRebalance(stats, avgLoad);
                }
            }
        }
        
        /** Logs a hint depending on whether the partition is over- or under-loaded. */
        private void suggestRebalance(LoadStats stats, double avgLoad) {
            if (stats.getLoad() > avgLoad * 1.3) {
                logger.info("建议分区 {} 使用更多资源或分散负载", stats.partitionId);
            } else if (stats.getLoad() < avgLoad * 0.7) {
                logger.info("分区 {} 负载较低，可以承担更多任务", stats.partitionId);
            }
        }
    }
    
    /**
     * Batches individual events into lists to amortize per-record network cost.
     *
     * A batch is emitted when it reaches {@code BATCH_SIZE} events or when more
     * than {@code BATCH_TIMEOUT} ms have elapsed since the last emission; the
     * batch is sorted so higher-priority events come first.
     *
     * NOTE(review): the timeout is only evaluated when a NEW event arrives, so
     * on a quiet stream a partial batch can sit indefinitely, and events still
     * buffered at shutdown are dropped (no flush in close()) — confirm this is
     * acceptable for the use case.
     */
    public static class NetworkOptimizedBatcher extends RichFlatMapFunction<StreamEvent, List<StreamEvent>> {
        
        private static final int BATCH_SIZE = 100;
        private static final long BATCH_TIMEOUT = 1000; // flush window in ms (1 second)
        
        private transient List<StreamEvent> batch;  // events buffered since the last flush
        private transient long lastBatchTime;       // wall-clock time of the last flush
        
        @Override
        public void open(Configuration parameters) throws Exception {
            super.open(parameters);
            batch = new ArrayList<>();
            lastBatchTime = System.currentTimeMillis();
        }
        
        @Override
        public void flatMap(StreamEvent event, Collector<List<StreamEvent>> collector) throws Exception {
            batch.add(event);
            
            long currentTime = System.currentTimeMillis();
            
            // Flush trigger: batch full, or timeout elapsed since the last flush.
            if (batch.size() >= BATCH_SIZE || currentTime - lastBatchTime > BATCH_TIMEOUT) {
                // Emit high-priority events first within the batch.
                batch.sort((e1, e2) -> Integer.compare(e2.priority, e1.priority));
                
                // Defensive copy: the buffer is cleared and reused below.
                collector.collect(new ArrayList<>(batch));
                
                logger.debug("发送批次: 大小={}, 耗时={}ms", 
                           batch.size(), currentTime - lastBatchTime);
                
                batch.clear();
                lastBatchTime = currentTime;
            }
        }
    }
    
    /**
     * Demo: network-buffer tuning combined with smart partitioning and batching.
     *
     * Pipeline: synthetic source -> custom (load-aware) partitioning
     * -> backpressure-detecting map -> batching flatMap -> batch summary -> print.
     *
     * @throws Exception if job submission or execution fails
     */
    public void runBufferOptimization() throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        
        // Buffer / latency tuning.
        env.getConfig().setLatencyTrackingInterval(1000); // track latency every 1 s
        env.setBufferTimeout(100); // flush network buffers after at most 100 ms
        env.setParallelism(4);
        
        // Synthetic high-volume source.
        DataStream<StreamEvent> eventStream = env.addSource(new HighVolumeEventSource());
        
        // Load-aware custom partitioning keyed on the event key.
        SmartRandomPartitioner partitioner = new SmartRandomPartitioner();
        
        SingleOutputStreamOperator<StreamEvent> shuffledStream = eventStream
            .partitionCustom(partitioner, event -> event.key)
            .map(new BackpressureDetector())
            .name("shuffled-with-backpressure-detection");
        
        // Batch events to reduce per-record network overhead.
        SingleOutputStreamOperator<List<StreamEvent>> batchedStream = shuffledStream
            .flatMap(new NetworkOptimizedBatcher())
            .name("network-optimized-batching");
        
        // Summarize each batch into a log line.
        SingleOutputStreamOperator<String> processedStream = batchedStream
            .map(new BatchProcessor())
            .name("batch-processing");
        
        processedStream.print("处理结果");
        
        env.execute("Buffer Optimization Example");
    }
    
    /**
     * Summarizes a batch of events into a single human-readable line
     * (event count, total bytes, total expected processing time).
     */
    public static class BatchProcessor implements MapFunction<List<StreamEvent>, String> {
        
        @Override
        public String map(List<StreamEvent> batch) throws Exception {
            int totalBytes = 0;
            long totalMillis = 0;
            for (StreamEvent e : batch) {
                totalBytes += e.size;
                totalMillis += e.processingTime;
            }
            
            return String.format("批处理完成: 事件数=%d, 总大小=%d, 总时间=%d", 
                               batch.size(), totalBytes, totalMillis);
        }
    }
    
    /**
     * Demo: backpressure detection and mitigation.
     *
     * Pipeline: synthetic source -> backpressure-detecting map
     * -> load-monitoring process function -> adaptive processor
     * -> performance-monitoring sink.
     *
     * @throws Exception if job submission or execution fails
     */
    public void runBackpressureHandling() throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(4);
        
        // Synthetic high-volume source.
        DataStream<StreamEvent> eventStream = env.addSource(new HighVolumeEventSource());
        
        // Detect throughput drops per subtask.
        SingleOutputStreamOperator<StreamEvent> monitoredStream = eventStream
            .map(new BackpressureDetector())
            .name("backpressure-detection");
        
        // Track per-subtask load and log skew.
        SingleOutputStreamOperator<StreamEvent> balancedStream = monitoredStream
            .process(new DynamicLoadBalancer())
            .name("dynamic-load-balancing");
        
        // Adjust simulated processing cost based on a load factor.
        SingleOutputStreamOperator<StreamEvent> adaptiveStream = balancedStream
            .map(new AdaptiveProcessor())
            .name("adaptive-processing");
        
        adaptiveStream.addSink(new PerformanceMonitoringSink()).name("performance-monitoring");
        
        env.execute("Backpressure Handling Example");
    }
    
    /**
     * Adaptive processor: scales each event's simulated processing time up or
     * down depending on a load factor that drifts randomly over time (a stand-in
     * for a real system-load signal).
     *
     * BUGFIX: the original stored the load factor in keyed {@code ValueState},
     * but this operator runs on a NON-keyed stream (after .process(...) in
     * runBackpressureHandling), where getState(...) fails at runtime. A plain
     * per-subtask field is used instead (not checkpointed, which is fine for a
     * heuristic tuning knob).
     */
    public static class AdaptiveProcessor extends RichMapFunction<StreamEvent, StreamEvent> {
        
        private transient double loadFactor; // per-subtask tuning knob, starts at 1.0
        private long processedCount = 0;
        
        @Override
        public void open(Configuration parameters) throws Exception {
            super.open(parameters);
            loadFactor = 1.0;
        }
        
        @Override
        public StreamEvent map(StreamEvent event) throws Exception {
            processedCount++;
            
            // Adjust the per-event work based on the current load factor.
            if (loadFactor > 1.5) {
                // Under high load, cut the simulated work in half (min 1 ms).
                event.processingTime = Math.max(1, event.processingTime / 2);
            } else if (loadFactor < 0.5) {
                // Under low load, allow more expensive processing (cap 100 ms).
                event.processingTime = Math.min(100, event.processingTime * 2);
            }
            
            // Re-tune the load factor every 1000 records.
            if (processedCount % 1000 == 0) {
                adjustLoadFactor();
            }
            
            return event;
        }
        
        /** Randomly drifts the load factor within [0.1, 2.0] (placeholder for a real signal). */
        private void adjustLoadFactor() {
            double adjustment = (ThreadLocalRandom.current().nextDouble() - 0.5) * 0.1;
            double newLoad = Math.max(0.1, Math.min(2.0, loadFactor + adjustment));
            
            logger.debug("调整负载因子: {} -> {}", loadFactor, newLoad);
            loadFactor = newLoad;
        }
    }
    
    /**
     * Demo: side-by-side comparison of four partitioning strategies over the
     * same skewed source — hash (keyBy), random (shuffle), custom load-aware,
     * and round-robin (rebalance). Each branch counts events per subtask so the
     * distribution can be compared in the printed output.
     *
     * @throws Exception if job submission or execution fails
     */
    public void runPartitioningComparison() throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(4);
        
        DataStream<StreamEvent> eventStream = env.addSource(new HighVolumeEventSource());
        
        // 1. Default hash partitioning via keyBy.
        SingleOutputStreamOperator<Tuple2<String, Long>> hashPartitioned = eventStream
            .keyBy(event -> event.key)
            .map(new EventCounter("hash"))
            .name("hash-partitioned");
        
        // 2. Random partitioning.
        SingleOutputStreamOperator<Tuple2<String, Long>> randomPartitioned = eventStream
            .shuffle()
            .map(new EventCounter("random"))
            .name("random-partitioned");
        
        // 3. Custom load-aware partitioning.
        SingleOutputStreamOperator<Tuple2<String, Long>> smartPartitioned = eventStream
            .partitionCustom(new SmartRandomPartitioner(), event -> event.key)
            .map(new EventCounter("smart"))
            .name("smart-partitioned");
        
        // 4. Round-robin partitioning.
        SingleOutputStreamOperator<Tuple2<String, Long>> roundRobinPartitioned = eventStream
            .rebalance()
            .map(new EventCounter("round-robin"))
            .name("round-robin-partitioned");
        
        // Print all four branches for comparison.
        hashPartitioned.print("Hash分区");
        randomPartitioned.print("随机分区");
        smartPartitioned.print("智能分区");
        roundRobinPartitioned.print("轮询分区");
        
        env.execute("Partitioning Comparison");
    }
    
    /**
     * Emits a running per-subtask event count tagged with the partitioning
     * strategy name, e.g. ("hash-subtask-2", 42).
     *
     * BUGFIX: the original kept the counter in keyed {@code ValueState}, which
     * fails at runtime on the non-keyed branches of runPartitioningComparison
     * (shuffle / rebalance / partitionCustom). Since the output key is the
     * subtask index — not the event key — a plain per-subtask counter matches
     * the intended semantics and works on both keyed and non-keyed input.
     */
    public static class EventCounter extends RichMapFunction<StreamEvent, Tuple2<String, Long>> {
        
        private final String partitionType; // strategy label baked into the output key
        private transient long count;       // per-subtask running count (not checkpointed)
        
        public EventCounter(String partitionType) {
            this.partitionType = partitionType;
        }
        
        @Override
        public void open(Configuration parameters) throws Exception {
            super.open(parameters);
            count = 0;
        }
        
        @Override
        public Tuple2<String, Long> map(StreamEvent event) throws Exception {
            count++;
            
            int subtaskIndex = getRuntimeContext().getIndexOfThisSubtask();
            String key = String.format("%s-subtask-%d", partitionType, subtaskIndex);
            
            return new Tuple2<>(key, count);
        }
    }
    
    /**
     * Synthetic high-volume source with deliberate key skew: roughly 20% of
     * events land on one of ten "hot" keys, the rest are spread uniformly over
     * a pool of 1000 keys. Emits continuously until cancelled, pausing 1 ms
     * every 100 events to throttle production.
     */
    public static class HighVolumeEventSource implements SourceFunction<StreamEvent> {
        
        private volatile boolean running = true;
        private final Random random = new Random();
        
        @Override
        public void run(SourceContext<StreamEvent> ctx) throws Exception {
            // Pre-build the uniform key pool.
            String[] keyPool = new String[1000];
            for (int i = 0; i < keyPool.length; i++) {
                keyPool[i] = "key_" + i;
            }
            
            long sequence = 0;
            
            while (running) {
                // Simulate skew: 20% of draws hit a small set of hot keys.
                String eventKey = (random.nextDouble() < 0.2)
                        ? "hot_key_" + (sequence % 10)
                        : keyPool[random.nextInt(keyPool.length)];
                
                ctx.collect(new StreamEvent(
                        "event_" + sequence++,
                        eventKey,
                        "data_" + random.nextInt(1000),
                        System.currentTimeMillis(),
                        random.nextInt(1000) + 100,  // payload: 100-1100 bytes
                        random.nextInt(50) + 1,      // simulated work: 1-50 ms
                        "partition_" + random.nextInt(4),
                        random.nextInt(10) + 1));    // priority: 1-10
                
                // Throttle: brief pause every 100 events.
                if (sequence % 100 == 0) {
                    Thread.sleep(1);
                }
            }
        }
        
        @Override
        public void cancel() {
            running = false;
        }
    }
    
    /**
     * Sink that tracks simple throughput metrics (total events, total bytes,
     * per-priority counts) and logs a summary report every 30 seconds.
     *
     * NOTE(review): each parallel sink instance is deserialized with its own
     * metrics map, so a report covers one subtask, not the whole job — confirm
     * that is the intended scope.
     */
    public static class PerformanceMonitoringSink implements SinkFunction<StreamEvent> {
        
        private final Map<String, AtomicLong> metrics = new ConcurrentHashMap<>();
        private long lastReportTime = System.currentTimeMillis();
        
        @Override
        public void invoke(StreamEvent event, Context context) throws Exception {
            // Update aggregate counters.
            metrics.computeIfAbsent("total_events", k -> new AtomicLong(0)).incrementAndGet();
            metrics.computeIfAbsent("total_size", k -> new AtomicLong(0)).addAndGet(event.size);
            
            // Count events per priority level.
            String priorityKey = "priority_" + event.priority;
            metrics.computeIfAbsent(priorityKey, k -> new AtomicLong(0)).incrementAndGet();
            
            // Emit a report at most every 30 seconds.
            long currentTime = System.currentTimeMillis();
            if (currentTime - lastReportTime > 30000) {
                reportMetrics();
                lastReportTime = currentTime;
            }
        }
        
        /** Logs every metric currently tracked by this subtask. */
        private void reportMetrics() {
            logger.info("=== 性能监控报告 ===");
            metrics.forEach((key, value) -> 
                logger.info("{}: {}", key, value.get()));
            logger.info("==================");
        }
    }
    
    /**
     * Entry point. Selects a demo via the first CLI argument
     * ("buffer" | "backpressure" | "comparison"); defaults to "buffer".
     */
    public static void main(String[] args) throws Exception {
        ShuffleOptimization demo = new ShuffleOptimization();
        String mode = (args.length > 0) ? args[0] : "buffer";
        
        if ("buffer".equals(mode)) {
            demo.runBufferOptimization();
        } else if ("backpressure".equals(mode)) {
            demo.runBackpressureHandling();
        } else if ("comparison".equals(mode)) {
            demo.runPartitioningComparison();
        } else {
            // Unknown mode: print usage help.
            System.out.println("支持的模式: buffer, backpressure, comparison");
            System.out.println("使用示例: java ShuffleOptimization buffer");
        }
    }
} 