package com.flink.hbase.windowingoptimization.keyby;

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.functions.Partitioner;
import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.streaming.api.functions.co.CoProcessFunction;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.flink.streaming.api.functions.source.SourceFunction;
import org.apache.flink.util.Collector;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

/**
 * keyBy optimization and data-skew mitigation strategies.
 *
 * Covered scenarios:
 * 1. Data-skew detection and handling
 * 2. Dynamic keyBy strategies
 * 3. Pre-aggregation techniques
 * 4. Load-balancing algorithms
 * 5. Hot-key handling
 * 6. Custom partitioning strategies
 *
 * @author Flink Developer
 */
public class KeyByOptimization {
    
    private static final Logger logger = LoggerFactory.getLogger(KeyByOptimization.class);
    
    /**
     * Plain data event flowing through every demo pipeline.
     *
     * <p>Kept as a mutable POJO with public fields and a no-arg constructor so
     * Flink can use its POJO serializer. Note it does NOT override
     * equals/hashCode, so hashCode() is identity-based — this matters wherever
     * hashCode() is used to salt keys elsewhere in this file.
     */
    public static class DataEvent {
        public String key;        // logical grouping key
        public String value;      // opaque payload
        public long timestamp;    // event creation time (epoch millis)
        public String category;   // coarse grouping dimension (e.g. "A".."D")
        public double amount;     // numeric measure that gets aggregated
        public String source;     // origin system (e.g. "web", "mobile")
        public int priority;      // processing priority; higher = more urgent
        
        /** No-arg constructor required by Flink's POJO serializer. */
        public DataEvent() {}
        
        public DataEvent(String key, String value, long timestamp, String category, 
                        double amount, String source, int priority) {
            this.key = key;
            this.value = value;
            this.timestamp = timestamp;
            this.category = category;
            this.amount = amount;
            this.source = source;
            this.priority = priority;
        }
        
        @Override
        public String toString() {
            return String.format("DataEvent{key='%s', value='%s', timestamp=%d, " +
                    "category='%s', amount=%.2f, source='%s', priority=%d}",
                    key, value, timestamp, category, amount, source, priority);
        }
    }
    
    /**
     * Snapshot of a single key's contribution within one detection window.
     */
    public static class SkewStats {
        public String key;             // the skewed key
        public long count;             // occurrences within the window
        public double percentage;      // share of total traffic, in percent (0-100)
        public long lastUpdateTime;    // wall-clock time this snapshot was created
        
        public SkewStats(String key, long count, double percentage) {
            this.lastUpdateTime = System.currentTimeMillis();
            this.key = key;
            this.count = count;
            this.percentage = percentage;
        }
        
        /** Human-readable summary used by the alert log output. */
        @Override
        public String toString() {
            return String.format("SkewStats{key='%s', count=%d, percentage=%.2f%%}",
                    key, count, percentage);
        }
    }
    
    /**
     * Detects data skew by counting per-key traffic over a processing-time
     * window and emitting {@link SkewStats} alerts on the
     * {@link OutputTags#SKEW_ALERT} side output when a single key exceeds
     * 20% of the window's total traffic. Events pass through unchanged.
     *
     * <p>Counters live in operator-local (non-checkpointed) memory, so the
     * statistics are per-parallel-instance and reset on failure/restart.
     */
    public static class DataSkewDetector extends ProcessFunction<DataEvent, DataEvent> {
        
        private static final long STATS_WINDOW_SIZE = 60000; // 1-minute window (processing time)
        private static final double SKEW_THRESHOLD = 0.2; // 20% share threshold
        
        // transient: rebuilt in open(); intentionally NOT checkpointed state.
        private transient Map<String, AtomicLong> keyCounters;
        private transient AtomicLong totalCount;
        private transient long lastStatsTime;
        
        @Override
        public void open(Configuration parameters) throws Exception {
            super.open(parameters);
            keyCounters = new ConcurrentHashMap<>();
            totalCount = new AtomicLong(0);
            lastStatsTime = System.currentTimeMillis();
        }
        
        /**
         * Counts the event's key, periodically runs skew detection, then
         * forwards the event unchanged.
         */
        @Override
        public void processElement(DataEvent event, Context context, 
                                 Collector<DataEvent> collector) throws Exception {
            
            // Update per-key and total counters for the current window.
            keyCounters.computeIfAbsent(event.key, k -> new AtomicLong(0)).incrementAndGet();
            totalCount.incrementAndGet();
            
            // Run detection at most once per window, driven by processing time.
            long currentTime = System.currentTimeMillis();
            if (currentTime - lastStatsTime > STATS_WINDOW_SIZE) {
                detectSkew(context);
                lastStatsTime = currentTime;
            }
            
            collector.collect(event);
        }
        
        /**
         * Emits a SkewStats alert for every key whose share of the window's
         * traffic exceeds SKEW_THRESHOLD, then resets all counters so the
         * next window starts fresh.
         */
        private void detectSkew(Context context) {
            long total = totalCount.get();
            if (total == 0) return;
            
            List<SkewStats> skewedKeys = new ArrayList<>();
            
            keyCounters.forEach((key, counter) -> {
                long count = counter.get();
                double percentage = (double) count / total;
                
                if (percentage > SKEW_THRESHOLD) {
                    // Stored as a human-readable percentage (0-100).
                    skewedKeys.add(new SkewStats(key, count, percentage * 100));
                }
            });
            
            if (!skewedKeys.isEmpty()) {
                logger.warn("检测到数据倾斜:");
                skewedKeys.forEach(stats -> logger.warn("  {}", stats));
                
                // Publish each alert on the dedicated side output.
                skewedKeys.forEach(stats -> {
                    context.output(OutputTags.SKEW_ALERT, stats);
                });
            }
            
            // Reset counters for the next window.
            keyCounters.clear();
            totalCount.set(0);
        }
    }
    
    /**
     * Side-output tags shared by the demo pipelines.
     *
     * <p>Each tag is declared as an anonymous subclass ({@code {}}) so the
     * generic type parameter survives erasure and Flink can extract it.
     */
    public static class OutputTags {
        /** Skew alerts emitted by {@link DataSkewDetector}. */
        public static final org.apache.flink.util.OutputTag<SkewStats> SKEW_ALERT =
            new org.apache.flink.util.OutputTag<SkewStats>("skew-alert") {};
        
        /** Copies of hot events emitted by {@link HotDataProcessor}. */
        public static final org.apache.flink.util.OutputTag<DataEvent> HOT_DATA =
            new org.apache.flink.util.OutputTag<DataEvent>("hot-data") {};
    }
    
    /**
     * Adaptive key selector: once a key's observed frequency exceeds a
     * threshold, the key is fanned out into 10 sub-keys to spread its load.
     *
     * <p>FIX: a KeySelector must be deterministic — Flink may evaluate it more
     * than once for the same record (e.g. on sender and receiver side). The
     * original derived the sub-key suffix from {@code event.hashCode()}, which
     * is an identity hash (DataEvent does not override hashCode) and therefore
     * differs per JVM and per object instance. The suffix is now derived from
     * stable event fields.
     *
     * <p>NOTE(review): the frequency map is per-operator-instance, approximate
     * and unbounded — fine for a demo; a production version should decay or
     * evict entries.
     */
    public static class SmartKeySelector implements KeySelector<DataEvent, String> {
        
        private final Map<String, Long> keyFrequency = new ConcurrentHashMap<>();
        private final long maxFrequency = 1000; // per-key frequency threshold
        
        @Override
        public String getKey(DataEvent event) throws Exception {
            String originalKey = event.key;
            
            // Track (approximately) how often this key has been seen.
            long frequency = keyFrequency.merge(originalKey, 1L, Long::sum);
            
            // Hot key: append a deterministic suffix in [0, 9] to fan it out.
            if (frequency > maxFrequency) {
                int salt = Math.floorMod(Objects.hash(event.value, event.timestamp), 10);
                return originalKey + "_" + salt;
            }
            
            return originalKey;
        }
    }
    
    /**
     * Local pre-aggregation stage: accumulates amount/count in keyed state and
     * emits one aggregated event per 100 inputs; for all other inputs it
     * returns {@code null}.
     *
     * <p>NOTE(review): this must run downstream of a keyBy — ValueState is
     * keyed state and fails on a non-keyed stream. Returning null from a
     * MapFunction is an anti-pattern; callers are expected to filter nulls
     * (see runTwoPhaseAggregation). A RichFlatMapFunction would express
     * "emit 0 or 1 records" more safely, but is kept as-is because the
     * pipeline depends on this interface.
     */
    public static class PreAggregator extends RichMapFunction<DataEvent, DataEvent> {
        
        private transient ValueState<Double> localSum;   // running sum of event.amount
        private transient ValueState<Long> localCount;   // number of accumulated events
        
        @Override
        public void open(Configuration parameters) throws Exception {
            super.open(parameters);
            
            // Descriptors carry default values, so value() never returns null.
            localSum = getRuntimeContext().getState(
                new ValueStateDescriptor<>("local-sum", Double.class, 0.0));
            localCount = getRuntimeContext().getState(
                new ValueStateDescriptor<>("local-count", Long.class, 0L));
        }
        
        /**
         * Accumulates the event; returns an aggregated DataEvent every 100th
         * call for the current key, otherwise null.
         */
        @Override
        public DataEvent map(DataEvent event) throws Exception {
            Double currentSum = localSum.value();
            Long currentCount = localCount.value();
            
            // Fold this event into the local aggregate.
            currentSum += event.amount;
            currentCount += 1;
            
            localSum.update(currentSum);
            localCount.update(currentCount);
            
            // Flush the aggregate every 100 records for this key.
            if (currentCount % 100 == 0) {
                DataEvent aggregated = new DataEvent(
                    event.key,
                    "aggregated_" + currentCount,
                    System.currentTimeMillis(),
                    event.category,
                    currentSum,
                    event.source,
                    event.priority
                );
                
                // Reset local state; clear() restores the descriptor defaults.
                localSum.clear();
                localCount.clear();
                
                return aggregated;
            }
            
            return null; // not flushed yet — caller must filter nulls
        }
    }
    
    /**
     * Custom partitioner: hot keys are scattered randomly across all
     * partitions to break up hotspots, while ordinary keys get a sticky,
     * cached partition assignment.
     */
    public static class LoadBalancingPartitioner implements Partitioner<String> {
        
        private final Random random = new Random();
        private final Map<String, Integer> keyPartitionMap = new ConcurrentHashMap<>();
        
        @Override
        public int partition(String key, int numPartitions) {
            // Hot keys: random partition — deliberately non-sticky.
            if (isHotKey(key)) {
                return random.nextInt(numPartitions);
            }
            
            // Ordinary keys: stable partition, cached per key.
            // FIX: the original used Math.abs(hashCode) % n, which returns a
            // NEGATIVE partition when hashCode == Integer.MIN_VALUE (abs
            // overflows). Math.floorMod is always in [0, numPartitions).
            return keyPartitionMap.computeIfAbsent(key, 
                k -> Math.floorMod(k.hashCode(), numPartitions));
        }
        
        /** Heuristic hot-key test: explicit "hot_" prefix or a very short key. */
        private boolean isHotKey(String key) {
            return key.startsWith("hot_") || key.length() < 5;
        }
    }
    
    /**
     * Flags "hot" keys — those seen more than HOT_THRESHOLD times by this
     * operator instance — by mirroring a renamed copy of their events to the
     * {@link OutputTags#HOT_DATA} side output. The original event is always
     * forwarded downstream unchanged.
     *
     * <p>NOTE(review): counters are operator-local, never reset, and grow
     * unboundedly with key cardinality; and once a key crosses the threshold,
     * EVERY subsequent event for it is logged and duplicated to the side
     * output. Acceptable for a demo, not for production.
     */
    public static class HotDataProcessor extends ProcessFunction<DataEvent, DataEvent> {
        
        private static final int HOT_THRESHOLD = 1000;
        private final Map<String, AtomicLong> keyCounters = new ConcurrentHashMap<>();
        
        @Override
        public void processElement(DataEvent event, Context context, 
                                 Collector<DataEvent> collector) throws Exception {
            
            String key = event.key;
            long count = keyCounters.computeIfAbsent(key, k -> new AtomicLong(0))
                                   .incrementAndGet();
            
            // Hot-key detection: fires for every event past the threshold.
            if (count > HOT_THRESHOLD) {
                logger.warn("检测到热点数据: key={}, count={}", key, count);
                
                // Copy the event with a "_hot" key suffix for the hot lane.
                DataEvent hotEvent = new DataEvent(
                    key + "_hot",
                    event.value,
                    event.timestamp,
                    event.category,
                    event.amount,
                    event.source,
                    event.priority
                );
                
                // Route the copy to the dedicated hot-data side output.
                context.output(OutputTags.HOT_DATA, hotEvent);
            }
            
            collector.collect(event);
        }
    }
    
    /**
     * Two-phase aggregation: phase 1 pre-aggregates locally under a salted
     * key to dilute skew; phase 2 aggregates globally on the original key.
     *
     * @throws Exception if the Flink job fails to execute
     */
    public void runTwoPhaseAggregation() throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(4);
        
        // Source of (skewed) synthetic events.
        DataStream<DataEvent> dataStream = env.addSource(new DataEventSource());
        
        // Phase 1: salted keyBy + local pre-aggregation.
        // FIX: the salt must be deterministic per event. The original used
        // event.hashCode(), an identity hash (DataEvent does not override
        // hashCode) that differs per JVM/object and violates keyBy's
        // determinism requirement. Derive the salt from stable fields.
        SingleOutputStreamOperator<DataEvent> preAggregated = dataStream
            .keyBy(event -> event.key + "_"
                    + Math.floorMod(Objects.hash(event.value, event.timestamp), 4))
            .map(new PreAggregator())
            .filter(Objects::nonNull) // PreAggregator emits null until a batch completes
            .name("pre-aggregation");
        
        // Phase 2: global aggregation on the original (unsalted) key.
        SingleOutputStreamOperator<Tuple2<String, Double>> globalAggregated = preAggregated
            .keyBy(event -> event.key)
            .reduce((event1, event2) -> {
                return new DataEvent(
                    event1.key,
                    "final_aggregated",
                    Math.max(event1.timestamp, event2.timestamp),
                    event1.category,
                    event1.amount + event2.amount,
                    event1.source,
                    event1.priority
                );
            })
            // FIX: use an anonymous MapFunction instead of a lambda so Flink's
            // type extractor can recover Tuple2<String, Double>; a generic
            // lambda here loses its type arguments to erasure and the job
            // fails at submission without an explicit .returns(...) hint.
            .map(new MapFunction<DataEvent, Tuple2<String, Double>>() {
                @Override
                public Tuple2<String, Double> map(DataEvent event) {
                    return new Tuple2<>(event.key, event.amount);
                }
            })
            .name("global-aggregation");
        
        globalAggregated.print("两阶段聚合结果");
        
        env.execute("Two-Phase Aggregation");
    }
    
    /**
     * Dynamic load balancing: detect skew, split hot keys onto a side output,
     * and route hot vs. normal traffic through different partitioning
     * strategies before counting.
     *
     * @throws Exception if the Flink job fails to execute
     */
    public void runDynamicLoadBalancing() throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(4);
        
        DataStream<DataEvent> dataStream = env.addSource(new DataEventSource());
        
        // Skew detection (emits SkewStats alerts on a side output).
        SingleOutputStreamOperator<DataEvent> monitoredStream = dataStream
            .process(new DataSkewDetector())
            .name("skew-detection");
        
        // Skew alerts side output.
        DataStream<SkewStats> skewAlerts = monitoredStream
            .getSideOutput(OutputTags.SKEW_ALERT);
        
        // Hot-key detection (emits copies of hot events on a side output).
        SingleOutputStreamOperator<DataEvent> normalData = monitoredStream
            .process(new HotDataProcessor())
            .name("hot-data-processing");
        
        // Hot-data side output.
        DataStream<DataEvent> hotData = normalData
            .getSideOutput(OutputTags.HOT_DATA);
        
        // Normal traffic: adaptive keyBy + keyed-state counter.
        SingleOutputStreamOperator<Tuple2<String, Long>> normalStats = normalData
            .keyBy(new SmartKeySelector())
            .map(new DataCounter())
            .name("normal-data-stats");
        
        // Hot traffic: custom partitioning produces a NON-keyed stream.
        // FIX: the original applied DataCounter here, but DataCounter uses
        // keyed ValueState and getState() fails at runtime on a non-keyed
        // stream. Count with plain operator-local state instead.
        SingleOutputStreamOperator<Tuple2<String, Long>> hotStats = hotData
            .partitionCustom(new LoadBalancingPartitioner(), event -> event.key)
            .map(new RichMapFunction<DataEvent, Tuple2<String, Long>>() {
                // Per-operator-instance counts (not checkpointed; demo only).
                private transient Map<String, Long> counts;
                
                @Override
                public void open(Configuration parameters) throws Exception {
                    super.open(parameters);
                    counts = new HashMap<>();
                }
                
                @Override
                public Tuple2<String, Long> map(DataEvent event) {
                    long c = counts.merge(event.key, 1L, Long::sum);
                    return new Tuple2<>(event.key, c);
                }
            })
            .name("hot-data-stats");
        
        // Print all three result streams.
        normalStats.print("正常数据统计");
        hotStats.print("热点数据统计");
        skewAlerts.print("倾斜告警");
        
        env.execute("Dynamic Load Balancing");
    }
    
    /**
     * Emits a (key, running count) pair for every incoming event, using keyed
     * ValueState for the count.
     *
     * <p>NOTE(review): requires a keyed stream — getState() fails at runtime
     * on a non-keyed stream (e.g. directly after partitionCustom).
     */
    public static class DataCounter extends RichMapFunction<DataEvent, Tuple2<String, Long>> {
        
        private transient ValueState<Long> countState;  // per-key running count
        
        @Override
        public void open(Configuration parameters) throws Exception {
            super.open(parameters);
            // Default value 0L, so value() never returns null.
            countState = getRuntimeContext().getState(
                new ValueStateDescriptor<>("count", Long.class, 0L));
        }
        
        @Override
        public Tuple2<String, Long> map(DataEvent event) throws Exception {
            Long count = countState.value();
            count++;
            countState.update(count);
            
            return new Tuple2<>(event.key, count);
        }
    }
    
    /**
     * Multi-level keyBy: group by category first for per-category
     * preprocessing, then by the original key for per-key statistics.
     *
     * @throws Exception if the Flink job fails to execute
     */
    public void runMultiLevelKeyBy() throws Exception {
        final StreamExecutionEnvironment env =
            StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(4);
        
        DataStream<DataEvent> events = env.addSource(new DataEventSource());
        
        // Level 1: partition by category.
        SingleOutputStreamOperator<DataEvent> byCategory = events
            .keyBy(event -> event.category)
            .process(new CategoryProcessor())
            .name("category-grouping");
        
        // Level 2: partition by the original key.
        SingleOutputStreamOperator<Tuple3<String, String, Long>> stats = byCategory
            .keyBy(event -> event.key)
            .process(new FinalStatsProcessor())
            .name("final-stats");
        
        stats.print("多级keyBy结果");
        
        env.execute("Multi-Level KeyBy");
    }
    
    /**
     * Per-category pass-through stage; currently only logs the event before
     * forwarding it unchanged.
     */
    public static class CategoryProcessor extends ProcessFunction<DataEvent, DataEvent> {
        
        @Override
        public void processElement(DataEvent event, Context ctx,
                                 Collector<DataEvent> out) throws Exception {
            logger.debug("处理类别: {}, key: {}", event.category, event.key);
            out.collect(event);
        }
    }
    
    /**
     * Emits a running (category, key, count) triple per incoming event.
     * Uses keyed ValueState, so it must run on a stream keyed by event key.
     */
    public static class FinalStatsProcessor 
            extends ProcessFunction<DataEvent, Tuple3<String, String, Long>> {
        
        private transient ValueState<Long> keyCountState;  // per-key running count
        
        @Override
        public void open(Configuration parameters) throws Exception {
            super.open(parameters);
            ValueStateDescriptor<Long> descriptor =
                new ValueStateDescriptor<>("key-count", Long.class, 0L);
            keyCountState = getRuntimeContext().getState(descriptor);
        }
        
        @Override
        public void processElement(DataEvent event, Context ctx,
                                 Collector<Tuple3<String, String, Long>> out) throws Exception {
            // Bump the per-key counter and emit the updated triple.
            long updated = keyCountState.value() + 1;
            keyCountState.update(updated);
            out.collect(new Tuple3<>(event.category, event.key, updated));
        }
    }
    
    /**
     * Routes events through a feature-aware custom partitioner, then applies
     * partition-specific processing.
     *
     * @throws Exception if the Flink job fails to execute
     */
    public void runPartitionAwareKeyBy() throws Exception {
        final StreamExecutionEnvironment env =
            StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(4);
        
        DataStream<DataEvent> events = env.addSource(new DataEventSource());
        
        // Custom partitioning followed by subtask-aware processing.
        SingleOutputStreamOperator<DataEvent> processed = events
            .partitionCustom(new PartitionAwarePartitioner(), event -> event.key)
            .map(new PartitionAwareProcessor())
            .name("partition-aware-processing");
        
        processed.print("分区感知处理结果");
        
        env.execute("Partition-Aware KeyBy");
    }
    
    /**
     * Routes events by key feature: high-priority keys to partition 0,
     * batch keys to the last partition, everything else by hash.
     */
    public static class PartitionAwarePartitioner implements Partitioner<String> {
        
        @Override
        public int partition(String key, int numPartitions) {
            if (key.startsWith("high_priority_")) {
                return 0; // high-priority lane: first partition
            } else if (key.startsWith("batch_")) {
                return numPartitions - 1; // batch lane: last partition
            } else {
                // FIX: the original used Math.abs(hashCode) % n, which returns
                // a NEGATIVE partition when hashCode == Integer.MIN_VALUE
                // (abs overflows). Math.floorMod is always in [0, n).
                return Math.floorMod(key.hashCode(), numPartitions);
            }
        }
    }
    
    /**
     * Subtask-aware processor: boosts priority on the first subtask (the
     * high-priority lane) and caps it on the last subtask (the batch lane),
     * mirroring the routing done by {@link PartitionAwarePartitioner}.
     */
    public static class PartitionAwareProcessor extends RichMapFunction<DataEvent, DataEvent> {
        
        private int subtaskIndex;
        private int lastSubtaskIndex;
        
        @Override
        public void open(Configuration parameters) throws Exception {
            super.open(parameters);
            subtaskIndex = getRuntimeContext().getIndexOfThisSubtask();
            // FIX: the original hard-coded subtask 3 as the batch lane, which
            // silently breaks for any parallelism other than 4.
            // PartitionAwarePartitioner routes batch_ keys to the LAST
            // partition, so mirror that here regardless of parallelism.
            lastSubtaskIndex = getRuntimeContext().getNumberOfParallelSubtasks() - 1;
        }
        
        @Override
        public DataEvent map(DataEvent event) throws Exception {
            if (subtaskIndex == 0) {
                // High-priority lane: raise priority to at least 5.
                event.priority = Math.max(event.priority, 5);
            } else if (subtaskIndex == lastSubtaskIndex) {
                // Batch lane: cap priority at 2.
                event.priority = Math.min(event.priority, 2);
            }
            
            logger.debug("分区 {} 处理事件: {}", subtaskIndex, event.key);
            return event;
        }
    }
    
    /**
     * Synthetic event source with intentional key skew: "hot_" keys are
     * over-represented, with hot_user_1 dominating.
     */
    public static class DataEventSource implements SourceFunction<DataEvent> {
        
        private volatile boolean running = true;
        private final Random random = new Random();
        
        @Override
        public void run(SourceContext<DataEvent> ctx) throws Exception {
            final String[] keys = {"user_1", "user_2", "user_3", "hot_user_1", "hot_user_2", 
                           "batch_job_1", "high_priority_task_1", "normal_task_1"};
            final String[] categories = {"A", "B", "C", "D"};
            final String[] sources = {"web", "mobile", "api", "batch"};
            
            while (running) {
                String key = keys[random.nextInt(keys.length)];
                
                // Simulate skew: whenever a hot key is drawn, collapse it onto
                // hot_user_1 30% of the time so one key dominates the stream.
                if (key.startsWith("hot_") && random.nextDouble() < 0.3) {
                    key = "hot_user_1";
                }
                
                ctx.collect(new DataEvent(
                    key,
                    "value_" + random.nextInt(1000),
                    System.currentTimeMillis(),
                    categories[random.nextInt(categories.length)],
                    random.nextDouble() * 100,
                    sources[random.nextInt(sources.length)],
                    random.nextInt(10) + 1
                ));
                
                Thread.sleep(random.nextInt(100) + 10); // 10-109 ms between events
            }
        }
        
        @Override
        public void cancel() {
            running = false;
        }
    }
    
    /**
     * Sink that counts incoming records per concrete value type and logs the
     * accumulated counts once a minute, then starts over.
     */
    public static class PerformanceMonitoringSink implements SinkFunction<Object> {
        
        private final Map<String, AtomicLong> counters = new ConcurrentHashMap<>();
        private long lastReportTime = System.currentTimeMillis();
        
        @Override
        public void invoke(Object value, Context context) throws Exception {
            // Bucket by the record's simple class name.
            String type = value.getClass().getSimpleName();
            counters.computeIfAbsent(type, k -> new AtomicLong(0)).incrementAndGet();
            
            // Report roughly once per minute of processing time.
            long now = System.currentTimeMillis();
            if (now - lastReportTime > 60000) {
                logger.info("性能统计:");
                for (Map.Entry<String, AtomicLong> entry : counters.entrySet()) {
                    logger.info("  {}: {} records/min", entry.getKey(), entry.getValue().get());
                }
                
                counters.clear();
                lastReportTime = now;
            }
        }
    }
    
    /**
     * Entry point. The first CLI argument selects which demo pipeline to run;
     * defaults to "two-phase" when no argument is given.
     */
    public static void main(String[] args) throws Exception {
        KeyByOptimization optimizer = new KeyByOptimization();
        String mode = args.length > 0 ? args[0] : "two-phase";
        
        if ("two-phase".equals(mode)) {
            optimizer.runTwoPhaseAggregation();
        } else if ("dynamic".equals(mode)) {
            optimizer.runDynamicLoadBalancing();
        } else if ("multi-level".equals(mode)) {
            optimizer.runMultiLevelKeyBy();
        } else if ("partition-aware".equals(mode)) {
            optimizer.runPartitionAwareKeyBy();
        } else {
            System.out.println("支持的参数: two-phase, dynamic, multi-level, partition-aware");
        }
    }
} 