package com.flink.hbase.multithreaded;

import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.functions.source.RichParallelSourceFunction;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.PageFilter;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.security.UserGroupInformation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.*;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;

/**
 * Asynchronous, multithreaded HBase reading source based on {@link RichParallelSourceFunction}.
 *
 * <p>The table's regions are split evenly and contiguously across the parallel subtasks, and
 * each subtask scans its assigned regions concurrently on a private fixed-size thread pool.
 * Records are emitted under the Flink checkpoint lock, as required when multiple threads share
 * one {@code SourceContext}.
 *
 * @param <T> record type produced by {@link #parseResult(Result)}
 */
public abstract class HBaseMultithreadedSource<T> extends RichParallelSourceFunction<T> {
    private static final Logger LOG = LoggerFactory.getLogger(HBaseMultithreadedSource.class);

    /** How often (in records, per region) to log scan progress. */
    private static final long PROGRESS_LOG_INTERVAL = 10_000L;

    private final String tableName;
    private final String columnFamily;
    // Used as the scanner caching / transfer-size hint (NOT a hard row limit).
    private final int pageSize;
    // Number of concurrent region scans per subtask.
    private final int asyncThreads;
    // Scanner timeout in milliseconds, applied to the HBase client configuration.
    private final long scanTimeout;
    private final Map<String, String> hbaseConfig;

    private transient Connection connection;
    private transient ExecutorService executor;
    private transient List<RegionAssignment.RegionInfo> assignedRegions;
    private transient AtomicBoolean isRunning;
    private transient AtomicLong totalRecordsRead;

    /**
     * @param tableName    fully-qualified HBase table name
     * @param columnFamily single column family to scan
     * @param pageSize     scanner caching hint (rows fetched per RPC)
     * @param asyncThreads number of concurrent region-scan threads per subtask
     * @param scanTimeout  scanner timeout in milliseconds
     * @param hbaseConfig  extra HBase client configuration key/value pairs
     */
    protected HBaseMultithreadedSource(String tableName, String columnFamily,
                                     int pageSize, int asyncThreads,
                                     long scanTimeout, Map<String, String> hbaseConfig) {
        this.tableName = tableName;
        this.columnFamily = columnFamily;
        this.pageSize = pageSize;
        this.asyncThreads = asyncThreads;
        this.scanTimeout = scanTimeout;
        this.hbaseConfig = hbaseConfig;
    }

    @Override
    public void open(Configuration parameters) throws Exception {
        super.open(parameters);

        LOG.info("Opening HBase multithreaded source for table: {}", tableName);

        // Build the HBase client configuration from the user-supplied overrides.
        org.apache.hadoop.conf.Configuration hbaseConf = HBaseConfiguration.create();
        for (Map.Entry<String, String> entry : hbaseConfig.entrySet()) {
            hbaseConf.set(entry.getKey(), entry.getValue());
        }
        // Apply the configured scanner timeout (the field was previously never used).
        hbaseConf.setLong("hbase.client.scanner.timeout.period", scanTimeout);

        // Kerberos login. Guard against missing principal/keytab so a misconfigured
        // secure cluster fails with a clear log line instead of an opaque NPE.
        if (UserGroupInformation.isSecurityEnabled()) {
            UserGroupInformation.setConfiguration(hbaseConf);
            String principal = hbaseConf.get("hbase.security.authentication.principal");
            String keytab = hbaseConf.get("hbase.security.authentication.keytab");
            if (principal != null && keytab != null) {
                UserGroupInformation.loginUserFromKeytab(principal, keytab);
            } else {
                LOG.warn("Kerberos is enabled but principal/keytab are not configured; "
                        + "skipping keytab login and relying on the current UGI");
            }
        }

        // Connection is thread-safe and shared; per-thread Table instances are created
        // on demand in scanRegion() because Table itself is NOT thread-safe.
        connection = ConnectionFactory.createConnection(hbaseConf);

        // Determine which regions this subtask owns.
        assignedRegions = getRegionAssignments();

        executor = Executors.newFixedThreadPool(asyncThreads);
        isRunning = new AtomicBoolean(true);
        totalRecordsRead = new AtomicLong(0);

        LOG.info("HBase source initialized with {} regions assigned to task {}",
                assignedRegions.size(), getRuntimeContext().getIndexOfThisSubtask());
    }

    @Override
    public void run(SourceContext<T> ctx) throws Exception {
        if (assignedRegions == null || assignedRegions.isEmpty()) {
            LOG.warn("No regions assigned to task {}", getRuntimeContext().getIndexOfThisSubtask());
            return;
        }

        // One scan task per assigned region, executed on the bounded thread pool.
        List<Future<Void>> futures = new ArrayList<>(assignedRegions.size());
        for (RegionAssignment.RegionInfo region : assignedRegions) {
            futures.add(executor.submit(() -> {
                try {
                    scanRegion(region, ctx);
                } catch (Exception e) {
                    LOG.error("Error scanning region: {}", region.getRegionName(), e);
                    throw new RuntimeException(e);
                }
                return null;
            }));
        }

        // Wait for all region scans to finish. On the first failure, stop the
        // sibling scans promptly instead of letting them run to completion.
        try {
            for (Future<Void> future : futures) {
                future.get();
            }
        } catch (Exception e) {
            isRunning.set(false);
            for (Future<Void> future : futures) {
                future.cancel(true);
            }
            LOG.error("Error in region scanning tasks", e);
            throw e;
        }

        LOG.info("Task {} completed scanning {} regions, total records: {}",
                getRuntimeContext().getIndexOfThisSubtask(), assignedRegions.size(), totalRecordsRead.get());
    }

    @Override
    public void cancel() {
        // Flip the flag first so scan loops exit at the next record boundary.
        if (isRunning != null) {
            isRunning.set(false);
        }

        if (executor != null && !executor.isShutdown()) {
            executor.shutdown();
            try {
                // Give in-flight scanner.next() calls a chance to return; interrupt
                // stragglers (e.g. threads blocked on an HBase RPC) after the grace period.
                if (!executor.awaitTermination(60, TimeUnit.SECONDS)) {
                    executor.shutdownNow();
                }
            } catch (InterruptedException e) {
                executor.shutdownNow();
                Thread.currentThread().interrupt();
            }
        }
    }

    @Override
    public void close() throws Exception {
        cancel();

        if (connection != null) {
            connection.close();
        }

        super.close();
    }

    /**
     * Computes the contiguous slice of the table's regions owned by this subtask.
     *
     * <p>Regions are distributed as evenly as possible: with {@code r = totalRegions %
     * parallelism}, the first {@code r} subtasks receive one extra region. The formula
     * below covers every region exactly once (the last subtask's end index works out to
     * {@code totalRegions}), so no special-casing is needed.
     */
    private List<RegionAssignment.RegionInfo> getRegionAssignments() throws IOException {
        RuntimeContext runtimeContext = getRuntimeContext();
        int taskIndex = runtimeContext.getIndexOfThisSubtask();
        int totalParallelism = runtimeContext.getNumberOfParallelSubtasks();

        // All regions, sorted by start key so every subtask sees the same order.
        List<RegionAssignment.RegionInfo> allRegions = getAllRegions();

        int totalRegions = allRegions.size();
        int baseRegionsPerTask = totalRegions / totalParallelism;
        int remainder = totalRegions % totalParallelism;

        // Contiguous [startIndex, endIndex) range for this subtask.
        int startIndex = taskIndex * baseRegionsPerTask + Math.min(taskIndex, remainder);
        int endIndex = startIndex + baseRegionsPerTask + (taskIndex < remainder ? 1 : 0);

        List<RegionAssignment.RegionInfo> assigned =
                new ArrayList<>(allRegions.subList(startIndex, endIndex));

        LOG.info("Task {}/{} assigned {} regions (index {}-{})",
                taskIndex, totalParallelism, assigned.size(), startIndex, endIndex - 1);

        return assigned;
    }

    /**
     * Fetches metadata for every region of the table, sorted by start key so the
     * assignment computed in {@link #getRegionAssignments()} is deterministic.
     */
    private List<RegionAssignment.RegionInfo> getAllRegions() throws IOException {
        List<RegionAssignment.RegionInfo> regions = new ArrayList<>();

        try (RegionLocator regionLocator = connection.getRegionLocator(TableName.valueOf(tableName))) {
            List<HRegionLocation> regionLocations = regionLocator.getAllRegionLocations();

            for (HRegionLocation location : regionLocations) {
                HRegionInfo regionInfo = location.getRegionInfo();
                String serverName = location.getServerName() != null ?
                    location.getServerName().toString() : "unknown";

                regions.add(new RegionAssignment.RegionInfo(
                    regionInfo.getRegionNameAsString(),
                    regionInfo.getStartKey(),
                    regionInfo.getEndKey(),
                    serverName
                ));
            }
        }

        // Sort by startKey so all subtasks derive an identical, stable ordering.
        regions.sort((r1, r2) -> Bytes.compareTo(r1.getStartKey(), r2.getStartKey()));

        return regions;
    }

    /**
     * Scans a single region and emits parsed records into the source context.
     * Unparsable rows are logged and skipped; the scan continues.
     */
    private void scanRegion(RegionAssignment.RegionInfo region, SourceContext<T> ctx) throws IOException {
        LOG.info("Starting to scan region: {}", region.getRegionName());

        Scan scan = new Scan();
        scan.setStartRow(region.getStartKey());
        scan.setStopRow(region.getEndKey());
        scan.addFamily(Bytes.toBytes(columnFamily));
        // Deliberately NO PageFilter here: PageFilter(pageSize) stops the scan after
        // pageSize rows per region server, silently truncating the region's data.
        // pageSize is only a transfer-size hint, applied via caching/maxResultSize.
        scan.setCaching(pageSize);
        scan.setMaxResultSize((long) pageSize * 1024L); // long math: avoid int overflow

        // Table is NOT thread-safe, so each scanning thread creates its own instance
        // from the shared (thread-safe) Connection and closes it when done.
        try (Table regionTable = connection.getTable(TableName.valueOf(tableName));
             ResultScanner scanner = regionTable.getScanner(scan)) {
            Result result;
            long recordCount = 0;

            while (isRunning.get() && (result = scanner.next()) != null) {
                try {
                    T record = parseResult(result);
                    if (record != null) {
                        // Multiple scanner threads share this SourceContext; Flink requires
                        // emissions to happen under the checkpoint lock.
                        synchronized (ctx.getCheckpointLock()) {
                            ctx.collect(record);
                        }
                        totalRecordsRead.incrementAndGet();
                        recordCount++;

                        if (recordCount % PROGRESS_LOG_INTERVAL == 0) {
                            LOG.info("Region {} processed {} records",
                                    region.getRegionName(), recordCount);
                        }
                    }
                } catch (Exception e) {
                    // Best-effort per-row handling: log and move on to the next record.
                    LOG.error("Error parsing result from region {}", region.getRegionName(), e);
                }
            }

            LOG.info("Completed scanning region: {}, total records: {}",
                    region.getRegionName(), recordCount);
        }
    }

    /**
     * Parses an HBase {@link Result} into a record of type {@code T}.
     * Subclasses must implement this; returning {@code null} skips the row.
     *
     * @param result the raw HBase scan result for one row
     * @return the parsed record, or {@code null} to skip
     * @throws Exception if the row cannot be parsed (logged and skipped by the caller)
     */
    protected abstract T parseResult(Result result) throws Exception;
}