package com.flink.hbase.multithreaded;

import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.security.UserGroupInformation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.*;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;

/**
 * Asynchronous, multithreaded HBase sink for Flink.
 *
 * <p>Records handed to {@link #invoke(Object, Context)} are buffered in a bounded queue and
 * written by a pool of background writer threads, each batching up to {@code batchSize}
 * mutations per RPC. Subclasses implement {@link #convertToPut(Object)} to map a record to an
 * HBase {@link Put}.
 *
 * <p>Each writer thread owns its own {@link Table} instance because HBase {@code Table} is not
 * safe for concurrent use. Delivery is best-effort: a batch that still fails after the retry
 * budget is dropped and logged.
 *
 * @param <T> the record type consumed by this sink
 */
public abstract class HBaseMultithreadedSink<T> extends RichSinkFunction<T> {
    private static final Logger LOG = LoggerFactory.getLogger(HBaseMultithreadedSink.class);

    /** Number of retries for a failed batch before it is dropped. */
    private static final int MAX_RETRIES = 3;
    /** How long {@link #invoke(Object, Context)} waits for queue space before failing. */
    private static final long ENQUEUE_TIMEOUT_MS = 1000L;

    // Immutable configuration captured at construction time (serialized with the sink).
    private final String tableName;
    private final String columnFamily;
    private final int batchSize;
    private final int asyncThreads;
    private final long flushInterval;
    private final Map<String, String> hbaseConfig;

    // Runtime state; created in open() and therefore transient (Flink serializes the sink).
    private transient Connection connection;
    private transient ExecutorService executor;
    private transient BlockingQueue<T> writeQueue;
    private transient List<Future<Void>> writerTasks;
    private transient AtomicBoolean isRunning;
    private transient AtomicLong totalRecordsWritten;
    private transient ScheduledExecutorService scheduler;

    /**
     * @param tableName     fully qualified HBase table name
     * @param columnFamily  column family exposed to subclasses via {@link #getColumnFamily()}
     * @param batchSize     number of Puts accumulated before a batch RPC is issued; must be > 0
     * @param asyncThreads  number of background writer threads; must be > 0
     * @param flushInterval period in milliseconds of the queue-statistics log task; must be > 0
     * @param hbaseConfig   extra entries applied on top of {@code HBaseConfiguration.create()}
     * @throws IllegalArgumentException if any numeric parameter is not positive
     */
    protected HBaseMultithreadedSink(String tableName, String columnFamily,
                                   int batchSize, int asyncThreads,
                                   long flushInterval, Map<String, String> hbaseConfig) {
        if (batchSize <= 0 || asyncThreads <= 0 || flushInterval <= 0) {
            throw new IllegalArgumentException(
                    "batchSize, asyncThreads and flushInterval must all be positive");
        }
        this.tableName = Objects.requireNonNull(tableName, "tableName");
        this.columnFamily = Objects.requireNonNull(columnFamily, "columnFamily");
        this.batchSize = batchSize;
        this.asyncThreads = asyncThreads;
        this.flushInterval = flushInterval;
        this.hbaseConfig = Objects.requireNonNull(hbaseConfig, "hbaseConfig");
    }

    /**
     * Creates the HBase connection, the bounded hand-off queue, the writer thread pool and a
     * periodic queue-statistics task.
     */
    @Override
    public void open(Configuration parameters) throws Exception {
        super.open(parameters);

        LOG.info("Opening HBase multithreaded sink for table: {}", tableName);

        // Build the HBase configuration, overlaying the user-supplied entries.
        org.apache.hadoop.conf.Configuration hbaseConf = HBaseConfiguration.create();
        for (Map.Entry<String, String> entry : hbaseConfig.entrySet()) {
            hbaseConf.set(entry.getKey(), entry.getValue());
        }

        // Kerberos login, only when the cluster runs with security enabled.
        if (UserGroupInformation.isSecurityEnabled()) {
            UserGroupInformation.setConfiguration(hbaseConf);
            UserGroupInformation.loginUserFromKeytab(
                hbaseConf.get("hbase.security.authentication.principal"),
                hbaseConf.get("hbase.security.authentication.keytab")
            );
        }

        connection = ConnectionFactory.createConnection(hbaseConf);

        // Bounded queue; long arithmetic guards against int overflow for large settings.
        int capacity = (int) Math.min(Integer.MAX_VALUE, (long) batchSize * asyncThreads * 2);
        writeQueue = new LinkedBlockingQueue<>(capacity);
        executor = Executors.newFixedThreadPool(asyncThreads);
        scheduler = Executors.newScheduledThreadPool(1);
        isRunning = new AtomicBoolean(true);
        totalRecordsWritten = new AtomicLong(0);

        // Start the writer threads; each opens its own Table inside writeTask because
        // HBase Table instances are not thread-safe.
        writerTasks = new ArrayList<>(asyncThreads);
        for (int i = 0; i < asyncThreads; i++) {
            final int threadId = i;
            Future<Void> task = executor.submit(() -> {
                writeTask(threadId);
                return null;
            });
            writerTasks.add(task);
        }

        // Periodically log queue depth / throughput. Table.put() sends mutations immediately,
        // so no periodic flush is needed; the former close-and-reopen "flush" raced with the
        // writer threads and has been removed.
        scheduler.scheduleAtFixedRate(this::logQueueStats, flushInterval, flushInterval, TimeUnit.MILLISECONDS);

        LOG.info("HBase sink initialized with {} writer threads for task {}",
                asyncThreads, getRuntimeContext().getIndexOfThisSubtask());
    }

    /**
     * Queues a record for asynchronous writing.
     *
     * <p>If the queue stays full for one second the record is NOT dropped (the previous
     * behaviour); instead an {@link IOException} is thrown so Flink applies backpressure or
     * fails/restarts the task rather than silently losing data.
     *
     * @throws IOException if the write queue remains full past the enqueue timeout
     */
    @Override
    public void invoke(T record, Context context) throws Exception {
        if (!isRunning.get()) {
            return;
        }

        try {
            if (!writeQueue.offer(record, ENQUEUE_TIMEOUT_MS, TimeUnit.MILLISECONDS)) {
                throw new IOException("HBase write queue full for table " + tableName
                        + "; backpressure required");
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new RuntimeException("Interrupted while writing to queue", e);
        }
    }

    /**
     * Stops the sink: drains the queue, stops the stats task, waits for the writer threads to
     * flush their final batches, then releases the thread pools and the HBase connection.
     * Null-guards allow close() to be called safely even if open() failed partway through.
     */
    @Override
    public void close() throws Exception {
        LOG.info("Closing HBase multithreaded sink");

        // Stop accepting new records; writer threads keep draining the queue.
        if (isRunning != null) {
            isRunning.set(false);
        }

        // Give the writers up to 60 s to drain whatever is already queued.
        if (writeQueue != null) {
            int timeout = 60;
            while (!writeQueue.isEmpty() && timeout > 0) {
                Thread.sleep(1000);
                timeout--;
            }
        }

        // Stop the stats task before tearing anything else down.
        if (scheduler != null) {
            scheduler.shutdownNow();
        }

        // Wait for every writer to finish (each flushes its final partial batch on exit).
        if (writerTasks != null) {
            for (Future<Void> task : writerTasks) {
                try {
                    task.get(30, TimeUnit.SECONDS);
                } catch (TimeoutException e) {
                    LOG.warn("Writer task timeout, forcing shutdown");
                    task.cancel(true);
                } catch (ExecutionException e) {
                    // Surface writer failures instead of letting them propagate raw.
                    LOG.error("Writer task failed", e.getCause());
                }
            }
        }

        // Shut the pool down; escalate to shutdownNow if it does not terminate in time.
        if (executor != null) {
            executor.shutdown();
            if (!executor.awaitTermination(30, TimeUnit.SECONDS)) {
                executor.shutdownNow();
            }
        }

        if (connection != null) {
            connection.close();
        }

        if (totalRecordsWritten != null) {
            LOG.info("HBase sink closed, total records written: {}", totalRecordsWritten.get());
        }

        super.close();
    }

    /**
     * Background writer loop: drains the queue, converts records to Puts and writes them in
     * batches through a thread-private {@link Table}. The loop keeps running until the sink is
     * stopped AND the queue is empty, then flushes any remaining partial batch.
     *
     * @param threadId logical id of this writer, used only for logging
     */
    private void writeTask(int threadId) {
        LOG.info("Writer thread {} started", threadId);

        List<Put> batch = new ArrayList<>(batchSize);

        // Thread-private Table: HBase Table is not safe for concurrent use across threads.
        try (Table table = connection.getTable(TableName.valueOf(tableName))) {
            while (isRunning.get() || !writeQueue.isEmpty()) {
                try {
                    T record = writeQueue.poll(1000, TimeUnit.MILLISECONDS);
                    if (record == null) {
                        // Idle: push out any partial batch so records do not linger.
                        if (!batch.isEmpty()) {
                            writeBatch(table, batch, threadId);
                            batch.clear();
                        }
                        continue;
                    }

                    // Convert the record; a null Put means "skip this record".
                    Put put = convertToPut(record);
                    if (put != null) {
                        batch.add(put);
                    }

                    if (batch.size() >= batchSize) {
                        writeBatch(table, batch, threadId);
                        batch.clear();
                    }

                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    break;
                } catch (Exception e) {
                    // A single bad record must not kill the writer thread.
                    LOG.error("Error in writer thread {}", threadId, e);
                }
            }

            // Flush whatever is left in the final partial batch.
            if (!batch.isEmpty()) {
                writeBatch(table, batch, threadId);
            }
        } catch (Exception e) {
            LOG.error("Writer thread {} terminated abnormally", threadId, e);
        }

        LOG.info("Writer thread {} finished", threadId);
    }

    /**
     * Writes one batch through the caller's Table, retrying up to {@link #MAX_RETRIES} times
     * with linear backoff (1 s, 2 s, 3 s). A batch that still fails after all retries is
     * dropped and logged — this sink is best-effort by design.
     *
     * @param table    the thread-private Table owned by the calling writer
     * @param batch    Puts to write; no-op when empty
     * @param threadId logical id of the calling writer, used only for logging
     */
    private void writeBatch(Table table, List<Put> batch, int threadId) {
        if (batch.isEmpty()) {
            return;
        }

        for (int attempt = 0; attempt <= MAX_RETRIES; attempt++) {
            try {
                if (attempt > 0) {
                    // Linear backoff between retries.
                    Thread.sleep(1000L * attempt);
                }

                long startTime = System.currentTimeMillis();
                table.put(batch);
                totalRecordsWritten.addAndGet(batch.size());

                LOG.debug("Thread {} wrote {} records in {} ms (attempt {})",
                        threadId, batch.size(), System.currentTimeMillis() - startTime, attempt + 1);
                return;

            } catch (InterruptedException e) {
                // Preserve interrupt status and stop retrying (the original retry loop
                // swallowed the interrupt inside a generic catch).
                Thread.currentThread().interrupt();
                LOG.error("Interrupted while retrying batch in thread {}, dropping {} records",
                        threadId, batch.size());
                return;
            } catch (IOException e) {
                LOG.warn("Attempt {} failed for thread {}", attempt + 1, threadId, e);
            }
        }

        LOG.error("Max retries exceeded for thread {}, dropping {} records", threadId, batch.size());
    }

    /**
     * Logs the current queue depth and the running total of records written.
     * Safe to call at any time after {@link #open(Configuration)}.
     */
    public void logQueueStats() {
        if (writeQueue != null) {
            LOG.info("Write queue size: {}, total written: {}",
                    writeQueue.size(), totalRecordsWritten.get());
        }
    }

    /**
     * Converts a record into an HBase {@link Put}.
     *
     * @param record the incoming record
     * @return the Put to write, or {@code null} to skip the record
     * @throws Exception if conversion fails; the record is logged and skipped
     */
    protected abstract Put convertToPut(T record) throws Exception;

    /** @return the column family name supplied at construction time */
    protected String getColumnFamily() {
        return columnFamily;
    }

    /** @return the table name supplied at construction time */
    protected String getTableName() {
        return tableName;
    }
}