package com.gjy.hbase.util;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.util.Bytes;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.*;
import java.nio.file.Files;
import java.util.*;

/**
 * @author gjy
 * @version 1.0
 * @since 2025-09-01 19:43:37
 */
/**
 * Stores and retrieves whole files in a single HBase table, splitting each
 * file into fixed-size blocks kept as separate cells of one row.
 *
 * <p>Row layout: {@code meta:name}/{@code meta:size} hold the original file
 * name and byte length; {@code data:block_00000}, {@code data:block_00001}, …
 * hold the content blocks. Qualifiers are zero-padded so their lexicographic
 * order equals the numeric block order.
 *
 * <p>Not thread-safe for concurrent {@link #init()}/{@link #close()}; the
 * shared {@code Connection} itself is thread-safe once initialized.
 */
public class HbaseFileManager {
    private static final Logger log = LoggerFactory.getLogger(HbaseFileManager.class);

    // HBase table layout.
    private static final String TABLE_NAME = "file_store";
    private static final String CF_META = "meta";   // file metadata (name, size)
    private static final String CF_DATA = "data";   // file content blocks
    private static final String COL_FILE_NAME = "name";
    private static final String COL_FILE_SIZE = "size";
    // Single naming scheme shared by upload and download. Zero padding keeps
    // NavigableMap qualifier order == numeric block order (block_00002 < block_00010).
    private static final String COL_BLOCK_PREFIX = "block_";
    private static final int BLOCK_INDEX_WIDTH = 5;

    private static final int CHUNK_SIZE = 2 * 1024 * 1024; // 2MB per block
    private static final int PUT_BATCH_SIZE = 10;          // Puts buffered per batch submit
    private static final String DEFAULT_ZK_QUORUM = "192.168.253.142:2181";

    // Shared heavyweight connection, created by init() and released by close().
    private static Connection connection;

    /**
     * Opens the HBase connection against the default ZooKeeper quorum.
     *
     * @throws IOException if the connection cannot be established
     */
    public void init() throws IOException {
        init(DEFAULT_ZK_QUORUM);
    }

    /**
     * Opens the HBase connection.
     *
     * @param zkQuorum ZooKeeper quorum address, e.g. {@code "host:2181"}
     * @throws IOException if the connection cannot be established
     */
    public void init(String zkQuorum) throws IOException {
        Configuration config = HBaseConfiguration.create();
        config.set("hbase.zookeeper.quorum", zkQuorum);
        // Raise the single-cell limit to 10MB so a 2MB block fits comfortably.
        config.setInt("hbase.client.keyvalue.maxsize", 10485760);
        connection = ConnectionFactory.createConnection(config);
    }

    /** Closes the shared connection; safe to call even if init() never ran. */
    public void close() throws IOException {
        if (connection != null) {
            connection.close();
        }
    }

    /**
     * (Re)creates the file-store table.
     *
     * <p><b>Destructive:</b> an existing table is disabled and dropped first,
     * so all previously stored files are lost.
     *
     * @throws IOException on any HBase admin failure
     */
    public void createTable() throws IOException {
        try (Admin admin = connection.getAdmin()) {
            TableName tableName = TableName.valueOf(TABLE_NAME);
            if (admin.tableExists(tableName)) {
                admin.disableTable(tableName);
                admin.deleteTable(tableName);
            }

            // Snappy compression on both families; a larger HFile block size on
            // the data family suits its big sequential cells.
            TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tableName)
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CF_META))
                            .setMaxVersions(1)
                            .setCompressionType(Compression.Algorithm.SNAPPY)
                            .build())
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CF_DATA))
                            .setMaxVersions(1)
                            .setCompressionType(Compression.Algorithm.SNAPPY)
                            .setBlocksize(256 * 1024) // 256KB HFile block size
                            .build())
                    .build();
            admin.createTable(tableDesc);
        }
    }

    /**
     * Uploads a local file, split into {@value #CHUNK_SIZE}-byte blocks stored
     * as separate cells under the given row key.
     *
     * @param filePath path of the local file to upload
     * @param rowKey   row key under which the file is stored
     * @throws FileNotFoundException if {@code filePath} does not name a regular file
     * @throws IOException           on read or HBase failure
     */
    public void uploadFile(String filePath, String rowKey) throws IOException {
        File file = new File(filePath);
        if (!file.isFile()) {
            // Fail fast instead of silently storing a 0-byte entry.
            throw new FileNotFoundException("not a readable file: " + filePath);
        }
        String fileName = file.getName();
        long fileSize = file.length();

        Put metaPut = new Put(Bytes.toBytes(rowKey));
        metaPut.addColumn(Bytes.toBytes(CF_META), Bytes.toBytes(COL_FILE_NAME), Bytes.toBytes(fileName));
        metaPut.addColumn(Bytes.toBytes(CF_META), Bytes.toBytes(COL_FILE_SIZE), Bytes.toBytes(fileSize));

        try (Table table = connection.getTable(TableName.valueOf(TABLE_NAME));
             BufferedInputStream bis = new BufferedInputStream(Files.newInputStream(file.toPath()))) {

            List<Put> putList = new ArrayList<>();
            putList.add(metaPut);

            byte[] buffer = new byte[CHUNK_SIZE];
            int bytesRead;
            int blockIndex = 0;

            while ((bytesRead = bis.read(buffer)) != -1) {
                // ALWAYS copy: the Put keeps a reference to the array, and the
                // shared read buffer is overwritten on the next iteration. The
                // old "reuse buffer when full" shortcut corrupted every block
                // that was still batched when the next read happened.
                byte[] chunkData = Arrays.copyOf(buffer, bytesRead);

                Put dataPut = new Put(Bytes.toBytes(rowKey));
                dataPut.addColumn(Bytes.toBytes(CF_DATA),
                        Bytes.toBytes(blockColumn(blockIndex++)),
                        chunkData);
                putList.add(dataPut);

                // Batch submit to bound client-side memory.
                if (putList.size() >= PUT_BATCH_SIZE) {
                    table.put(putList);
                    putList.clear();
                }
            }

            // Flush whatever remains (always at least the metadata Put).
            if (!putList.isEmpty()) {
                table.put(putList);
            }

            log.info("文件上传成功! RowKey: {}, 文件名: {}, 大小: {} MB, 分块数: {}",
                    rowKey, fileName, fileSize / (1024.0 * 1024.0), blockIndex);
        }
    }

    /**
     * Downloads a previously uploaded file into the given directory, under its
     * original file name.
     *
     * @param rowKey     row key the file was stored under
     * @param outputPath destination directory
     * @throws FileNotFoundException if no file exists under {@code rowKey}
     * @throws IOException           on write or HBase failure
     */
    public void downloadFile(String rowKey, String outputPath) throws IOException {
        try (Table table = connection.getTable(TableName.valueOf(TABLE_NAME))) {
            // Fetch metadata first to recover the original file name and size.
            Get metaGet = new Get(Bytes.toBytes(rowKey));
            metaGet.addFamily(Bytes.toBytes(CF_META));
            Result metaResult = table.get(metaGet);

            if (metaResult.isEmpty()) {
                throw new FileNotFoundException("找不到指定文件: " + rowKey);
            }

            String fileName = Bytes.toString(metaResult.getValue(Bytes.toBytes(CF_META), Bytes.toBytes(COL_FILE_NAME)));
            long fileSize = Bytes.toLong(metaResult.getValue(Bytes.toBytes(CF_META), Bytes.toBytes(COL_FILE_SIZE)));

            // Fetch only this row's data family with a Get. (A Scan with only a
            // start row and no stop row would keep scanning into every later
            // row of the table and could append other files' blocks.)
            Get dataGet = new Get(Bytes.toBytes(rowKey));
            dataGet.addFamily(Bytes.toBytes(CF_DATA));
            Result dataResult = table.get(dataGet);

            File outputFile = new File(outputPath, fileName);
            long bytesWritten = 0;
            int blockCount = 0;
            try (BufferedOutputStream bos = new BufferedOutputStream(Files.newOutputStream(outputFile.toPath()))) {
                // Family map is sorted by qualifier bytes; zero-padded block
                // names guarantee that equals the original block order.
                NavigableMap<byte[], byte[]> dataMap = dataResult.getFamilyMap(Bytes.toBytes(CF_DATA));
                if (dataMap != null) {
                    for (Map.Entry<byte[], byte[]> entry : dataMap.entrySet()) {
                        bos.write(entry.getValue());
                        bytesWritten += entry.getValue().length;
                        blockCount++;
                    }
                }
            }

            // Integrity check against the recorded size; warn, don't fail,
            // so a partially recovered file is still available for inspection.
            if (bytesWritten != fileSize) {
                log.warn("下载大小不一致! RowKey: {}, 期望 {} 字节, 实际 {} 字节", rowKey, fileSize, bytesWritten);
            }
            log.info("文件下载完成! 文件名: {}, 大小: {} MB, 块数: {}",
                    fileName, fileSize / (1024.0 * 1024.0), blockCount);
        }
    }

    // Builds the zero-padded data-column qualifier for a block index,
    // e.g. 7 -> "block_00007".
    private static String blockColumn(int index) {
        return COL_BLOCK_PREFIX + String.format("%0" + BLOCK_INDEX_WIDTH + "d", index);
    }
}
