package com.orc.converter;

import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.sql.Timestamp;
import java.time.LocalDate;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.orc.CompressionKind;
import org.apache.orc.OrcFile;
import org.apache.orc.TypeDescription;
import org.apache.orc.Writer;
import org.apache.orc.storage.ql.exec.vector.*;

import com.opencsv.CSVReader;
import com.opencsv.exceptions.CsvException;

/**
 * ORC转换器，负责将CSV文件转换为ORC文件
 */
public class OrcConverter {
    private static final int BATCH_SIZE = 10000;  // 批处理大小

    /**
     * 将CSV文件转换为ORC文件
     */
    public void convert(TableConfig config) throws IOException, CsvException {
        // 确保输出目录存在
        File outputDir = new File(config.getOutputDirectory());
        if (!outputDir.exists()) {
            outputDir.mkdirs();
        }

        // 扫描输入文件
        FileScanner scanner = new FileScanner();
        List<File> inputFiles = scanner.scanFiles(config.getInputDirectory(), config.getFilePattern());
        
        if (inputFiles.isEmpty()) {
            System.out.println("没有找到符合条件的文件: " + config.getInputDirectory() + "/" + config.getFilePattern());
            return;
        }
        
        System.out.println("找到 " + inputFiles.size() + " 个文件需要处理");

        // 创建ORC schema
        TypeDescription schema = createOrcSchema(config.getFields());
        System.out.println("ORC Schema: " + schema);

        // 获取压缩方式
        CompressionKind compression = getCompressionKind(config.getCompression());

        // 处理所有文件
        processFiles(inputFiles, config, schema, compression);
    }

    /**
     * 处理多个文件，控制输出文件大小
     */
    private void processFiles(List<File> inputFiles, TableConfig config, 
                             TypeDescription schema, CompressionKind compression) 
                             throws IOException, CsvException {
        
        int fileIndex = 0;
        Writer writer = createNewWriter(config, schema, compression, fileIndex);
        long currentFileSize = 0;
        
        VectorizedRowBatch batch = schema.createRowBatch(BATCH_SIZE);
        
        for (File inputFile : inputFiles) {
            System.out.println("处理文件: " + inputFile.getAbsolutePath());
            
            try (CSVReader reader = new CSVReader(new FileReader(inputFile))) {
                List<String[]> allRows = reader.readAll();
                int startIndex = config.isHasHeader() ? 1 : 0;  // 跳过标题行

                for (int i = startIndex; i < allRows.size(); i++) {
                    String[] row = allRows.get(i);
                    
                    // 检查是否需要创建新文件
                    if (currentFileSize >= config.getMaxFileSize()) {
                        writer.close();
                        fileIndex++;
                        writer = createNewWriter(config, schema, compression, fileIndex);
                        currentFileSize = 0;
                        batch.reset();
                    }

                    // 添加行到批处理
                    if (!addRowToBatch(row, batch, config.getFields(), i)) {
                        continue;  // 跳过错误行
                    }

                    // 当批处理满了就写入ORC文件
                    if (batch.size == BATCH_SIZE) {
                        writer.addRowBatch(batch);
                        currentFileSize += estimateBatchSize(batch);  // 估算大小
                        batch.reset();
                    }
                }
            }
        }

        // 写入剩余的数据
        if (batch.size > 0) {
            writer.addRowBatch(batch);
        }
        
        writer.close();
    }

    /**
     * 创建新的ORC写入器
     */
    private Writer createNewWriter(TableConfig config, TypeDescription schema, 
                                  CompressionKind compression, int fileIndex) throws IOException {
        String fileName = String.format("%s_%d.orc", config.getTableName(), fileIndex);
        Path outputPath = Paths.get(config.getOutputDirectory(), fileName);
        
        System.out.println("创建新的ORC文件: " + outputPath);
        
        return OrcFile.createWriter(
            outputPath,
            OrcFile.writerOptions(null)
                   .setSchema(schema)
                   .compress(compression)
        );
    }

    /**
     * 创建ORC的Schema
     */
    private TypeDescription createOrcSchema(List<FieldConfig> fields) {
        StringBuilder schemaBuilder = new StringBuilder("struct<");
        
        for (int i = 0; i < fields.size(); i++) {
            FieldConfig field = fields.get(i);
            schemaBuilder.append(field.getName()).append(":").append(convertType(field.getType()));
            
            if (i < fields.size() - 1) {
                schemaBuilder.append(",");
            }
        }
        
        schemaBuilder.append(">");
        return TypeDescription.fromString(schemaBuilder.toString());
    }

    /**
     * 将配置中的类型转换为ORC支持的类型
     */
    private String convertType(String type) {
        switch (type.toLowerCase()) {
            case "int":
            case "integer":
                return "int";
            case "long":
            case "bigint":
                return "bigint";
            case "string":
                return "string";
            case "double":
            case "float":
                return "double";
            case "boolean":
                return "boolean";
            case "date":
                return "date";
            case "timestamp":
                return "timestamp";
            default:
                System.out.println("未知类型 " + type + "，默认使用string类型");
                return "string";
        }
    }

    /**
     * 将CSV行添加到批处理中
     */
    private boolean addRowToBatch(String[] row, VectorizedRowBatch batch, 
                                 List<FieldConfig> fields, int rowNumber) {
        try {
            int rowIdx = batch.size++;
            
            for (int colIdx = 0; colIdx < fields.size(); colIdx++) {
                FieldConfig field = fields.get(colIdx);
                String value = row[colIdx];
                
                // 处理空值
                if (value == null || value.trim().isEmpty()) {
                    batch.cols[colIdx].noNulls = false;
                    batch.cols[colIdx].isNull[rowIdx] = true;
                    continue;
                }
                
                // 根据字段类型设置值
                switch (field.getType().toLowerCase()) {
                    case "int":
                    case "integer":
                        LongColumnVector intVector = (LongColumnVector) batch.cols[colIdx];
                        intVector.vector[rowIdx] = Long.parseLong(value);
                        break;
                    case "long":
                    case "bigint":
                        LongColumnVector longVector = (LongColumnVector) batch.cols[colIdx];
                        longVector.vector[rowIdx] = Long.parseLong(value);
                        break;
                    case "string":
                        BytesColumnVector stringVector = (BytesColumnVector) batch.cols[colIdx];
                        stringVector.setVal(rowIdx, value.getBytes());
                        break;
                    case "double":
                    case "float":
                        DoubleColumnVector doubleVector = (DoubleColumnVector) batch.cols[colIdx];
                        doubleVector.vector[rowIdx] = Double.parseDouble(value);
                        break;
                    case "boolean":
                        LongColumnVector booleanVector = (LongColumnVector) batch.cols[colIdx];
                        booleanVector.vector[rowIdx] = Boolean.parseBoolean(value) ? 1 : 0;
                        break;
                    default:
                        BytesColumnVector defaultVector = (BytesColumnVector) batch.cols[colIdx];
                        defaultVector.setVal(rowIdx, value.getBytes());
                }
            }
            return true;
        } catch (Exception e) {
            System.err.println("处理行 " + (rowNumber + 1) + " 时出错: " + e.getMessage());
            batch.size--;  // 回滚
            return false;
        }
    }

    /**
     * 估算批处理数据的大小（字节）
     */
    private long estimateBatchSize(VectorizedRowBatch batch) {
        // 简单估算，实际大小可能有所不同
        long estimatedSize = 0;
        
        for (ColumnVector vector : batch.cols) {
            if (vector instanceof BytesColumnVector) {
                // 字符串类型，假设平均每个值100字节
                estimatedSize += batch.size * 100;
            } else if (vector instanceof LongColumnVector) {
                // 长整型，每个8字节
                estimatedSize += batch.size * 8;
            } else if (vector instanceof DoubleColumnVector) {
                // 双精度浮点型，每个8字节
                estimatedSize += batch.size * 8;
            } else {
                // 其他类型，默认按8字节估算
                estimatedSize += batch.size * 8;
            }
        }
        
        return estimatedSize;
    }

    /**
     * 获取压缩方式枚举
     */
    private CompressionKind getCompressionKind(String compression) {
        switch (compression.toUpperCase()) {
            case "NONE":
                return CompressionKind.NONE;
            case "ZLIB":
                return CompressionKind.ZLIB;
            case "SNAPPY":
                return CompressionKind.SNAPPY;
            case "LZ4":
                return CompressionKind.LZ4;
            case "ZSTD":
                return CompressionKind.ZSTD;
            default:
                System.out.println("未知压缩方式 " + compression + "，默认使用SNAPPY");
                return CompressionKind.SNAPPY;
        }
    }
}
    