package com.huohua.jobschduler;
import com.huohua.jobschduler.mapper.JobMapper;
import com.huohua.jobschduler.model.po.Job;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.springframework.boot.test.context.SpringBootTest;

import javax.annotation.Resource;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

/**
 * @author cyf
 * @version 1.0
 * @description Bulk-loads a CSV of job rows into the database by reading the file
 *              in memory-mapped chunks and inserting rows in batches.
 * @date 2024/10/14 13:50
 */
@SpringBootTest
public class ReadDataProcessor {
    /** Default number of parsed rows buffered before each batch insert. */
    private static final int DEFAULT_BATCH_SIZE = 1000;
    /** Default size of each memory-mapped window over the file (10 MB). */
    private static final int DEFAULT_CHUNK_SIZE = 1024 * 1024 * 10;

    @Resource
    private JobMapper jobMapper;

    @Test
    public void testRead() {
        String csvFilePath = "src/main/test/resources/useCases/data_5000_add251.csv";
        read(csvFilePath, DEFAULT_BATCH_SIZE, DEFAULT_CHUNK_SIZE);
    }

    /**
     * Streams a CSV file into the database using memory-mapped windows and batch inserts.
     * <p>
     * The file is mapped {@code chunkSize} bytes at a time; each window is decoded only
     * up to its last complete line, and the bytes after that newline are carried over as
     * raw bytes into the next window. Carrying bytes (not a decoded String) is essential:
     * a multi-byte UTF-8 character split across a window boundary must never be decoded
     * in two halves. Lines that do not have exactly five comma-separated fields are
     * silently skipped, matching the original best-effort behavior.
     *
     * @param filePath  path to the CSV file (expected columns: id, instanceId,
     *                  outputDirection, dependencyEvent, instanceStatus)
     * @param batchSize number of rows to accumulate before each {@code insertBatch} call
     * @param chunkSize maximum number of bytes to memory-map per window
     * @throws RuntimeException wrapping any {@link IOException} from the file system
     */
    public void read(String filePath, int batchSize, int chunkSize) {

        try (RandomAccessFile file = new RandomAccessFile(filePath, "r");
             FileChannel fileChannel = file.getChannel()) {

            long fileSize = fileChannel.size();
            long position = 0;
            List<Job> batchList = new ArrayList<>(batchSize);
            // Bytes after the last '\n' of the previous window: either the start of a
            // line that continues in the next window, or the final unterminated line.
            byte[] carry = new byte[0];

            // Walk the file window by window.
            while (position < fileSize) {
                long sizeToMap = Math.min(chunkSize, fileSize - position);
                MappedByteBuffer buffer = fileChannel.map(FileChannel.MapMode.READ_ONLY, position, sizeToMap);

                byte[] data = new byte[(int) sizeToMap];
                buffer.get(data);

                // Prepend the unfinished tail carried over from the previous window.
                byte[] combined;
                if (carry.length == 0) {
                    combined = data;
                } else {
                    combined = new byte[carry.length + data.length];
                    System.arraycopy(carry, 0, combined, 0, carry.length);
                    System.arraycopy(data, 0, combined, carry.length, data.length);
                }

                // Decode only up to the last complete line; keep the rest as raw bytes.
                int lastNewline = lastIndexOfNewline(combined);
                if (lastNewline < 0) {
                    // No complete line in this window (line longer than the window);
                    // keep accumulating until a newline shows up.
                    carry = combined;
                } else {
                    carry = Arrays.copyOfRange(combined, lastNewline + 1, combined.length);
                    String completeLines = new String(combined, 0, lastNewline, StandardCharsets.UTF_8);
                    for (String line : completeLines.split("\n")) {
                        Job job = parseJob(line);
                        if (job != null) {
                            batchList.add(job);
                            // Flush a full batch to keep memory bounded.
                            if (batchList.size() >= batchSize) {
                                jobMapper.insertBatch(batchList);
                                batchList.clear();
                            }
                        }
                    }
                }

                position += sizeToMap;
            }

            // A file without a trailing newline leaves its last line in the carry.
            if (carry.length > 0) {
                Job job = parseJob(new String(carry, StandardCharsets.UTF_8));
                if (job != null) {
                    batchList.add(job);
                }
            }

            // Flush whatever is left below a full batch.
            if (!batchList.isEmpty()) {
                jobMapper.insertBatch(batchList);
            }

        } catch (IOException e) {
            throw new RuntimeException("Error reading file: " + filePath, e);
        }
    }

    /**
     * Parses one CSV line into a {@link Job}.
     *
     * @param line a single line without its terminating {@code '\n'}
     * @return the parsed Job, or {@code null} if the line does not contain exactly
     *         five comma-separated fields (blank or malformed lines are skipped)
     */
    private Job parseJob(String line) {
        // Tolerate CRLF files: splitting on '\n' leaves a trailing '\r' that would
        // otherwise corrupt the last field (instanceStatus).
        if (line.endsWith("\r")) {
            line = line.substring(0, line.length() - 1);
        }
        String[] fields = line.split(",");
        if (fields.length != 5) {
            return null;
        }
        Job job = new Job();
        job.setId(fields[0]);
        job.setInstanceId(fields[1]);
        job.setOutputDirection(fields[2]);
        job.setDependencyEvent(fields[3]);
        job.setInstanceStatus(fields[4]);
        return job;
    }

    /**
     * Returns the index of the last {@code '\n'} byte in {@code buf}, or -1 if absent.
     * Safe on raw UTF-8 bytes: 0x0A never occurs inside a multi-byte sequence.
     */
    private static int lastIndexOfNewline(byte[] buf) {
        for (int i = buf.length - 1; i >= 0; i--) {
            if (buf[i] == '\n') {
                return i;
            }
        }
        return -1;
    }
}
