package org.groupg.filetodb;

import com.lmax.disruptor.RingBuffer;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.io.Reader;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

public class CsvBatchProducer {
    private static final Logger log = LoggerFactory.getLogger(CsvBatchProducer.class);

    /** Number of records accumulated before a batch is published. */
    private static final int BATCH_SIZE = 10_000;

    private final RingBuffer<CsvBatchEvent> ringBuffer;

    public CsvBatchProducer(RingBuffer<CsvBatchEvent> ringBuffer) {
        this.ringBuffer = ringBuffer;
    }

    /**
     * Parses the CSV file at {@code csvPath} and publishes its records to the
     * ring buffer in batches of {@link #BATCH_SIZE}, followed by one final
     * partial batch for any remaining records. Batch numbers are zero-based
     * and increase monotonically within a single call.
     *
     * @param csvPath path to the CSV file; read as UTF-8 via
     *                {@link Files#newBufferedReader(Path)}
     * @throws IOException if the file cannot be opened or read
     */
    public void processCsv(Path csvPath) throws IOException {
        try (Reader reader = Files.newBufferedReader(csvPath);
             CSVParser parser = new CSVParser(reader, CSVFormat.DEFAULT)) {
            log.info("Parsing CSV file {}", csvPath);
            // Batch state is local to this call: using an instance field here
            // would leak records between invocations and break thread safety.
            List<CSVRecord> batch = new ArrayList<>(BATCH_SIZE);
            int batchNumber = 0;
            // CSVParser is Iterable; a plain loop avoids mutating external
            // state from a stream pipeline and needs no AtomicInteger counter.
            for (CSVRecord record : parser) {
                batch.add(record);
                if (batch.size() >= BATCH_SIZE) {
                    publishBatch(batch, batchNumber++);
                    batch.clear();
                }
            }
            // Publish any trailing partial batch.
            if (!batch.isEmpty()) {
                publishBatch(batch, batchNumber);
            }
        }
    }

    /**
     * Claims the next ring-buffer slot, copies the batch into the event, and
     * publishes it. Publishing in {@code finally} ensures the claimed sequence
     * is always released — an unpublished claim would stall Disruptor
     * consumers on that slot.
     *
     * @param batch    records to publish; a defensive copy is stored in the
     *                 event so the caller may reuse the list
     * @param batchNum zero-based index of this batch for downstream bookkeeping
     */
    private void publishBatch(List<CSVRecord> batch, int batchNum) {
        long sequence = ringBuffer.next();
        try {
            CsvBatchEvent event = ringBuffer.get(sequence);
            event.setRecords(new ArrayList<>(batch));
            event.setBatchNumber(batchNum);
        } finally {
            ringBuffer.publish(sequence);
            log.info("Published batch {} ({} records)", batchNum, batch.size());
        }
    }
}
