package com.wande.dataplatform.filecollection.parser.impl;

import cn.hutool.core.util.StrUtil;
import com.wande.dataplatform.filecollection.common.enums.FileCollectionErrorCode;
import com.wande.dataplatform.filecollection.common.exception.FileCollectionException;
import com.wande.dataplatform.filecollection.domain.dto.FieldInfo;
import com.wande.dataplatform.filecollection.parser.AbstractFileParser;
import com.wande.dataplatform.filecollection.parser.ParserConfig;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;
import org.springframework.stereotype.Component;

import java.io.*;
import java.util.*;
import java.util.function.Consumer;

/**
 * CSV file parser.
 * Supports custom delimiter, quote character and charset encoding.
 * Supports automatic field detection and streaming parse.
 *
 * @author dataplatform
 * @date 2025-01-24
 */
@Slf4j
@Component
public class CsvFileParser extends AbstractFileParser {

    /** Maximum number of data rows sampled per column for type inference. */
    private static final int SAMPLE_SIZE = 100;

    @Override
    public String getSupportedFileType() {
        return "CSV";
    }

    /**
     * Detects field names and types from the CSV content.
     * <p>
     * When {@code config.getHasHeader()} is true the first record supplies the
     * field names; otherwise positional names ({@code column_0}, {@code column_1}, …)
     * are generated and the first record is treated as data. Up to
     * {@link #SAMPLE_SIZE} data rows are sampled to infer each column's type.
     *
     * @param inputStream CSV content; consumed and closed by this method
     * @param config      parser configuration (charset, delimiter, header flag, …)
     * @return detected fields in column order; empty list for an empty file
     * @throws FileCollectionException with {@code FILE_PARSE_ERROR} on I/O failure
     */
    @Override
    public List<FieldInfo> detectFields(InputStream inputStream, ParserConfig config) {
        validateInputStream(inputStream);
        validateConfig(config);

        List<FieldInfo> fields = new ArrayList<>();

        // CSVParser.close() also closes the wrapped reader; double-close is harmless.
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(inputStream, config.getCharset()));
             CSVParser csvParser = buildCSVFormat(config).parse(reader)) {

            Iterator<CSVRecord> iterator = csvParser.iterator();
            if (!iterator.hasNext()) {
                return fields;
            }

            CSVRecord firstRecord = iterator.next();
            Map<Integer, List<String>> samples = new HashMap<>();
            int sampleCount = 0;
            List<String> headers;

            if (config.getHasHeader()) {
                headers = readHeaders(firstRecord);
            } else {
                // FIX: previously the first record was always consumed as a header,
                // even when hasHeader was false — losing the first data row and
                // using data values as field names. Generate positional names and
                // keep the first record as sample data instead.
                headers = new ArrayList<>(firstRecord.size());
                for (int i = 0; i < firstRecord.size(); i++) {
                    headers.add("column_" + i);
                }
                collectSamples(firstRecord, headers.size(), samples);
                sampleCount++;
            }

            // Collect sample values per column for type inference.
            while (iterator.hasNext() && sampleCount < SAMPLE_SIZE) {
                collectSamples(iterator.next(), headers.size(), samples);
                sampleCount++;
            }

            // Build field metadata: name, inferred type, ordinal and a sample value.
            for (int i = 0; i < headers.size(); i++) {
                List<String> fieldSamples = samples.getOrDefault(i, Collections.emptyList());
                FieldInfo fieldInfo = createFieldInfo(
                        headers.get(i), inferFieldTypeFromSamples(fieldSamples), i);
                if (!fieldSamples.isEmpty()) {
                    fieldInfo.setSampleValue(fieldSamples.get(0));
                }
                fields.add(fieldInfo);
            }

        } catch (IOException e) {
            log.error("Failed to detect CSV fields", e);
            throw new FileCollectionException(FileCollectionErrorCode.FILE_PARSE_ERROR, "Failed to detect CSV fields: " + e.getMessage());
        }

        return fields;
    }

    /**
     * Returns a lazy iterator over the CSV rows.
     * <p>
     * The underlying reader stays open while the iterator is in use and is
     * released automatically once the iterator is exhausted. Callers that
     * abandon the iterator early are responsible for closing the supplied
     * {@code inputStream} themselves.
     *
     * @param inputStream CSV content
     * @param config      parser configuration
     * @return iterator of row maps keyed by field name (insertion-ordered)
     * @throws FileCollectionException with {@code FILE_PARSE_ERROR} on I/O failure
     */
    @Override
    public Iterator<Map<String, Object>> parse(InputStream inputStream, ParserConfig config) {
        validateInputStream(inputStream);
        validateConfig(config);

        BufferedReader reader = null;
        try {
            reader = new BufferedReader(
                    new InputStreamReader(inputStream, config.getCharset()));
            CSVParser csvParser = buildCSVFormat(config).parse(reader);
            return new CsvIterator(csvParser, config);
        } catch (IOException e) {
            // FIX: previously the reader leaked when parser construction failed.
            closeQuietly(reader);
            log.error("Failed to parse CSV file", e);
            throw new FileCollectionException(FileCollectionErrorCode.FILE_PARSE_ERROR, "Failed to parse CSV file: " + e.getMessage());
        }
    }

    /**
     * Parses the CSV content in streaming mode, pushing each row to the consumer.
     * <p>
     * Honors {@code hasHeader} (first record becomes field names), falls back to
     * positional {@code column_i} names otherwise, and optionally skips rows whose
     * values are all blank when {@code skipEmptyRows} is set.
     *
     * @param inputStream CSV content; consumed and closed by this method
     * @param config      parser configuration
     * @param consumer    callback invoked once per data row
     * @throws FileCollectionException with {@code FILE_PARSE_ERROR} on I/O failure
     */
    @Override
    public void parseStream(InputStream inputStream, ParserConfig config, Consumer<Map<String, Object>> consumer) {
        validateInputStream(inputStream);
        validateConfig(config);

        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(inputStream, config.getCharset()));
             CSVParser csvParser = buildCSVFormat(config).parse(reader)) {

            List<String> headers = new ArrayList<>();
            boolean first = true;

            for (CSVRecord record : csvParser) {
                // First record becomes the header when configured.
                if (first && config.getHasHeader()) {
                    headers.addAll(readHeaders(record));
                    first = false;
                    continue;
                }
                first = false;

                // No header row: derive positional field names from the first data record.
                if (headers.isEmpty()) {
                    for (int i = 0; i < record.size(); i++) {
                        headers.add("column_" + i);
                    }
                }

                Map<String, Object> rowData = buildRow(record, headers, config);

                // Skip rows whose values are all empty when configured to do so.
                if (config.getSkipEmptyRows() && isEmptyRow(rowData)) {
                    continue;
                }

                consumer.accept(rowData);
            }

        } catch (IOException e) {
            log.error("Failed to parse CSV file in stream mode", e);
            throw new FileCollectionException(FileCollectionErrorCode.FILE_PARSE_ERROR, "Failed to parse CSV file in stream mode: " + e.getMessage());
        }
    }

    /**
     * Cheaply validates that the stream looks like parseable CSV by attempting
     * to read the first record.
     *
     * @param inputStream CSV content; consumed and closed by this method
     * @param config      parser configuration
     * @return {@code true} if at least one record can be read, {@code false} otherwise
     */
    @Override
    public boolean validate(InputStream inputStream, ParserConfig config) {
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(inputStream, config.getCharset()));
             CSVParser csvParser = buildCSVFormat(config).parse(reader)) {

            return csvParser.iterator().hasNext();

        } catch (Exception e) {
            // Any failure (bad charset, malformed quoting, null config field, …)
            // simply means "not valid CSV for this config".
            log.warn("CSV validation failed: {}", e.getMessage());
            return false;
        }
    }

    /**
     * Builds the Commons-CSV format from the parser configuration.
     * <p>
     * Header skipping is handled manually by the callers, so
     * {@code setSkipHeaderRecord} is intentionally not used (the previous
     * conditional call with {@code false} was a no-op and has been removed).
     *
     * @param config parser configuration
     * @return configured {@link CSVFormat}
     */
    private CSVFormat buildCSVFormat(ParserConfig config) {
        return CSVFormat.DEFAULT.builder()
                .setDelimiter(config.getDelimiter())
                .setQuote(config.getQuoteChar())
                .setEscape(config.getEscapeChar())
                .setIgnoreEmptyLines(config.getSkipEmptyRows())
                .setTrim(config.getTrimFields())
                .build();
    }

    /**
     * Reads a header record into cleaned field names.
     *
     * @param record the header record
     * @return cleaned field names in column order
     */
    private List<String> readHeaders(CSVRecord record) {
        List<String> headers = new ArrayList<>(record.size());
        for (String header : record) {
            headers.add(cleanFieldName(header));
        }
        return headers;
    }

    /**
     * Appends one record's values to the per-column sample lists,
     * ignoring trailing columns beyond {@code columnCount}.
     *
     * @param record      source record
     * @param columnCount number of known columns
     * @param samples     per-column sample accumulator (column index → values)
     */
    private void collectSamples(CSVRecord record, int columnCount, Map<Integer, List<String>> samples) {
        for (int i = 0; i < record.size() && i < columnCount; i++) {
            samples.computeIfAbsent(i, k -> new ArrayList<>()).add(record.get(i));
        }
    }

    /**
     * Converts a CSV record into an insertion-ordered row map keyed by field name.
     * Extra columns beyond the header width are dropped, matching prior behavior.
     *
     * @param record  source record
     * @param headers field names
     * @param config  parser configuration (drives per-value processing)
     * @return row data map
     */
    private Map<String, Object> buildRow(CSVRecord record, List<String> headers, ParserConfig config) {
        Map<String, Object> rowData = new LinkedHashMap<>();
        for (int i = 0; i < record.size() && i < headers.size(); i++) {
            rowData.put(headers.get(i), processFieldValue(record.get(i), config));
        }
        return rowData;
    }

    /**
     * Best-effort close that swallows (but logs at debug) any {@link IOException}.
     *
     * @param closeable resource to close; may be {@code null}
     */
    private static void closeQuietly(Closeable closeable) {
        if (closeable != null) {
            try {
                closeable.close();
            } catch (IOException ignored) {
                // best effort — nothing useful the caller can do here
            }
        }
    }

    /**
     * Lazy row iterator over a {@link CSVParser}.
     * <p>
     * Reads the header record eagerly (when configured) and closes the parser —
     * and with it the underlying reader — once the records are exhausted.
     * Not thread-safe.
     */
    private class CsvIterator implements Iterator<Map<String, Object>> {
        private final CSVParser csvParser;
        private final ParserConfig config;
        private final Iterator<CSVRecord> recordIterator;
        private final List<String> headers;

        CsvIterator(CSVParser csvParser, ParserConfig config) {
            this.csvParser = csvParser;
            this.config = config;
            this.recordIterator = csvParser.iterator();
            this.headers = new ArrayList<>();

            // Consume the header record up front so next() only yields data rows.
            if (config.getHasHeader() && recordIterator.hasNext()) {
                headers.addAll(readHeaders(recordIterator.next()));
            }
        }

        @Override
        public boolean hasNext() {
            boolean more = recordIterator.hasNext();
            if (!more) {
                // FIX: release the underlying reader once fully consumed;
                // previously the parser was never closed.
                closeQuietly(csvParser);
            }
            return more;
        }

        @Override
        public Map<String, Object> next() {
            if (!hasNext()) {
                throw new NoSuchElementException();
            }

            CSVRecord record = recordIterator.next();

            // No header row: derive positional field names from the first data record.
            if (headers.isEmpty()) {
                for (int i = 0; i < record.size(); i++) {
                    headers.add("column_" + i);
                }
            }

            return buildRow(record, headers, config);
        }
    }
}
