package com.ruoyi.project.access.service;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import java.io.*;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

@Service
public class ReadCSV {

    private static final Logger LOGGER = Logger.getLogger(ReadCSV.class.getName());

    /** Number of rows handed to the extraction service before the buffer is cleared. */
    private static final int BATCH_SIZE = 200;

    @Autowired
    private DataAttributeExtractionService dataAttributeExtractionService;

    /**
     * Imports the three appliance-chain CSV files and feeds every row to
     * {@link DataAttributeExtractionService#extractDataInfos}.
     * <p>
     * A failure reading one file is logged and does not abort the import of the
     * remaining files (preserves the original per-file try/catch behavior).
     */
    public void importData() {
        // TODO(review): hard-coded absolute developer path — should come from
        // configuration (e.g. @Value or application.yml) before deployment.
        String basePath = "C:\\Users\\cheny\\Desktop\\new_code\\RuoYi-Vue-fast-master\\src\\main\\resources\\";
        String[] filePaths = {
                basePath + "appliance_chain_data.csv",
                basePath + "appliance_chain_data_2.csv",
                basePath + "appliance_chain_data_3.csv"
        };
        for (String filePath : filePaths) {
            importFile(filePath);
        }
    }

    /**
     * Parses one CSV file (first record treated as header, cells trimmed) and
     * forwards its rows to the extraction service in batches of {@link #BATCH_SIZE}.
     *
     * @param filePath absolute path of the CSV file to import
     */
    private void importFile(String filePath) {
        // Explicit UTF-8: the header-BOM stripping below shows the files are
        // UTF-8 encoded; FileReader's platform-default charset was fragile.
        try (Reader reader = new BufferedReader(
                     new InputStreamReader(new FileInputStream(filePath), StandardCharsets.UTF_8));
             CSVParser csvParser = CSVFormat.DEFAULT.withFirstRecordAsHeader().withTrim().parse(reader)) {

            Map<String, Integer> headerMap = csvParser.getHeaderMap();
            List<Map<String, Object>> batch = new ArrayList<>(BATCH_SIZE);

            // Iterate the parser lazily instead of getRecords(): avoids loading
            // the whole file into memory, which defeated the point of batching.
            for (CSVRecord record : csvParser) {
                Map<String, Object> row = new HashMap<>();
                for (Map.Entry<String, Integer> column : headerMap.entrySet()) {
                    // Strip a leading UTF-8 BOM that may cling to the first header.
                    String header = column.getKey().replace("\uFEFF", "");
                    row.put(header, record.get(column.getValue()));
                }
                batch.add(row);
                if (batch.size() >= BATCH_SIZE) {
                    flushBatch(batch);
                }
            }
            // Hand over the trailing partial batch, if any.
            flushBatch(batch);
        } catch (IOException e) {
            LOGGER.log(Level.SEVERE, "Failed to import CSV file: " + filePath, e);
        }
    }

    /**
     * Sends every buffered row to the extraction service, then clears the buffer
     * so it can be reused for the next batch.
     *
     * @param batch accumulated rows; empty after this call
     */
    private void flushBatch(List<Map<String, Object>> batch) {
        for (Map<String, Object> row : batch) {
            dataAttributeExtractionService.extractDataInfos(row);
        }
        batch.clear();
    }
}
