package org.execute.tool.使用.A提取csv中关键词;

import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.text.SimpleDateFormat;
import java.util.*;
import java.util.stream.Collectors;

public class Functioning {

    /**
     * Extracts keyword columns from a CSV export, normalizes the values
     * (split on commas, de-duplicate, filter by Chinese-character ratio,
     * add simplified/traditional script variants, drop single characters,
     * reduce to smallest units) and writes batched INSERT statements plus
     * several keyword lists to .txt files next to the source CSV.
     */
    public static void main(String[] args) {
        System.out.println("提取CSV文件中的指定列数据 生成sql开始");
        String csvFilePath = "E:\\Downloads\\2025年1月21日204450.csv";
        int[] columnIndices = {0, 4};

        // Extract the configured columns from the CSV (project helper).
        Map<Integer, List<String>> columnDataMap =
                CSVColumnExtractor.extractColumnsFromCSV(csvFilePath, columnIndices);

        // Column 0 is used as-is; column 4 holds comma-separated values.
        List<String> strings = new ArrayList<>(columnDataMap.get(0));
        strings.addAll(splitColumnData(columnDataMap.get(4)));

        // First de-duplication pass.
        Set<String> set = new HashSet<>(strings);

        // Keep only entries with at least 10% Chinese characters.
        List<String> screening = new ArrayList<>();
        for (String s : set) {
            double v = StringProcessing1.calculateChinesePercentage(s);
            if (v < 0.1) {
                continue;
            }
            screening.add(s);
        }

        // Add both script variants so either spelling matches later.
        // NOTE(review): the variable names and the helper names look swapped
        // relative to each other in the original; harmless here because both
        // result lists are merged below — but worth confirming upstream.
        List<String> simplified = StringProcessing1.simplifiedToTraditional(screening);
        List<String> traditional = StringProcessing1.traditionalToSimplified(screening);

        // Merge originals with both variants, then de-duplicate again.
        List<String> mergeList = new ArrayList<>();
        mergeList.addAll(simplified);
        mergeList.addAll(traditional);
        mergeList.addAll(screening);
        Set<String> secondaryDeduplication = new HashSet<>(mergeList);

        // Drop single-character entries (and empties).
        // FIX: the original filtered length() > 0, which only removed empty
        // strings despite its "remove single characters" (去除单字) comment.
        List<String> secondaryDeduplicationList = secondaryDeduplication.stream()
                .filter(s -> s.length() > 1)
                .collect(Collectors.toList());

        // Reduce each keyword to its smallest unit (project helper).
        List<String> strings1 = StringProcessing1.theSmallestUnit(secondaryDeduplicationList);

        // Convert the keywords into batched INSERT statements and echo them.
        List<String> sqlString = buildInsertStatements(strings1, 100);
        for (String sql : sqlString) {
            System.out.println(sql);
        }

        // Persist the SQL and the various keyword lists.
        String sqlFile = writeSQLToFile(sqlString, csvFilePath);

        // Keywords whose length is greater than 4 (i.e. >= 5 chars).
        List<String> theExtractionLengthIsGreaterThan4 = strings1.stream()
                .filter(s -> s.length() >= 5)
                .collect(Collectors.toList());
        String keywordFile = writeSQLToFile(theExtractionLengthIsGreaterThan4, csvFilePath);

        // Parsed keywords and the pre-parse (filtered only) list.
        String parsedKeywordsFile = writeSQLToFile(strings1, csvFilePath);
        String notDeduplicated = writeSQLToFile(screening, csvFilePath);

        System.out.println("提取CSV文件中的指定列数据 生成sql结束");
        System.out.println("原文件路径为：  \n   " + csvFilePath);
        System.out.println("未解析后的关键词文件路径为：  \n   " + notDeduplicated);
        System.out.println("解析后的关键词文件路径为：  \n   " + parsedKeywordsFile);
        System.out.println("提取长度大于4的关键词：  \n   " + keywordFile);
        System.out.println("Sql文件路径为：  \n   " + sqlFile);
    }

    /**
     * Builds one {@code INSERT IGNORE} statement per batch of keywords.
     *
     * @param keywords  values for the {@code keyword} column
     * @param batchSize maximum number of VALUES tuples per statement
     * @return one complete SQL statement string per batch
     */
    private static List<String> buildInsertStatements(List<String> keywords, int batchSize) {
        List<String> statements = new ArrayList<>();
        for (List<String> batch : splitIntoBatches(keywords, batchSize)) {
            StringBuilder sql = new StringBuilder("INSERT IGNORE INTO jx_keyword_config (keyword) VALUES ");
            boolean first = true;
            for (String s : batch) {
                if (!first) {
                    sql.append(", ");
                }
                // FIX: escape embedded single quotes ('' is the SQL escape);
                // the original concatenated raw values, producing broken SQL
                // for any keyword containing an apostrophe.
                sql.append("('").append(s.replace("'", "''")).append("')");
                first = false;
            }
            sql.append(";");
            statements.add(sql.toString());
        }
        return statements;
    }

    /**
     * Splits every entry of the given column on commas and flattens the
     * result into a single list.
     *
     * @param columnData raw cell values; may be null or empty
     * @return flattened list of comma-separated tokens (never null)
     */
    public static List<String> splitColumnData(List<String> columnData) {
        List<String> splitColumnData = new ArrayList<>();
        if (columnData == null || columnData.isEmpty()) {
            return splitColumnData;
        }
        for (String data : columnData) {
            Collections.addAll(splitColumnData, data.split(","));
        }
        return splitColumnData;
    }

    /**
     * Partitions the list into consecutive batches of at most
     * {@code batchSize} elements. The returned batches are subList views
     * backed by the original list — do not mutate the source afterwards.
     */
    public static List<List<String>> splitIntoBatches(List<String> list, int batchSize) {
        List<List<String>> batches = new ArrayList<>();
        for (int i = 0; i < list.size(); i += batchSize) {
            batches.add(list.subList(i, Math.min(list.size(), i + batchSize)));
        }
        return batches;
    }

    /**
     * Writes each line to a new UTF-8 .txt file next to the CSV, named after
     * the CSV with a random UUID suffix.
     *
     * @param sqlStrings  lines to write, one per output line
     * @param csvFilePath path of the source CSV (determines output location)
     * @return the path of the file that was written
     */
    public static String writeSQLToFile(List<String> sqlStrings, String csvFilePath) {
        File csvFile = new File(csvFilePath);
        String randomString = UUID.randomUUID().toString();
        // FIX: getParent() is null for a bare filename; fall back to the
        // current directory instead of producing a "null\..." path.
        String parent = csvFile.getParent() == null ? "." : csvFile.getParent();
        String outputFilePath = parent + File.separator
                + csvFile.getName().replace(".csv", "_" + randomString + ".txt");
        // FIX: write with an explicit UTF-8 charset; the original FileWriter
        // used the platform default, which mangles Chinese text on Windows.
        try (BufferedWriter writer = Files.newBufferedWriter(Paths.get(outputFilePath), StandardCharsets.UTF_8)) {
            for (String sqlString : sqlStrings) {
                writer.write(sqlString);
                writer.newLine();
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
        return outputFilePath;
    }
}
