package com.minglead.crawler.service.impl;

import com.alibaba.fastjson.JSON;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.minglead.crawler.constant.YqCrawlerConstant;
import com.minglead.crawler.constant.YqModelAnalysisConstant;
import com.minglead.crawler.entity.*;
import com.minglead.crawler.enums.*;
import com.minglead.crawler.exception.BizCommonException;
import com.minglead.crawler.feign.PriceFeignClient;
import com.minglead.crawler.service.*;
import com.minglead.crawler.shell.GosShell;
import com.minglead.crawler.util.*;
import com.opencsv.CSVReader;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Configuration;
import org.springframework.scheduling.annotation.EnableScheduling;

import java.io.*;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import java.text.SimpleDateFormat;
import java.time.YearMonth;
import java.time.ZoneId;
import java.util.*;
import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import java.util.stream.Stream;

@Configuration
@EnableScheduling
public class YqModelAnalysisTaskImpl {

    @Autowired
    private YqModelAnalysisService yqModelAnalysisService;
    @Autowired
    private YqBasicInfoService yqBasicInfoService;
    @Autowired
    private YqBasicInfoDetailService yqBasicInfoDetailService;
    @Autowired
    private YqCrawlerService yqCrawlerService;
    @Autowired
    private DcDataBigmercGrabLogService dcDataBigmercGrabLogService;
    @Autowired
    private DcDataMacroIndicatorDataService dcDataMacroIndicatorDataService;
    @Autowired
    private PriceFeignClient priceFeignClient;

    // Temporarily holds the first-level indicator name while parsing the Guangdong
    // statistics bureau "vegetable and fruit production" (蔬菜瓜果生产情况) results.
    private String situationFirstIndicatorName = "";

    // Default interval (ms) returned by the tasks when the crawler is in STOPPED state: 30 seconds.
    private final int stopStatusTime = 30000;

    /**
     * Crawler data-entry task: finds the crawler configuration matching the given
     * media, runs the corresponding crawler, and loads its CSV result file(s)
     * into the database.
     *
     * @param e media whose crawler should run
     * @return elapsed time in milliseconds, or {@code stopStatusTime} when the
     *         crawler is flagged STOPPED before or after the run
     * @throws BizCommonException when no configuration matches the media
     */
    public long enterTask(CrawlerMediaEnum e) {
        long l1 = System.currentTimeMillis();
        List<YqCrawlerEntity> crawlerEntityList = yqCrawlerService.list();
        if (crawlerEntityList.isEmpty()) {
            throw new BizCommonException("媒体:" + e.getDesc() + "未找到有效媒体配置");
        }
        // Only the first configuration whose name matches the media description is used.
        YqCrawlerEntity crawlerEntity = crawlerEntityList.stream()
                .filter(c -> e.getDesc().equals(c.getName()))
                .findFirst()
                .orElseThrow(() -> new BizCommonException("媒体:" + e.getDesc() + "未找到有效媒体配置"));
        if (CrawlerStateEnum.STOPPED.getCode().equals(String.valueOf(crawlerEntity.getState()))) {
            System.out.println("媒体:" + e.getDesc() + " 爬虫已停用==========");
            return stopStatusTime;
        }
        System.out.println("媒体:" + e.getDesc() + "新闻数据录入定时任务开始==========");
        try {
            // Each branch: crawl, update crawler config, parse result file(s), insert
            // (GD_* branches additionally archive the processed files).
            switch (e) {
                case FINANCESINA:
                    // Sina Finance CSV header: 名称,最新价,人民币报价,涨跌额,涨跌幅,开盘价,最高价,最低价,昨日结算价,持仓量,买价,卖价,行情时间
                    financeSinaCrawlData(crawlerEntity, e.getDesc());
                    break;
                case CBOT:
                    // CBOT CSV header: 名称,最新价,涨跌,涨跌幅,昨收,今开,最高,最低,买入,买量,卖出,卖量,成交量,持仓量
                    cbotCrawlData(crawlerEntity, e.getDesc());
                    break;
                case INEDAILY:
                    // Shanghai INE daily data: two CSV result files with settlement-price,
                    // fee-rate and margin-rate columns per commodity/delivery month.
                    ineDailyCrawlData(crawlerEntity, e.getDesc());
                    break;
                case GD_NATIONAL:
                    // GD statistics bureau: main national-economy indicators (指标, 1-9月, 增长%).
                    gdProvStatisticsCrawlData(crawlerEntity, YqModelAnalysisConstant.gdNationalBasePath
                            , YqModelAnalysisConstant.GD_NATIONAL_RET_PATH, YqModelAnalysisConstant.gdNationalGuidangDir, YqModelAnalysisConstant.GD_NATIONAL_ADDRESS
                            , CrawlerDataCategoreEnum.GD_NATIONAL, CrawlerStatisticsTypeOfGDProvEnum.NATIONAL, CrawlerStatisticsTypeOfGDProvEnum.NATIONAL);
                    break;
                case GD_QUARTER:
                    // GD statistics bureau: quarterly GDP.
                    // NOTE(review): data category is GD_NATIONAL here while every other GD_*
                    // branch uses its own category — confirm this is intended, not a copy-paste slip.
                    gdProvStatisticsCrawlData(crawlerEntity, YqModelAnalysisConstant.gdQuarterBasePath
                            , YqModelAnalysisConstant.GD_QUARTER_RET_PATH, YqModelAnalysisConstant.gdQuarterGuidangDir, YqModelAnalysisConstant.GD_QUARTERL_ADDRESS
                            , CrawlerDataCategoreEnum.GD_NATIONAL, CrawlerStatisticsTypeOfGDProvEnum.QUARTER, CrawlerStatisticsTypeOfGDProvEnum.QUARTER);
                    break;
                case GD_SITUATION:
                    // GD statistics bureau: vegetable/fruit production (播种面积, 产量, 增速).
                    gdProvStatisticsCrawlData(crawlerEntity, YqModelAnalysisConstant.gdSituationBasePath
                            , YqModelAnalysisConstant.GD_SITUATION_RET_PATH, YqModelAnalysisConstant.gdSituationGuidangDir, YqModelAnalysisConstant.GD_SITUATION_ADDRESS
                            , CrawlerDataCategoreEnum.GD_SITUATION, CrawlerStatisticsTypeOfGDProvEnum.SITUATION, CrawlerStatisticsTypeOfGDProvEnum.SITUATION);
                    break;
                case GD_ADD:
                    // GD statistics bureau: industrial added value (本月, 累计, 同比增长).
                    gdProvStatisticsCrawlData(crawlerEntity, YqModelAnalysisConstant.gdAddBasePath
                            , YqModelAnalysisConstant.GD_ADD_RET_PATH, YqModelAnalysisConstant.gdAddGuidangDir, YqModelAnalysisConstant.GD_ADD_ADDRESS
                            , CrawlerDataCategoreEnum.GD_ADD, CrawlerStatisticsTypeOfGDProvEnum.ADD, CrawlerStatisticsTypeOfGDProvEnum.ADD);
                    break;
                case GD_PRODUCTION:
                    // GD statistics bureau: output of major products (产品名称, 计量单位, 本月, 累计, 增长).
                    gdProvStatisticsCrawlData(crawlerEntity, YqModelAnalysisConstant.gdProductionBasePath
                            , YqModelAnalysisConstant.GD_PRODUCTION_RET_PATH, YqModelAnalysisConstant.gdProductionGuidangDir, YqModelAnalysisConstant.GD_PRODUCTION_ADDRESS
                            , CrawlerDataCategoreEnum.GD_PRODUCTION, CrawlerStatisticsTypeOfGDProvEnum.PRODUCTION, CrawlerStatisticsTypeOfGDProvEnum.PRODUCTION);
                    break;
                case GD_INDUSTRY:
                    // GD statistics bureau: main industrial economic indicators, split
                    // across three result files (_0_/_1_/_2_) by enterprise size/ownership.
                    gdProvStatisticsCrawlData(crawlerEntity, YqModelAnalysisConstant.gdIndustryBasePath
                            , YqModelAnalysisConstant.GD_INDUSTRY_RET_PATH, YqModelAnalysisConstant.gdIndustryGuidangDir, YqModelAnalysisConstant.GD_INDUSTRY_ADDRESS
                            , CrawlerDataCategoreEnum.GD_INDUSTRY, CrawlerStatisticsTypeOfGDProvEnum.INDUSTRY, CrawlerStatisticsTypeOfGDProvEnum.INDUSTRY);
                    break;
                case GD_TOTAL:
                    // GD statistics bureau: total retail sales of consumer goods.
                    gdProvStatisticsCrawlData(crawlerEntity, YqModelAnalysisConstant.gdTotalBasePath
                            , YqModelAnalysisConstant.GD_TOTAL_RET_PATH, YqModelAnalysisConstant.gdTotalGuidangDir, YqModelAnalysisConstant.GD_TOTAL_ADDRESS
                            , CrawlerDataCategoreEnum.GD_TOTAL, CrawlerStatisticsTypeOfGDProvEnum.TOTAL, CrawlerStatisticsTypeOfGDProvEnum.TOTAL);
                    break;
                default:
                    throw new BizCommonException("媒体:" + e.getDesc() + "未找到有效媒体配置");
            }
            // Re-check the configuration: the crawler may have been stopped while running.
            YqCrawlerEntity overCrawlerEntity = yqCrawlerService.getById(crawlerEntity.getId());
            if (CrawlerStateEnum.STOPPED.getCode().equals(String.valueOf(overCrawlerEntity.getState()))) {
                return stopStatusTime;
            }
        } catch (Exception ex) {
            ex.printStackTrace();
            System.out.println("媒体:" + e.getDesc() + "数据录入出错============ 耗时：" + (System.currentTimeMillis() - l1));
            return (System.currentTimeMillis() - l1);
        }
        // Fixed: previously logged "新闻模型分析定时任务结束" (copy-paste from analysisTask);
        // this is the data-entry task.
        System.out.println("媒体:" + e.getDesc() + "新闻数据录入定时任务结束============ 耗时：" + (System.currentTimeMillis() - l1));
        return (System.currentTimeMillis() - l1);
    }

    /**
     * Model-analysis task: finds the crawler configuration matching the given
     * media, runs the corresponding news crawler, then feeds the scraped items
     * through model analysis and persists them to the database and ES.
     *
     * @param e media whose crawler should run
     * @return elapsed time in milliseconds, or {@code stopStatusTime} when the
     *         crawler is flagged STOPPED or produced no data
     * @throws BizCommonException when no configuration matches the media
     */
    public long analysisTask(CrawlerMediaEnum e) {
        long l1 = System.currentTimeMillis();
        List<YqCrawlerEntity> crawlerEntityList = yqCrawlerService.list();
        if (crawlerEntityList.isEmpty()) {
            throw new BizCommonException("媒体:" + e.getDesc() + "未找到有效媒体配置");
        }
        // Only the first configuration whose name matches the media description is used.
        YqCrawlerEntity crawlerEntity = crawlerEntityList.stream()
                .filter(c -> e.getDesc().equals(c.getName()))
                .findFirst()
                .orElseThrow(() -> new BizCommonException("媒体:" + e.getDesc() + "未找到有效媒体配置"));
        if (CrawlerStateEnum.STOPPED.getCode().equals(String.valueOf(crawlerEntity.getState()))) {
            System.out.println("媒体:" + e.getDesc() + " 爬虫已停用==========");
            return stopStatusTime;
        }
        System.out.println("媒体:" + e.getDesc() + "新闻模型分析定时任务开始==========");
        try {
            // Each branch: crawl, update crawler config, return the parsed result items.
            List<YqMediaEntity> mediaEntityList;
            switch (e) {
                case TOUTIAO:
                    mediaEntityList = toutiaoCrawlData(crawlerEntity);
                    break;
                case WEIBO:
                    mediaEntityList = weiboCrawlData(crawlerEntity);
                    break;
                case XIAOHONGSHU:
                    mediaEntityList = xiaohongshuCrawlData(crawlerEntity);
                    break;
                case CHINANEWSNETWORK:
                    mediaEntityList = chinaNewsNetworkCrawlData(crawlerEntity);
                    break;
                case XINHUA:
                    mediaEntityList = xinhuaCrawlData(crawlerEntity);
                    break;
                case TENCENT:
                    mediaEntityList = tencentCrawlData(crawlerEntity);
                    break;
                case SOUGOUWECHAT:
                    mediaEntityList = sougouWechatCrawlData(crawlerEntity);
                    break;
                default:
                    throw new BizCommonException("媒体:" + e.getDesc() + "未找到有效媒体配置");
            }
            if (mediaEntityList.isEmpty()) {
                System.out.println("媒体:" + e.getDesc() + "未爬取到任何数据==========");
                System.out.println("媒体:" + e.getDesc() + "新闻模型分析定时任务结束============ 耗时：" + (System.currentTimeMillis() - l1));
                return stopStatusTime;
            }
            // Run model analysis and write the results to the database and ES.
            modelAnalysisInsertDBAndES(mediaEntityList);
            // Re-check the configuration: the crawler may have been stopped while running.
            YqCrawlerEntity overCrawlerEntity = yqCrawlerService.getById(crawlerEntity.getId());
            if (CrawlerStateEnum.STOPPED.getCode().equals(String.valueOf(overCrawlerEntity.getState()))) {
                return stopStatusTime;
            }
        } catch (Exception ex) {
            // Fixed: previously only ex.getMessage() was printed, losing the stack trace;
            // log the full trace like enterTask does.
            ex.printStackTrace();
            System.out.println("媒体:" + e.getDesc() + "新闻模型分析定时任务结束============ 耗时：" + (System.currentTimeMillis() - l1));
            return (System.currentTimeMillis() - l1);
        }
        System.out.println("媒体:" + e.getDesc() + "新闻模型分析定时任务结束============ 耗时：" + (System.currentTimeMillis() - l1));
        return (System.currentTimeMillis() - l1);
    }

    /** Toutiao (今日头条) crawler: runs the scraper script, waits for completion, parses its CSV. */
    private List<YqMediaEntity> toutiaoCrawlData(YqCrawlerEntity entity) {
        // Python scraper driven by the generated shell wrapper.
        String script = YqModelAnalysisConstant.toutiaoBasePath + File.separator + "toutiao.py";
        // Derive shell/log/result file locations for this run.
        YqCrawlResultEntity paths = getResultPaths(entity, script);
        // Write the shell wrapper that launches the scraper.
        toutiaoShellScript(paths);
        Date startedAt = new Date();
        String[] launchCmd = {"sh", paths.getShellFilePath()};
        GosShell shell = new GosShell(YqModelAnalysisConstant.crawlerJobPath, launchCmd, paths.getLogPath());
        // Clear the stale completion marker, launch, and block until the marker reports done.
        MergeUtil.deleteFile(YqModelAnalysisConstant.TOUTIAO_RET_PATH);
        shell.run();
        monitorRetFile(YqModelAnalysisConstant.TOUTIAO_RET_PATH);
        // Persist the refreshed crawler configuration record.
        updateCrawler(entity, paths, startedAt);
        // Convert the CSV rows to media entities.
        return toutiaoParseCsvFile(paths.getResultCsv());
    }

    /** Weibo crawler (cookie must be replaced weekly): runs the spider and parses its CSV. */
    private List<YqMediaEntity> weiboCrawlData(YqCrawlerEntity entity) {
        // No standalone python script: the shell wrapper invokes scrapy directly.
        YqCrawlResultEntity paths = getResultPaths(entity, "");
        // Write the shell wrapper that launches the spider.
        weiboShellScript(paths);
        Date startedAt = new Date();
        String[] launchCmd = {"sh", paths.getShellFilePath()};
        GosShell shell = new GosShell(YqModelAnalysisConstant.crawlerJobPath, launchCmd, paths.getLogPath());
        // Clear the stale completion marker, launch, and block until the marker reports done.
        MergeUtil.deleteFile(YqModelAnalysisConstant.WEIBO_RET_PATH);
        shell.run();
        monitorRetFile(YqModelAnalysisConstant.WEIBO_RET_PATH);
        // Persist the refreshed crawler configuration record.
        updateCrawler(entity, paths, startedAt);
        // Convert the CSV rows to media entities.
        return weiboParseCsvFile(paths.getResultCsv());
    }

    /** Xiaohongshu crawler — marked as cancelled (取消) in the original notes. */
    private List<YqMediaEntity> xiaohongshuCrawlData(YqCrawlerEntity entity) {
        // Python scraper driven by the generated shell wrapper.
        String script = YqModelAnalysisConstant.redBookBasePath + File.separator + "main.py";
        // Derive shell/log/result file locations for this run.
        YqCrawlResultEntity paths = getResultPaths(entity, script);
        // Write the shell wrapper that launches the scraper.
        xiaohongshuShellScript(paths);
        Date startedAt = new Date();
        String[] launchCmd = {"sh", paths.getShellFilePath()};
        GosShell shell = new GosShell(YqModelAnalysisConstant.crawlerJobPath, launchCmd, paths.getLogPath());
        // Clear the stale completion marker, launch, and block until the marker reports done.
        MergeUtil.deleteFile(YqModelAnalysisConstant.XIAOHONGSHU_RET_PATH);
        shell.run();
        monitorRetFile(YqModelAnalysisConstant.XIAOHONGSHU_RET_PATH);
        // Persist the refreshed crawler configuration record.
        updateCrawler(entity, paths, startedAt);
        // Convert the CSV rows to media entities.
        return xiaohongshuParseCsvFile(paths.getResultCsv());
    }

    /** China News Network (中国新闻网) crawler: runs the scraper and parses its CSV. */
    private List<YqMediaEntity> chinaNewsNetworkCrawlData(YqCrawlerEntity entity) {
        // Python scraper driven by the generated shell wrapper.
        String script = YqModelAnalysisConstant.chinaNewsNetworkBasePath + File.separator + "chinanews.py";
        // Derive shell/log/result file locations for this run.
        YqCrawlResultEntity paths = getResultPaths(entity, script);
        // Write the shell wrapper that launches the scraper.
        chinaNewsNetworkShellScript(paths);
        Date startedAt = new Date();
        String[] launchCmd = {"sh", paths.getShellFilePath()};
        GosShell shell = new GosShell(YqModelAnalysisConstant.crawlerJobPath, launchCmd, paths.getLogPath());
        // Clear the stale completion marker, launch, and block until the marker reports done.
        MergeUtil.deleteFile(YqModelAnalysisConstant.CHINANEWSNETWORK_RET_PATH);
        shell.run();
        monitorRetFile(YqModelAnalysisConstant.CHINANEWSNETWORK_RET_PATH);
        // Persist the refreshed crawler configuration record.
        updateCrawler(entity, paths, startedAt);
        // Convert the CSV rows to media entities.
        return chinaNewsNetworkParseCsvFile(paths.getResultCsv());
    }

    /** Xinhuanet (新华网) crawler: runs the scraper and parses its CSV. */
    private List<YqMediaEntity> xinhuaCrawlData(YqCrawlerEntity entity) {
        // Python scraper driven by the generated shell wrapper.
        String script = YqModelAnalysisConstant.xinhuaBasePath + File.separator + "xinhuanet.py";
        // Derive shell/log/result file locations for this run.
        YqCrawlResultEntity paths = getResultPaths(entity, script);
        // Write the shell wrapper that launches the scraper.
        xinhuaShellScript(paths);
        Date startedAt = new Date();
        String[] launchCmd = {"sh", paths.getShellFilePath()};
        GosShell shell = new GosShell(YqModelAnalysisConstant.crawlerJobPath, launchCmd, paths.getLogPath());
        // Clear the stale completion marker, launch, and block until the marker reports done.
        MergeUtil.deleteFile(YqModelAnalysisConstant.XINHUA_RET_PATH);
        shell.run();
        monitorRetFile(YqModelAnalysisConstant.XINHUA_RET_PATH);
        // Persist the refreshed crawler configuration record.
        updateCrawler(entity, paths, startedAt);
        // Convert the CSV rows to media entities.
        return xinhuaParseCsvFile(paths.getResultCsv());
    }

    /** Tencent News crawler: runs the scraper and parses its CSV. */
    private List<YqMediaEntity> tencentCrawlData(YqCrawlerEntity entity) {
        // Python scraper driven by the generated shell wrapper.
        String script = YqModelAnalysisConstant.tencentBasePath + File.separator + "tencent.py";
        // Derive shell/log/result file locations for this run.
        YqCrawlResultEntity paths = getResultPaths(entity, script);
        // Write the shell wrapper that launches the scraper.
        tencentShellScript(paths);
        Date startedAt = new Date();
        String[] launchCmd = {"sh", paths.getShellFilePath()};
        GosShell shell = new GosShell(YqModelAnalysisConstant.crawlerJobPath, launchCmd, paths.getLogPath());
        // Clear the stale completion marker, launch, and block until the marker reports done.
        MergeUtil.deleteFile(YqModelAnalysisConstant.TENCENT_RET_PATH);
        shell.run();
        monitorRetFile(YqModelAnalysisConstant.TENCENT_RET_PATH);
        // Persist the refreshed crawler configuration record.
        updateCrawler(entity, paths, startedAt);
        // Convert the CSV rows to media entities.
        return tencentParseCsvFile(paths.getResultCsv());
    }

    /** Sogou-WeChat crawler: runs the scraper and parses its CSV. */
    private List<YqMediaEntity> sougouWechatCrawlData(YqCrawlerEntity entity) {
        // Python scraper driven by the generated shell wrapper.
        String script = YqModelAnalysisConstant.sougouWechatBasePath + File.separator + "sougou_wechat.py";
        // Derive shell/log/result file locations for this run.
        YqCrawlResultEntity paths = getResultPaths(entity, script);
        // Write the shell wrapper that launches the scraper.
        sougouWechatShellScript(paths);
        Date startedAt = new Date();
        String[] launchCmd = {"sh", paths.getShellFilePath()};
        GosShell shell = new GosShell(YqModelAnalysisConstant.crawlerJobPath, launchCmd, paths.getLogPath());
        // Clear the stale completion marker, launch, and block until the marker reports done.
        MergeUtil.deleteFile(YqModelAnalysisConstant.SOUGOUWECHAT_RET_PATH);
        shell.run();
        monitorRetFile(YqModelAnalysisConstant.SOUGOUWECHAT_RET_PATH);
        // Persist the refreshed crawler configuration record.
        updateCrawler(entity, paths, startedAt);
        // Convert the CSV rows to media entities.
        return sougouWechatParseCsvFile(paths.getResultCsv());
    }

    /** Sina Finance crawler: runs the scraper and loads the CSV results into the DB. */
    private void financeSinaCrawlData(YqCrawlerEntity entity, String platForm) {
        Date startedAt = new Date();
        // Python scraper driven by the generated shell wrapper.
        String script = YqModelAnalysisConstant.financeSinaBasePath + File.separator + "finance-sina.py";
        // Derive shell/log/result file locations for this run.
        YqCrawlResultEntity paths = getResultPaths(entity, script);
        // Write the shell wrapper that launches the scraper.
        financeSinaShellScript(paths);
        String[] launchCmd = {"sh", paths.getShellFilePath()};
        GosShell shell = new GosShell(YqModelAnalysisConstant.crawlerJobPath, launchCmd, paths.getLogPath());
        // Clear the stale completion marker, launch, and block until the marker reports done.
        MergeUtil.deleteFile(YqModelAnalysisConstant.FINANCESINA_RET_PATH);
        shell.run();
        monitorRetFile(YqModelAnalysisConstant.FINANCESINA_RET_PATH);
        // Persist the refreshed crawler configuration record.
        updateCrawler(entity, paths, startedAt);
        // Parse every CSV in the result directory and insert; the trailing int (8)
        // is a parser parameter whose semantics live in FCIEntryCommonParse.
        FCIEntryCommonParse(paths.getResultCsvDir(), platForm, YqModelAnalysisConstant.FINANCESINA_ADDRESS, 8);
    }

    /** CBOT (Chicago Board of Trade) crawler: runs the scraper and loads the CSV results into the DB. */
    private void cbotCrawlData(YqCrawlerEntity entity, String platForm) {
        // Python scraper driven by the generated shell wrapper.
        String script = YqModelAnalysisConstant.cbotBasePath + File.separator + "cbot.py";
        // Derive shell/log/result file locations for this run.
        YqCrawlResultEntity paths = getResultPaths(entity, script);
        // Write the shell wrapper that launches the scraper.
        cbotShellScript(paths);
        Date startedAt = new Date();
        String[] launchCmd = {"sh", paths.getShellFilePath()};
        GosShell shell = new GosShell(YqModelAnalysisConstant.crawlerJobPath, launchCmd, paths.getLogPath());
        // Clear the stale completion marker, launch, and block until the marker reports done.
        MergeUtil.deleteFile(YqModelAnalysisConstant.CBOT_RET_PATH);
        shell.run();
        monitorRetFile(YqModelAnalysisConstant.CBOT_RET_PATH);
        // Persist the refreshed crawler configuration record.
        updateCrawler(entity, paths, startedAt);
        // Parse every CSV in the result directory and insert; the trailing int (4)
        // is a parser parameter whose semantics live in FCIEntryCommonParse.
        FCIEntryCommonParse(paths.getResultCsvDir(), platForm, YqModelAnalysisConstant.CBOT_ADDRESS, 4);
    }

    /** Shanghai INE (国际能源交易中心) daily crawler: runs the scraper and loads the CSV results into the DB. */
    private void ineDailyCrawlData(YqCrawlerEntity entity, String platForm) {
        // Python scraper driven by the generated shell wrapper.
        String script = YqModelAnalysisConstant.ineDailyBasePath + File.separator + "ine-daily.py";
        // Derive shell/log/result file locations for this run.
        YqCrawlResultEntity paths = getResultPaths(entity, script);
        // Write the shell wrapper that launches the scraper.
        ineDailyShellScript(paths);
        Date startedAt = new Date();
        String[] launchCmd = {"sh", paths.getShellFilePath()};
        GosShell shell = new GosShell(YqModelAnalysisConstant.crawlerJobPath, launchCmd, paths.getLogPath());
        // Clear the stale completion marker, launch, and block until the marker reports done.
        MergeUtil.deleteFile(YqModelAnalysisConstant.INEDAILY_RET_PATH);
        shell.run();
        monitorRetFile(YqModelAnalysisConstant.INEDAILY_RET_PATH);
        // Persist the refreshed crawler configuration record.
        updateCrawler(entity, paths, startedAt);
        // Parse every CSV in the result directory and insert; the trailing int (2)
        // is a parser parameter whose semantics live in FCIEntryCommonParse.
        FCIEntryCommonParse(paths.getResultCsvDir(), platForm, YqModelAnalysisConstant.INEDAILY_ADDRESS, 2);
    }

    /**
     * Guangdong Provincial Bureau of Statistics crawler: runs the shared
     * stats-gd-gov.py scraper, waits for it to finish, then parses, stores and
     * archives the resulting CSV files.
     *
     * @param entity crawler configuration entity
     * @param basePath working directory of the scraper script
     * @param retPath ret marker file monitored for crawler completion
     * @param guidangDir archive directory for processed result files
     * @param address source URL recorded with the data
     * @param dataCategoryEnum data category the rows are stored under
     * @param gdEnum scraper input-category switch (passed as the -i argument)
     * @param typeEnum media category used during parsing/storage
     */
    private void gdProvStatisticsCrawlData(YqCrawlerEntity entity, String basePath, String retPath
            , String guidangDir, String address, CrawlerDataCategoreEnum dataCategoryEnum
            , CrawlerStatisticsTypeOfGDProvEnum gdEnum, CrawlerStatisticsTypeOfGDProvEnum typeEnum) {
        //crawler start time
        Date date = new Date();
        //ensure the archive directory exists
        createDirectory(guidangDir);
        //python scraper script
        String pythonScript = basePath + File.separator + "stats-gd-gov.py";
        //derive result/shell/log file locations for this run
        YqCrawlResultEntity result = getResultPaths(entity, pythonScript);
        //write the launch script
        String latestFileDate = getMaxDateFromDirectory(guidangDir);//newest file date in the archive dir (incremental crawl boundary)
        gdStatisticsShellScript(result, basePath, latestFileDate, gdEnum);
        //run the script and capture output
        String[] cmd = {"sh", result.getShellFilePath()};
        GosShell gosShell = new GosShell(YqModelAnalysisConstant.crawlerJobPath, cmd, result.getLogPath());
        //remove the stale ret marker
        MergeUtil.deleteFile(retPath);
        //launch the crawler
        gosShell.run();
        //block until the ret marker signals completion
        monitorRetFile(retPath);
        //persist the refreshed crawler configuration record
        updateCrawler(entity, result, date);
        //parse + insert + archive
        GDEntryCommonParse(result.getResultCsvDir(), guidangDir, address, dataCategoryEnum, typeEnum);
    }

    //Builds the Toutiao crawler launch script via the shared template.
    private void toutiaoShellScript(YqCrawlResultEntity result) {
        commonShellScript(result, YqModelAnalysisConstant.toutiaoBasePath);
    }

    //Builds the Weibo crawler launch script (activates the weibo-search conda env and runs the scrapy spider).
    private void weiboShellScript(YqCrawlResultEntity result) {
        String template =
                "#!/bin/bash\n" +
                        "%s\n" +
                        "conda activate %s/weibo-search;\n" +
                        "cd %s;\n" +
                        "scrapy crawl search -a keyword=%s -a output=%s > %s 2>&1 &\n";
        String script = String.format(template,
                YqModelAnalysisConstant.SOURCE_PATH,
                YqModelAnalysisConstant.CONDA_PATH,
                YqModelAnalysisConstant.weiboBasePath,
                result.getKeywordPath(),
                result.getResultCsv(),
                result.getOutputLog());
        try (BufferedWriter writer = new BufferedWriter(new FileWriter(result.getShellFilePath()))) {
            writer.write(script);
        } catch (IOException e) {
            throw new BizCommonException(".sh脚本文件写入出错");
        }
    }

    //Builds the Xiaohongshu crawler launch script (activates the news_scrapy conda env and runs the python scraper).
    private void xiaohongshuShellScript(YqCrawlResultEntity result) {
        String template =
                "#!/bin/bash\n" +
                        "%s\n" +
                        "conda activate %s/news_scrapy\n" +
                        "cd %s;\n" +
                        "python %s --platform xhs --lt cookie --type search --keywords %s --output %s > %s 2>&1 &\n";
        String script = String.format(template,
                YqModelAnalysisConstant.SOURCE_PATH,
                YqModelAnalysisConstant.CONDA_PATH,
                YqModelAnalysisConstant.redBookBasePath,
                result.getPythonScript(),
                result.getKeywordPath(),
                result.getResultCsv(),
                result.getOutputLog());
        try (BufferedWriter writer = new BufferedWriter(new FileWriter(result.getShellFilePath()))) {
            writer.write(script);
        } catch (IOException e) {
            throw new BizCommonException(".sh脚本文件写入出错");
        }
    }

    //Builds the China News Network crawler launch script via the shared template.
    private void chinaNewsNetworkShellScript(YqCrawlResultEntity result) {
        commonShellScript(result, YqModelAnalysisConstant.chinaNewsNetworkBasePath);
    }

    //Builds the Xinhuanet crawler launch script via the shared template.
    private void xinhuaShellScript(YqCrawlResultEntity result) {
        commonShellScript(result, YqModelAnalysisConstant.xinhuaBasePath);
    }

    //Builds the Tencent News crawler launch script via the shared template.
    private void tencentShellScript(YqCrawlResultEntity result) {
        commonShellScript(result, YqModelAnalysisConstant.tencentBasePath);
    }

    //Builds the Sogou-WeChat crawler launch script via the shared template.
    private void sougouWechatShellScript(YqCrawlResultEntity result) {
        commonShellScript(result, YqModelAnalysisConstant.sougouWechatBasePath);
    }

    //Builds the Sina Finance crawler launch script via the variant-2 shared template.
    private void financeSinaShellScript(YqCrawlResultEntity result) {
        commonShellScript2(result, YqModelAnalysisConstant.financeSinaBasePath);
    }

    //Builds the CBOT crawler launch script via the variant-2 shared template.
    private void cbotShellScript(YqCrawlResultEntity result) {
        commonShellScript2(result, YqModelAnalysisConstant.cbotBasePath);
    }

    //Builds the Shanghai INE crawler launch script (two output CSVs: -o1 and -o2).
    private void ineDailyShellScript(YqCrawlResultEntity result) {
        String template =
                "#!/bin/bash\n" +
                        "%s\n" +
                        "conda activate %s/news_scrapy\n" +
                        "cd %s\n" +
                        "python %s -o1 %s -o2 %s > %s 2>&1 &\n";
        String script = String.format(template,
                YqModelAnalysisConstant.SOURCE_PATH,
                YqModelAnalysisConstant.CONDA_PATH,
                YqModelAnalysisConstant.ineDailyBasePath,
                result.getPythonScript(),
                result.getResultCsv(),
                result.getResultCsv2(),
                result.getOutputLog());
        try (BufferedWriter writer = new BufferedWriter(new FileWriter(result.getShellFilePath()))) {
            writer.write(script);
        } catch (IOException e) {
            throw new BizCommonException(".sh脚本文件写入出错");
        }
    }

    //Builds the shared GD statistics-bureau crawler launch script (-i category, -o output dir, -st incremental date boundary).
    private void gdStatisticsShellScript(YqCrawlResultEntity result, String basePath, String latestFileDate, CrawlerStatisticsTypeOfGDProvEnum gdEnum) {
        String template =
                "#!/bin/bash\n" +
                        "%s\n" +
                        "conda activate %s/news_scrapy\n" +
                        "cd %s\n" +
                        "python %s -i %s -o %s --all -st %s > %s 2>&1 &\n";
        String script = String.format(template,
                YqModelAnalysisConstant.SOURCE_PATH,
                YqModelAnalysisConstant.CONDA_PATH,
                basePath,
                result.getPythonScript(),
                gdEnum.getMessage(),
                result.getResultCsvDir(),
                latestFileDate,
                result.getOutputLog());
        try (BufferedWriter writer = new BufferedWriter(new FileWriter(result.getShellFilePath()))) {
            writer.write(script);
        } catch (IOException e) {
            throw new BizCommonException(".sh脚本文件写入出错");
        }
    }

    /**
     * Blocks until the crawler signals completion through its ret marker file.
     * Protocol: empty file = still running, "0" = finished OK, any other content = failed.
     * Polls every 10 seconds and gives up after six hours.
     *
     * @param retFilePath path of the ret marker file written by the crawler
     * @throws RuntimeException when the marker file cannot be read
     */
    public void monitorRetFile(String retFilePath) {
        final long timeoutMs = 6L * 60 * 60 * 1000; // was the magic number 21600000 (6 hours)
        long start = System.currentTimeMillis();
        Path path = Paths.get(retFilePath);
        // Wait for the crawler to create the marker file.
        while (!Files.exists(path)) {
            System.out.println("ret 文件不存在，等待文件创建..." + "ret文件路径：" + retFilePath);
            try {
                TimeUnit.SECONDS.sleep(10); // re-check every 10 seconds
            } catch (InterruptedException e) {
                // Fixed: restore the interrupt status before bailing out (was silently swallowed).
                Thread.currentThread().interrupt();
                e.printStackTrace();
                return;
            }
        }
        try {
            while (true) {
                String content = new String(Files.readAllBytes(path), StandardCharsets.UTF_8).trim();
                // Fixed: the timeout now exits immediately instead of falling through and
                // also printing a misleading "crawler running" message.
                if ((System.currentTimeMillis() - start) > timeoutMs) {
                    System.out.println("ret文件路径：" + retFilePath + "爬虫任务超时，详情请查看爬虫日志");
                    return;
                }
                if (content.isEmpty()) {
                    System.out.println("爬虫正在运行..." + "ret文件路径：" + retFilePath);
                } else if ("0".equals(content)) {
                    System.out.println("爬虫任务结束。" + "ret文件路径：" + retFilePath + "  耗时：" + (System.currentTimeMillis() - start));
                    return;
                } else {
                    System.out.println("ret文件路径：" + retFilePath + "爬虫任务出错，详情请查看爬虫日志");
                    return;
                }
                TimeUnit.SECONDS.sleep(10); // still running: poll again in 10 seconds
            }
        } catch (InterruptedException e) {
            // Fixed: restore the interrupt status before rethrowing.
            Thread.currentThread().interrupt();
            e.printStackTrace();
            throw new RuntimeException("监控 ret 文件时出错" + "ret文件路径：" + retFilePath, e);
        } catch (IOException e) {
            e.printStackTrace();
            throw new RuntimeException("监控 ret 文件时出错" + "ret文件路径：" + retFilePath, e);
        }
    }

    /**
     * Parses the Toutiao result CSV into media entities.
     * Columns: num,key,id,title,pubtime,media,like,collect,share,comment_count,content,url
     * (columns 0 "num", 1 "key" and 7 "collect" are intentionally unused).
     * Rows with a blank content column are skipped; a malformed row is logged and
     * skipped without aborting the remaining rows.
     *
     * @param csvFilePath path of the crawler's CSV result file
     * @return parsed entities; empty list when the file does not exist
     */
    private List<YqMediaEntity> toutiaoParseCsvFile(String csvFilePath) {
        List<YqMediaEntity> newsList = new ArrayList<>();
        if (!Files.exists(Paths.get(csvFilePath))) {
            return newsList; // crawler produced no file — nothing to parse
        }
        // Hoisted out of the loop: SimpleDateFormat is costly to construct (and not
        // thread-safe, but this instance stays method-local).
        SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm");
        try (CSVReader reader = new CSVReader(new FileReader(csvFilePath))) {
            String[] line;
            boolean isFirstLine = true;
            while ((line = reader.readNext()) != null) {
                if (isFirstLine) {
                    isFirstLine = false;
                    continue; // skip header row
                }
                try {
                    String originId = line[2];
                    String title = line[3];
                    String pubtime = line[4];
                    String publishUserName = line[5];
                    int like = parseInteger(line[6]);
                    int share = parseInteger(line[8]);
                    // Fixed: was Integer.parseInt — a blank comment_count cell threw and,
                    // with the old file-level catch, aborted all remaining rows.
                    int commentCount = parseInteger(line[9]);
                    String content = line[10];
                    String link = line[11];
                    // Skip rows with no article body.
                    if (StringUtils.isBlank(content)) {
                        continue;
                    }
                    String digest = content.length() <= 200 ? content : content.substring(0, 200);
                    Date publishTime = dateFormat.parse(pubtime);
                    // NOTE(review): `like` contributes twice (like + like * 15); confirm the
                    // intended hotness weighting before simplifying.
                    int hot = like + commentCount * 10 + like * 15 + share * 20;
                    YqMediaEntity news = new YqMediaEntity(title, digest, content, "今日头条", hot, "", publishTime, publishUserName, link, originId);
                    newsList.add(news);
                } catch (Exception rowEx) {
                    // One malformed row must not discard the rest of the file.
                    rowEx.printStackTrace();
                }
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
        return newsList;
    }

    // Weibo CSV parser.
    // Columns: 关键词,id,bid,user_id,用户昵称,微博正文,头条文章url,发布位置,艾特用户,话题,转发数,评论数,点赞数,发布时间,发布工具,微博图片url,微博视频url,retweet_id,ip,user_authentication
    // The entity title is the first "#topic#" found in the post body, falling back to the
    // search keyword (column 0) when the body carries no topic markers. Rows with a blank
    // body are skipped; a missing file yields an empty list.
    public List<YqMediaEntity> weiboParseCsvFile(String csvFilePath) {
        List<YqMediaEntity> newsList = new ArrayList<>();
        // Missing file: return empty list rather than failing the crawl round.
        if (!Files.exists(Paths.get(csvFilePath))) {
            return newsList;
        }
        // Hoisted out of the loop: one formatter per file, not per row.
        SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm");
        try (CSVReader reader = new CSVReader(new FileReader(csvFilePath))) {
            String[] line;
            boolean isFirstLine = true;
            while ((line = reader.readNext()) != null) {
                if (isFirstLine) {
                    isFirstLine = false;
                    continue; // skip the header row
                }
                String keyword = line[0];
                String originId = line[1];
                String publishUserName = line[4];
                String content = line[5];
                int share = parseInteger(line[10]);
                int commentCount = parseInteger(line[11]);
                int like = parseInteger(line[12]);
                String pubtime = line[13];
                String ip = line[18];
                String link = line[20];
                // Skip rows without body text.
                if (StringUtils.isBlank(content)) {
                    continue;
                }
                String digest = content.length() <= 200 ? content : content.substring(0, 200);
                Date publishTime = dateFormat.parse(pubtime);
                // NOTE(review): `like` contributes twice (1x + 15x) — shared across all
                // platform parsers; confirm the weighting is intentional.
                int hot = like + commentCount * 10 + like * 15 + share * 20;
                // Extract the text between the first pair of '#' as the title.
                int firstHash = content.indexOf('#');
                int secondHash = content.indexOf('#', firstHash + 1);
                String name = (firstHash != -1 && secondHash != -1) ? content.substring(firstHash + 1, secondHash) : keyword;
                YqMediaEntity news = new YqMediaEntity(name, digest, content, "微博", hot, ip, publishTime, publishUserName, link, originId);
                newsList.add(news);
            }
        } catch (Exception e) {
            e.printStackTrace(); // best-effort: keep whatever parsed before the error
        }
        return newsList;
    }

    // Xiaohongshu (小红书) CSV parser.
    // Columns: note_id,type,title,desc,video_url,time,last_update_time,user_id,nickname,avatar,liked_count,collected_count,comment_count,share_count,ip_location,image_list,tag_list,last_modify_ts,note_url
    // Returns one YqMediaEntity per data row; rows with a blank body are skipped and a
    // missing file yields an empty list.
    public List<YqMediaEntity> xiaohongshuParseCsvFile(String csvFilePath) {
        List<YqMediaEntity> newsList = new ArrayList<>();
        // Missing file: return empty list rather than failing the crawl round.
        if (!Files.exists(Paths.get(csvFilePath))) {
            return newsList;
        }
        // Hoisted out of the loop: one formatter per file, not per row.
        SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        try (CSVReader reader = new CSVReader(new FileReader(csvFilePath))) {
            String[] line;
            boolean isFirstLine = true;
            while ((line = reader.readNext()) != null) {
                if (isFirstLine) {
                    isFirstLine = false;
                    continue; // skip the header row
                }
                String originId = line[0];
                String title = line[2];
                String content = line[3];
                String pubtime = line[5];
                String publishUserName = line[8];
                int like = parseInteger(line[10]);
                int commentCount = parseInteger(line[12]);
                // Was Integer.parseInt: a single non-numeric cell aborted the whole file.
                // parseInteger matches the other count columns and degrades gracefully.
                int share = parseInteger(line[13]);
                String ipLocation = line[14];
                String link = line[18];
                // Skip rows without body text.
                if (StringUtils.isBlank(content)) {
                    continue;
                }
                String digest = content.length() <= 200 ? content : content.substring(0, 200);
                Date publishTime = dateFormat.parse(pubtime);
                // NOTE(review): `like` contributes twice (1x + 15x) — shared across all
                // platform parsers; confirm the weighting is intentional.
                int hot = like + commentCount * 10 + like * 15 + share * 20;
                YqMediaEntity news = new YqMediaEntity(title, digest, content, "小红书", hot, ipLocation, publishTime, publishUserName, link, originId);
                newsList.add(news);
            }
        } catch (Exception e) {
            e.printStackTrace(); // best-effort: keep whatever parsed before the error
        }
        return newsList;
    }

    // China News Network (中国新闻网) CSV parser.
    // Columns: key,id,title,pubtime,media,content,url,ip
    // This source exposes no engagement counters, so hot is fixed at 0. Rows with a
    // blank body are dropped; a missing file yields an empty list.
    public List<YqMediaEntity> chinaNewsNetworkParseCsvFile(String csvFilePath) {
        List<YqMediaEntity> result = new ArrayList<>();
        if (!Files.exists(Paths.get(csvFilePath))) {
            return result; // nothing crawled for this platform
        }
        // One formatter for the whole file; SimpleDateFormat stays method-local.
        SimpleDateFormat fmt = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        try (CSVReader csv = new CSVReader(new FileReader(csvFilePath))) {
            csv.readNext(); // consume the header row
            String[] row;
            while ((row = csv.readNext()) != null) {
                String content = row[5];
                if (StringUtils.isBlank(content)) {
                    continue; // skip rows without body text
                }
                String digest = content.length() > 200 ? content.substring(0, 200) : content;
                Date publishTime = fmt.parse(row[3]);
                int hot = 0;
                result.add(new YqMediaEntity(row[2], digest, content, "中国新闻网", hot,
                        row[7], publishTime, row[4], row[6], row[1]));
            }
        } catch (Exception e) {
            e.printStackTrace(); // best-effort: keep whatever parsed before the error
        }
        return result;
    }

    // Xinhua (新华网) CSV parser.
    // Columns: key,id,title,pubtime,media,content,url
    // No engagement counters and no location column, so hot is 0 and the city field
    // is empty. Rows with a blank body are dropped; a missing file yields an empty list.
    public List<YqMediaEntity> xinhuaParseCsvFile(String csvFilePath) {
        List<YqMediaEntity> result = new ArrayList<>();
        if (!Files.exists(Paths.get(csvFilePath))) {
            return result; // nothing crawled for this platform
        }
        // One formatter for the whole file; SimpleDateFormat stays method-local.
        SimpleDateFormat fmt = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        try (CSVReader csv = new CSVReader(new FileReader(csvFilePath))) {
            csv.readNext(); // consume the header row
            String[] row;
            while ((row = csv.readNext()) != null) {
                String content = row[5];
                if (StringUtils.isBlank(content)) {
                    continue; // skip rows without body text
                }
                String digest = content.length() > 200 ? content.substring(0, 200) : content;
                Date publishTime = fmt.parse(row[3]);
                int hot = 0;
                String city = "";
                result.add(new YqMediaEntity(row[2], digest, content, "新华网", hot,
                        city, publishTime, row[4], row[6], row[1]));
            }
        } catch (Exception e) {
            e.printStackTrace(); // best-effort: keep whatever parsed before the error
        }
        return result;
    }

    // Tencent News (腾讯新闻) CSV parser.
    // Columns: num,key,id,title,pubtime,media,like,collect,share,comment_count,content,url,ip
    // Returns one YqMediaEntity per data row; rows with a blank body are skipped and a
    // missing file yields an empty list.
    public List<YqMediaEntity> tencentParseCsvFile(String csvFilePath) {
        List<YqMediaEntity> newsList = new ArrayList<>();
        // Missing file: return empty list rather than failing the crawl round.
        if (!Files.exists(Paths.get(csvFilePath))) {
            return newsList;
        }
        // Hoisted out of the loop: one formatter per file, not per row.
        SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        try (CSVReader reader = new CSVReader(new FileReader(csvFilePath))) {
            String[] line;
            boolean isFirstLine = true;
            while ((line = reader.readNext()) != null) {
                if (isFirstLine) {
                    isFirstLine = false;
                    continue; // skip the header row
                }
                String originId = line[2];
                String title = line[3];
                String pubtime = line[4];
                String publishUserName = line[5];
                int like = parseInteger(line[6]);
                int share = parseInteger(line[8]);
                // Was Integer.parseInt: a single non-numeric cell aborted the whole file.
                // parseInteger matches the other count columns and degrades gracefully.
                int commentCount = parseInteger(line[9]);
                String content = line[10];
                String link = line[11];
                // Skip rows without body text.
                if (StringUtils.isBlank(content)) {
                    continue;
                }
                String digest = content.length() <= 200 ? content : content.substring(0, 200);
                Date publishTime = dateFormat.parse(pubtime);
                // NOTE(review): `like` contributes twice (1x + 15x) — shared across all
                // platform parsers; confirm the weighting is intentional.
                int hot = like + commentCount * 10 + like * 15 + share * 20;
                YqMediaEntity news = new YqMediaEntity(title, digest, content, "腾讯新闻", hot, "", publishTime, publishUserName, link, originId);
                newsList.add(news);
            }
        } catch (Exception e) {
            e.printStackTrace(); // best-effort: keep whatever parsed before the error
        }
        return newsList;
    }

    // Sogou WeChat (搜狗微信) CSV parser.
    // Columns: key,id,title,pubtime,media,content,url,ip
    // No engagement counters are available, so hot is fixed at 0. Rows with a blank
    // body are dropped; a missing file yields an empty list.
    public List<YqMediaEntity> sougouWechatParseCsvFile(String csvFilePath) {
        List<YqMediaEntity> result = new ArrayList<>();
        if (!Files.exists(Paths.get(csvFilePath))) {
            return result; // nothing crawled for this platform
        }
        // One formatter for the whole file; SimpleDateFormat stays method-local.
        SimpleDateFormat fmt = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        try (CSVReader csv = new CSVReader(new FileReader(csvFilePath))) {
            csv.readNext(); // consume the header row
            String[] row;
            while ((row = csv.readNext()) != null) {
                String content = row[5];
                if (StringUtils.isBlank(content)) {
                    continue; // skip rows without body text
                }
                String digest = content.length() > 200 ? content.substring(0, 200) : content;
                Date publishTime = fmt.parse(row[3]);
                int hot = 0;
                result.add(new YqMediaEntity(row[2], digest, content, "搜狗微信", hot,
                        row[7], publishTime, row[4], row[6], row[1]));
            }
        } catch (Exception e) {
            e.printStackTrace(); // best-effort: keep whatever parsed before the error
        }
        return result;
    }

    // Shared parse-and-persist routine for Sina Finance (FINANCESINA), CBOT and
    // INE daily drops: reads every CSV under csvFilePathOrDir, turns column n of
    // each data row into a macro-indicator entity attributed to yesterday,
    // batch-saves the entities and back-fills the grab log with the row count.
    private void FCIEntryCommonParse(String csvFilePathOrDir, String platForm, String address, int n) {
        Date now = new Date();
        Calendar cal = Calendar.getInstance();
        cal.setTime(now);
        cal.add(Calendar.DAY_OF_MONTH, -1);
        Date yesterday = cal.getTime(); // data rows are attributed to the previous day
        // Record the grab attempt first so its id can be attached to every entity.
        DcDataGrabLogEntity grabLog = saveLog(now, platForm, address);
        List<DcDataMacroIndicatorDataEntity> entities = new ArrayList<>();
        // Walk every CSV file found under the given directory.
        for (String csvPath : getCsvFilePaths(csvFilePathOrDir)) {
            try (CSVReader reader = new CSVReader(new FileReader(csvPath))) {
                reader.readNext(); // consume the header row
                String[] row;
                while ((row = reader.readNext()) != null) {
                    String indicatorName = row[0];
                    // Strip full-width spaces and surrounding blanks before the numeric check.
                    String indicatorValue = row[n].replace("\u3000", "").trim();
                    if (!isFloat(indicatorValue)) {
                        continue; // non-numeric cells are ignored
                    }
                    DcDataMacroIndicatorDataEntity entity = new DcDataMacroIndicatorDataEntity();
                    entity.setValue(Float.parseFloat(indicatorValue)); // indicator value
                    entity.setDataSource(platForm);                    // data source
                    entity.setCollectionTime(yesterday);               // data date
                    entity.setImportTime(now);                         // import time
                    entity.setRepoStatus(MacroRepoStatusEnum.NO);      // not yet archived
                    entity.setImportId(grabLog.getId());               // link back to the grab log
                    entity.setName(indicatorName);                     // indicator name
                    entity.setCollectWay(MacroCollectWayEnum.grab);    // collected by crawler
                    entity.setGrabTime(now);                           // grab time
                    entity.setDataCategory("");                        // no category for these feeds
                    // City/district names double as the area id; everything else gets none.
                    entity.setAreaId((indicatorName.contains("市") || indicatorName.contains("区")) ? indicatorName : "");
                    entity.setCreatorTime(now);
                    entities.add(entity);
                }
            } catch (Exception e) {
                e.printStackTrace(); // a broken file must not stop the remaining files
            }
        }
        System.out.println("保存的总实体数::::" + entities.size());
        // Persist all rows in one batch, then back-fill the grab count on the log.
        dcDataMacroIndicatorDataService.saveBatch(entities);
        grabLog.setTotalNum(String.valueOf(entities.size()));
        dcDataBigmercGrabLogService.updateById(grabLog);
        System.out.println("保存的日志::::" + grabLog);
    }

    // Recursively searches a nested map for productName and returns the colon-joined
    // key path from the root down to the match (e.g. "a:b:c"), or "" when absent.
    // Values are expected to be either nested Map<String, Object> levels or leaves.
    private static String findPathInMap(Map<String, Object> map, String productName) {
        for (Map.Entry<String, Object> entry : map.entrySet()) {
            String key = entry.getKey();
            Object value = entry.getValue();
            if (key.equals(productName)) {
                return key; // found the target product at this level
            }
            if (value instanceof Map) {
                // Narrowest possible scope for the unavoidable erased-generics cast;
                // the product maps are maintained as Map<String, Object> throughout.
                @SuppressWarnings("unchecked")
                Map<String, Object> child = (Map<String, Object>) value;
                String childPath = findPathInMap(child, productName);
                if (!childPath.isEmpty()) {
                    // Prepend this level's key to the path found below.
                    return key + ":" + childPath;
                }
            }
        }
        return "";  // not found anywhere under this map
    }

    // Reverse alias lookup: returns the alias (map key) whose list of original
    // spellings contains the given value, or "" when no list contains it.
    private static String findOriginalValueInAlias(Map<String, Object> map, String value) {
        for (Map.Entry<String, Object> entry : map.entrySet()) {
            Object candidates = entry.getValue();
            if (!(candidates instanceof List)) {
                continue; // non-list values can never hold spellings
            }
            @SuppressWarnings("unchecked")
            List<String> spellings = (List<String>) candidates;
            if (spellings.contains(value)) {
                return entry.getKey(); // first alias that lists this spelling wins
            }
        }
        return ""; // no alias lists this value
    }

    // Normalizes a raw indicator-name cell: strips Chinese section numbering
    // ("一、"), leading digit prefixes ("1." / "2、"), the word "其中", all spaces
    // (half- and full-width) and tabs, and decorative punctuation (：:*▲#).
    // Null or empty input yields "".
    private static String cleanValue(String value) {
        if (value == null || value.isEmpty()) {
            return "";
        }
        // Chinese-numbered headings ("一、xxx"): drop everything up to and including "、".
        if (value.matches(".*[一二三四五六七八九十]+.*、.*")) {
            value = value.substring(value.indexOf("、") + 1);
        }
        // Drop a leading digit prefix such as "1." or "2、" (regex, kept as replaceAll).
        value = value.replaceAll("^[0-9]+[.,、]?", "");
        // Literal removals below use replace(): same result as replaceAll() for these
        // strings but without compiling a regex on every call.
        value = value.replace("其中", "");
        // Remove normal spaces, full-width spaces and tabs.
        value = value.replace(" ", "").replace("　", "").replace("\t", "");
        // Remove decorative punctuation.
        value = value.replace("：", "").replace(":", "")
                .replace("*", "").replace("▲", "")
                .replace("#", "");
        return value;
    }

    // Maps a raw CSV header cell to its canonical header name: returns the key of
    // the first map entry whose List value contains s, or "" when no list does.
    public String getKeyOrSelf(Map<String, Object> headProductMap, String s) {
        for (Map.Entry<String, Object> entry : headProductMap.entrySet()) {
            Object candidates = entry.getValue();
            if (!(candidates instanceof List)) {
                // Defensive: the header maps are expected to hold only lists.
                System.out.println("headProductMap 中的值不是 List 类型：" + candidates);
                continue;
            }
            @SuppressWarnings("unchecked") // header maps hold List<String> values
            List<String> rawHeaders = (List<String>) candidates;
            if (rawHeaders.contains(s)) {
                return entry.getKey();
            }
        }
        return ""; // s appears in none of the alias lists
    }

    // Developer debug harness, not part of the scheduled crawl: scans a hard-coded
    // local directory of Guangdong quarterly CSV exports and prints every indicator
    // name that cannot be resolved through the global quarter product maps (i.e. a
    // likely newly-added indicator the maps don't cover yet).
    // NOTE(review): hard-coded Windows path ("C:\Users\feng\...") — this only works
    // on the original developer's machine; consider removing before release.
    public static void main(String[] args) {
        List<String> resultCsvList = getCsvFilePaths("C:\\Users\\feng\\Desktop\\gdQuarter");
        for (String path : resultCsvList) {
            try (CSVReader reader = new CSVReader(new FileReader(path))) {
//            try (CSVReader reader = new CSVReader(new FileReader("C:\\Users\\feng\\Desktop\\gdTotal\\2013年10月社会消费品零售总额_2013-12-28.csv"))) {
//                System.out.println(path);
                String[] line;
                boolean isFirstLine = true;
                while ((line = reader.readNext()) != null) {
                    if (isFirstLine) {
                        isFirstLine = false;
                        continue; // skip the header row
                    }
                    String indicatorName = line[0];
                    // Skip placeholder ("甲"), empty, and footnote ("注：...") rows.
                    if("甲".equals(indicatorName) || indicatorName.isEmpty() || indicatorName.startsWith("注：")) {
                        continue;
                    }
                    // Section headings ("一、...", "二、...") carry no data — skip them.
                    if(indicatorName.matches(".*[一二三四五六七八九十]+.*、.*")){
                        continue;
                    }
                    for (int i = 1; i < line.length; i++) {
//                        String cleanIndicatorName = cleanValue(indicatorName);
//                        String originalValueInAlias = findOriginalValueInAlias(ProductMapUtils.GD_INDUSTRY_PRODUCT_MAP_ALIAS, cleanIndicatorName);
//                        String productPath = findPathInMap(ProductMapUtils.GD_INDUSTRY_PRODUCT_MAP, originalValueInAlias.isEmpty() ? cleanIndicatorName : originalValueInAlias);

                        // Resolve through the alias table first, then the product map;
                        // an empty path means the indicator is unknown to the maps.
                        String quarterCleanIndicatorName = cleanValue(indicatorName);
                        String quarterAlias = findOriginalValueInAlias(ProductMapUtils.GD_QUARTER_PRODUCT_MAP_ALIAS, quarterCleanIndicatorName);
                        String quarterProductPath = findPathInMap(ProductMapUtils.GD_QUARTER_PRODUCT_MAP, quarterAlias.isEmpty() ? quarterCleanIndicatorName : quarterAlias);
                        if(StringUtils.isEmpty(quarterProductPath)){
                            System.out.println("指标名："+indicatorName+" 不存在于全局map中，可能为新增项。" + path);
                        }
                    }
                }
            } catch (Exception e) {
                System.out.println("错误文件：：：：");
                e.printStackTrace();
            }
        }

    }

    // Builds indicator-data entities from one CSV data row (one row fans out into one
    // entity per value column). headLine is the header row, line the data row,
    // secondLineFirstColumn the first cell of the sheet's second line (used by ADD to
    // recognize the province-total row), typeEnum selects the sheet layout, and csvPath
    // is used only in diagnostics. Rows failing validation yield an empty list.
    private List<DcDataMacroIndicatorDataEntity> getLineList(String[] headLine, String[] line, String secondLineFirstColumn, CrawlerStatisticsTypeOfGDProvEnum typeEnum, String csvPath){
        List<DcDataMacroIndicatorDataEntity> dateList = new ArrayList<>();
        // Indicator name is always the row's first cell.
        String indicatorName = line[0];
        // Skip placeholder "甲" rows (likely test data), footnotes ("注：..."), and empty names.
        if("甲".equals(indicatorName) || indicatorName.startsWith("注：") || indicatorName.isEmpty()){
            return dateList;
        }
        switch (typeEnum) {
            case SITUATION:
                // Remember the current first-level indicator name from "一、xxx"-style rows.
                // NOTE(review): stored in the instance field situationFirstIndicatorName, so
                // this method is stateful across rows and not thread-safe — confirm rows of
                // one file are always processed sequentially on a single thread.
                if(indicatorName.contains("、")){
                    situationFirstIndicatorName = indicatorName.substring(indicatorName.indexOf("、") + "、".length());
                }
                for (int i = 1; i < line.length; i++) {
                    // Canonical header for this column.
                    String realHead = getKeyOrSelf(ProductMapUtils.GD_SITUATION_HEAD_PRODUCT_MAP, headLine[i]);
                    // Columns absent from the global header map are new or discarded — skip.
                    if(realHead.isEmpty()){
                        System.out.println("文件：" + csvPath + " 表头列[" + headLine[i] + "]为新增列或舍弃列");
                        continue;
                    }
                    // Suffix 1: sown-area / output columns represent quarterly values.
                    String montageString = ("播种面积".equals(realHead) || "产量".equals(realHead)) ? "当季值" : "";
                    // Suffix 2: header text with bracketed units removed, plus suffix 1.
                    String montageString2 = ":" + headLine[i].replaceAll("（.*?）|\\(.*?\\)", "").trim()
                            + (StringUtils.isBlank(montageString) ? "" : (":" + montageString));
                    // Final indicator name: first-level name, optional second-level part, suffixes.
                    String name = situationFirstIndicatorName + (indicatorName.contains(situationFirstIndicatorName) ?
                            "" : (":" + indicatorName.substring(indicatorName.indexOf(".") + ".".length())))
                            + montageString2;
                    // Original (raw) indicator name kept for traceability.
                    String originalName = indicatorName + "/" + headLine[i];
                    // Cell value with full-width spaces and surrounding blanks removed.
                    String value = line[i].replace("\u3000", "").trim();
                    // Unit is positional: col 1 = 万亩 (area), col 2 = 万吨 (tonnage), rest = %.
                    String unitType;
                    if(i == 1){
                        unitType = "万亩";
                    }else if(i == 2){
                        unitType = "万吨";
                    }else {
                        unitType = "%";
                    }
                    // Region of the data.
                    String area = "广东";
                    // Only values that parse as floats are added to dateList.
                    addDataIfValid(originalName, name, value, unitType, area, dateList);
                }
                break;
            case ADD:
                for (int i = 1; i < line.length; i++) {
                    String realHead = getKeyOrSelf(ProductMapUtils.GD_ADD_HEAD_PRODUCT_MAP, headLine[i]);
                    if(realHead.isEmpty()){
                        System.out.println("文件：" + csvPath + " 表头列[" + headLine[i] + "]为新增列或舍弃列");
                        continue;
                    }
                    String montageString = "全省总计";
                    String name;
                    // Province-total rows use the plain "全省总计:<header>" name.
                    if(montageString.equals(indicatorName) || secondLineFirstColumn.equals(indicatorName)){
                        name = montageString + ":" + realHead;
                    }else {
                        // Strip markers ("#", "在总计中:") and full-width spaces from the name.
                        String modifyIndicatorName = indicatorName.replace("#", "")
                                .replace("在总计中:", "")
                                .replace("在总计中：", "")
                                .replace("\u3000", "")
                                .trim();
                        // NOTE(review): no ":" between the indicator name and "全省总计" here,
                        // unlike the total-row branch above — confirm this is intentional.
                        name = modifyIndicatorName + montageString + ":" + realHead;
                    }
                    String originalName = indicatorName + "/" + headLine[i];
                    String value = line[i].replace("\u3000", "").trim();
                    // Units: absolute columns in 亿元, year-on-year columns in %.
                    String unitType = "";
                    if("当月值".equals(realHead) || "累计值".equals(realHead)){
                        unitType = "亿元";
                    }else if("当月同比".equals(realHead) || "累计同比".equals(realHead)){
                        unitType = "%";
                    }
                    // City/district rows carry their own cleaned name as the area.
                    String area = (indicatorName.contains("市") || indicatorName.contains("区")) ? indicatorName.trim().replaceAll("[#:：\u3000]|在总计中", "") : "广东";
                    addDataIfValid(originalName, name, value, unitType, area, dateList);
                }
                break;
            case PRODUCTION:
                // Section headings ("一、...") carry no data — skip the whole row.
                if(indicatorName.matches(".*[一二三四五六七八九十]+.*、.*")){
                    break;
                }
                String productionCleanIndicatorName = cleanValue(indicatorName);
                // Resolve aliases first: returns the real name for a known alias, else "".
                String productionAlias = findOriginalValueInAlias(ProductMapUtils.GD_PRODUCTION_PRODUCT_MAP_ALIAS, productionCleanIndicatorName);
                String productionProductPath = findPathInMap(ProductMapUtils.GD_PRODUCTION_PRODUCT_MAP, productionAlias.isEmpty() ? productionCleanIndicatorName : productionAlias);
                if(productionProductPath.isEmpty()){
                    System.out.println("文件：" + csvPath + " 指标名[" + indicatorName + "]为新增指标或舍弃指标");
                    break;
                }
                // Column 1 holds the unit text, so data columns start at index 2.
                for (int i = 2; i < line.length; i++) {
                    String realHead = getKeyOrSelf(ProductMapUtils.GD_PRODUCTION_HEAD_PRODUCT_MAP, headLine[i]);
                    if(realHead.isEmpty()){
                        System.out.println("文件：" + csvPath + " 表头列[" + headLine[i] + "]为新增列或舍弃列");
                        continue;
                    }
                    String originalName = indicatorName + "/" + headLine[i];
                    String name = productionProductPath + ":" + realHead;
                    String value = line[i].replace("\u3000", "").trim();
                    String area = "广东";
                    // Growth/percentage columns are %; otherwise use the row's own unit cell.
                    String unitType = (headLine[i].contains("%") || headLine[i].contains("增长") || headLine[i].contains("增 长")) ? "%" : line[1];
                    addDataIfValid(originalName, name, value, unitType, area, dateList);
                }
                break;
            case INDUSTRY:
                String industryCleanIndicatorName = cleanValue(indicatorName);
                String industryAlias = findOriginalValueInAlias(ProductMapUtils.GD_INDUSTRY_PRODUCT_MAP_ALIAS, industryCleanIndicatorName);
                String industryProductPath = findPathInMap(ProductMapUtils.GD_INDUSTRY_PRODUCT_MAP, industryAlias.isEmpty() ? industryCleanIndicatorName : industryAlias);
                if(industryProductPath.isEmpty()){
                    System.out.println("文件：" + csvPath + " 指标名[" + indicatorName + "]为新增指标或舍弃指标");
                    break;
                }
                // Column 1 holds the unit text, so data columns start at index 2.
                for (int i = 2; i < line.length; i++) {
                    String realHead = getKeyOrSelf(ProductMapUtils.GD_INDUSTRY_HEAD_PRODUCT_MAP, headLine[i]);
                    if(realHead.isEmpty()){
                        System.out.println("文件：" + csvPath + " 表头列[" + headLine[i] + "]为新增列或舍弃列");
                        continue;
                    }
                    String originalName = indicatorName + "/" + headLine[i];
                    String name;
                    // NOTE(review): cumulative columns are concatenated WITHOUT ":" while all
                    // other columns get one — confirm this asymmetry is intentional.
                    if("累计值".equals(realHead) || "累计同比".equals(realHead)){
                        name = industryProductPath + realHead;
                    }else{
                        name = industryProductPath + ":" + realHead;
                    }
                    String value = line[i].replace("\u3000", "").trim();
                    String area = "广东";
                    String unitType = (headLine[i].contains("%") || headLine[i].contains("增长") || headLine[i].contains("增 长")) ? "%" : line[1];
                    addDataIfValid(originalName, name, value, unitType, area, dateList);
                }
                break;
            case TOTAL:
                // Section headings ("一、...") carry no data — skip the whole row.
                if(indicatorName.matches(".*[一二三四五六七八九十]+.*、.*")){
                    break;
                }
                String cleanIndicatorNameTotal = cleanValue(indicatorName);
                String totalProductPath = findPathInMap(ProductMapUtils.GD_TOTAL_PRODUCT_MAP, cleanIndicatorNameTotal);
                if(totalProductPath.isEmpty()){
                    System.out.println("文件：" + csvPath + " 指标名[" + indicatorName + "]为新增指标或舍弃指标");
                    break;
                }
                for (int i = 1; i < line.length; i++) {
                    String realHead = getKeyOrSelf(ProductMapUtils.GD_TOTAL_HEAD_PRODUCT_MAP, headLine[i]);
                    if(realHead.isEmpty()){
                        System.out.println("文件：" + csvPath + " 表头列[" + headLine[i] + "]为新增列或舍弃列");
                        continue;
                    }
                    // Retail-total root indicators use the bare header as the name.
                    String name = ("社会消费品零售额".equals(totalProductPath) || "社会消费品零售总额".equals(totalProductPath)) ?
                            realHead : totalProductPath + ":" + realHead;
                    String originalName = indicatorName + "/" + headLine[i];
                    String value = line[i].replace("\u3000", "").trim();
                    String unitType = (headLine[i].contains("%") || headLine[i].contains("增长") || headLine[i].contains("增 长")) ? "%" : "万元";
                    String area = "广东";
                    addDataIfValid(originalName, name, value, unitType, area, dateList);
                }
                break;
            case QUARTER:
                String quarterCleanIndicatorName = cleanValue(indicatorName);
                String quarterAlias = findOriginalValueInAlias(ProductMapUtils.GD_QUARTER_PRODUCT_MAP_ALIAS, quarterCleanIndicatorName);
                String quarterProductPath = findPathInMap(ProductMapUtils.GD_QUARTER_PRODUCT_MAP, quarterAlias.isEmpty() ? quarterCleanIndicatorName : quarterAlias);
                if(quarterProductPath.isEmpty()){
                    System.out.println("文件：" + csvPath + " 指标名[" + indicatorName + "]为新增指标或舍弃指标");
                    break;
                }
                for (int i = 1; i < line.length; i++) {
                    String realHead = getKeyOrSelf(ProductMapUtils.GD_QUARTER_HEAD_PRODUCT_MAP, headLine[i]);
                    if(realHead.isEmpty()){
                        System.out.println("文件：" + csvPath + " 表头列[" + headLine[i] + "]为新增列或舍弃列");
                        continue;
                    }
                    String originalName = indicatorName + "/" + headLine[i];
                    // GDP root indicator uses the bare header; sub-indicators keep their path.
                    String name = "广东:GDP:"
                            + ("地区生产总值（亿元）".equals(quarterProductPath) ? realHead : (quarterProductPath + ":" + realHead));
                    String value = line[i].replace("\u3000", "").trim();
                    String unitType = (headLine[i].contains("%") || headLine[i].contains("增长") || headLine[i].contains("增 长")) ? "%" : "亿元";
                    String area = "广东";
                    addDataIfValid(originalName, name, value, unitType, area, dateList);
                }
                break;
            default:
                throw new BizCommonException("不存在该媒体");
        }
        return dateList;
    }

    //Validates the indicator value and, when it parses as a float, appends a populated entity to dateList.
    private void addDataIfValid(String originalName, String name, String value, String unitType, String area, List<DcDataMacroIndicatorDataEntity> dateList) {
        if (!isFloat(value)) {
            return; // non-numeric cells (dashes, blanks, footnotes) are silently dropped
        }
        DcDataMacroIndicatorDataEntity entity = new DcDataMacroIndicatorDataEntity();
        entity.setOriginalName(originalName); // raw "indicator/column" label from the csv
        entity.setName(name); // normalized indicator name
        entity.setValue(Float.parseFloat(value)); // indicator value
        entity.setUnitType(unitType); // indicator unit
        entity.setAreaId(area); // data region
        dateList.add(entity);
    }

    //Shared parse -> ingest -> archive pipeline for the six Guangdong Statistics Bureau data categories.
    //Order matters: the grab-log row is saved first so its id can be back-filled into every data row,
    //and archiving runs last so a failed parse does not mark files as already processed.
    private void GDEntryCommonParse(String csvFilePathOrDir, String guidangDir, String address, CrawlerDataCategoreEnum dataCategoryEnum, CrawlerStatisticsTypeOfGDProvEnum typeEnum) {
        Date date = new Date();
        //Persist a grab-log row up front; its id is stamped on every indicator row below
        DcDataGrabLogEntity log = saveLog(date, "广东省统计局", address);
        //Absolute paths of every .csv directly under csvFilePathOrDir
        List<String> resultCsvList = getCsvFilePaths(csvFilePathOrDir);
        //Dedupe: drop files whose name already exists in the archive directory (already ingested)
        removeDuplicateFiles(resultCsvList, guidangDir);
        List<DcDataMacroIndicatorDataEntity> dateList = new ArrayList<>();
        for (String csvPath : resultCsvList) {
            try (CSVReader reader = new CSVReader(new FileReader(csvPath))) {
                String[] headLine = null;
                String[] line;
                boolean isFirstLine = true;
                String secondLineFirstColumn = null; // first column of the first data row (some layouts key off it)
                while ((line = reader.readNext()) != null) {
                    if (isFirstLine) {
                        headLine = line;
                        isFirstLine = false;
                        continue; // skip the header row
                    }
                    // Capture the first column of the second file line (first data row), once
                    if (secondLineFirstColumn == null) {
                        if (line.length > 0) { // guard against an empty row
                            secondLineFirstColumn = line[0];
                        }
                    }
                    //One csv row expands into multiple indicator entities (one per value column)
                    List<DcDataMacroIndicatorDataEntity> lineList = getLineList(headLine, line, secondLineFirstColumn, typeEnum, csvPath);
                    for (DcDataMacroIndicatorDataEntity data : lineList) {
                        data.setDataSource("统计局");//data source label
                        data.setCollectionTime(getLastDayOfMonth(csvPath));//data date derived from the file name
                        data.setImportTime(date);//import time
                        data.setGrabTime(date);//grab time
                        data.setCreatorTime(date);//creation time
                        data.setRepoStatus(MacroRepoStatusEnum.NO);//repo status: not yet stored
                        data.setImportId(log.getId());//import id (= grab-log id)
                        data.setCollectWay(MacroCollectWayEnum.grab);//collection method: crawler
                        data.setDataCategory(dataCategoryEnum.getCode());//data category
                        data.setAccumulateTime(parseFileName(csvPath));
                    }
                    dateList.addAll(lineList);
                }
            } catch (Exception e) {
                // Best effort: a broken file is reported and skipped so the rest still import
                System.out.println("错误文件：：：："+csvPath);
                e.printStackTrace();
            }
        }
        //Reset the first-level indicator name cached across rows of the "vegetable/fruit situation" layout
        situationFirstIndicatorName = "";
        System.out.println(typeEnum.getMessage() + "保存的总实体数::::"+dateList.size());
        //Batch insert every parsed indicator row
        dcDataMacroIndicatorDataService.saveBatch(dateList);
        //Back-fill the grab count on the log row
        log.setTotalNum(String.valueOf(dateList.size()));
        dcDataBigmercGrabLogService.updateById(log);
        System.out.println(typeEnum.getMessage() + "保存的日志::::"+log);
        //Archive: copy the processed files to guidangDir and mirror them to production
        try {
            copyFilesToDirectory(resultCsvList, guidangDir);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Derives the data date from a file name/path: the last day of the period encoded
     * in it. Supported layouts, tried in order: "yyyy年m月" / "yyyy年m1-m2月" (range takes
     * the end month), the same with "份" instead of "月", "yyyy年X季度" (Chinese numeral
     * or digit), "yyyy年上半年/下半年", and "yyyy年全年".
     *
     * @param fileName absolute path or file name containing the period text
     * @return end-of-period date at local-zone midnight, or null when no pattern matches
     */
    private Date getLastDayOfMonth(String fileName) {
        // Strip one leading "N—"/"N-" fragment so a serial prefix can't confuse the year match
        fileName = fileName.replaceFirst("\\d[—-]", "");
        // year + month, optionally a range "m1-m2"
        Date byMonth = matchMonthEnd(fileName, "(\\d{4})年(\\d+)(?:-(\\d+))?月");
        if (byMonth != null) {
            return byMonth;
        }
        // same layout but suffixed "份" instead of "月"
        Date byMonthAlt = matchMonthEnd(fileName, "(\\d{4})年(\\d+)(?:-(\\d+))?份");
        if (byMonthAlt != null) {
            return byMonthAlt;
        }
        // year + quarter (Chinese numeral or digit)
        Matcher quarterMatcher = Pattern.compile("(\\d{4})年([一二三四1234])季度").matcher(fileName);
        if (quarterMatcher.find()) {
            int year = Integer.parseInt(quarterMatcher.group(1));
            int month;
            switch (quarterMatcher.group(2)) {
                case "一":
                case "1":
                    month = 3;
                    break;
                case "二":
                case "2":
                    month = 6;
                    break;
                case "三":
                case "3":
                    month = 9;
                    break;
                case "四":
                case "4":
                    month = 12;
                    break;
                default:
                    // Unreachable: the regex only admits 一二三四/1-4. The previous code set
                    // month = 0 here, which would have thrown DateTimeException from YearMonth.of.
                    return null;
            }
            return endOfMonth(year, month);
        }
        // first/second half of the year
        Matcher halfYearMatcher = Pattern.compile("(\\d{4})年(上半年|下半年)").matcher(fileName);
        if (halfYearMatcher.find()) {
            int year = Integer.parseInt(halfYearMatcher.group(1));
            return endOfMonth(year, "上半年".equals(halfYearMatcher.group(2)) ? 6 : 12);
        }
        // whole year
        Matcher fullYearMatcher = Pattern.compile("(\\d{4})年(全年)").matcher(fileName);
        if (fullYearMatcher.find()) {
            return endOfMonth(Integer.parseInt(fullYearMatcher.group(1)), 12);
        }
        // No pattern matched
        return null;
    }

    //Applies a "(year)年(m1)(-m2)?suffix" regex; returns the end-of-month date or null when no match.
    private Date matchMonthEnd(String fileName, String regex) {
        Matcher matcher = Pattern.compile(regex).matcher(fileName);
        if (!matcher.find()) {
            return null;
        }
        int year = Integer.parseInt(matcher.group(1));
        int month = matcher.group(3) != null
                ? Integer.parseInt(matcher.group(3)) // range "m1-m2": take the end month
                : Integer.parseInt(matcher.group(2)); // single month
        return endOfMonth(year, month);
    }

    //Last day of the given month as a java.util.Date at local-zone midnight.
    private Date endOfMonth(int year, int month) {
        return Date.from(YearMonth.of(year, month).atEndOfMonth().atStartOfDay(ZoneId.systemDefault()).toInstant());
    }

    //Accumulation-period label: the file-name prefix before the first 社/广/主/工/产 character ("" when absent).
    //NOTE(review): assumes '/' path separators (Linux paths); a Windows '\' path would keep the full path — confirm.
    private static String parseFileName(String csvPath) {
        String fileName = csvPath.substring(csvPath.lastIndexOf('/') + 1);
        final String markers = "社广主工产";
        // Linear scan instead of regex: return everything before the first marker character
        for (int i = 0; i < fileName.length(); i++) {
            if (markers.indexOf(fileName.charAt(i)) >= 0) {
                return fileName.substring(0, i);
            }
        }
        // No marker character found
        return "";
    }

    //Creates and persists one crawler grab-log row; the returned entity carries the generated id.
    private DcDataGrabLogEntity saveLog(Date date, String platForm, String address) {
        DcDataGrabLogEntity grabLog = new DcDataGrabLogEntity();
        grabLog.setCreatorTime(date);
        grabLog.setGrabTime(date);
        grabLog.setPlatform(platForm);
        grabLog.setAddress(address);
        grabLog.setRepoNum(0L); // nothing stored yet at creation time
        dcDataBigmercGrabLogService.save(grabLog);
        return grabLog;
    }

    //Returns true when the (trimmed) string is a plain decimal/scientific number parseable as float.
    private boolean isFloat(String str) {
        if (str == null) {
            return false;
        }
        String candidate = str.trim();
        if (candidate.isEmpty()) {
            return false; // blank or whitespace-only
        }
        // optional sign, optional integer part, optional dot, digits, optional exponent
        return candidate.matches("^[+-]?\\d*\\.?\\d+([eE][+-]?\\d+)?$");
    }

    /**
     * Copies each file in resultCsvList into the archive directory (overwriting existing
     * copies), then mirrors the archived contents to the production environment via the
     * price feign client as a {archived-path -> file-content} JSON map.
     *
     * @throws IOException      on copy/read failure
     * @throws RuntimeException when the production upload does not return code 200
     */
    private void copyFilesToDirectory(List<String> resultCsvList, String guidangDir) throws IOException {
        Map<String, String> fileDataMap = new HashMap<>();
        Path targetDir = Paths.get(guidangDir);
        for (String filePath : resultCsvList) {
            Path sourcePath = Paths.get(filePath); // source file
            Path targetPath = targetDir.resolve(sourcePath.getFileName()); // archived copy
            // Overwrite so a re-run refreshes the archived copy
            Files.copy(sourcePath, targetPath, StandardCopyOption.REPLACE_EXISTING);
            // Explicit UTF-8 (was the charset-name string "UTF-8"); avoids platform-charset surprises
            String content = new String(Files.readAllBytes(sourcePath), StandardCharsets.UTF_8);
            fileDataMap.put(targetPath.toString(), content);
        }
        //Mirror the archived files to the production environment
        String json = JSON.toJSONString(fileDataMap);
        if (priceFeignClient.saveFiles(json).getCode() != 200) {
            throw new RuntimeException("文件上传失败");
        }
    }

    //Lists the absolute paths of all regular .csv files directly under the given directory (non-recursive).
    private static List<String> getCsvFilePaths(String csvFilePathOrDir) {
        // Files.list streams must be closed — hence try-with-resources
        try (Stream<Path> entries = Files.list(Paths.get(csvFilePathOrDir))) {
            return entries
                    .filter(entry -> Files.isRegularFile(entry) && entry.toString().endsWith(".csv"))
                    .map(Path::toAbsolutePath)
                    .map(Path::toString)
                    .collect(Collectors.toList());
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    //Parses the string as an int, falling back to 0 for null/blank/unparseable input.
    private int parseInteger(String value) {
        try {
            return Integer.parseInt(value);
        } catch (NumberFormatException ignored) {
            // Integer.parseInt(null) also throws NumberFormatException, so null maps to 0 too
            return 0;
        }
    }

    //Refreshes the crawler config row with this run's statistics, pushes the update to the
    //production environment, and uploads the crawler's log file there as well.
    private void updateCrawler(YqCrawlerEntity entity, YqCrawlResultEntity result, Date date) {
        List<String> resultCsvList = getCsvFilePaths(result.getResultCsvDir());
        int lineCount = 0;  // total lines across all result csv files (headers included)
        int headCount = 0;  // one header line assumed per csv file
        for (String csvPath : resultCsvList) {
            try (CSVReader reader = new CSVReader(new FileReader(csvPath))) {
                headCount++;
                while (reader.readNext() != null) {
                    lineCount++;
                }
            } catch (Exception e) {
                throw new BizCommonException("读取爬虫结果文件出错");
            }
        }
        // NOTE(review): an empty csv (0 rows) still bumps headCount, so lineCount - headCount
        // could go negative across several files — confirm every result file carries a header.
        System.out.println(entity.getName() + "爬取条数：" + (lineCount - headCount));
        YqCrawlerEntity crawlerEntity = yqCrawlerService.getById(entity.getId());//re-read: the scheduled task may have changed the row; keep state consistent
        crawlerEntity.setRunTimes(crawlerEntity.getRunTimes() + 1);
        crawlerEntity.setCurrentRecords(lineCount == 0 ? 0 : (lineCount - headCount));
        crawlerEntity.setTotalRecords(crawlerEntity.getTotalRecords() + (lineCount == 0 ? 0 : (lineCount - headCount)));
        crawlerEntity.setLastRunTime(date);
        crawlerEntity.setLogFile(result.getOutputLog());
        crawlerEntity.setLastModifyTime(date);
        if (!yqCrawlerService.updateById(crawlerEntity)) {
            throw new BizCommonException("更新配置记录出错");
        }
        if(priceFeignClient.updateCrawlerByName(crawlerEntity).getCode() != 200){
            throw new BizCommonException("更新生产环境配置记录出错");
        }
        //Mirror the crawler log file to the production environment
        Map<String, String> fileDataMap = new HashMap<>();
        try {
            String fileContent = new String(Files.readAllBytes(new File(crawlerEntity.getLogFile()).toPath()), "UTF-8");
            fileDataMap.put(crawlerEntity.getLogFile(), fileContent);
            String json = JSON.toJSONString(fileDataMap);
            if (priceFeignClient.saveFiles(json).getCode() != 200) {
                throw new RuntimeException("文件上传失败");
            }
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    //Returns the lexicographically largest date substring found between the last '_' and the last '.'
    //of the file names in the archive directory; "1970-01-01" when the directory is empty or nothing matches.
    public String getMaxDateFromDirectory(String directoryPath) {
        File[] archivedFiles = new File(directoryPath).listFiles();
        // Missing directory or no files at all: fall back to the epoch date
        if (archivedFiles == null || archivedFiles.length == 0) {
            System.out.println("归档目录下没有文件");
            return "1970-01-01";
        }
        List<String> extractedDates = new ArrayList<>();
        for (File archivedFile : archivedFiles) {
            String name = archivedFile.getName();
            System.out.println("文件名::::" + name);
            int from = name.lastIndexOf('_') + 1; // character right after the last '_'
            int to = name.lastIndexOf('.');       // character right before the extension dot
            // Only names shaped like "..._DATE.ext" contribute
            if (from > 0 && to > from) {
                String datePart = name.substring(from, to);
                System.out.println("提取的日期: " + datePart);
                extractedDates.add(datePart);
            }
        }
        if (extractedDates.isEmpty()) {
            return "1970-01-01";
        }
        System.out.println("dateList::::" + extractedDates);
        // Natural string order — works because the dates share one fixed format
        return Collections.max(extractedDates);
    }

    //Writes the bash launcher for a model-analysis crawl: activates the conda env, cds into
    //basePath, and backgrounds the python script with -i keywords / -o result, logging to outputLog.
    private void commonShellScript(YqCrawlResultEntity result, String basePath) {
        String scriptContent =
                "#!/bin/bash\n"
                + YqModelAnalysisConstant.SOURCE_PATH + "\n"
                + "conda activate " + YqModelAnalysisConstant.CONDA_PATH + "/news_scrapy\n"
                + "cd " + basePath + "\n"
                + "python " + result.getPythonScript()
                + " -i " + result.getKeywordPath()
                + " -o " + result.getResultCsv()
                + " > " + result.getOutputLog() + " 2>&1 &\n";
        try (BufferedWriter shellWriter = new BufferedWriter(new FileWriter(result.getShellFilePath()))) {
            shellWriter.write(scriptContent);
        } catch (IOException e) {
            throw new BizCommonException(".sh脚本文件写入出错");
        }
    }

    //Writes the bash launcher for a data-entry crawl: same as the model-analysis variant
    //but without the -i keyword-file argument.
    private void commonShellScript2(YqCrawlResultEntity result, String basePath) {
        String scriptContent =
                "#!/bin/bash\n"
                + YqModelAnalysisConstant.SOURCE_PATH + "\n"
                + "conda activate " + YqModelAnalysisConstant.CONDA_PATH + "/news_scrapy\n"
                + "cd " + basePath + "\n"
                + "python " + result.getPythonScript()
                + " -o " + result.getResultCsv()
                + " > " + result.getOutputLog() + " 2>&1 &\n";
        try (BufferedWriter shellWriter = new BufferedWriter(new FileWriter(result.getShellFilePath()))) {
            shellWriter.write(scriptContent);
        } catch (IOException e) {
            throw new BizCommonException(".sh脚本文件写入出错");
        }
    }

    //Builds the per-run working directory (keyValue/timestamp) and all file paths the python
    //script needs; also writes the keyword file unless both local and global keywords are off.
    private YqCrawlResultEntity getResultPaths(YqCrawlerEntity entity, String pythonScript) {
        // Timestamp makes each run's directory unique
        String timestamp = new SimpleDateFormat("yyyyMMddHHmmss").format(new Date());
        String workDir = YqModelAnalysisConstant.crawlerJobPath + File.separator + entity.getKeyValue() + File.separator + timestamp;
        // Create all intermediate directories
        createDirectory(workDir);
        String shellFilePath = workDir + File.separator + "shell.sh";   // launcher script
        String logPath = workDir + File.separator + "shell.log";        // launcher log
        String resultCsv = workDir + File.separator + "output.csv";     // crawl result
        String resultCsv2 = workDir + File.separator + "output2.csv";   // secondary crawl result
        String outputLog = workDir + File.separator + "output.log";     // crawler log
        String keywordPath = workDir + File.separator + "key.txt";      // keyword file
        YqCrawlResultEntity result = new YqCrawlResultEntity(shellFilePath, logPath, resultCsv, resultCsv2, workDir, outputLog, keywordPath, pythonScript);
        // Does this media config enable global keywords?
        Integer globalKeyWord = entity.getGlobalKeyWord();
        String keyword = entity.getKeyWord();
        // NOTE(review): globalKeyWord is unboxed below — assumes the column is never null; confirm.
        if (StringUtils.isBlank(keyword) && globalKeyWord == Integer.parseInt(CrawlerGlobalKeyYesOrNoEnum.STOPPED.getCode())) {
            // No local keywords and global keywords disabled: skip the keyword file entirely
            return result;
        }
        // Merge local + (optionally) global keywords into the keyword file
        keywordsIntoFile(keyword, keywordPath, globalKeyWord);
        return result;
    }

    //Merges local keywords (newline-separated) with the global keyword file (when enabled),
    //dedupes while keeping first-seen order, and writes them newline-joined to keywordPath.
    private void keywordsIntoFile(String keyword, String keywordPath, Integer globalKeyWord) {
        // Insertion-ordered set: dedupes while remembering first-seen order
        Set<String> orderedKeywords = new LinkedHashSet<>();
        if (StringUtils.isNotBlank(keyword)) {
            // Local (per-media) keywords go in first
            for (String kw : keyword.split("\n")) {
                orderedKeywords.add(kw.trim());
            }
        }
        // Writer opened before reading the global file on purpose: a global-read failure
        // still leaves keywordPath truncated, matching the established behavior
        try (BufferedWriter writer = new BufferedWriter(new FileWriter(keywordPath))) {
            if (globalKeyWord == Integer.parseInt(CrawlerGlobalKeyYesOrNoEnum.RUNNING.getCode())) {
                // Global keywords enabled: append them (set drops duplicates)
                try (BufferedReader reader = new BufferedReader(new FileReader(YqCrawlerConstant.CRAWLER_GLOBAL_KEYWORD))) {
                    String line;
                    while ((line = reader.readLine()) != null) {
                        orderedKeywords.add(line.trim());
                    }
                } catch (IOException e) {
                    throw new BizCommonException("读取全局关键词文件出错");
                }
            }
            // join() produces no trailing newline — equivalent to the old strip-last-'\n' step
            writer.write(String.join("\n", orderedKeywords));
        } catch (IOException e) {
            throw new BizCommonException("关键词写入出错");
        }
    }

    /**
     * Runs article classification, sentiment analysis, keyword extraction and industry
     * analysis over a batch of crawled media entities, then persists one basic-info row
     * (plus a detail row) per new article.
     *
     * Assumes mediaEntityList is non-empty and all entries share one media value —
     * get(0) below would throw on an empty list (TODO confirm callers guarantee this).
     */
    public void modelAnalysisInsertDBAndES(List<YqMediaEntity> mediaEntityList) {
        // Batch input file for the model services: one article body per line
        String uuid = UUID.randomUUID().toString();
        String input = YqModelAnalysisConstant.modelAnalysisContentPath + File.separator + uuid + ".txt";
        createDirectory(input.substring(0, input.lastIndexOf("/")));
        System.out.println("媒体：" + mediaEntityList.get(0).getMedia() + " 文章去重前数量：" + mediaEntityList.size());
        // In-batch dedupe (relies on YqMediaEntity equals/hashCode, presumably keyed on originId — verify)
        mediaEntityList = mediaEntityList.stream().distinct().collect(Collectors.toList());
        // Cross-batch dedupe: drop articles already stored for this media (by originId)
        // Collect the batch's originIds
        List<String> originIdList = mediaEntityList.stream()
                .map(YqMediaEntity::getOriginId)
                .collect(Collectors.toList());
        // originIds already present in the database for this media (duplicates)
        List<String> oldOriginIdList = yqBasicInfoService.list(new LambdaQueryWrapper<YqBasicInfoEntity>()
                        .select(YqBasicInfoEntity::getOriginId)
                        .eq(YqBasicInfoEntity::getMedia, mediaEntityList.get(0).getMedia())
                        .in(YqBasicInfoEntity::getOriginId, originIdList)).stream()
                .map(YqBasicInfoEntity::getOriginId)
                .collect(Collectors.toList());
        // Remove the already-stored originIds
        originIdList.removeAll(oldOriginIdList);
        // Entities that survived both dedupe passes
        List<YqMediaEntity> distinctMediaEntityList = mediaEntityList.stream()
                .filter(entity -> originIdList.contains(entity.getOriginId()))
                .collect(Collectors.toList());
        System.out.println("媒体：" + mediaEntityList.get(0).getMedia() + " 文章去重后数量：" + distinctMediaEntityList.size());
        if (distinctMediaEntityList.isEmpty()) {
            return;
        }
        // Write each article body (newlines stripped) as one line of the model input file
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < distinctMediaEntityList.size(); i++) {
            sb.append(distinctMediaEntityList.get(i).getContent().replace("\n", "").replace("\r", ""));
            sb.append((i < distinctMediaEntityList.size() - 1) ? "\n" : "");
        }
        try (BufferedWriter writer = new BufferedWriter(new FileWriter(input))) {
            writer.write(String.valueOf(sb));
            writer.flush();
        } catch (IOException e) {
            throw new BizCommonException("写入正文内容出错");
        }
        // Article classification
        List<List<YqLabelScoreEntity>> thutct = yqModelAnalysisService.getThutct(input, YqModelAnalysisConstant.thutctTopN, false);
//        System.out.println("文章分类结束");
        // Sentiment analysis
        List<List<YqLabelScoreEntity>> sentiment = yqModelAnalysisService.getSentiment(input, false);
//        System.out.println("情感分析结束");
        // Keyword extraction
        List<List<YqLabelScoreEntity>> keyword = yqModelAnalysisService.getKeyword(input, YqModelAnalysisConstant.keywordTopN, YqModelAnalysisConstant.minWordNum, false);
//        System.out.println("关键词分析结束");
        // Industry analysis must run last: it rewrites the input file with a trailing newline appended
        List<List<YqLabelScoreEntity>> domain = yqModelAnalysisService.getDomain(input, false);
//        System.out.println("行业分析结束");
        if (thutct == null || thutct.isEmpty()) {
            throw new BizCommonException("文章分类结果为空");
        }
        if (sentiment == null || sentiment.isEmpty()) {
            throw new BizCommonException("情感分析结果为空");
        }
        if (keyword == null || keyword.isEmpty()) {
            throw new BizCommonException("关键词分析结果为空");
        }
        if (domain == null || domain.isEmpty()) {
            throw new BizCommonException("行业分析结果为空");
        }
        // Every model must return exactly one result list per article, in article order
        int size = distinctMediaEntityList.size();
        if (size != thutct.size() || size != sentiment.size() || size != keyword.size() || size != domain.size()) {
            System.out.println("文章数：" + size);
            System.out.println("文章分类结果数：" + thutct.size());
            System.out.println("情感分析结果数：" + sentiment.size());
            System.out.println("关键词分析结果数：" + keyword.size());
            System.out.println("行业分析结果数：" + domain.size());
            throw new BizCommonException("模型分析结果数目与文章数不一致");
        }
        for (int i = 0; i < size; i++) {
            YqBasicInfoEntity yqBasicInfoEntity = new YqBasicInfoEntity();
            // Industry analysis result: top-1 label mapped to the IndustryEnum code
            List<YqLabelScoreEntity> domainLabelScoreList = domain.get(i);
            YqLabelScoreEntity domainLabelScore = domainLabelScoreList.get(0);
            String domainLabel = domainLabelScore.getLabel();
            Float domainScore = domainLabelScore.getScore();
            int industry = 0;
            for (IndustryEnum value : IndustryEnum.values()) {
                if (value.getMessage().equals(domainLabel)) {
                    industry = Integer.parseInt(value.getCode());
                    yqBasicInfoEntity.setIndustry(industry);
                }
            }
            yqBasicInfoEntity.setIndustryPvalue(domainScore);
            // Article-classification result: top-1 label mapped to the TextTypeEnum code
            List<YqLabelScoreEntity> thutctLabelScoreList = thutct.get(i);
            YqLabelScoreEntity thutctLabelScore = thutctLabelScoreList.get(0);
            String thutctLabel = thutctLabelScore.getLabel();
            Float thutctScore = thutctLabelScore.getScore();
            int type = 0;
            for (TextTypeEnum value : TextTypeEnum.values()) {
                if (value.getMessage().equals(thutctLabel)) {
                    type = Integer.parseInt(value.getCode());
                    yqBasicInfoEntity.setType(Integer.parseInt(value.getCode()));
                }
            }
            yqBasicInfoEntity.setTypePvalue(thutctScore);
            // Sentiment result: scores within 0.1 of each other count as NEUTRAL;
            // positive/negative p-values are always assigned by which label is "positive"
            List<YqLabelScoreEntity> sentimentLabelScoreList = sentiment.get(i);
            YqLabelScoreEntity sentimentLabelScoreFirst = sentimentLabelScoreList.get(0);
            String sentimentLabelFirst = sentimentLabelScoreFirst.getLabel();
            Float sentimentScoreFirst = sentimentLabelScoreFirst.getScore();
            YqLabelScoreEntity sentimentLabelScoreSecond = sentimentLabelScoreList.get(1);
            Float sentimentScoreSecond = sentimentLabelScoreSecond.getScore();
            if (Math.abs(sentimentScoreFirst - sentimentScoreSecond) < 0.1f) {
                yqBasicInfoEntity.setSentiment(Integer.parseInt(SentimentEnum.NEUTRAL.getCode()));
                if ((SentimentEnum.POSITIVE.getMessage()).equals(sentimentLabelFirst)) {
                    yqBasicInfoEntity.setPositivePvalue(sentimentScoreFirst);
                    yqBasicInfoEntity.setNegativePvalue(sentimentScoreSecond);
                } else {
                    yqBasicInfoEntity.setPositivePvalue(sentimentScoreSecond);
                    yqBasicInfoEntity.setNegativePvalue(sentimentScoreFirst);
                }
            } else {
                if (sentimentScoreFirst > sentimentScoreSecond) {
                    // First entry wins: its label decides the sentiment
                    if ((SentimentEnum.POSITIVE.getMessage()).equals(sentimentLabelFirst)) {
                        yqBasicInfoEntity.setSentiment(Integer.parseInt(SentimentEnum.POSITIVE.getCode()));
                        yqBasicInfoEntity.setPositivePvalue(sentimentScoreFirst);
                        yqBasicInfoEntity.setNegativePvalue(sentimentScoreSecond);
                    } else {
                        yqBasicInfoEntity.setSentiment(Integer.parseInt(SentimentEnum.NEGATIVE.getCode()));
                        yqBasicInfoEntity.setPositivePvalue(sentimentScoreSecond);
                        yqBasicInfoEntity.setNegativePvalue(sentimentScoreFirst);
                    }
                } else {
                    // Second entry wins: sentiment is the opposite of the first label
                    if ((SentimentEnum.POSITIVE.getMessage()).equals(sentimentLabelFirst)) {
                        yqBasicInfoEntity.setSentiment(Integer.parseInt(SentimentEnum.NEGATIVE.getCode()));
                        yqBasicInfoEntity.setPositivePvalue(sentimentScoreFirst);
                        yqBasicInfoEntity.setNegativePvalue(sentimentScoreSecond);
                    } else {
                        yqBasicInfoEntity.setSentiment(Integer.parseInt(SentimentEnum.POSITIVE.getCode()));
                        yqBasicInfoEntity.setPositivePvalue(sentimentScoreSecond);
                        yqBasicInfoEntity.setNegativePvalue(sentimentScoreFirst);
                    }
                }
            }
            // Keyword result: comma-joined labels
            List<YqLabelScoreEntity> keywordLabelScoreList = keyword.get(i);
            StringBuilder s = new StringBuilder();
            for (int k = 0; k < keywordLabelScoreList.size(); k++) {
                s.append(keywordLabelScoreList.get(k).getLabel());
                if (k < keywordLabelScoreList.size() - 1) {
                    s.append(",");
                }
            }
            yqBasicInfoEntity.setKeyWord(String.valueOf(s));
            // Basic info: detail row first, then the basic-info row, then the back-reference
            YqMediaEntity yqMediaEntity = distinctMediaEntityList.get(i);
            YqBasicInfoDetailEntity detailEntity = new YqBasicInfoDetailEntity();
            detailEntity.setDetail(yqMediaEntity.getContent());
            detailEntity.setCreatorTime(new Date());
            detailEntity.setCreatorUserId("572395726433197509"); // NOTE(review): hard-coded system user id — confirm
            String identifiedCity = CityUtils.identifyCity(yqMediaEntity.getCity(), yqMediaEntity.getContent());
            String city = StringUtils.isBlank(identifiedCity) ? "未知" : identifiedCity;
            if (!yqBasicInfoDetailService.save(detailEntity)) {
                throw new BizCommonException("保存文章详情失败");
            }
            yqBasicInfoEntity.setName(yqMediaEntity.getName());
            yqBasicInfoEntity.setDigest(yqMediaEntity.getDigest());
            yqBasicInfoEntity.setDetailId(detailEntity.getId());//back-fill the detail row id
            yqBasicInfoEntity.setMedia(yqMediaEntity.getMedia());
            yqBasicInfoEntity.setHot(yqMediaEntity.getHot());
//            yqBasicInfoEntity.setCity(yqMediaEntity.getCity());
            yqBasicInfoEntity.setCity(city);
            yqBasicInfoEntity.setPublishTime(yqMediaEntity.getPublishTime());
            yqBasicInfoEntity.setPublishUserName(yqMediaEntity.getPublishUserName());
            yqBasicInfoEntity.setLink(yqMediaEntity.getLink());
            yqBasicInfoEntity.setOriginId(yqMediaEntity.getOriginId());
            yqBasicInfoEntity.setCreatorTime(new Date());
            yqBasicInfoEntity.setCreatorUserId("572395726433197509");
            if (!yqBasicInfoService.save(yqBasicInfoEntity)) {
                throw new BizCommonException("保存舆情基础信息失败");
            }
            //Back-fill the basic-info id onto the detail row
            detailEntity.setBasicInfoId(yqBasicInfoEntity.getId());
            if (!yqBasicInfoDetailService.updateById(detailEntity)) {
                throw new BizCommonException("回填舆情基础信息表id失败");
            }
//            //插入ES
//            NewsEs newsEs = new NewsEs();
//            newsEs.setTitle(yqMediaEntity.getName());
//            newsEs.setContent(yqMediaEntity.getContent());
//            newsEs.setIndustry(industry);
//            newsEs.setType(type);
//            newsEs.setCity(city);
//            newsEs.setSource(yqMediaEntity.getMedia());
//            newsEs.setEmotionalAttributes(yqBasicInfoEntity.getSentiment());
//            newsEs.setHot(yqMediaEntity.getHot());
//            newsEs.setKeyWord(yqBasicInfoEntity.getKeyWord().split(","));
//            newsEs.setCreateTime(new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss").format(yqMediaEntity.getPublishTime()));
//            if (!yqEsMethodService.add(yqBasicInfoEntity.getId(), newsEs)) {
//                throw new BizCommonException("数据插入ES失败");
//            }
//            IntelligentHotAndEmerDto dto = new IntelligentHotAndEmerDto();
//            BeanUtils.copyProperties(newsEs, dto);
//            dto.setBasicInfoId(yqBasicInfoEntity.getId());
//            dto.setDetailId(detailEntity.getId());
//            dto.setIndustryPvalue(domainScore);
//            dto.setTypePvalue(thutctScore);
//            if (!yqHotAndEmerService.identifyHotAndEmer(dto)) {
//                throw new BizCommonException("识别热点话题,突发事件出错");
//            }
        }
    }

    //Ensures the directory (and all parents) exists; throws when it is absent and cannot be created.
    private void createDirectory(String directoryPath) {
        final File dir = new File(directoryPath);
        if (dir.exists()) {
            return; // already there, nothing to do
        }
        if (!dir.mkdirs()) {
            throw new RuntimeException("Failed to create directory: " + dir.getAbsolutePath());
        }
    }

    /**
     * Removes from fileList (in place) every path whose file name already exists in
     * directoryPath, so already-archived files are not processed twice.
     *
     * A missing or unreadable directory (listFiles() == null) now means "no duplicates"
     * instead of throwing a NullPointerException.
     *
     * @param fileList      mutable list of absolute file paths; filtered in place
     * @param directoryPath archive directory whose file names are treated as already processed
     */
    private void removeDuplicateFiles(List<String> fileList, String directoryPath) {
        File[] filesInDirectory = new File(directoryPath).listFiles();
        if (filesInDirectory == null) {
            // Directory does not exist or is not listable: nothing to dedupe against
            return;
        }
        // Set gives O(1) name lookups (was a List with O(n) contains per element)
        Set<String> archivedNames = new HashSet<>();
        for (File file : filesInDirectory) {
            if (file.isFile()) {
                archivedNames.add(file.getName());
            }
        }
        // removeIf replaces the manual Iterator loop
        fileList.removeIf(filePath -> archivedNames.contains(new File(filePath).getName()));
    }

}
