package org.example.using;


import java.util.List;
import java.util.stream.Collectors;

import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.sql.*;
import org.apache.spark.sql.api.java.UDF0;
import org.apache.spark.sql.api.java.UDF1;
import org.apache.spark.sql.api.java.UDF2;
import org.apache.spark.sql.expressions.UserDefinedFunction;
import org.apache.spark.sql.expressions.Window;
import org.apache.spark.sql.expressions.WindowSpec;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
import org.example.utils.MysqlUtils_2;
import scala.collection.JavaConverters;
import scala.collection.mutable.WrappedArray;

import java.io.IOException;
import java.sql.Timestamp;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

import static org.apache.spark.sql.functions.*;
import static org.apache.spark.sql.functions.current_timestamp;


/**
 * 创建一个类,主要用来处理tcms的故障数据,根据故障数据做视情维修和健康评估.
 */
public class JsonProcessor_1 {

    /***
     *  定义一个方法,调用此方法,会返回健康评估的结果,返回值是一个DataSet.
     * @param spark
     * @param jsonRDD 传入当前批次的tcms的实时数据
     * @return 健康评估最终结果
     * @throws AnalysisException
     * @throws IOException
     * @throws ParseException
     */
    /***
     * Runs the health-assessment pipeline for the current micro-batch of TCMS fault data.
     * NOTE(review): despite the original description ("returns a DataSet"), this method
     * returns void — all results are persisted to MySQL (`ads_phm_fault` appended,
     * `t_phm_sqwx` upserted) as side effects.
     * @param spark   active SparkSession
     * @param jsonRDD the current batch of real-time TCMS data (JSON strings)
     * @throws AnalysisException
     * @throws IOException
     * @throws ParseException
     */
    public static void unionHealthAssessment(SparkSession spark, JavaRDD<String> jsonRDD) throws AnalysisException, IOException, ParseException {
        // Parse the raw JSON records of this batch into fault rows
        // (processJsonRDD is defined elsewhere in this file — presumably it also
        // registers the temp views the processors rely on; verify there).
        Dataset<Row> tcmsRealFaults = processJsonRDD(spark, jsonRDD);
//        System.out.println("当前实时数据。。。。。。");
//        tcmsRealFaults.show(false);
        // Keep only PJ00003 alarms that carry a PHM category ('-' means uncategorized).
        Dataset<Row> filter = tcmsRealFaults.filter(" bjpjm = 'PJ00003' and phm_lb != '-' ");
        // Each processFaultTypeN call registers two temp views (`ads_phm_fault_N`
        // and `t_phm_sqwx_N`); the three of each are unioned and written below.
        processFaultType1(spark, filter);
        Dataset<Row> ads_phm_fault1Ds = spark.sql("select * from ads_phm_fault_1");
        processFaultType2(spark, filter);
        Dataset<Row> ads_phm_fault2Ds = spark.sql("select * from ads_phm_fault_2");
        processFaultType3(spark, filter);
        Dataset<Row> ads_phm_fault3Ds = spark.sql("select * from ads_phm_fault_3");
        // Append the unioned per-record fault rows to `ads_phm_fault`.
        // NOTE(review): union order is 1,3,2 here and below — presumably only the
        // column schemas must line up, but confirm the ordering is intentional.
//        ads_phm_fault1Ds.union(ads_phm_fault3Ds).union(ads_phm_fault2Ds).show(false);
        MysqlUtils_2.writeDataToMysqlTable(ads_phm_fault1Ds.union(ads_phm_fault3Ds).union(ads_phm_fault2Ds), "ads_phm_fault", SaveMode.Append);
        Dataset<Row> sql_sqwx_1 = spark.sql("select * from t_phm_sqwx_1");
        //Dataset<Row> sql_sqwx_2 = spark.sql("select id ,s_train_id,cx,lj,ljjc,ljm,psd,psdjc,psddm,ssxt,xtpjm,ssbj,bjpjm,bjwz,bjxlh,bjdjm,cast(mxms as string),cast(mxmc as string) ,create_time,xfzt,htzt,hkzt,status from t_phm_sqwx_2");
        Dataset<Row> sql_sqwx_2 = spark.sql("select * from t_phm_sqwx_2");
        Dataset<Row> sql_sqwx_3 = spark.sql("select *  from t_phm_sqwx_3");
        // Upsert (insert-or-update by id) the aggregated CBM rows into `t_phm_sqwx`.
        MysqlUtils_2.upsertDatasetToMySQL(sql_sqwx_1.union(sql_sqwx_3).union(sql_sqwx_2), "t_phm_sqwx");
        // ============================================================= end of health-assessment output
    }

    /***
     * 处理HXD1D内的其他28个视情维修数据,入到 `ads_phm_fault`表 和 `t_phm_sqwx`表中,这里入库的粒度需要注意,后续数据需要调整.
     * @param spark
     * @param inputData
     */
    /***
     * Handles the other 28 condition-based-maintenance (CBM) fault models of HXD1D —
     * i.e. everything except the two converter alarm types handled by
     * processFaultType2/processFaultType3. Registers two temp views consumed by the
     * caller: `ads_phm_fault_1` (one row per incoming fault record) and
     * `t_phm_sqwx_1` (aggregated: one row per train type/number and per model per trip,
     * merged with the un-returned history so the caller's upsert updates existing rows).
     * @param spark     active SparkSession
     * @param inputData current batch of filtered real-time fault rows
     * @throws IOException propagated from the MySQL utility layer
     */
    public static void processFaultType1(SparkSession spark, Dataset<Row> inputData) throws IOException {
        // BUG FIX: the original predicate was
        //   phm_lb != '变流器元件故障报警' or phm_lb != '变流器模块过热报警'
        // which is a tautology (any non-null phm_lb satisfies at least one side), so the
        // converter alarms handled by processFaultType2/3 leaked into this branch too.
        // AND correctly excludes both types, matching the exclusion filters applied to
        // the `t_phm_sqwx` history below.
        Dataset<Row> otherDS = inputData.filter(expr(" phm_lb != '变流器元件故障报警' and phm_lb != '变流器模块过热报警' "));

        // Rows destined for `ads_phm_fault`: one output row per fault record; models
        // without an id get the placeholder mx_id '1111'.
        Dataset<Row> rowDataset1 = otherDS.selectExpr("s_train_name as s_train_id", "s_fault_id",
                "case when mx_id is not null then mx_id  else '1111' end as mx_id", "s_train_name", "s_train_type_code", "s_fault_source", "s_fault_time", "s_fault_sys", "s_fault_sys_code1",
                "s_fault_sys_code2", "s_fault_bw", "s_fault_bw_code1", "s_fault_bw_code2", "s_fault_code", "s_fault_name", "phm_lb", "mxmc as mxjg",
                "phm_gzmc", "phm_bjwz", "zj_ms", "zj_fa", "pjxlh", "pjdjm", "s_htzt", "s_hkzt", "s_xfzt", "czjy");
        rowDataset1.createOrReplaceTempView("ads_phm_fault_1");
        // (Removed an unused local that duplicated this projection via
        // selectColumnsAdsPhmFault; its write call was already commented out and Spark
        // transformations are lazy, so dropping it changes nothing at runtime.)

        // Rows destined for `t_phm_sqwx`. Note the deliberate column shuffle:
        // mxmc -> mx, mxms -> mxmc, phm_gzmc -> mxms.
        Dataset<Row> rowDataset = otherDS.selectExpr("id", "s_train_id", "cx", "lj", "ljjc", "ljm", "psd", "psdjc", "psddm", "ssxt", "xtpjm", "ssbj", "bjpjm", "bjwz", "bjxlh", "bjdjm", "mxmc as mx", "mxms as mxmc", "phm_gzmc as mxms", "create_time", "czjy", "xfzt", "htzt", "hkzt", "status");

        // History: open rows (HKZT = '0', not yet returned/closed) for the same model
        // families; `mx` comes from t_phm_mxxq.mxgj, and the two converter models are
        // excluded to mirror the real-time filter above.
        Dataset<Row> localTipHistoryDs = MysqlUtils_2.readMysqlTable(spark, "t_phm_sqwx")
                .filter(col("HKZT").equalTo("0"))
                .filter(col("mx").notEqual("元件故障"))
                .filter(col("mx").notEqual("模块过热"));

        Dataset<Row> allPhmFaultFinal = null;
        Dataset<Row> localTipHistoryDsSelect = localTipHistoryDs.selectExpr("id", "s_train_id", "cx", "lj", "ljjc", "ljm", "psd", "psdjc", "psddm", "ssxt", "xtpjm", "ssbj", "bjpjm", "bjwz", "bjxlh", "bjdjm", "mx", "mxmc", "mxms", "create_time", "czjy", "xfzt", "htzt", "hkzt", "status");
        // count() triggers a Spark job just to test for emptiness — acceptable here
        // since the history read is needed anyway when non-empty.
        if (localTipHistoryDs.count() > 0) {
            // Merge current batch with open history, then aggregate the model
            // name/description columns (collect_set de-duplicates, concat_ws joins).
            allPhmFaultFinal = rowDataset.union(localTipHistoryDsSelect).groupBy(col("id"), col("s_train_id"), col("cx"), col("lj"), col("ljjc"), col("ljm"), col("psd")
                            , col("psdjc"), col("psddm"), col("ssxt"), col("xtpjm"), col("ssbj"), col("bjpjm"), col("bjwz"), col("bjxlh"),
                            col("bjdjm"), col("mx"), col("xfzt"), col("htzt"), col("hkzt"), col("status"), col("czjy"))
                    .agg(
                            concat_ws(",", collect_set(col("mxmc"))).alias("mxmc_res"),
                            concat_ws(",", collect_set(col("mxms"))).alias("mxms_res")
                    )
                    .selectExpr("id", "s_train_id", "cx", "lj", "ljjc", "ljm", "psd", "psdjc", "psddm", "ssxt", "xtpjm", "ssbj", "bjpjm", "bjwz", "bjxlh", "bjdjm", "mx", "mxmc_res as mxmc", "mxms_res as mxms", "xfzt", "htzt", "hkzt", "status", "czjy").withColumn("create_time", current_timestamp());
        } else {
            // No open history: aggregate the current batch alone.
            // NOTE(review): only this branch rewrites id as concat_ws('-', id, bjwz) —
            // the merge branch keeps the raw id. Confirm this asymmetry is intended,
            // otherwise the upsert key differs depending on history presence.
            allPhmFaultFinal = rowDataset.groupBy(col("id"), col("s_train_id"), col("cx"), col("lj"), col("ljjc"), col("ljm"), col("psd")
                            , col("psdjc"), col("psddm"), col("ssxt"), col("xtpjm"), col("ssbj"), col("bjpjm"), col("bjwz"), col("bjxlh"),
                            col("bjdjm"), col("mx"), col("xfzt"), col("htzt"), col("hkzt"), col("status"), col("czjy"))
                    .agg(
                            concat_ws(",", collect_set(col("mxmc"))).alias("mxmc_res"),
                            concat_ws(",", collect_set(col("mxms"))).alias("mxms_res")
                    )
                    .withColumn("create_time", current_timestamp())
                    .selectExpr("concat_ws('-',id,bjwz) as id", "s_train_id", "cx", "lj", "ljjc", "ljm", "psd", "psdjc", "psddm", "ssxt", "xtpjm", "ssbj", "bjpjm", "bjwz", "bjxlh", "bjdjm", "mx", "mxmc_res as mxmc", "mxms_res as mxms", "xfzt", "htzt", "hkzt", "status", "czjy", "create_time");
        }
        // Registered for the caller's union + upsert into `t_phm_sqwx`.
        allPhmFaultFinal.createOrReplaceTempView("t_phm_sqwx_1");
    }


    /**
     * 这里主要处理:`牵引变流器元件故障`视情维修模型的视情维修和健康评估数据.
     *
     * @param inputData
     * @param spark
     * @return
     * @throws AnalysisException
     * @throws IOException
     */
    public static void processFaultType2(SparkSession spark, Dataset<Row> inputData) throws
            AnalysisException, IOException, ParseException {
        inputData.withColumn("mx_id", lit("10003")).withColumnRenamed("phm_gzmc", "phm_gzmc_2");
        //分情况处理数据:处理顺序:斩波管故障、A、B相上下管同时故障（5s内、需总故障发生）或 C、D相上下管同时故障（5s内、需总故障发生）、同相上下管故障（5s内、需总故障发生）、单管故障（需总故障）的顺序处理.

        // ============================================================================================================处理特殊情况的数据: 斩波管故障
        Dataset<Row> zbgggDS = inputData.filter(col("phm_gzmc").rlike(".*斩波管故障"));
        // 当前批次当前车型、车号无论报多少次,都仅仅显示一条.可以直接使用`id`进行去重操作,保证当前躺只有一条这样的数据.原因:id的产生:车型、车号+模型ID+出库时间组成的.视情维修表中数据
        // 插入到入视情维修表中的数据
        Dataset<Row> zbgggDSWithSQWX = selectColumnsWithSQWX(zbgggDS).dropDuplicates("id");
        Map<String, String> columnMapping = new HashMap<String, String>();
        columnMapping.put("s_train_id_2", "s_train_id");
        // 入 ads_phm_fault表的数据
        // TODO 这个表中新增了字段: 模型结果,存储的是 模型详情表中的 mxjg的值.
        Dataset<Row> zbgggDSWithDWDPHMFAULT = zbgggDS.selectExpr("s_train_id", "s_fault_id", "mx_id", "s_train_name", "s_train_type_code", "s_fault_source", "s_fault_time", "s_fault_sys",
                "s_fault_sys_code1", "s_fault_sys_code2", "s_fault_bw", "s_fault_bw_code1", "s_fault_bw_code2",
                "s_fault_code", "s_fault_name", "phm_lb", "phm_gzmc", "phm_bjwz", "zj_ms", "zj_fa", "pjxlh", "pjdjm", "s_htzt", "s_hkzt", "s_xfzt", "mxmc as mxjg");
        // ============================================================================================================处理特殊情况的数据: 斩波管故障

        // 获取实时数据组合情况,实际上是处理会话划分问题,5秒内划分窗口,并判断窗口内数据组合情况.在处理实时数据之前,我需要先去`ads_phm_fault`这张历史表中拉去`s_fault_time`的最大时间 到 5秒前的数据,主要处理跨批次5秒内的数据.
        Dataset<Row> lastBatchFiveDS = MysqlUtils_2.readMysqlTable(spark, "ads_phm_fault");

         /*
            TODO 拉取到上一个批次最后5秒的实时数据,union本批次的实时数据,先对当前报警数据通过 `s_fault_time`进行升序,再按照5秒内的数据进行会话划分.
                 这里先去读取ads_phm_fault的故障历史,找到上一个批次最后5秒的故障数据,如果当前表不是空的,处理历史数据,如果当前表中是空的,直接处理当前批次的实时数据.
                 在开始处理实时数据时,是按照实时报警名称和模型字典中的xxms进行左外连接的.因此这里报出的单条故障,是无法查找到模型字典是数据的,因此可以作为数据过滤条件.
                 1、先将故障时间按照升序排序,按照故障时间5秒内划分窗口.
                 2、对窗口内的数据进行车型、车号、phm_bjwz进行分组,将故障名称聚合,实现列转行操作,最终结果:[变流器1ACC元件总故障,变流器1ACC上管故障,...]
            */
        Dataset<Row> inputDataRealFaults = inputData.filter(col("phm_lb").equalTo("变流器元件故障报警"));
        inputData.cache();
        Dataset<Row> rankedRealFaults = null;
        if (lastBatchFiveDS.count() > 0) {
            // 如果当前表是空的,则直接处理实时故障数据.
            String lastBatchMaxFaultTimeStr = lastBatchFiveDS
                    .agg(max("s_fault_time")).collectAsList().get(0).getString(0); // // 获取最大的 s_fault_time
            // 定义时间格式
            SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
            // 使用自定义时间格式将字符串形式的时间转换为 Timestamp 类型
            Timestamp maxTime = new Timestamp(dateFormat.parse(lastBatchMaxFaultTimeStr).getTime());
            // 将最大时间转换为 Unix 时间戳（以毫秒为单位），然后减去 5 秒
            long maxTimeUnix = maxTime.getTime() - (5 * 1000);
            // 将 Unix 时间戳（以毫秒为单位）转换回 Timestamp 类型
            Timestamp maxTimeMinus5Seconds = new Timestamp(maxTimeUnix);
            // 使用 expr() 创建一个新的筛选条件，选择 s_fault_time 在最大时间的 5 秒前的数据. 注意这个的用法:expr()方法是Apache Spark SQL中的一个函数，它用于创建一个Column对象，该对象表示一个表达式。这个表达式可以是任何合法的SQL表达式，
            Column filterCondition = expr("s_fault_time >= '" + dateFormat.format(maxTimeMinus5Seconds) + "'");
            // 获取到上个批次最后5秒的数据
            columnMapping.clear();
            Dataset<Row> lastBatchFiveSecondDS = lastBatchFiveSecondDS = selectColumnsAdsPhmFault(lastBatchFiveDS.filter(filterCondition), columnMapping);
            rankedRealFaults = selectColumnsAdsPhmFault(inputDataRealFaults, columnMapping).union(lastBatchFiveSecondDS).orderBy(col("s_fault_time").asc()).withColumn("mx_id", lit("10003"));
        } else {
            rankedRealFaults = selectColumnsAdsPhmFault(inputDataRealFaults, columnMapping).orderBy(col("s_fault_time").asc()).withColumn("mx_id", lit("10003"));
        }

        // 定义窗口函数
        WindowSpec windowSpec = Window.partitionBy(col("s_train_id"), col("phm_bjwz")).orderBy("s_fault_time");
        // 计算每一行与上一行的时间差（单位：秒）
        rankedRealFaults = rankedRealFaults.withColumn(
                "time_diff",
                unix_timestamp(col("s_fault_time")).minus(lag(unix_timestamp(col("s_fault_time")), 1).over(windowSpec))
        );

        // 判断时间差是否大于等于5秒，如果是，则表示新的会话开始
        rankedRealFaults = rankedRealFaults.withColumn(
                "is_new_session",
                when(col("time_diff").isNull().or(col("time_diff").geq(5)), 1).otherwise(0)
        );
        // 计算会话 ID
        rankedRealFaults = rankedRealFaults.withColumn(
                "session_id",
                sum("is_new_session").over(windowSpec)
        );
        // 显示结果
        // 这里屏蔽掉故障名称中的 变流器1 | 变流器2 这种字符串,结果就是故障名称统一,方便后续的统一判断处理.
        Dataset<Row> rankedRealFaultsAndReplaceGzmc = rankedRealFaults.withColumn(
                "cleaned_phm_gzmc",
                regexp_replace(col("phm_gzmc"), "变流器[1-2]", "")
        );

        // 在进行行转列前,这里需要考虑后续的排序情况,以及模型结果的判断情况,这里需要关联本地的一张表,进行故障模型爆出来的组合情况.
        Dataset<Row> tPhmGzzdYjgzbjRank = MysqlUtils_2.readMysqlTable(spark, "t_phm_gzzd_yjgzbj_rank");
        //  Supported join types include: 'inner', 'outer', 'full', 'fullouter', 'full_outer', 'leftouter', 'left', 'left_outer', 'rightouter', 'right', 'right_outer', 'leftsemi', 'left_semi', 'semi', 'leftanti', 'left_anti', 'anti', 'cross'.
        Dataset<Row> rankedRealFaultsAndReplaceGzmcDeal = rankedRealFaultsAndReplaceGzmc.alias("t1").join(tPhmGzzdYjgzbjRank.alias("t2"), expr("t1.phm_gzmc=t2.phm_gzmc"), "inner");
        // 使用 s_train_id 和 phm_bjwz 分组，然后对 cleaned_phm_gzmc 字段进行聚合
        Dataset<Row> aggregatedData = rankedRealFaultsAndReplaceGzmcDeal.groupBy(col("s_train_id"), col("phm_bjwz"), col("gzmc_group_id"))
                .agg(
                        collect_list(col("cleaned_phm_gzmc")).alias("aggregated_phm_gzmc"),
                        min(col("s_fault_time")).alias("s_fault_time")
                );

        // 显示结果
        /*
            +----------+--------+-------------+---------------------------------------------------------------------------------------------------------+
            |s_train_id|phm_bjwz|gzmc_group_id|aggregated_phm_gzmc                                                                                      |
            +----------+--------+-------------+---------------------------------------------------------------------------------------------------------+
            |HXD1D-1009|saasfasd|L2           |[L2A相上管故障]                                                                                          |
            |HXD1D-1009|saasfasd|ACC          |[ACC的U相上管故障]                                                                                       |
            |HXD1D-1019|1       |L2           |[L2A相下管故障, L2A相上管故障, L2元件总故障, L2元件总故障]                                               |
            |HXD1D-1019|1       |M3           |[M3元件总故障, M3A相下管故障]                                                                            |
            |HXD1D-1009|1       |ACC          |[ACC元件总故障]                                                                                          |
            |HXD1D-1001|1       |ACC          |[ACC的U相上管故障, ACC的U相上管故障, ACC的V相上管故障, ACC元件总故障, ACC的W相下管故障, ACC的V相下管故障]|
            |HXD1D-1001|1       |L1           |[L1元件总故障, L1A相下管故障, L1A相下管故障]                                                             |
            |HXD1D-1002|1       |M3           |[M3A相上管故障]                                                                                          |
            +----------+--------+-------------+---------------------------------------------------------------------------------------------------------+
         */

        // 对输出结果进行排序,并归类情况.
          /*
            TODO 先创建了一个 sortOrderMap，用于存储不同 gzmc_group_id 对应的排序规则。接下来，我们创建了一个名为 sortAggregatedPhmGzmc 的 UDF，
                 并在 SparkSession 中注册它。这个 UDF 接受两个参数，gzmc_group_id 和 aggregated_phm_gzmc，并根据 gzmc_group_id 对应的排序规则对 aggregated_phm_gzmc 进行排序。
                 最后，我们使用 withColumn 函数和 callUDF 函数应用这个 UDF 到输入数据集的 aggregated_phm_gzmc 列，生成一个新的列 `sorted_aggregated
         */

        /**
         * 注册一个UDF,这里主要用来对相同会话中的故障数据进行指定顺序排序.
         */
        spark.udf().register("sortAggregatedPhmGzmc", new UDF2<String, WrappedArray<String>, String>() {
            @Override
            public String call(String gzmcGroupId, WrappedArray<String> aggregatedPhmGzmc) throws Exception {
                //List<String> aggregatedPhmGzmcList = JavaConverters.seqAsJavaList(aggregatedPhmGzmc);
                List<String> aggregatedPhmGzmcList = JavaConverters.seqAsJavaListConverter(aggregatedPhmGzmc).asJava();
                Map<String, List<String>> sortOrderMap = new HashMap<>();
                sortOrderMap.put("ACC", Arrays.asList("ACC元件总故障", "ACC的U相上管故障", "ACC的U相下管故障", "ACC的V相上管故障", "ACC的V相下管故障", "ACC的W相上管故障", "ACC的W相下管故障"));
                sortOrderMap.put("L1", Arrays.asList("L1元件总故障", "L1A相上管故障", "L1A相下管故障", "L1B相上管故障", "L1B相下管故障", "L1C相上管故障", "L1C相下管故障", "L1D相上管故障", "L1D相下管故障"));
                sortOrderMap.put("L2", Arrays.asList("L2元件总故障", "L2A相上管故障", "L2A相下管故障", "L2B相上管故障", "L2B相下管故障", "L2C相上管故障", "L2C相下管故障", "L2D相上管故障", "L2D相下管故障"));
                sortOrderMap.put("L3", Arrays.asList("L3元件总故障", "L3A相上管故障", "L3A相下管故障", "L3B相上管故障", "L3B相下管故障", "L3C相上管故障", "L3C相下管故障", "L3D相上管故障", "L3D相下管故障"));
                sortOrderMap.put("M1", Arrays.asList("M1元件总故障", "M1A相上管故障", "M1A相下管故障", "M1B相上管故障", "M1B相下管故障", "M1C相上管故障", "M1C相下管故障", "M1D相上管故障", "M1D相下管故障"));
                sortOrderMap.put("M2", Arrays.asList("M2元件总故障", "M2A相上管故障", "M2A相下管故障", "M2B相上管故障", "M2B相下管故障", "M2C相上管故障", "M2C相下管故障", "M2D相上管故障", "M2D相下管故障"));
                sortOrderMap.put("M3", Arrays.asList("M3元件总故障", "M3A相上管故障", "M3A相下管故障", "M3B相上管故障", "M3B相下管故障", "M3C相上管故障", "M3C相下管故障", "M3D相上管故障", "M3D相下管故障"));

                if (sortOrderMap.containsKey(gzmcGroupId)) {
                    List<String> sortOrder = sortOrderMap.get(gzmcGroupId);
                    List<String> sortedList = new ArrayList<String>(sortOrder);
                    sortedList.retainAll(aggregatedPhmGzmcList);
                    return String.join(", ", sortedList);
                } else {
                    return String.join(", ", aggregatedPhmGzmcList);
                }
            }
        }, DataTypes.StringType);
        // 使用 UDF 对 inputDS 的 aggregated_phm_gzmc 列进行排序
        Dataset<Row> sortedDS = aggregatedData.withColumn("sorted_aggregated_phm_gzmc", callUDF("sortAggregatedPhmGzmc", col("gzmc_group_id"), col("aggregated_phm_gzmc")));
        /*
            |s_train_id|phm_bjwz|gzmc_group_id|aggregated_phm_gzmc                                                                                      |sorted_aggregated_phm_gzmc                                                           |
            +----------+--------+-------------+---------------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------+
            |HXD1D-1009|saasfasd|L2           |[L2A相上管故障]                                                                                          |L2A相上管故障                                                                        |
            |HXD1D-1009|saasfasd|ACC          |[ACC的U相上管故障]                                                                                       |ACC的U相上管故障                                                                     |
            |HXD1D-1019|1       |L2           |[L2A相下管故障, L2A相上管故障, L2元件总故障, L2元件总故障]                                               |L2元件总故障, L2A相上管故障, L2A相下管故障                                           |
            |HXD1D-1019|1       |M3           |[M3元件总故障, M3A相下管故障]                                                                            |M3元件总故障, M3A相下管故障                                                          |
            |HXD1D-1009|1       |ACC          |[ACC元件总故障]                                                                                          |ACC元件总故障                                                                        |
            |HXD1D-1001|1       |ACC          |[ACC的U相上管故障, ACC的U相上管故障, ACC的V相上管故障, ACC元件总故障, ACC的W相下管故障, ACC的V相下管故障]|ACC元件总故障, ACC的U相上管故障, ACC的V相上管故障, ACC的V相下管故障, ACC的W相下管故障|
            |HXD1D-1001|1       |L1           |[L1元件总故障, L1A相下管故障, L1A相下管故障]                                                             |L1元件总故障, L1A相下管故障                                                          |
            |HXD1D-1002|1       |M3           |[M3A相上管故障]                                                                                          |M3A相上管故障                                                                        |
            +----------+--------+-------------+---------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------
        */
         /*
            TODO 获取当前实时报警数据的 同相上下管故障+A、B相上下管同时故障的数据+C、D相上下管同时故障的数据
                首先检查输入字符串是否包含总故障、相上管故障和相下管故障。
                接着，我们在相上管故障和相下管故障的子串中寻找匹配的字符对。如果存在多个匹配的字符对，则将hasMultiplePairs设置为true。
                最后，我们返回总故障是否存在、是否存在匹配的字符对以及是否没有多个匹配的字符对。这样，我们就可以排除具有多个相同字母的同向上下管故障的数据。
          */
        UDF1<String, Boolean> checkSubstring = new UDF1<String, Boolean>() {
            public Boolean call(String input) {
                List<String> substrings = Arrays.stream(input.split(","))
                        .map(String::trim)
                        .collect(Collectors.toList());
                boolean totalFault = substrings.stream().anyMatch(s -> s.contains("总故障"));
                List<String> upFaults = substrings.stream()
                        .filter(s -> s.matches(".*[A-Za-z]相上管故障"))
                        .collect(Collectors.toList());
                List<String> downFaults = substrings.stream()
                        .filter(s -> s.matches(".*[A-Za-z]相下管故障"))
                        .collect(Collectors.toList());
                boolean hasMatchingPair = false;
                boolean hasMultiplePairs = false;
                for (String upFault : upFaults) {
                    String upFaultLetter = upFault.replaceAll("[^A-Za-z]", "");
                    int matchingDownFaults = 0;
                    for (String downFault : downFaults) {
                        String downFaultLetter = downFault.replaceAll("[^A-Za-z]", "");
                        if (upFaultLetter.equals(downFaultLetter)) {
                            matchingDownFaults++;
                            hasMatchingPair = true;
                        }
                    }
                    if (matchingDownFaults > 1) {
                        hasMultiplePairs = true;
                        break;
                    }
                }
                return totalFault && hasMatchingPair && !hasMultiplePairs;
            }
        };
        UserDefinedFunction checkSubstringUDF = functions.udf(checkSubstring, DataTypes.BooleanType);
        Dataset<Row> sortedAggregatedPhmGzmc = sortedDS.filter(checkSubstringUDF.apply(sortedDS.col("sorted_aggregated_phm_gzmc")));

        //定义一个UDF,将当前的排序结果中`sorted_aggregated_phm_gzmc`补充完整
        // 定义一个UDF，将给定的前缀和phm_bjwz添加到逗号分隔的字符串中的每个元素上
        UDF2<String, String, String> concatPrefix = (phm_bjwz, s) -> {
            List<String> elements = Arrays.asList(s.split(","));
            return elements.stream()
                    .map(e -> "主变流器" + phm_bjwz + e.trim())
                    .collect(Collectors.joining(", "));
        };

        // 注册UDF
        UserDefinedFunction concatPrefixUDF = functions.udf(concatPrefix, DataTypes.StringType);
        // 将sorted_aggregated_phm_gzmc这个字段补充完整: 主变流器1L1元件总故障, 主变流器1L1C相上管故障, 主变流器1L1C相下管故障, 主变流器1L1D相上管故障, 主变流器1L1D相下管故障
        Dataset<Row> concatPrefixDS = sortedAggregatedPhmGzmc.withColumn("sorted_aggregated_phm_gzmc", concatPrefixUDF.apply(col("phm_bjwz"), col("sorted_aggregated_phm_gzmc")));
        /*
            +----------+--------+-------------+----------------------------------------------------------------------+---------------------------------------------------------------------------------+
            |s_train_id|phm_bjwz|gzmc_group_id|aggregated_phm_gzmc                                                   |sorted_aggregated_phm_gzmc                                                       |
            +----------+--------+-------------+----------------------------------------------------------------------+---------------------------------------------------------------------------------+
            |HXD1D-1030|1       |L1           |[L1C相下管故障, L1C相上管故障, L1元件总故障, L1D相下管故障, L1D相上管故障]                     |主变流器1L1元件总故障, 主变流器1L1C相上管故障, 主变流器1L1C相下管故障, 主变流器1L1D相上管故障, 主变流器1L1D相下管故障         |
            |HXD1D-1019|1       |L2           |[L2A相下管故障, L2A相上管故障, L2元件总故障, L2元件总故障]                                |主变流器1L2元件总故障, 主变流器1L2A相上管故障, 主变流器1L2A相下管故障                                       |
            |HXD1D-1001|1       |ACC          |[ACC的U相上管故障, ACC的U相上管故障, ACC的V相上管故障, ACC元件总故障, ACC的W相下管故障, ACC的V相下管故障]|主变流器1ACC元件总故障, 主变流器1ACC的U相上管故障, 主变流器1ACC的V相上管故障, 主变流器1ACC的V相下管故障, 主变流器1ACC的W相下管故障|
            |HXD1D-1066|1       |L1           |[L1元件总故障, L1A相下管故障, L1B相上管故障, L1B相下管故障, L1A相上管故障]                     |主变流器1L1元件总故障, 主变流器1L1A相上管故障, 主变流器1L1A相下管故障, 主变流器1L1B相上管故障, 主变流器1L1B相下管故障         |
            |HXD1D-1050|1       |L2           |[L2元件总故障, L2D相上管故障, L2C相上管故障, L2C相下管故障, L2D相下管故障]                     |主变流器1L2元件总故障, 主变流器1L2C相上管故障, 主变流器1L2C相下管故障, 主变流器1L2D相上管故障, 主变流器1L2D相下管故障         |
            +----------+--------+-------------+----------------------------------------------------------------------+---------------------------------------------------------------------------------+
         */
        // 定义正则表达式,过滤出:A、B相上下管同时故障的数据.
        String regexAB = "^主变流器[12]\\s*[LM][123]元件总故障\\s*,\\s*主变流器[12]\\s*[LM][123]A相上管故障\\s*,\\s*主变流器[12]\\s*[LM][123]A相下管故障\\s*,\\s*主变流器[12]\\s*[LM][123]B相上管故障\\s*,\\s*主变流器[12]\\s*[LM][123]B相下管故障$";
        // 定义正则表达式,过滤出:C、D相上下管同时故障的数据.
        String regexCD = "^主变流器[12]([LM][123]元件总故障)\\s*,\\s*主变流器[12]([LM][123]C相上管故障)\\s*,\\s*主变流器[12]([LM][123]C相下管故障)\\s*,\\s*主变流器[12]([LM][123]D相上管故障)\\s*,\\s*主变流器[12]([LM][123]D相下管故障)$";

        // 使用正则表达式过滤 sorted_aggregated_phm_gzmc 字段
        Column regexFilterAB = functions.col("sorted_aggregated_phm_gzmc").rlike(regexAB);
        Column regexFilterCD = functions.col("sorted_aggregated_phm_gzmc").rlike(regexCD);
        Dataset<Row> abDS = concatPrefixDS.filter(regexFilterAB);
        Dataset<Row> abDSFinal = abDS.withColumn("jgmc", lit("A、B相上下管同时故障"))
                .withColumn("xxms", col("sorted_aggregated_phm_gzmc"))
                .withColumn("czjy", lit("检查或更换对应模块的电源模块"));
         /*
            +----------+--------+-------------+--------------------------------------------------------------------------+------------------------------------------------------------------------+--------------------+-----------------------------------------------------------------------------------------------------------+----------------------------+
            |s_train_id|phm_bjwz|gzmc_group_id|aggregated_phm_gzmc                                                       |sorted_aggregated_phm_gzmc                                              |jgmc                |xxms                                                                                                       |czjy                        |
            +----------+--------+-------------+--------------------------------------------------------------------------+------------------------------------------------------------------------+--------------------+-----------------------------------------------------------------------------------------------------------+----------------------------+
            |HXD1D-1066|1       |L1           |[L1元件总故障, L1A相下管故障, L1B相上管故障, L1B相下管故障, L1A相上管故障]|L1元件总故障, L1A相上管故障, L1A相下管故障, L1B相上管故障, L1B相下管故障|A、B相上下管同时故障|变流器1L1元件总故障, 变流器1L1A相上管故障, 变流器1L1A相下管故障, 变流器1L1B相上管故障, 变流器1L1B相下管故障|检查或更换对应模块的电源模块|
            +----------+--------+-------------+--------------------------------------------------------------------------+------------------------------------------------------------------------+--------------------+-----------------------------------------------------------------------------------------------------------+----------------------------+
         */
        Dataset<Row> cdDS = concatPrefixDS.filter(regexFilterCD);
        Dataset<Row> cdDSFinal = cdDS.withColumn("jgmc", lit("C、D相上下管同时故障"))
                .withColumn("xxms", col("sorted_aggregated_phm_gzmc"))
                .withColumn("czjy", lit("检查或更换对应模块的电源模块"));
        /*
        +----------+--------+-------------+-------------------------------------------------+------------------------------------------------------------------------+-----------+------------------------------------------------------------------------+--------------+
        |s_train_id|phm_bjwz|gzmc_group_id|aggregated_phm_gzmc                              |sorted_aggregated_phm_gzmc                                              |jgmc       |xxms                                                                    |czjy          |
        +----------+--------+-------------+-------------------------------------------------+------------------------------------------------------------------------+-----------+------------------------------------------------------------------------+--------------+
        |HXD1D-1030|1       |L1           |[L1C相下管故障, L1C相上管故障, L1元件总故障, L1D相下管故障, L1D相上管故障]|主变流器1L1元件总故障, 主变流器1L1C相上管故障, 主变流器1L1C相下管故障, 主变流器1L1D相上管故障, 主变流器1L1D相下管故障|A、B相上下管同时故障|主变流器1L1元件总故障, 主变流器1L1C相上管故障, 主变流器1L1C相下管故障, 主变流器1L1D相上管故障, 主变流器1L1D相下管故障|检查或更换对应模块的电源模块|
        |HXD1D-1050|1       |L2           |[L2元件总故障, L2D相上管故障, L2C相上管故障, L2C相下管故障, L2D相下管故障]|主变流器1L2元件总故障, 主变流器1L2C相上管故障, 主变流器1L2C相下管故障, 主变流器1L2D相上管故障, 主变流器1L2D相下管故障|A、B相上下管同时故障|主变流器1L2元件总故障, 主变流器1L2C相上管故障, 主变流器1L2C相下管故障, 主变流器1L2D相上管故障, 主变流器1L2D相下管故障|检查或更换对应模块的电源模块|
        +----------+--------+-------------+-------------------------------------------------+------------------------------------------------------------------------+-----------+------------------------------------------------------------------------+--------------+
         */
        // 当前的总结果 差集 abDS∩cdDS 的结果就是 同向上下管故障.
        Dataset<Row> sameDSFinal = concatPrefixDS.except(abDS.union(cdDS))
                .withColumn("jgmc", lit("同相上下管故障"))
                //.withColumn("mxms", concat_ws(", ", expr("transform(split(sorted_aggregated_phm_gzmc, ', '), x -> concat('变流器', phm_bjwz, x))")))
                .withColumn("xxms", col("sorted_aggregated_phm_gzmc"))
                .withColumn("czjy", lit("检查或更换对应模块的电源模块"));
        // 单管故障的处理:一个总故障+任意一条单管故障. 这里为了不丢数据,也会把: L2A相上管故障 | ACC的U相上管故障 | M3元件总故障, M3A相下管故障 这种数据都算成 单管故障进行入库.
        // sortedDS.alias("t1").join(sortedAggregatedPhmGzmc.alias("t2"), expr("t1.s_train_id = t2.s_train_id and t1.phm_bjwz = t2.phm_bjwz"), "left_anti").show(false);

        Dataset<Row> dgDSFinal = sortedDS.except(sortedAggregatedPhmGzmc)
                .withColumn("jgmc", lit("单管故障"))
                //.withColumn("mxms", concat_ws(", ", expr("transform(split(sorted_aggregated_phm_gzmc, ', '), x -> concat('变流器', phm_bjwz, x))")))
                .withColumn("xxms", col("sorted_aggregated_phm_gzmc"))
                .withColumn("czjy", lit("检查或更换对应模块的电源模块"));
        Dataset<Row> conditionsUnionDS = abDSFinal.union(cdDSFinal).union(sameDSFinal).union(dgDSFinal).selectExpr("s_train_id", "phm_bjwz", "s_fault_time", "jgmc", "xxms", "czjy");


        // 创建 UDF 生成 UUID
        UserDefinedFunction generateUUID = udf((input) -> UUID.randomUUID().toString().substring(0, 16), DataTypes.StringType);
        // 注册 UDF
        spark.udf().register("generateUUID", generateUUID);
        HashMap<String, String> updateColName = new HashMap<>();
        Dataset<Row> resultInAdsPhmFault = inputDataRealFaults.selectExpr("s_train_id", "s_train_name", "s_train_type_code", "s_fault_source", "s_fault_sys", "s_fault_sys_code1", "s_fault_sys_code2",
                        "s_fault_bw", "s_fault_bw_code1", "s_fault_bw_code2", "s_fault_code", "s_fault_name", "phm_lb", "phm_bjwz", "zj_ms", "zj_fa", "pjxlh", "pjdjm", "s_htzt", "s_hkzt", "s_xfzt")
                .dropDuplicates().alias("t1").join(conditionsUnionDS.withColumn("mxjg", lit("元件故障")).alias("t2"), expr("t1.s_train_id = t2.s_train_id and t1.phm_bjwz = t2.phm_bjwz"), "inner")
                .withColumn("mx_id", lit("10003"))
                .withColumn("s_fault_id", callUDF("generateUUID", current_timestamp()));
        // 查看最终结果
        Dataset<Row> resFinalAdsPhm = resultInAdsPhmFault.selectExpr("t1.s_train_id", "s_fault_id", "mx_id", "s_train_name", "s_train_type_code", "s_fault_source", "s_fault_time", "s_fault_sys", "s_fault_sys_code1",
                "s_fault_sys_code2", "s_fault_bw", "s_fault_bw_code1", "s_fault_bw_code2", "s_fault_code", "s_fault_name", "phm_lb",
                "xxms as phm_gzmc", "t1.phm_bjwz", "zj_ms", "zj_fa", "pjxlh", "pjdjm", "s_htzt", "s_hkzt", "s_xfzt", "mxjg").union(zbgggDSWithDWDPHMFAULT);

        // TODO 这里就是最终返回的最终结果. ...... ads_phm_fault_2
        //将当前表中的数据直接入到`ads_phm_fault`表中
        resFinalAdsPhm.selectExpr("s_train_name as s_train_id", "s_fault_id", " '10003' as mx_id", "s_train_name", "s_train_type_code", "s_fault_source", "s_fault_time", "s_fault_sys", "s_fault_sys_code1",
                "s_fault_sys_code2", "s_fault_bw", "s_fault_bw_code1", "s_fault_bw_code2", "s_fault_code", "s_fault_name", "phm_lb", "mxjg",
                "phm_gzmc", "phm_bjwz", "zj_ms", "zj_fa", "pjxlh", "pjdjm", "s_htzt", "s_hkzt", "s_xfzt").createOrReplaceTempView("ads_phm_fault_2");
        //MysqlUtils_2.writeDataToMysqlTable(resFinalAdsPhm, "ads_phm_fault", SaveMode.Append);

        // TODO 这里是最终返回的最终结果. ........ t_phm_sqwx
        Dataset<Row> rowDataset = inputDataRealFaults
                // id 需要后续生成
                .selectExpr("s_train_id as s_train_id_2", "cx", "lj", "ljjc", "ljm", "psd", "psdjc", "psddm", "ssxt", "xtpjm", "ssbj", "bjpjm", "bjwz as bjwz_2", "bjxlh", "bjdjm", "xfzt", "htzt", "hkzt", "status", "cksj").dropDuplicates();

        // 当前实时数据中模型名称和结果描述的聚合结果......
        Dataset<Row> ResFinalSqwx = conditionsUnionDS
                .withColumnRenamed("phm_bjwz", "bjwz")
                .withColumnRenamed("jgmc", "mxmc")
                .withColumnRenamed("xxms", "mxms")
                .groupBy(col("s_train_id"), col("bjwz"), col("czjy"))
                .agg(concat_ws(",", collect_set(col("mxmc"))).alias("mxmc"),
                        concat_ws(",", collect_set(col("mxms")).alias("mxms")).alias("mxms")
                );

        Dataset<Row> ResFinalSqwx_Zbg = zbgggDSWithSQWX.selectExpr("s_train_id", "bjwz", "mxmc", "mxms").withColumn("czjy", lit("检查或更换对应模块的脉冲/驱动板、IGBT"))
                .groupBy(col("s_train_id"), col("bjwz"), col("czjy"))
                .agg(concat_ws(",", collect_set(col("mxmc"))).alias("mxmc1"), concat_ws(",", collect_set(col("mxms"))).alias("mxms"));
        Dataset<Row> zbgAggUnoinOtherAggDS = ResFinalSqwx.union(ResFinalSqwx_Zbg);
        Dataset<Row> rowDataset1 = zbgAggUnoinOtherAggDS.alias("t1").join(rowDataset.alias("t2"), expr("t1.s_train_id = t2.s_train_id_2 and t1.bjwz = t2.bjwz_2"), "left")
                .withColumn("create_time", current_timestamp())
                .withColumn("id", concat_ws("-", col("s_train_id"), lit("10003"), col("cksj")))
                .withColumn("czjy", lit("检查或更换对应模块的脉冲/驱动板、IGBT"));

        Dataset<Row> rowDataset2 = selectColumnsWithSQWX(rowDataset1);
        Dataset<Row> rowDataset3 = rowDataset2
                .withColumn("mxmc", rowDataset2.col("mxmc"))
                .withColumn("mxms", rowDataset2.col("mxms"))
                .withColumn("mx", lit("元件故障"))
                .withColumn("czjy", lit("检查或更换对应模块的脉冲/驱动板、IGBT"));


        // TODO 这里是最终插入到 `t_phm_sqwx` 表中的数据

        // MysqlUtils_2.upsertDatasetToMySQL(rowDataset3, "t_phm_sqwx");
        rowDataset3.selectExpr("concat_ws('-',id,bjwz) as id", "s_train_id", "cx", "lj", "ljjc", "ljm", "psd", "psdjc", "psddm", "ssxt", "xtpjm", "ssbj", "bjpjm", "bjwz", "bjxlh", "bjdjm", "mx", "mxmc", "mxms", "xfzt", "htzt", "hkzt", "status", "czjy", "create_time")
                .createOrReplaceTempView("t_phm_sqwx_2");
        // ====================================================================================================下面是健康评估的逻辑 :
        /*
            TODO 处理逻辑: 抓起到`ads_phm_fault`的当前躺的历史数据(当前躺的定义: S_HKZT=0 )
                返回值结果:车型-车号:部件PJM:位置:模型代码:评估结果.
                S_TRAIN_ID:S_FAULT_BW_CODE2:phm_bjwz:10003:
         */
        // 这里的健康评估逻辑: S_TRAIN_ID、S_FAULT_BW_CODE2、phm_bjwz进行聚合,统计phm_gzmc的个数,如果
        // ====================================================================================================上面是健康评估的逻辑 :
    }


    /**
     * Traction-converter "module overheat" condition-based-maintenance model.
     * Detects the combinations: all modules overheated, first-stage module overheat,
     * second-stage module overheat, third-stage module overheat, and auxiliary-inverter
     * module overheat, then stages the results into the temp views
     * `t_phm_sqwx_3` (condition-based maintenance) and `ads_phm_fault_3` (fault table).
     *
     * @param spark     active SparkSession
     * @param inputData current batch of real-time fault rows (already enriched with model-dictionary fields)
     * @throws IOException    propagated from the MySQL table reader
     * @throws ParseException if the history table's max `s_fault_time` string does not match "yyyy-MM-dd HH:mm:ss"
     */
    public static void processFaultType3(SparkSession spark, Dataset<Row> inputData) throws IOException, ParseException {
        // Keep only the "converter module overheat" alarms from the incoming batch.
        Dataset<Row> overheatDSRealFault = inputData.filter(col("phm_lb").equalTo("变流器模块过热报警"));
        // Base t_phm_sqwx columns of the current batch; `_2` suffixes avoid join ambiguity later.
        Dataset<Row> sqwxBaseInfo = overheatDSRealFault.selectExpr("s_train_id as s_train_id_2", "cx", "lj", "ljjc", "ljm", "psd", "psdjc", "psddm", "ssxt", "xtpjm",
                "ssbj", "bjpjm", "bjwz as bjwz_2", "bjxlh", "bjdjm", "xfzt", "htzt", "hkzt", "status", "cksj").dropDuplicates();
        // After the aggregation below these columns get added: id = s_train_id-"1022"-cksj | mx | mxms | mxmc | create_time.
        // s_fault_id is randomly generated; plus mx_id | phm_gzmc | s_fault_time.
        Dataset<Row> adsPhmFaultInfo = overheatDSRealFault.selectExpr("s_train_id", "s_train_name", "s_train_type_code", "s_fault_source", "s_fault_sys", "s_fault_sys_code1", "s_fault_sys_code2",
                "s_fault_bw", "s_fault_bw_code1", "s_fault_bw_code2", "s_fault_code", "phm_lb", "phm_bjwz as phm_bjwz_2", "zj_ms", "zj_fa", "pjxlh", "pjdjm", "s_htzt", "s_hkzt", "s_xfzt").dropDuplicates();
        // Base ads_phm_fault columns taken from the real-time data.

        // Sessionize the real-time data: faults within 10 seconds fall into one window and the
        // combination inside each window is evaluated. Before processing the current batch, pull
        // history from `ads_phm_fault` from its max `s_fault_time` back 10 seconds so sessions
        // that straddle a batch boundary are stitched together.
        Dataset<Row> lastBatchTenDS = MysqlUtils_2.readMysqlTable(spark, "ads_phm_fault")
                // Filter: previous-batch overheat alarms of the current trip (s_hkzt = 0).
                .filter(col("phm_lb").equalTo("变流器模块过热报警").and(col("s_hkzt").equalTo(lit("0"))))
                .selectExpr("s_train_id", "mx_id", "mxjg", "phm_gzmc", "s_fault_time", "phm_bjwz");


        // Real-time fault data of the current batch.
        Dataset<Row> localBatchRealFaults = overheatDSRealFault.selectExpr("s_train_id", "mx_id", "mxmc as mxjg", "phm_gzmc", "s_fault_time", "phm_bjwz");
        Dataset<Row> allProcessRealFaults = null;
        if (lastBatchTenDS.count() > 0) {
            String lastBatchMaxFaultTimeStr = lastBatchTenDS
                    .agg(max("s_fault_time")).collectAsList().get(0).getString(0); // maximum s_fault_time in the history table
            // Time format used to parse the string timestamp.
            SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
            // Parse the string-typed max time into a Timestamp.
            Timestamp maxTime = new Timestamp(dateFormat.parse(lastBatchMaxFaultTimeStr).getTime());
            // Convert the max time to epoch millis and subtract 10 seconds.
            long maxTimeUnix = maxTime.getTime() - (10 * 1000);
            // Convert the epoch millis back into a Timestamp.
            Timestamp maxTimeMinus10Seconds = new Timestamp(maxTimeUnix);
            // Keep rows whose s_fault_time falls within the trailing 10 seconds of history.
            Column filterCondition = expr("s_fault_time >= '" + dateFormat.format(maxTimeMinus10Seconds) + "'");
            // Rows from the last 10 seconds of the previous batch.
            Dataset<Row> lastBatch10SDs = lastBatchTenDS.filter(filterCondition);
            allProcessRealFaults = lastBatch10SDs.union(localBatchRealFaults);
        } else {
            // History table is empty: process only the current batch's real-time faults.
            allProcessRealFaults = localBatchRealFaults;
        }
        // Strip the converter index prefix ("变流器1"/"变流器2") so fault names from both converters aggregate together.
        allProcessRealFaults = allProcessRealFaults.withColumn("phm_gzmc", regexp_replace(col("phm_gzmc"), "变流器[1-2]", ""));
        // Window: per train and component position, ordered by fault time.
        WindowSpec windowSpec = Window.partitionBy(col("s_train_id"), col("phm_bjwz")).orderBy("s_fault_time");

        // Seconds between each row and the previous row inside the window.
        Dataset<Row> allProcessRealFaultsSecondDiff = allProcessRealFaults.withColumn(
                "time_diff",
                unix_timestamp(col("s_fault_time")).minus(lag(unix_timestamp(col("s_fault_time")), 1).over(windowSpec))
        );

        // A gap of >= 10 seconds (or the first row, where time_diff is null) starts a new session.
        // NOTE(review): the original comment said 5 seconds; the code clearly uses 10 — kept as coded.
        Dataset<Row> allProcessRealFaultsSessionFlag = allProcessRealFaultsSecondDiff.withColumn(
                "is_new_session",
                when(col("time_diff").isNull().or(col("time_diff").geq(10)), 1).otherwise(0)
        );
        // Running sum of session starts yields the session id.
        Dataset<Row> allProcessRealFaultsSessionId = allProcessRealFaultsSessionFlag.withColumn(
                "session_id",
                sum("is_new_session").over(windowSpec)
        );


        // Note the pattern: first `selectExpr` the needed columns, then group/aggregate.
        Dataset<Row> resAggAllDs = allProcessRealFaultsSessionId.selectExpr("s_train_id", "mx_id", "phm_bjwz", "session_id", "mxjg", "phm_gzmc", "s_fault_time")
                .groupBy(col("s_train_id"), col("mx_id"), col("mxjg"), col("phm_bjwz"), col("session_id"))
                .agg(
                        collect_set(col("phm_gzmc")).alias("phm_gzmc_array"), // alias required after aggregation
                        min(col("s_fault_time")).alias("min_fault_time")
                );
        // Aggregated result.

        // After aggregation the columns are: s_train_id, mx_id, bjwz, session_id, phm_gzmc_array, min_fault_time.
        Dataset<Row> conditionResAllDs = resAggAllDs
                .withColumn("element_nums", size(col("phm_gzmc_array")))
                .withColumn("gzmc", concat_ws(",", col("phm_gzmc_array")));

        Dataset<Row> finalResDS = conditionResAllDs.withColumn("condition",
                when(col("element_nums").geq(2), lit("所有模块均过热")) // >= 2 distinct fault names in one session => "all modules overheated"
                        .otherwise(concat(col("gzmc"))) // otherwise keep the concrete fault name
        ).drop("phm_gzmc_array");

        /* Example output:
            +----------+-----+----+----+----------+-------------------+------------+---------------+---------+
            |s_train_id|mx_id|mxjg|phm_bjwz|session_id|min_fault_time     |element_nums|gzmc           |condition|
            +----------+-----+----+----+----------+-------------------+------------+---------------+---------+
            |HXD1D-1066|10022|模块过热|1   |1         |2022-03-17 10:00:19|2           |第一重模块过热,第二重模块过热|所有模块均过热  |
            |HXD1D-1066|10022|模块过热|1   |2         |2022-03-17 10:00:34|1           |第三重模块过热        |第三重模块过热  |
            |HXD1D-1066|null |null|1   |3         |2022-03-17 10:00:55|1           |辅逆模块过热         |辅逆模块过热   |
            |HXD1D-1002|10022|模块过热|1   |1         |2023-03-17 10:00:22|1           |第三重模块过热        |第三重模块过热  |
            +----------+-----+----+----+----------+-------------------+------------+---------------+---------+
         */
        // Join back to the base info and prepare the rows to persist.
        Dataset<Row> insertSqwx = finalResDS.alias("t1").join(sqwxBaseInfo.alias("t2"), expr("t1.s_train_id = t2.s_train_id_2 and t1.phm_bjwz = t2.bjwz_2"), "left")
                .withColumn("id", concat_ws("-", col("t1.s_train_id"), col("mx_id"), col("cksj")))
                .withColumn("create_time", current_timestamp())
                // NOTE(review): "czyj" below looks like a typo for "czjy"; this column is never selected
                // downstream (the czjy used later is re-created at the aggregation step) — confirm before fixing.
                .withColumn("czyj", lit("检查对应DIO插件或外部插头，有问题则更换或紧固"))
                .withColumnRenamed("condition", "mxmc")
                .withColumnRenamed("gzmc", "mxms");
        Dataset<Row> rowDataset1 = insertSqwx.withColumn("mxms_1", concat_ws(",", col("mxms")))
                .withColumnRenamed("mxjg", "mx");
        Dataset<Row> sqwxUpsertFinal = rowDataset1.selectExpr("id", "t1.s_train_id", "cx", "lj", "ljjc", "ljm", "psd", "psdjc", "psddm", "ssxt", "xtpjm", "ssbj", "bjpjm", "t1.phm_bjwz", "bjxlh", "bjdjm",
                "mx", "mxmc", "mxms_1 as mxms", "xfzt", "htzt", "hkzt", "status").filter(col("cx").isNotNull());
        Dataset<Row> aggSqwxFinal = sqwxUpsertFinal.groupBy(col("id"), col("s_train_id"), col("cx"), col("lj"), col("ljjc"), col("ljm"), col("psd"), col("psdjc"), col("psddm"),
                        col("ssxt"), col("xtpjm"), col("ssbj"), col("bjpjm"), col("phm_bjwz"), col("bjxlh"), col("bjdjm"), col("xfzt"), col("htzt"), col("hkzt"), col("status"), col("mx"))
                .agg(
                        concat_ws(",", collect_set(col("mxmc"))).alias("mxmc"),
                        concat_ws(",", collect_set(col("mxms"))).alias("mxms")
                ).withColumn("create_time", current_timestamp())
                .withColumn("czjy", lit("检查对应DIO插件或外部插头，有问题则更换或紧固"));

        Dataset<Row> rowDataset2 = aggSqwxFinal.selectExpr("concat_ws('-',id,phm_bjwz) as id", "s_train_id", "cx", "lj", "ljjc", "ljm", "psd", "psdjc", "psddm", "ssxt", "xtpjm", "ssbj", "bjpjm", "phm_bjwz as bjwz", "bjxlh", "bjdjm", "mx", "mxmc", "mxms", "xfzt", "htzt", "hkzt", "status", "czjy", "create_time");
        //MysqlUtils_2.upsertDatasetToMySQL(aggSqwxFinal, "t_phm_sqwx");
        rowDataset2.createOrReplaceTempView("t_phm_sqwx_3");

        // ============================================================================  final rows inserted into the CBM (t_phm_sqwx) table
        //selectColumnsWithSQWX(insertSqwxFinal).filter(expr("cx is not null")).createOrReplaceTempView("t_phm_sqwx_3");
        //MysqlUtils_2.upsertDatasetToMySQL(insertSQWXDS, "t_phm_sqwx");
        // ============================================================================  final rows inserted into the CBM (t_phm_sqwx) table
        // UDF that produces a 16-character random id (first 16 chars of a UUID); the argument is ignored.
        UserDefinedFunction generateUUID = udf((input) -> UUID.randomUUID().toString().substring(0, 16), DataTypes.StringType);
        // Register the UDF.
        spark.udf().register("generateUUID", generateUUID);

        // Filter: rows from the last 10 seconds of the previous batch that did NOT extend into a
        // session in this batch were already handled previously; drop them (done here via the
        // left join + `s_train_name is not null`, which keeps only rows matching the current batch).
        Dataset<Row> insertAdsPhmFault = finalResDS.alias("t1")
                .join(adsPhmFaultInfo.alias("t2"), expr("t1.s_train_id = t2.s_train_id and t1.phm_bjwz = t2.phm_bjwz_2"), "left").drop("s_train_id")
                .filter(expr("s_train_name is not null"));
        Dataset<Row> rowDataset = insertAdsPhmFault
                .withColumnRenamed("min_fault_time", "s_fault_time")
                .withColumnRenamed("condition", "s_fault_name")
                .withColumn("s_fault_id", callUDF("generateUUID", current_timestamp()))
                .withColumnRenamed("gzmc", "phm_gzmc")
                .withColumn("mx_id", lit("10022"));

        // ================================ final rows inserted into the ads_phm_fault table =========================================
        rowDataset.selectExpr("s_train_name as s_train_id", "s_fault_id",
                "case when mx_id is not null then mx_id else '333' end mx_id", "s_train_name", "s_train_type_code", "s_fault_source", "s_fault_time", "s_fault_sys", "s_fault_sys_code1",
                "s_fault_sys_code2", "s_fault_bw", "s_fault_bw_code1", "s_fault_bw_code2", "s_fault_code", "s_fault_name", "phm_lb", "mxjg",
                "phm_gzmc", "phm_bjwz", "zj_ms", "zj_fa", "pjxlh", "pjdjm", "s_htzt", "s_hkzt", "s_xfzt").createOrReplaceTempView("ads_phm_fault_3");
        // MysqlUtils_2.writeDataToMysqlTable(rowDataset1, "ads_phm_fault", SaveMode.Append);
        // ================================ final rows inserted into the ads_phm_fault table =========================================
    }


    /**
     * Returns {@code true} when the input contains a "total element fault" for some module
     * (ACC, L1–L3 or M1–M3) AND at least one module/phase has both an upper (上) and a
     * lower (下) tube fault, e.g. "L2元件总故障, L2A相上管故障, L2A相下管故障".
     *
     * <p>Currently unused: the original author felt the consecutive-day criterion was
     * unreasonable. Kept in case the logic is revisited later.
     *
     * @param input aggregated fault-name string (comma-separated fault names); null is treated as no match
     * @return true if a total element fault is present and some module/phase has faults on both tube sides
     */
    public static boolean isValid(String input) {
        if (input == null) {
            // Robustness: the previous version threw NullPointerException on null input.
            return false;
        }
        // A "total element fault" must be present, otherwise nothing else matters —
        // and we can skip scanning for tube faults entirely.
        Pattern mainPattern = Pattern.compile("(ACC|L[123]|M[123])元件总故障");
        if (!mainPattern.matcher(input).find()) {
            return false;
        }
        // group(2): module id (e.g. "L2" or "ACC"); group(3): phase letter; group(4): 上 (upper) / 下 (lower).
        Pattern subPattern = Pattern.compile("((ACC|[LM]\\d)([A-D]|U|V|W)相(上|下)管故障)");
        Matcher subMatcher = subPattern.matcher(input);
        // Track which module/phase keys have been seen on each side; return as soon as one
        // key appears on both sides (early exit instead of building nested count maps).
        Set<String> upperSeen = new HashSet<>();
        Set<String> lowerSeen = new HashSet<>();
        while (subMatcher.find()) {
            String modulePhase = subMatcher.group(2) + subMatcher.group(3);
            if ("上".equals(subMatcher.group(4))) {
                if (lowerSeen.contains(modulePhase)) {
                    return true;
                }
                upperSeen.add(modulePhase);
            } else {
                if (upperSeen.contains(modulePhase)) {
                    return true;
                }
                lowerSeen.add(modulePhase);
            }
        }
        return false;
    }

    /**
     * Projects a wide dataset down to exactly the columns persisted to the
     * condition-based-maintenance table (t_phm_sqwx).
     *
     * @param inputDS wide dataset containing at least all of the listed columns
     * @return dataset holding only the t_phm_sqwx insert columns
     */
    public static Dataset<Row> selectColumnsWithSQWX(Dataset<Row> inputDS) {
        // Plain column names in selectExpr resolve exactly like col(...) references.
        return inputDS.selectExpr(
                "id", "s_train_id", "cx", "lj", "ljjc", "ljm", "psd", "psdjc", "psddm",
                "ssxt", "xtpjm", "ssbj", "bjpjm", "bjwz", "bjxlh", "bjdjm", "mxms", "mxmc",
                "create_time", "xfzt", "htzt", "hkzt", "status");
    }

    /**
     * Selects the ads_phm_fault insert columns from a wide dataset, optionally renaming
     * source columns first. Usage example:
     * <p>
     * Map<String, String> columnMapping = new HashMap<>();
     * columnMapping.put("s_train_id_2", "s_train_id"); // old name -> new name
     * Dataset<Row> selected = selectColumnsAdsPhmFault(inputDS, columnMapping);
     *
     * @param inputDS       wide dataset to project
     * @param columnMapping old-name -> new-name pairs applied before the projection
     * @return dataset holding only the ads_phm_fault insert columns
     */
    public static Dataset<Row> selectColumnsAdsPhmFault
    (Dataset<Row> inputDS, Map<String, String> columnMapping) {
        // Apply the requested renames. Any pre-existing column with the target name is
        // dropped first so the rename cannot create a duplicate/ambiguous column.
        Dataset<Row> renamed = inputDS;
        for (Map.Entry<String, String> mapping : columnMapping.entrySet()) {
            renamed = renamed
                    .drop(mapping.getValue())
                    .withColumnRenamed(mapping.getKey(), mapping.getValue());
        }
        // Plain column names in selectExpr resolve exactly like col(...) references.
        return renamed.selectExpr(
                "s_train_id", "s_fault_id", "mx_id", "s_train_name", "s_train_type_code",
                "s_fault_source", "s_fault_time", "s_fault_sys", "s_fault_sys_code1", "s_fault_sys_code2", "s_fault_bw",
                "s_fault_bw_code1", "s_fault_bw_code2", "s_fault_code", "s_fault_name", "phm_lb", "phm_gzmc",
                "phm_bjwz", "zj_ms", "zj_fa", "pjxlh", "pjdjm", "s_htzt", "s_hkzt", "s_xfzt");
    }


    /***
     * Reads the current batch of real-time TCMS data delivered via Kafka and joins it with
     * the model dictionary table `t_phm_mxxq` to pick up the fields needed downstream.
     * @param spark   active SparkSession
     * @param jsonRDD raw JSON fault records of the current batch
     * @return current batch of real-time TCMS alarm data, enriched with model-dictionary columns
     * @throws IOException propagated from the MySQL table reader
     */
    public static Dataset<Row> processJsonRDD(SparkSession spark, JavaRDD<String> jsonRDD) throws IOException {
        // Model dictionary table.
        Dataset<Row> mxxqDS = MysqlUtils_2.readMysqlTable(spark, "t_phm_mxxq");
        Dataset<Row> tcmsRealFaultsDS = spark.read().json(jsonRDD)
                .select(
                        // ---- columns for the condition-based-maintenance (t_phm_sqwx) table ----
                        // train type-number (train id)
                        col("s_train_id"),
                        // train type
                        col("cx"),
                        // railway bureau
                        col("lj"),
                        // railway bureau abbreviation
                        col("ljjc"),
                        // railway bureau code
                        col("lj_id").alias("ljm"),
                        // assigned depot
                        col("psd"),
                        // depot abbreviation
                        col("psdjc"),
                        // depot code
                        col("psd_id").alias("psddm"),
                        // owning system
                        col("ssxt").alias("ssxt"),
                        // system PJ code
                        col("xtpjm").alias("xtpjm"),
                        // owning component
                        col("ssbj").alias("ssbj"),
                        // component PJ code
                        col("bjpjm").alias("bjpjm"),
                        // component position
                        col("phm_bjwz").alias("bjwz"),
                        // component serial number
                        col("pjxlh").alias("bjxlh"),
                        // component unit code
                        col("pjdjm").alias("bjdjm"),
                        // dispatch status (defaults to "0")
                        lit("0").alias("xfzt"),
                        // backfill status (defaults to "0")
                        lit("0").alias("htzt"),
                        // return-to-depot status (defaults to "0")
                        lit("0").alias("hkzt"),
                        // depot departure time
                        col("cksj"),
                        // fault source flag: 1 = converter, 0 = transformer
                        lit("1").alias("status"),
                        // ---- columns for the ads_phm_fault table ----
                        // train type-number (aliased copy for the fault table)
                        col("s_train_id").alias("s_train_id_2"),
                        // fault id
                        col("idx").alias("s_fault_id"),
                        // train type-number
                        col("s_train_id").alias("s_train_name"),
                        // train type
                        col("cx").alias("s_train_type_code"),
                        // fault source
                        col("source").alias("s_fault_source"),
                        // fault time
                        col("gzkssj_up").alias("s_fault_time"),
                        // fault system
                        col("ssxt").alias("s_fault_sys"),
                        // fault system code 1
                        col("xtbm").alias("s_fault_sys_code1"),
                        // fault system code 2 (PJ code)
                        col("xtpjm").alias("s_fault_sys_code2"),
                        // fault location
                        col("ssbj").alias("s_fault_bw"),
                        // fault location code 1
                        col("bjbm").alias("s_fault_bw_code1"),
                        // fault location code 2
                        col("bjpjm").alias("s_fault_bw_code2"),
                        // fault code
                        col("gzdm_wm").alias("s_fault_code"),
                        // fault name
                        col("gzmc").alias("s_fault_name"),
                        // fault category
                        col("phm_lb"),
                        // PHM fault name
                        col("phm_mc").alias("phm_gzmc"),
                        // fault position
                        col("phm_bjwz"),
                        // fault description (Zhuzhou works)
                        col("zj_ms"),
                        // repair method (Zhuzhou works)
                        col("zj_fa"),
                        // part serial number
                        col("pjxlh"),
                        // part unit code
                        col("pjdjm"),
                        // backfill status (defaults to "0")
                        lit("0").alias("s_htzt"),
                        // return-to-depot status (defaults to "0")
                        lit("0").alias("s_hkzt"),
                        // dispatch status (defaults to "0")
                        lit("0").alias("s_xfzt")
                );
        Dataset<Row> mxDS = mxxqDS.select(
                // ---- model dictionary columns ----
                // model detail id, e.g. 10001
                col("mx_id"),
                // model name, e.g. converter contactor-jam / element-fault CBM model
                col("mxmc"),
                // model result, e.g. element fault
                col("mxjg"),
                // result description, e.g. single-tube fault
                col("jgms"),
                // detailed description, e.g. "变流器1L3元件总故障,变流器1L3B相下管故障"
                col("xxms"),
                // handling suggestion, e.g. check/replace the module's pulse/driver board or IGBT
                col("czjy"),
                // applicable locomotive type, e.g. HXD1D
                col("syjx")
        );
        // Left-join the real-time faults to the dictionary on PHM fault name + locomotive type.
        return tcmsRealFaultsDS.join(mxDS, expr("phm_gzmc = xxms and cx=syjx"), "left_outer").select(
                // ---- t_phm_sqwx columns ----
                // unique key of the condition-based-maintenance table
                concat_ws("-", col("s_train_id"), col("MX_ID"), date_format(col("cksj"), "yyyyMMddHHmmss")).alias("id"),
                date_format(col("cksj"), "yyyyMMddHHmmss").alias("cksj"),
                // train type-number
                col("s_train_id"),
                // train type
                col("cx"),
                // railway bureau
                col("lj"),
                // railway bureau abbreviation
                col("ljjc"),
                // railway bureau code
                col("ljm"),
                // assigned depot
                col("psd"),
                // depot abbreviation
                col("psdjc"),
                // depot code
                col("psddm"),
                // owning system
                col("ssxt"),
                // system PJ code
                col("xtpjm"),
                // owning component
                col("ssbj"),
                // component PJ code
                col("bjpjm"),
                // component position
                col("bjwz"),
                // component serial number
                col("bjxlh"),
                // component unit code
                col("bjdjm"),
                // model description: the dictionary's `jgms` (result description, e.g. single-tube fault)
                col("jgms").alias("mxms"),
                // CBM model name: the dictionary's `mxjg` (model result, e.g. element fault)
                col("mxjg").alias("mxmc"),
                // create_time
                current_timestamp().alias("create_time"),
                col("czjy"),
                // dispatch status
                col("xfzt"),
                // backfill status
                col("htzt"),
                // return-to-depot status
                col("hkzt"),
                // fault source flag: 1 = converter, 0 = transformer
                col("status"),

                // ---- ads_phm_fault columns ----
                // train type-number (aliased — take care when inserting)
                col("s_train_id_2"),
                // fault id
                col("s_fault_id"),
                // model id
                col("mx_id"),
                // train type-number
                col("s_train_name"),
                // train type
                col("s_train_type_code"),
                // fault source
                col("s_fault_source"),
                // fault time
                col("s_fault_time"),
                // fault system
                col("s_fault_sys"),
                // fault system code 1
                col("s_fault_sys_code1"),
                // fault system code 2 (PJ code)
                col("s_fault_sys_code2"),
                // fault location
                col("s_fault_bw"),
                // fault location code 1
                col("s_fault_bw_code1"),
                // fault location code 2
                col("s_fault_bw_code2"),
                // fault code
                col("s_fault_code"),
                // fault name
                col("s_fault_name"),
                // fault category
                col("phm_lb"),
                // PHM fault name
                col("phm_gzmc"),
                // fault position
                col("phm_bjwz"),
                // fault description (Zhuzhou works)
                col("zj_ms"),
                // repair method (Zhuzhou works)
                col("zj_fa"),
                // part serial number
                col("pjxlh"),
                // part unit code
                col("pjdjm"),
                // backfill status
                col("s_htzt"),
                // return-to-depot status
                col("s_hkzt"),
                // dispatch status
                col("s_xfzt")
        );
    }
}
