package org.example.Model_Prediction;

import org.apache.spark.sql.*;
import org.apache.spark.sql.expressions.Window;
import org.apache.spark.sql.expressions.WindowSpec;
import org.example.utils.MysqlUtils_3;

import java.io.IOException;
import java.sql.Timestamp;
import java.text.ParseException;
import java.text.SimpleDateFormat;

import static org.apache.spark.sql.functions.*;

/**
 * 编写HXD1D、HXD1C中牵引变流器模型的处理类。主要用来处理TCMS的故障数据,根据故障数据做视情维修和健康评估。这里主要分成如下几种模型进行处理:
 * 普通的模型,来一条就报一条的这种。
 * 对于存在组合关系的模型: 例如模块过热、元件故障,并且当前模型中存在5秒内、10秒内的窗口概念。
 * 对于HXD1C中,有几个模型比较特殊: 例如牵引变流器原边过流报警故障视情维修模型这种,存在跨位置故障的组合关系,还存在同时报警的概念,这里也要进行处理。
 */
public class QYBLQ_JsonProcessor {


    /**
     * Per-batch driver for the HXD1D/HXD1C traction-converter (part code PJ00003)
     * fault models. Registers the MySQL reference tables as temp views, narrows the
     * Kafka batch down to traction-converter alarms, runs the simple
     * one-alarm-in/one-row-out models plus the module-overheat combined model
     * (codes 10022/10077), upserts the condition-based-maintenance rows into
     * `t_phm_sqwx`, and leaves the merged health assessment in the `test_mx`
     * temp view for downstream consumers.
     *
     * @param spark   active Spark session used for SQL and MySQL access
     * @param jsonRDD current batch of TCMS alarm rows consumed from Kafka
     */
    public static void unionHealthAssessment(SparkSession spark, Dataset<Row> jsonRDD) throws AnalysisException, IOException, ParseException {
        // Expose the MySQL reference tables to Spark SQL.
        MysqlUtils_3.readMysqlTableDs(spark, "t_phm_mxxq").createOrReplaceTempView("t_phm_mxxq");
        MysqlUtils_3.readMysqlTableDs(spark, "t_phm_sqwx").createOrReplaceTempView("t_phm_sqwx");
        MysqlUtils_3.readMysqlTableDs(spark, "ads_phm_fault").createOrReplaceTempView("ads_phm_fault");
        // TODO 2023-06-13: HXD1C condition-based-maintenance history was added here.
        // Open (hkzt = '0') maintenance records for this part on both train types.
        Dataset<Row> tPhmSqwxLocalHistory = MysqlUtils_3.readMysqlTableDs(spark, "t_phm_sqwx")
                .filter(
                        col("hkzt").equalTo("0")
                                .and(col("bjpjm").equalTo("PJ00003"))
                                .and(col("cx").isin("HXD1D", "HXD1C"))
                );
        // Current-trip history from `ads_phm_fault`, used later for the health
        // assessment. TODO 2023-06-13: HXD1C history was added here as well.
        Dataset<Row> adsPhmFaultLocalHistory = MysqlUtils_3.readMysqlTableDs(spark, "ads_phm_fault")
                .filter(expr("S_HKZT = '0' and S_FAULT_BW_CODE2 = 'PJ00003' and s_train_type_code in ('HXD1D','HXD1C') "));

        // Filter the consumed Kafka data by train type and phm part code; the simple
        // alarms (one in, one out) are processed first from this `tcmsEvent` view.
        // Columns: s_train_id,idx,cx,source,gzkssj_up,ssxt,xtbm,xtpjm,ssbj,bjbm,bjpjm,gzdm_wm,gzmc,phm_lb,phm_mc,phm_bjwz,zj_ms,zj_fa,pjxlh,pjdjm,lj,ljjc,lj_id,psd,psdjc,psd_id,cksj,train_status,
        // JCDLLXMC,RC,YYZL,ZZC,ZZRQ,LJZXGL,GZM,GZLY,ZDLB,YFYY
        jsonRDD
                .filter(expr(" bjpjm = 'PJ00003' and cx in('HXD1D','HXD1C') "))
                // TODO 2023-06-20: pre-change column list kept for reference:
                // .select("s_train_id", "idx", "cx", "source", "gzkssj_up", "ssxt", "xtbm", "xtpjm", "ssbj", "bjbm", "bjpjm", "gzdm_wm", "gzmc", "phm_lb", "phm_mc", "phm_bjwz", "zj_ms", "zj_fa", "pjxlh", "pjdjm", "lj", "ljjc", "lj_id", "psd", "psdjc", "psd_id", "rksj", "JCDLLXMC", "RC", "YYZL", "ZZC", "ZZRQ", "LJZXGL", "GZM", "GZLY", "ZDLB")
                .select("s_train_id", "idx", "cx", "cx_id", "source", "gzkssj_up", "ssxt", "xtbm", "xtpjm", "ssbj", "bjbm", "bjpjm",
                        "gzdm", "gzmc", "phm_lb", "phm_mc", "phm_bjwz", "zj_ms", "zj_fa", "pjxlh", "pjdjm", "lj", "ljjc", "lj_id",
                        "psd", "psdjc", "psd_id", "cksj", "train_status", "JCDLLXMC", "RC", "YYZL", "ZZC", "ZZRQ", "LJZXGL", "GZM", "GZLY", "ZDLB")
                // Keep only alarms raised after the check-out time of the trip.
                .filter("gzkssj_up > cksj")
                // TODO 2023-06-16
                .createOrReplaceTempView("tcmsEvent");

        // Wide rows for alarms WITHOUT combination semantics ('10083','10086','10092','10093','10082' noted for later).
        Dataset<Row> tcms_mxxq_DS = tcmsJoinMxxqDs(spark, " not in ('10003','10022','10077','10078')");
        // Simple models: one alarm in, one `ads_phm_fault` row out.
        insertIntoAdsPhmFault(tcms_mxxq_DS);
        // Stage the current batch for the `t_phm_sqwx` upsert (aggregation happens inside).
        upsertIntoTPhmSqwx(spark, tcms_mxxq_DS, " not in ('10003','10022','10077','10078')");
        // Publish the generic-model health assessment (temp views pgjg_1 / pgjg_2).
        commonHealthEvaluate(spark, adsPhmFaultLocalHistory, tcms_mxxq_DS);
        // Module-overheat combined models 10022 / 10077 (10-second session window).
        processFaultType22(spark, tcmsJoinMxxqDs(spark, "in ('10022','10077')"), tPhmSqwxLocalHistory, adsPhmFaultLocalHistory);
        // Merge all health-assessment views into `test_mx`.
        spark.sql("select * from pgjg_1 UNION select * from pgjg_2 UNION select * from pgjg_22").dropDuplicates().createOrReplaceTempView("test_mx");
        //System.out.println(" = = = = = health assessment result = = = =  = =");
        //spark.sql("select  * from test_mx").show(100, false);
        // Upsert the final condition-based-maintenance rows.
        MysqlUtils_3.upsertDatasetToMySQL(spark.sql("select * from sqwx_1 UNION select * from sqwx_22"), "t_phm_sqwx");
//        System.out.println("line 164 . . . . . . . test_mx ");
//        spark.sql("select * from test_mx").show(20, false);

        // TODO: special HXD1C models — 10083 primary over-current, 10086 primary
        // grounding, 10092 low line voltage, 10093 high line voltage — need
        // cross-position aggregation plus a "simultaneous alarm" notion. Per-position
        // handling is used for now, so specialOneProcess is intentionally not called.
        // specialOneProcess(tcms_mxxq_DS_1);
        // Dataset<Row> tcms_mxxq_DS_1 = tcmsJoinMxxqDs(spark, " in ('10083','10086','10092','10093')");
        //tcms_mxxq_DS_1.show(20, false);
    }

    /***
     * Model 22 — traction-converter module-overheat (fault codes 10022/10077)
     * condition-based maintenance and health assessment.
     * Overheat alarms that fall within the same 10-second session are merged into
     * one combined fault row; when two or more distinct overheat messages occur in
     * a session the combined name becomes "所有模块均过热". Results are appended to
     * `ads_phm_fault`; maintenance rows are published in the `sqwx_22` temp view
     * and the health grades in the `pgjg_22` temp view.
     * @param spark                   active Spark session
     * @param tcms_mxxq_Mkgr          current-batch wide rows for codes 10022/10077
     * @param tPhmSqwxLocalHistory    open `t_phm_sqwx` history rows for this part
     * @param adsPhmFaultLocalHistory `ads_phm_fault` history of the current trip
     * @return
     * @throws IOException    if a MySQL write fails
     * @throws ParseException if a history fault-time string cannot be parsed
     */
    public static void processFaultType22(SparkSession spark, Dataset<Row> tcms_mxxq_Mkgr, Dataset<Row> tPhmSqwxLocalHistory, Dataset<Row> adsPhmFaultLocalHistory) throws IOException, ParseException {
        //if (tcms_mxxq_Mkgr != null && tcms_mxxq_Mkgr.count() > 0) {
        // History rows of the overheat models only; the model id is the 5th
        // '-'-separated token of the composite mx_id key.
        Dataset<Row> adsPhmFaultLocalHistoryDS = adsPhmFaultLocalHistory.filter(expr("split(mx_id,'-')[4] in('10022','10077')")).select("s_train_id", "s_fault_id", "mx_id", "s_train_name", "s_train_type_code", "s_fault_source", "s_fault_time", "s_fault_sys", "s_fault_sys_code1", "s_fault_sys_code2", "s_fault_bw", "s_fault_bw_code1", "s_fault_bw_code2", "s_fault_code", "s_fault_name", "phm_lb", "mxjg", "phm_gzmc", "phm_bjwz", "zj_ms", "zj_fa", "pjxlh", "pjdjm", "s_htzt", "s_hkzt", "s_xfzt");
//        System.out.println("adsPhmFaultLocalHistoryDS = = = = = ");
//        adsPhmFaultLocalHistoryDS.show(20, false);
        // Current batch projected onto the ads_phm_fault column set.
        Dataset<Row> tcmsRealDataAdsPhmFaultDS = selectColumnsAdsPhmFault(tcms_mxxq_Mkgr);
//        System.out.println("tcmsRealDataAdsPhmFaultDS = = = = = ");
//        tcmsRealDataAdsPhmFaultDS.show(20, false);
        Dataset<Row> adsPhmFaultAllDS = null;
        if (adsPhmFaultLocalHistoryDS.count() > 0) {
            // History exists: prepend its trailing 10-second slice so a session that
            // straddles the batch boundary can still be combined with new alarms.
            String lastBatchMaxFaultTimeStr = adsPhmFaultLocalHistoryDS.agg(max("s_fault_time")).collectAsList().get(0).getString(0); // max s_fault_time of the previous batch for this element
            // NOTE(review): SimpleDateFormat/Timestamp is legacy; java.time
            // (DateTimeFormatter/Instant) would be preferable — kept as-is here.
            SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
            // Parse the string timestamp into a java.sql.Timestamp.
            Timestamp maxTime = new Timestamp(dateFormat.parse(lastBatchMaxFaultTimeStr).getTime());
            // Unix millis of the max time, shifted back 10 seconds.
            long maxTimeUnix = maxTime.getTime() - (10 * 1000);
            // Back to a Timestamp for formatting.
            Timestamp maxTimeMinus10Seconds = new Timestamp(maxTimeUnix);
            // Select history rows whose s_fault_time falls in that trailing 10-second window.
            Column filterCondition = expr("s_fault_time >= '" + dateFormat.format(maxTimeMinus10Seconds) + "'");
            // The last 10 seconds of the previous batch.
            Dataset<Row> lastBatch10SDs = adsPhmFaultLocalHistoryDS.filter(filterCondition);
            adsPhmFaultAllDS = lastBatch10SDs.union(tcmsRealDataAdsPhmFaultDS);
        } else {
            adsPhmFaultAllDS = tcmsRealDataAdsPhmFaultDS;
        }
//        System.out.println("adsPhmFaultAllDS = = = = ");
//        adsPhmFaultAllDS.show(20, false);
        // Session window: partition by everything except s_fault_id / phm_gzmc /
        // s_fault_time (the latter is minimized per session), ordered by fault time.
        WindowSpec windowSpec = Window.partitionBy("s_train_id", "mx_id", "s_train_name", "s_train_type_code", "s_fault_source", "s_fault_sys", "s_fault_sys_code1", "s_fault_sys_code2", "s_fault_bw", "s_fault_bw_code1", "s_fault_bw_code2", "s_fault_code", "s_fault_name", "phm_lb", "mxjg", "phm_bjwz", "zj_ms", "zj_fa", "pjxlh", "pjdjm", "s_htzt", "s_hkzt", "s_xfzt").orderBy("s_fault_time");
        // Seconds elapsed since the previous row in the same partition.
        Dataset<Row> allProcessRealFaultsSecondDiff = adsPhmFaultAllDS.withColumn(
                "time_diff",
                unix_timestamp(col("s_fault_time")).minus(lag(unix_timestamp(col("s_fault_time")), 1).over(windowSpec))
        );
        // A gap of 10s or more (or no predecessor) starts a new session.
        Dataset<Row> allProcessRealFaultsSessionFlag = allProcessRealFaultsSecondDiff.withColumn(
                "is_new_session",
                when(col("time_diff").isNull().or(col("time_diff").geq(10)), 1).otherwise(0)
        );
        // Running sum of session-start flags yields a per-session id.
        Dataset<Row> allProcessRealFaultsSessionId = allProcessRealFaultsSessionFlag.withColumn(
                "session_id",
                sum("is_new_session").over(windowSpec)
        );

        // Collapse each session: collect the distinct fault names, keep the earliest
        // fault time, and rewrite the combined name.
        Dataset<Row> finalInsertAdsPhmFaultAddMxmcDS = allProcessRealFaultsSessionId.groupBy("s_train_id", "mx_id", "s_train_name", "s_train_type_code", "s_fault_source", "s_fault_sys", "s_fault_sys_code1", "s_fault_sys_code2", "s_fault_bw", "s_fault_bw_code1", "s_fault_bw_code2", "s_fault_code", "s_fault_name", "phm_lb", "mxjg", "phm_bjwz", "zj_ms", "zj_fa", "pjxlh", "pjdjm", "s_htzt", "s_hkzt", "s_xfzt", "session_id")
                .agg(
                        collect_set(col("phm_gzmc")).alias("phm_gzmc"),
                        min(col("s_fault_time")).alias("s_fault_time")
                )
                .withColumn("phm_gzmc", when(size(col("phm_gzmc")).geq(2), lit("所有模块均过热")) // >= 2 distinct names in a session => "all modules overheating"
                        // Single name: strip the transformer/converter position prefix.
                        .otherwise(regexp_replace(concat_ws("", col("phm_gzmc")), "(主变1|主变2|变流器1|变流器2)", ""))
                )
                .withColumn("s_fault_id", expr("uuid()"));
        Dataset<Row> selectFinalInsertAdsPhmFaultAddMxmcDS = finalInsertAdsPhmFaultAddMxmcDS.select("s_train_id", "s_fault_id", "mx_id", "s_train_name", "s_train_type_code", "s_fault_source", "s_fault_time", "s_fault_sys", "s_fault_sys_code1", "s_fault_sys_code2", "s_fault_bw", "s_fault_bw_code1", "s_fault_bw_code2", "s_fault_code", "s_fault_name", "phm_lb", "mxjg", "phm_gzmc", "phm_bjwz", "zj_ms", "zj_fa", "pjxlh", "pjdjm", "s_htzt", "s_hkzt", "s_xfzt");
        MysqlUtils_3.writeDataToMysqlTable(selectFinalInsertAdsPhmFaultAddMxmcDS, "ads_phm_fault", SaveMode.Append);

        // Condition-based maintenance:
        // `t_phm_sqwx` history columns:
        //s_train_id,cx,lj,ljjc,ljm,psd,psdjc,psddm,ssxt,xtpjm,ssbj,bjpjm,bjwz,bjxlh,bjdjm,xfzt,htzt,hkzt,status,czjy,mx,mxmc,mxms,dllx,rc,yyzl,sccj,zzrq,ljzx,gzm,gzly,zdlb,yfyy,hkzbjy,hkjxjy,cksj,mx_id
        Dataset<Row> localHistorySqwxDS = selectColumnsTPhmSqwxOut(tPhmSqwxLocalHistory.filter(expr("split(id,'-')[4] in('10022','10077')")))
                .withColumn("cksj", expr("substring_index(id, '-', -1)")) // check-out time is the last '-'-token of id, formatted like 20230320121035
                .withColumn("mx_id", expr("substring_index(substring_index(id, '-', 5), '-', -1)")) // model id (e.g. 1066) is the 5th '-'-token of id
                .drop("id").drop("create_time"); // both columns are re-derived after the windowed aggregation below
        // Current batch projected onto the `t_phm_sqwx` column set:
        //s_train_id,cx,lj,ljjc,ljm,psd,psdjc,psddm,ssxt,xtpjm,ssbj,bjpjm,bjwz,bjxlh,bjdjm,xfzt,htzt,hkzt,status,czjy,mx,mxmc,mxms,dllx,rc,yyzl,sccj,zzrq,ljzx,gzm,gzly,zdlb,yfyy,hkzbjy,hkjxjy,cksj,mx_id
        Dataset<Row> tcmsRealDataSqwxDS = selectColumnsTPhmSqwxIn(tcms_mxxq_Mkgr).select("s_train_id", "cx", "cx_id", "lj", "ljjc", "ljm", "psd", "psdjc", "psddm", "ssxt", "xtpjm", "ssbj", "bjpjm", "bjwz", "bjxlh", "bjdjm", "xfzt", "htzt", "hkzt", "status", "czjy", "mx", "mxmc", "mxms", "dllx", "rc", "yyzl", "sccj", "zzrq", "ljzx", "gzm", "gzly", "zdlb", "yfyy", "hkzbjy", "hkjxjy", "cksj", "mx_id");

        // All alarms of the current trip (history + this batch), aggregated per
        // train / composite key / part code / model result.
        Dataset<Row> sqwxFinalDS = adsPhmFaultLocalHistoryDS.union(selectFinalInsertAdsPhmFaultAddMxmcDS)
                .select("s_train_id", "mx_id", "s_fault_bw_code2", "mxjg", "phm_gzmc", "s_fault_time")
                .groupBy("s_train_id", "mx_id", "s_fault_bw_code2", "mxjg")
                .agg(
                        concat_ws(",", collect_set(col("phm_gzmc"))).as("phm_gzmc"),
                        min(col("s_fault_time")).as("fssj")
                )
                // distinctString is a registered UDF that de-duplicates a comma-joined list.
                .selectExpr("mx_id", "s_fault_bw_code2", "mxjg", "distinctString(phm_gzmc) as phm_gzmc", "fssj");


        Dataset<Row> sqwxBaseInfo = localHistorySqwxDS.union(tcmsRealDataSqwxDS).drop(col("mxms"))
                .dropDuplicates("s_train_id", "cksj", "mx_id", "cx", "cx_id", "lj", "ljjc", "ljm", "psd", "psdjc", "psddm", "ssxt", "xtpjm", "ssbj", "bjpjm", "bjwz", "bjxlh", "bjdjm", "xfzt", "htzt", "hkzt", "czjy", "status", "dllx", "rc", "yyzl", "sccj", "zzrq", "ljzx", "gzm", "gzly", "zdlb", "yfyy", "hkzbjy", "hkjxjy");

        // Attach the aggregated fault description to each base row; the composite
        // key on t2 must equal trainId-position-modelId-checkoutTime rebuilt from t1.
        Dataset<Row> sqwxInsertFinalDs = sqwxBaseInfo.alias("t1")
                .join(sqwxFinalDS.alias("t2"),
                        expr("t2.mx_id = concat_ws('-', t1.s_train_id, t1.bjwz, t1.mx_id, t1.cksj) and t1.bjpjm = t2.s_fault_bw_code2"), "inner")
                .withColumnRenamed("phm_gzmc", "mxms")
                //.withColumn("id", concat_ws("-", col("s_train_id"), col("bjwz"), col("mx_id"), col("cksj")))
                .select(col("t2.mx_id").as("id"), col("s_train_id"), col("cx"), col("cx_id"), split(col("s_train_id"), "-").getItem(1).as("ch"), col("lj"), col("ljjc"), col("ljm"), col("psd"), col("psdjc"),
                        col("psddm"), col("ssxt"), col("xtpjm"), col("ssbj"), col("bjpjm"), col("bjwz"), col("bjxlh"),
                        col("bjdjm"), col("xfzt"), col("htzt"), col("hkzt"), current_timestamp().as("create_time"), col("status"),
                        col("czjy"), col("mx"), col("mxmc"), col("mxms"), col("dllx"), col("rc"), col("yyzl"), col("sccj"), col("zzrq"), col("ljzx"), col("gzm"), col("gzly"), col("fssj"), col("zdlb"), col("yfyy"), col("hkzbjy"), col("hkjxjy"));
        sqwxInsertFinalDs.select("id", "s_train_id", "cx", "cx_id", "ch", "lj", "ljjc", "ljm", "psd", "psdjc", "psddm", "ssxt", "xtpjm", "ssbj", "bjpjm", "bjwz", "bjxlh", "bjdjm", "xfzt", "htzt", "hkzt", "status", "dllx", "rc", "yyzl", "sccj", "zzrq", "ljzx", "gzly", "zdlb", "mx", "mxmc", "mxms", "czjy", "yfyy", "gzm", "hkzbjy", "hkjxjy", "fssj", "create_time").createOrReplaceTempView("sqwx_22");
        //MysqlUtils_3.upsertDatasetToMySQL(sqwxInsertFinalDs, "t_phm_sqwx");

        // Health assessment input: trip history + this batch's ads_phm_fault rows.
        Dataset<Row> tcmsAdsPhmFaultDS = selectFinalInsertAdsPhmFaultAddMxmcDS.select(col("s_train_id"), col("s_fault_bw_code2").as("bjpjm"), col("phm_bjwz").as("bjwz"),
                split(col("mx_id"), "-").getItem(4).as("mxmc"), col("phm_gzmc").as("xxms"));
        Dataset<Row> rowDatasetHistoryDS = adsPhmFaultLocalHistory.select(col("s_train_id"), col("s_fault_bw_code2").as("bjpjm"), col("phm_bjwz").as("bjwz"),
                split(col("mx_id"), "-").getItem(4).as("mxmc"), regexp_replace(concat_ws("", col("phm_gzmc")), "(主变1|主变2|变流器1|变流器2)", "").as("xxms")
        ).filter(expr("mxmc in ('10022','10077')"));
        Dataset<Row> healthDs = null;
        if (rowDatasetHistoryDS.count() > 0) {
            healthDs = tcmsAdsPhmFaultDS.union(rowDatasetHistoryDS)
                    .groupBy("s_train_id", "bjpjm", "bjwz", "mxmc")
                    .agg(collect_set(col("xxms")).alias("phm_gzmc"));
        } else {
            healthDs = tcmsAdsPhmFaultDS
                    .groupBy("s_train_id", "bjpjm", "bjwz", "mxmc")
                    .agg(collect_set(col("xxms")).alias("phm_gzmc"));
        }
        // TODO: grading rule may still change — if a train/position never reported
        // "所有模块均过热" but several 10-second sessions together covered at least two
        // of the individual overheat messages, how should that be graded?
        // Current rule: grade D when phm_gzmc has >= 2 entries or contains
        // "所有模块均过热"; otherwise grade C.
        Dataset<Row> healthDsFinalDs = healthDs
                .withColumn("pgjg", when(size(col("phm_gzmc")).geq(2).or(array_contains(col("phm_gzmc"), "所有模块均过热")), "D").otherwise("C"))
                .withColumn("zxx", lit(""))
                .withColumn("jgms", concat_ws(",", col("phm_gzmc"))).select("s_train_id", "bjpjm", "bjwz", "mxmc", "pgjg", "zxx", "jgms");
        healthDsFinalDs.select("S_TRAIN_ID", "BJPJM", "BJWZ", "MXMC", "ZXX", "jgms", "PGJG").createOrReplaceTempView("pgjg_22");
    }
    //}

    /**
     * Projects a tcms x model-detail wide row onto the `ads_phm_fault` column set:
     * source columns are renamed to the target-table names, the composite mx_id key
     * (trainId-position-modelId-checkoutTime) is synthesised, and the fixed status
     * flags s_htzt='1', s_hkzt='0', s_xfzt='1' are attached.
     */
    public static Dataset<Row> selectColumnsAdsPhmFault(Dataset<Row> inputDS) {
        return inputDS.selectExpr(
                "s_train_id",
                "idx AS s_fault_id",
                // composite key: trainId-position-modelId-checkoutTime(yyyyMMddHHmmss)
                "concat_ws('-', s_train_id, phm_bjwz, mx_id, date_format(cksj, 'yyyyMMddHHmmss')) AS mx_id",
                "s_train_id AS s_train_name",
                "cx AS s_train_type_code",
                "source AS s_fault_source",
                "gzkssj_up AS s_fault_time",
                "ssxt AS s_fault_sys",
                "xtbm AS s_fault_sys_code1",
                "xtpjm AS s_fault_sys_code2",
                "ssbj AS s_fault_bw",
                "bjbm AS s_fault_bw_code1",
                "bjpjm AS s_fault_bw_code2",
                "gzdm AS s_fault_code",
                "gzmc AS s_fault_name",
                "phm_lb",
                "mxjg",
                "phm_mc AS phm_gzmc",
                "phm_bjwz",
                "zj_ms",
                "zj_fa",
                "pjxlh",
                "pjdjm",
                "'1' AS s_htzt",
                "'0' AS s_hkzt",
                "'1' AS s_xfzt");
    }

    /**
     * Selects, unchanged and in table order, the `t_phm_sqwx` columns needed when
     * reading maintenance history back out for re-aggregation.
     */
    public static Dataset<Row> selectColumnsTPhmSqwxOut(Dataset<Row> writeDs) {
        return writeDs.select(
                "id", "s_train_id", "cx", "cx_id", "lj", "ljjc", "ljm", "psd", "psdjc",
                "psddm", "ssxt", "xtpjm", "ssbj", "bjpjm", "bjwz", "bjxlh",
                "bjdjm", "xfzt", "htzt", "hkzt", "create_time", "status",
                "czjy", "mx", "mxmc", "mxms", "dllx", "rc", "yyzl", "sccj",
                "zzrq", "ljzx", "gzm", "gzly", "zdlb", "yfyy", "hkzbjy", "hkjxjy");
    }

    /***
     * Health-assessment handling for the special HXD1C parts: 10083 primary
     * over-current, 10086 primary grounding, 10092 low line voltage, 10093 high
     * line voltage. Alarms within a 60-second session are merged; when both
     * positions alarm inside the window the combined fault is exploded back into
     * one row per position. Combined rows are appended to `ads_phm_fault`.
     * NOTE: currently not called from {@link #unionHealthAssessment} (cross-position
     * aggregation is deferred) and the final `t_phm_sqwx` join is not yet persisted.
     *
     * @param tcms_mxxq_DS_1 current-batch wide rows for the four special models
     * @throws IOException if the MySQL write fails
     */
    private static void specialOneProcess(Dataset<Row> tcms_mxxq_DS_1) throws IOException {
        // Project onto the ads_phm_fault shape; aggregation below uses a 1-minute window.
        // NOTE(review): `1 AS s_htzt` etc. produce integer columns here, whereas the
        // sibling projections use string literals ('1') — confirm the target schema.
        Dataset<Row> tcms_mxxqDs = tcms_mxxq_DS_1.selectExpr(
                "s_train_id",
                "idx AS s_fault_id",
                "mx_id",
                "date_format(cksj, 'yyyyMMddHHmmss') as cksj",
                // "concat_ws('-', s_train_id, phm_bjwz, mx_id, date_format(cksj, 'yyyyMMddHHmmss')) AS mx_id",
                "s_train_id AS s_train_name",
                "cx AS s_train_type_code",
                "source AS s_fault_source",
                "gzkssj_up AS s_fault_time",
                "ssxt AS s_fault_sys",
                "xtbm AS s_fault_sys_code1",
                "xtpjm AS s_fault_sys_code2",
                "ssbj AS s_fault_bw",
                "bjbm AS s_fault_bw_code1",
                "bjpjm AS s_fault_bw_code2",
                "gzdm AS s_fault_code",
                "gzmc AS s_fault_name",
                "phm_lb",
                "mxjg",
                "phm_mc AS phm_gzmc",
                "phm_bjwz",
                "zj_ms",
                "zj_fa",
                "pjxlh",
                "pjdjm",
                "1 AS s_htzt",
                "0 AS s_hkzt",
                "1 AS s_xfzt"
        );

        // Session window; s_fault_id and phm_gzmc are excluded, s_fault_time is minimized per session.
        WindowSpec windowSpec = Window.partitionBy("s_train_id", "mx_id", "cksj", "phm_lb", "mxjg").orderBy("s_fault_time");
        // Seconds elapsed since the previous row in the same partition.
        Dataset<Row> allProcessRealFaultsSecondDiff = tcms_mxxqDs.withColumn(
                "time_diff",
                unix_timestamp(col("s_fault_time")).minus(lag(unix_timestamp(col("s_fault_time")), 1).over(windowSpec))
        );
        // A gap of 60s or more (or no predecessor) starts a new session.
        Dataset<Row> allProcessRealFaultsSessionFlag = allProcessRealFaultsSecondDiff.withColumn(
                "is_new_session",
                when(col("time_diff").isNull().or(col("time_diff").geq(60)), 1).otherwise(0)
        );
        // Running sum of session-start flags yields a per-session id.
        Dataset<Row> allProcessRealFaultsSessionId = allProcessRealFaultsSessionFlag.withColumn(
                "session_id",
                sum("is_new_session").over(windowSpec)
        );
        // Collapse each session, collecting the per-position values as sets.
        Dataset<Row> finalInsertAdsPhmFaultAddMxmcDS = allProcessRealFaultsSessionId
                .groupBy("s_train_id", "mx_id", "cksj", "s_train_name", "s_train_type_code", "s_fault_source", "s_fault_sys", "s_fault_sys_code1", "s_fault_sys_code2", "s_fault_bw", "s_fault_bw_code1",
                        "s_fault_bw_code2", "s_fault_code", "s_fault_name", "phm_lb", "mxjg", "s_htzt", "s_hkzt", "s_xfzt", "session_id")
                .agg(
                        collect_set(col("phm_gzmc")).alias("phm_gzmc"),
                        min(col("s_fault_time")).alias("s_fault_time"),
                        collect_set(col("phm_bjwz")).alias("phm_bjwz"),
                        collect_set(col("pjxlh")).alias("pjxlh"),
                        collect_set(col("pjdjm")).alias("pjdjm"),
                        collect_set(col("zj_ms")).alias("zj_ms"),
                        collect_set(col("zj_fa")).alias("zj_fa")
                );
        // If both positions alarmed within 60s, emit one row per position (explode),
        // and derive the single-vs-multi model name from the fault-name set size.
        Dataset<Row> explodedDS = finalInsertAdsPhmFaultAddMxmcDS
                .withColumn("phm_bjwz", explode(col("phm_bjwz")))
                .withColumn("mxmc", expr("CASE\n" +
                        "           WHEN mx_id = '10083' THEN if(size(phm_gzmc) > 1,'多个变流器原边过流','单个变流器原边过流')\n" +
                        "           WHEN mx_id = '10086' THEN if(size(phm_gzmc) > 1,'多变流器原边接地','单变流器原边接地')\n" +
                        "           WHEN mx_id = '10092' THEN if(size(phm_gzmc) > 1,'多变流器低网压保护','单变流器低网压保护')\n" +
                        "           WHEN mx_id = '10093' THEN if(size(phm_gzmc) > 1,'多变流器高网压保护','单变流器高网压保护')\n" +
                        "           ELSE '' END AS mxmc"))
                .withColumn("phm_gzmc", concat_ws(",", col("phm_gzmc")))
                .withColumn("s_fault_time", concat_ws(",", col("s_fault_time")))
                .withColumn("pjxlh", concat_ws(",", col("pjxlh")))
                .withColumn("pjdjm", concat_ws(",", col("pjdjm")))
                .withColumn("zj_ms", concat_ws(",", col("zj_ms")))
                .withColumn("zj_fa", concat_ws(",", col("zj_fa")))
                .withColumn("s_fault_id", expr("uuid()"))
                .withColumn("mx_id", expr("concat_ws('-', s_train_id, phm_bjwz, mx_id, cksj)"));

        Dataset<Row> finalInsertAdsPhmFaultDS = explodedDS.select("s_train_id", "s_fault_id", "mx_id", "s_train_name", "s_train_type_code", "s_fault_source", "s_fault_time", "s_fault_sys", "s_fault_sys_code1", "s_fault_sys_code2", "s_fault_bw", "s_fault_bw_code1", "s_fault_bw_code2", "s_fault_code", "s_fault_name", "phm_lb", "mxjg", "phm_gzmc", "phm_bjwz", "zj_ms", "zj_fa", "pjxlh", "pjdjm", "s_htzt", "s_hkzt", "s_xfzt");

        MysqlUtils_3.writeDataToMysqlTable(finalInsertAdsPhmFaultDS, "ads_phm_fault", SaveMode.Append);

        // Join the combined fault names back onto the batch rows to build the full
        // `t_phm_sqwx` column set.
        // FIX: the previous condition used columns that exist on neither alias —
        // selectColumnsTPhmSqwxIn renames phm_bjwz->bjwz on t1, t1.mx_id is the raw
        // model id while t2.mx_id is the composite key (the model id is its 4th
        // '-'-token because s_train_id itself contains one '-') — and "join" is not
        // a valid Spark join type (throws IllegalArgumentException at runtime).
        // TODO: the join result is not yet persisted anywhere.
        Dataset<Row> sqwxJoinedDS = selectColumnsTPhmSqwxIn(tcms_mxxq_DS_1).alias("t1").join(explodedDS.alias("t2"),
                expr("t1.s_train_id = t2.s_train_id and split(t2.mx_id,'-')[3] = t1.mx_id and t1.bjwz = t2.phm_bjwz"), "inner");


    }

    /***
     * Maps the tcms-realtime x model-detail wide row onto the `t_phm_sqwx`
     * aggregation columns: renames source columns, formats the check-out time as
     * yyyyMMddHHmmss, and attaches the fixed literals (xfzt/htzt/status='1',
     * hkzt='0', yyzl/zdlb='-', gzly='TCMS').
     * @param inputDS wide row produced by tcmsJoinMxxqDs
     * @return projection ready for the t_phm_sqwx staging pipeline
     */
    public static Dataset<Row> selectColumnsTPhmSqwxIn(Dataset<Row> inputDS) {
        return inputDS.selectExpr(
                "s_train_id", "cx", "cx_id", "lj", "ljjc", "lj_id AS ljm", "psd", "psdjc",
                "psd_id AS psddm", "ssxt", "xtpjm", "ssbj", "bjpjm", "phm_bjwz AS bjwz", "pjxlh AS bjxlh",
                "pjdjm AS bjdjm", "'1' AS xfzt", "'1' AS htzt", "'0' AS hkzt", "'1' AS status",
                "czjy", "mxjg AS mx", "mxmc", "xxms AS mxms", "date_format(cksj, 'yyyyMMddHHmmss') AS cksj", "mx_id",
                "jcdllxmc AS dllx", "rc", "'-' AS yyzl", "zzc AS sccj", "zzrq", "ljzxgl AS ljzx",
                "gzdm AS gzm", "'TCMS' AS gzly", "'-' AS zdlb", "yfyy", "zbjy AS hkzbjy", "JXJY AS hkjxjy");
    }

    /***
     * Health assessment for the simple one-in/one-out models: merges the current
     * batch with the trip's `ads_phm_fault` history, aggregates the fault
     * descriptions per train / part / position / model, and publishes the graded
     * results in the `pgjg_1` (HXD1C) and `pgjg_2` (HXD1D) temp views.
     * @param spark                   active session used to run the grading SQL
     * @param adsPhmFaultLocalHistory `ads_phm_fault` history for this part on the current trip
     * @param tcms_mxxq_DS            current-batch tcms x model-detail wide rows
     */
    private static void commonHealthEvaluate(SparkSession spark, Dataset<Row> adsPhmFaultLocalHistory, Dataset<Row> tcms_mxxq_DS) {
        // Current batch projected onto the evaluation key columns.
        Dataset<Row> tcmsAdsPhmFaultDS = tcms_mxxq_DS.select(col("s_train_id"), col("bjpjm"), col("phm_bjwz").as("bjwz"),
                col("mx_id").as("mxmc"), col("xxms"));
        // Trip history keyed the same way; the model id is the 5th '-'-token of the
        // composite mx_id. Combined models (10003 element fault, 10022/10077 module
        // overheat) are handled elsewhere and excluded here.
        Dataset<Row> rowDatasetHistoryDS = adsPhmFaultLocalHistory.select(col("s_train_id"), col("S_FAULT_BW_CODE2").as("bjpjm"), col("phm_bjwz").as("bjwz"),
                        split(col("mx_id"), "-").getItem(4).as("mxmc"), col("phm_gzmc").as("xxms"))
                // ,'10083','10086','10092','10093','100082'
                .filter(expr("mxmc not in ('10003','10022','10077')"));
        // FIX: the former `count() > 0` guard ran a full Spark action only to skip a
        // union that is already a no-op on an empty dataset — union unconditionally.
        Dataset<Row> healthDs = tcmsAdsPhmFaultDS.union(rowDatasetHistoryDS)
                .groupBy("s_train_id", "bjpjm", "bjwz", "mxmc")
                .agg(concat_ws(",", collect_set(col("xxms"))).alias("phm_gzmc"));
        // Grade per train type / train / trip / position; rules come from the
        // traction-converter health-assessment specification document.
        Dataset<Row> rowDataset1 = healthDs.withColumn("zxx", lit("")).select("s_train_id", "bjpjm", "bjwz", "mxmc", "zxx", "phm_gzmc")
                .withColumn("pgjg_ctn", size(split(col("phm_gzmc"), ",")))
                .withColumnRenamed("phm_gzmc", "jgms");
        rowDataset1.select("S_TRAIN_ID", "BJPJM", "BJWZ", "MXMC", "pgjg_ctn", "ZXX", "jgms").createOrReplaceTempView("pgjg");
        // NOTE(review): '100086', '100092', '100093' below look like typos for the
        // five-digit codes 10086/10092/10093. Harmless today because the fallback
        // branch is also 'D', but confirm before relying on the explicit list.
        spark.sql("" +
                "select S_TRAIN_ID,BJPJM,BJWZ,MXMC,ZXX,jgms,\n" +
                "       case\n" +
                "           when jgms LIKE '%短接接触器%' then if(MXMC in ('10075', '10076'), 'D', 'C')\n" +
                "           when jgms LIKE '%温度过高保护%' then if(MXMC = '10088', 'D', 'C')\n" +
                "           when pgjg_ctn > 2 then if(MXMC in ('10085', '10087'), 'D', 'C')\n" +
                "           when MXMC in ('10079', '10080', '10099') then 'C'\n" +
                "           when MXMC in ('10081','10083', '10084','100086', '10089', '10090', '10091','100092','100093', '10094', '10095', '10096', '10097', '10098') then 'D'\n" +
                "           else 'D' end as pgjg\n" +
                "from pgjg\n" +
                "where s_train_id like '%HXD1C%'").createOrReplaceTempView("pgjg_1");
        spark.sql("" +
                "select S_TRAIN_ID,BJPJM,BJWZ,MXMC,ZXX,jgms,\n" +
                "       case\n" +
                "           when pgjg_ctn > 2 then if(MXMC in ('10001', '10002', '10016', '10019'), 'D', 'C')\n" +
                "           else 'D' end as pgjg\n" +
                "from pgjg\n" +
                "where s_train_id like '%HXD1D%'").createOrReplaceTempView("pgjg_2");
    }

    /**
     * Stages the condition-based-maintenance upsert for the simple models: projects
     * the current batch onto the `t_phm_sqwx` schema, unions it with the open
     * history rows matching {@code mx_ids_filter}, aggregates the free-text columns
     * per record key (de-duplicating with the registered `distinctString` UDF), and
     * publishes the result in the `sqwx_1` temp view; the actual MySQL upsert
     * happens later in {@link #unionHealthAssessment}.
     *
     * @param spark          active Spark session
     * @param tcms_mxxq_DS   current-batch tcms x model-detail wide rows
     * @param mx_ids_filter  SQL fragment restricting the model ids, e.g. " not in (...)"
     * @throws IOException declared for MySQL access (write is currently commented out)
     */
    private static void upsertIntoTPhmSqwx(SparkSession spark, Dataset<Row> tcms_mxxq_DS, String mx_ids_filter) throws IOException {
        // 2023-05-28: `mxmc` now comes from the model-detail MXMC column instead of jgms.
        Dataset<Row> sqwxLocalBatchDS = tcms_mxxq_DS.selectExpr("concat_ws('-',train_status, s_train_id, phm_bjwz, mx_id, date_format(cksj, 'yyyyMMddHHmmss')) AS ID",
                "s_train_id",
                "cx",
                "cx_id",
                "split(s_train_id, '-')[1] as ch",
                // "ch",
                "lj",
                "ljjc",
                "lj_id AS ljm",
                "psd",
                "psdjc",
                "psd_id AS psddm",
                "ssxt",
                "xtpjm",
                "ssbj",
                "bjpjm",
                "phm_bjwz AS bjwz",
                "pjxlh AS bjxlh",
                "pjdjm AS bjdjm",
                "mxjg AS mx",
                // 2023-05-28: mxmc sourced from the model-detail MXMC column (was jgms).
                //"jgms AS mxmc",
                "mxmc",
                "xxms AS mxms",
                "current_timestamp AS create_time",
                "czjy",
                "1 AS xfzt",
                "1 AS htzt",
                "0 AS hkzt",
                // Open question: status was once defined as 0=transformer, 1=converter;
                // semantics for other parts are unconfirmed ('-' was the placeholder idea).
                "1 AS status",
                "jcdllxmc AS dllx",
                "rc",
                "'-' AS yyzl",
                "zzc AS sccj",
                "zzrq",
                "ljzxgl AS ljzx",
                "gzdm AS gzm",
                "'TCMS' AS gzly",
                "'-' AS zdlb",
                // fssj takes the earliest fault-occurrence time (requested change).
                "gzkssj_up as fssj",
                "yfyy",
                "zbjy AS hkzbjy",
                "JXJY AS hkjxjy"
        );
//        System.out.println(" = = =  = sqwxLocalBatchDS  = = = = =  = line 433");
//        sqwxLocalBatchDS.show(20, false);
        // Open t_phm_sqwx history for the current trip, unioned with the batch and
        // re-aggregated on the key columns so the upsert replaces stale rows.
        Dataset<Row> sqwxHistoryDS = spark.sql("select id,s_train_id, cx,cx_id, ch, lj, ljjc, ljm, psd, psdjc, psddm, ssxt, xtpjm, ssbj, bjpjm, bjwz, bjxlh, bjdjm, mx, mxmc, mxms, create_time, czjy, xfzt, htzt,hkzt, status, dllx, rc, yyzl, sccj, zzrq, ljzx, gzm, gzly, zdlb, fssj, yfyy, hkzbjy, hkjxjy " +
                "from t_phm_sqwx where HKZT = '0' and BJPJM  = 'PJ00003' and CX IN ('HXD1D','HXD1C') and split(id, '-')[3]  " + mx_ids_filter + " ");
//        System.out.println(" = = = = = sqwxHistoryDS line 438 = = = = = = ");
//        sqwxHistoryDS.show(20, false);
        Dataset<Row> sqwxUpdataDS = sqwxLocalBatchDS.union(sqwxHistoryDS)
                .groupBy("id", "s_train_id", "cx", "cx_id", "ch", "lj", "ljjc", "ljm", "psd", "psdjc", "psddm", "ssxt", "xtpjm", "ssbj", "bjpjm", "bjwz", "bjxlh", "bjdjm", "xfzt", "htzt", "hkzt", "status", "dllx", "rc", "yyzl", "sccj", "zzrq", "ljzx", "gzly", "zdlb")
                .agg(
                        concat_ws(",", collect_set(col("mx"))).alias("mx"),
                        concat_ws(",", collect_set(col("mxmc"))).alias("mxmc"),
                        concat_ws(",", collect_set(col("mxms"))).alias("mxms"),
                        concat_ws(",", collect_set(col("czjy"))).alias("czjy"),
                        concat_ws(",", collect_set(col("yfyy"))).alias("yfyy"),
                        concat_ws(",", collect_set(col("gzm"))).alias("gzm"),
                        concat_ws(",", collect_set(col("hkzbjy"))).alias("hkzbjy"),
                        concat_ws(",", collect_set(col("hkjxjy"))).alias("hkjxjy"),
                        min("fssj").alias("fssj")
                )
                .withColumn("create_time", current_timestamp())
                // distinctString is a registered UDF that de-duplicates a comma-joined list.
                .withColumn("mx", expr("distinctString(mx)"))
                .withColumn("mxmc", expr("distinctString(mxmc)"))
                .withColumn("mxms", expr("distinctString(mxms)"))
                .withColumn("czjy", expr("distinctString(czjy)"))
                .withColumn("yfyy", expr("distinctString(yfyy)"))
                .withColumn("gzm", expr("distinctString(gzm)"))
                .withColumn("hkzbjy", expr("distinctString(hkzbjy)"))
                .withColumn("hkjxjy", expr("distinctString(hkjxjy)"));
//        System.out.println(" = = = = = = line 462 sqwxUpdataDS sqwx_1 ");
        sqwxUpdataDS.createOrReplaceTempView("sqwx_1");
        // The MySQL upsert is performed by the caller from the sqwx_1 view.
        // MysqlUtils_3.upsertDatasetToMySQL(sqwxUpdataDS, "t_phm_sqwx");
    }

    /**
     * Appends the current batch of simple-model alarms to `ads_phm_fault`: one
     * output row per alarm, renamed to the target schema, with a composite mx_id
     * key, a create_time stamp, and duplicates removed by s_fault_id.
     *
     * @param tcms_mxxq_DS current-batch tcms x model-detail wide rows
     * @throws IOException if the MySQL write fails
     */
    private static void insertIntoAdsPhmFault(Dataset<Row> tcms_mxxq_DS) throws IOException {
        // Projection expressions from the wide row onto the ads_phm_fault schema.
        String[] faultColumnExprs = {
                "s_train_id",
                "idx AS s_fault_id",
                // composite key: trainStatus-trainId-position-modelId-checkoutTime
                "concat_ws('-',train_status,s_train_id, phm_bjwz, mx_id, date_format(cksj, 'yyyyMMddHHmmss')) AS mx_id",
                "s_train_id AS s_train_name",
                "cx AS s_train_type_code",
                "source AS s_fault_source",
                "gzkssj_up AS s_fault_time",
                "ssxt AS s_fault_sys",
                "xtbm AS s_fault_sys_code1",
                "xtpjm AS s_fault_sys_code2",
                "ssbj AS s_fault_bw",
                "bjbm AS s_fault_bw_code1",
                "bjpjm AS s_fault_bw_code2",
                "gzdm AS s_fault_code",
                "gzmc AS s_fault_name",
                "phm_lb",
                "mxjg",
                "phm_mc AS phm_gzmc",
                "phm_bjwz",
                "zj_ms",
                "zj_fa",
                "pjxlh",
                "pjdjm",
                "1 AS s_htzt",
                "0 AS s_hkzt",
                "1 AS s_xfzt"
        };
        Dataset<Row> adsPhmFaultDS = tcms_mxxq_DS
                .selectExpr(faultColumnExprs)
                .withColumn("create_time", current_timestamp())
                // One row per alarm id even if the join produced duplicates.
                .dropDuplicates("s_fault_id");
        MysqlUtils_3.writeDataToMysqlTable(adsPhmFaultDS, "ads_phm_fault", SaveMode.Append);
    }

    /**
     * Builds the wide row for the current batch: joins the `tcmsEvent` temp view
     * (real-time alarms) with the model-detail table `t_phm_mxxq` on train type
     * (cx = SYJX) and fault code (gzdm = phm_gzm; changed 2023-07-28 from the old
     * fault-name match), restricted to traction-converter models (BJPJM='PJ00003')
     * whose MX_ID satisfies the caller-supplied filter fragment.
     *
     * @param spark         active Spark session (tcmsEvent must already be registered)
     * @param mx_ids_filter SQL fragment applied to MX_ID, e.g. " not in ('10003',...)"
     * @return tcms x model-detail wide rows
     */
    private static Dataset<Row> tcmsJoinMxxqDs(SparkSession spark, String mx_ids_filter) {
        // tcmsEvent columns: s_train_id,idx,cx,source,gzkssj_up,ssxt,xtbm,xtpjm,ssbj,bjbm,bjpjm,gzdm_wm,gzmc,phm_lb,phm_mc,phm_bjwz,zj_ms,zj_fa,pjxlh,pjdjm,lj,ljjc,lj_id,psd,psdjc,psd_id,cksj,train_status
        // plus: JCDLLXMC,RC,YYZL,ZZC,ZZRQ,LJZXGL,GZM,GZLY,ZDLB,MX_ID,SYJX,MXMC,MXJG,BJPJM,JGMS,XXMS,CZJY,ZBJY,JXJY,YFYY
        String wideRowSql =
                "with t2 as (\n" +
                "    select MX_ID,SYJX,MXMC,MXJG,JGMS,XXMS,CZJY,ZBJY,JXJY,YFYY,phm_gzm\n" +
                "    from t_phm_mxxq\n" +
                "    where SYJX in ('HXD1D', 'HXD1C') and BJPJM = 'PJ00003' and MX_ID " + mx_ids_filter + "\n" +
                ")\n" +
                "select t1.*, t2.* \n" +
                "from tcmsEvent t1\n" +
                "         join t2 on t1.cx = t2.SYJX and t1.gzdm = t2.phm_gzm";
        return spark.sql(wideRowSql);
    }
}