package org.example.dev;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.VoidFunction;
import org.apache.spark.sql.*;
import org.apache.spark.sql.api.java.UDF1;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka010.ConsumerStrategies;
import org.apache.spark.streaming.kafka010.KafkaUtils;
import org.apache.spark.streaming.kafka010.LocationStrategies;
import org.example.ch.InitKafka;
import org.example.ch.getProperties;
import org.example.develop.StringUtilsUDF;
import org.example.develop.jxxxOrderGroupUDF;
import org.example.utils.MysqlUtils_3;
import org.example.utils.PrepareDimTable;

import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Properties;

import static org.apache.spark.sql.functions.*;

public class QytfjAndLqtfj {
    // Classpath location of the properties file holding the MySQL connection settings.
    // NOTE(review): this constant duplicates CONFIGPATH below with the same value and is
    // not referenced in the visible portion of this file — confirm it is unused before removing.
    private static final String CONFIGPATHS = "config.properties";
    // Shared Spark handles; both are assigned once in main() before any use.
    public static SparkSession spark = null;
    public static JavaSparkContext sc = null;

    // JDBC connection properties (user, password, driver, url) populated once by the
    // static initializer below and passed to every spark.read().jdbc(...) call.
    private static Properties mysqlProperties_new = new Properties();

    // Same value as CONFIGPATHS; this is the constant actually used for property lookups.
    private static final String CONFIGPATH = "config.properties";

    static {
        mysqlProperties_new.setProperty("user", getProperties.getPropertiesKey("mysql_user", CONFIGPATH));
        mysqlProperties_new.setProperty("password", getProperties.getPropertiesKey("mysql_pwd", CONFIGPATH));
        mysqlProperties_new.setProperty("driver", getProperties.getPropertiesKey("mysql_driver", CONFIGPATH));
        mysqlProperties_new.setProperty("url", getProperties.getPropertiesKey("mysql-url", CONFIGPATH));
    }

    public static void main(String[] args) throws IOException, AnalysisException {
        SparkConf conf = new SparkConf()
                .setMaster("local[*]") // 设置运行模式，这里是本地模式
                .set("spark.executor.memory", "6g") // 设置executor内存大小
                .setAppName("hxd3_hxd3c_qytfj_lqtfj_mx")
                .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
                .set("spark.driver.allowMultipleContexts", "true")
                .set("spark.hadoop.validateOutputSpecs", "false")
                .set("hive.mapred.supports.subdirectories", "true")
                .set("spark.streaming.backpressure.enabled", "true")
                .set("spark.streaming.kafka.maxRatePerPartition", "60")
                .set("spark.default.parallelism", "100")
                .set("spark.sql.shuffle.partitions", "20")
                .set("mapreduce.input.fileinputformat.input.dir.recursive", "true")
                .set("spark.scheduler.listenerbus.eventqueue.capacity", "100000")
                .set("spark.streaming.kafka.consumer.cache.enabled", "false")
                .set("spark.debug.maxToStringFields", "100");
        sc = new JavaSparkContext(conf);
        sc.setLogLevel("ERROR");
        JavaStreamingContext jsc = new JavaStreamingContext(sc, Durations.seconds(300));
        spark = SparkSession
                .builder()
                .appName("hxd2_qyblq")
                .config("spark.sql.autoBroadcastJoinThreshold", 500L * 1024 * 1024)  // 设置为500M
                .config("spark.sql.shuffle.partitions", "20")
                .config("spark.shuffle.consolidateFiles", "true")
                .config("spark.shuffle.sort.bypassMergeThreshold", "200")
                .config("spark.shuffle.consolidateFiles", "true")
                .getOrCreate();
        spark.udf().register("distinctString", new UDF1<String, String>() {
            public String call(String input) {
                return StringUtilsUDF.distinctString(input);
            }
        }, DataTypes.StringType);

        spark.udf().register("jxxxOrderGroupUDF", new jxxxOrderGroupUDF(), DataTypes.StringType);
        // 所有车的信息
        PrepareDimTable.ps_cx_dllx_lj_psd(spark);

        // TODO 2023-10-13 ps表中添加字段: cx_id
        spark.sql("select s_train_id,cx,cx_id,ch,lj_id,lj,ljjc,psd_id,psd,psdjc,yyd_id,yyd,yydjc,zzc,zzrq,psrq,jcdllxmc,ljpxm,s_train_type from ps_cx_dllx_lj_jwd")
                .filter("cx in ('HXD3','HXD3C','HXD1C','HXD1D')")
                .createOrReplaceTempView("ps");

        // 机车履历表,当前表中维护着车以及车上部件的信息
        spark.read()
                .jdbc(getProperties.getPropertiesKey("mysql-url", CONFIGPATH), "dim_jmis_jcda_jcll_1", mysqlProperties_new)
                .createOrReplaceTempView("dim_jmis_jcda_jcll_1");
        // MysqlUtils_3.readMysqlToTempTable(spark, "dim_jmis_jcda_jcll_1");
        spark.sql("" +
                        "SELECT CXM, CH, GXMC, PJFLBM, WZ, PJXLH, PJDJM, S_TRAIN_ID, ZZRQ, SCCJ, GZXH, CHANGETIME\n" +
                        "FROM (\n" +
                        "       SELECT CXM, CH, GXMC, PJFLBM, WZ, PJXLH, PJDJM, CONCAT(CXM, '-', CH) AS S_TRAIN_ID, ZZRQ, SCCJ, GZXH,\n" +
                        "              CHANGETIME, row_number() OVER (PARTITION BY CXM, CH, PJFLBM, WZ ORDER BY CHANGETIME DESC) AS rank_jcll\n" +
                        "       FROM dim_jmis_jcda_jcll_1\n" +
                        "       WHERE CXM IN ('HXD3', 'HXD3C','HXD1D', 'HXD1C') AND PJFLBM IN ('PJ00033', 'PJ00177')\n" +
                        "     ) jcll_temp\n" +
                        "WHERE rank_jcll = '1'\n")
                .createOrReplaceTempView("jcll");

        // 机车构型字典中的数据
        spark.read()
                .jdbc(getProperties.getPropertiesKey("mysql-url", CONFIGPATH), "dim_jmis_t_jcbm_jcgxzd", mysqlProperties_new)
                .createOrReplaceTempView("dim_jmis_t_jcbm_jcgxzd");
        // MysqlUtils_3.readMysqlToTempTable(spark, "dim_jmis_t_jcbm_jcgxzd");
        spark.sql("" +
                        "select SYCX,FLMC, LBJBM, WZMC, FLJC as SysFL, FLBM as SysFLCode1, LBJBM as SysFLCode2\n" +
                        "from dim_jmis_t_jcbm_jcgxzd\n" +
                        "where SYCX in ('HXD3', 'HXD3C','HXD1D', 'HXD1C') and LBJBM in ('PJ00033', 'PJ00177')")
                .createOrReplaceTempView("gxzd");
        // 走行1,走行2,走行3
        spark.read()
                .jdbc(getProperties.getPropertiesKey("mysql-url", CONFIGPATH), "dim_jmis_jcda_t_jxglzxgl_v", mysqlProperties_new)
                .createOrReplaceTempView("dim_jmis_jcda_t_jxglzxgl_v");
        spark.read()
                .jdbc(getProperties.getPropertiesKey("mysql-url", CONFIGPATH), "dim_jmis_jxgl_t_jxgl_jxdt", mysqlProperties_new)
                .createOrReplaceTempView("dim_jmis_jxgl_t_jxgl_jxdt");
        spark.read()
                .jdbc(getProperties.getPropertiesKey("mysql-url", CONFIGPATH), "dim_jmis_jcda_zxgl_day", mysqlProperties_new)
                .createOrReplaceTempView("dim_jmis_jcda_zxgl_day");
        spark.read()
                .jdbc(getProperties.getPropertiesKey("mysql-url", CONFIGPATH), "dim_jmis_t_jcbm_zjxc", mysqlProperties_new)
                .createOrReplaceTempView("dim_jmis_t_jcbm_zjxc");
//        MysqlUtils_3.readMysqlToTempTable(spark, "dim_jmis_jcda_t_jxglzxgl_v");
//        MysqlUtils_3.readMysqlToTempTable(spark, "dim_jmis_jxgl_t_jxgl_jxdt");
//        MysqlUtils_3.readMysqlToTempTable(spark, "dim_jmis_jcda_zxgl_day");
//        MysqlUtils_3.readMysqlToTempTable(spark, "dim_jmis_t_jcbm_zjxc");

        spark.sql("select concat(cx,'-',ch) as S_TRAIN_ID,round(LJZX) as LJZX from dim_jmis_jcda_t_jxglzxgl_v where cx in('HXD1D','HXD1C','HXD3','HXD3C')").createOrReplaceTempView("zx1");
        spark.sql("" +
                "select S_TRAIN_ID, ROUND(SUM(ZXGL_DAY), 0) AS LJZX\n" +
                "from (\n" +
                "       SELECT CONCAT(cxjc, '-', ch) AS S_TRAIN_ID, MAX(JGRQ) AS JXRQ\n" +
                "       FROM dim_jmis_jxgl_t_jxgl_jxdt\n" +
                "       WHERE RC IN ('C4修', 'C5修', 'C6修')\n" +
                "       GROUP BY cxjc, ch\n" +
                ") jxdt_mid\n" +
                "LEFT JOIN dim_jmis_jcda_zxgl_day zx  ON jxdt_mid.S_TRAIN_ID = CONCAT(zx.CX, '-', zx.CH) AND zx.REPOT_DATE > jxdt_mid.JXRQ\n" +
                "GROUP BY S_TRAIN_ID").createOrReplaceTempView("zx2");

        spark.sql("" +
                "SELECT S_TRAIN_ID, ROUND(SUM(ZXGL_DAY), 0) AS LJZX\n" +
                "FROM (\n" +
                "       SELECT CONCAT(zjxc.cxjc, '-', zjxc.ch) AS S_TRAIN_ID, zx.ZXGL_DAY AS ZXGL_DAY\n" +
                "       FROM dim_jmis_t_jcbm_zjxc zjxc LEFT JOIN dim_jmis_jcda_zxgl_day zx ON CONCAT(zx.CX, '-', zx.CH) = CONCAT(zjxc.cxjc, '-', zjxc.ch) AND zx.REPOT_DATE > zjxc.KCRQ\n" +
                "     ) A\n" +
                "GROUP BY S_TRAIN_ID").filter("ljzx is not null").createOrReplaceTempView("zx3");

        spark.sql("" +
                "select ps.s_train_id as s_train_id, ps.cx as cxm, ps.ch as ch,ps.cx_id,ps.lj_id, ps.lj, ps.ljjc,ps.psd_id,ps.psd,   ps.psdjc, ps.ljpxm,ps.jcdllxmc as jcdllxmc,\n" +
                "gxzd.wzmc as wz, gxzd.sysfl,gxzd.sysflcode1,gxzd.sysflcode2, gxzd.flmc as pjflm,\n" +
                "gxzd.sysflcode2 as pjflbm,gxzd.flmc as gxmc, jcll.pjxlh as pjxlh, jcll.pjdjm as pjdjm,jcll.zzrq as zzrq,jcll.sccj,jcll.gzxh,jcll.gzxh as ggxh_th,jcll.changetime as changetime,\n" +
                "cast(round(z1.ljzx) as string) ljzxgl,cast(round(z2.ljzx) as string) gjxhzx,cast(round(z3.ljzx) as string) zjxhzx\n" +
                "from ps\n" +
                "     LEFT JOIN gxzd ON ps.cx = gxzd.SYCX\n" +
                "     LEFT JOIN jcll ON ps.s_train_id = jcll.s_train_id and gxzd.WZMC = jcll.wz\n" +
                "     LEFT JOIN zx1 z1 ON ps.s_train_id = z1.S_TRAIN_ID\n" +
                "     LEFT JOIN zx2 z2 ON ps.s_train_id = z2.S_TRAIN_ID\n" +
                "     LEFT JOIN zx3 z3 ON ps.s_train_id = z3.S_TRAIN_ID").createOrReplaceTempView("ps_gxzd_jcll_zx");
        spark.sql("" +
                "SELECT *,\n" +
                "       CONCAT('{\"部件状态\":\"新造\",',\n" +
                "              '\"生产厂家\":\"', IF(SCCJ IS NOT NULL, SCCJ, '-'), '\",',\n" +
                "              '\"检修段\":\"-\",',\n" +
                "              '\"日期\":\"', IF(CHANGETIME IS NOT NULL, CHANGETIME, '-'), '\",',\n" +
                "              '\"本次走行公里\":\"-\",',\n" +
                "              '\"下车型号\":\"-\",',\n" +
                "              '\"下车位置\":\"-\",',\n" +
                "              '\"上车型号\":\"', IF(S_TRAIN_ID IS NOT NULL, S_TRAIN_ID, '-'), '\",',\n" +
                "              '\"上车位置\":\"1\"}') jcll_jxxx\n" +
                "FROM ps_gxzd_jcll_zx\n").createOrReplaceTempView("ps_gxzd_jcll_zx_jxxx");
        spark.read()
                .jdbc(getProperties.getPropertiesKey("mysql-url", CONFIGPATH), "dim_jmis_t_jxgl_jxjld_jbxx", mysqlProperties_new)
                .createOrReplaceTempView("dim_jmis_t_jxgl_jxjld_jbxx");

        // MysqlUtils_3.readMysqlToTempTable(spark, "dim_jmis_t_jxgl_jxjld_jbxx");
        spark.sql("" +
                "SELECT a2.SCXH,\n" +
                "       a2.LBJBM,\n" +
                "       a2.SCWZ,\n" +
                "       CONCAT_WS('&', COLLECT_SET(data_jxxx)) AS jxjld_jxxx\n" +
                "FROM (\n" +
                "       SELECT t3.SCXH,\n" +
                "              t3.LBJBM,\n" +
                "              t3.SCWZ,\n" +
                "              CONCAT(\n" +
                "                      '{\"部件状态\":\"', IF(t3.BJZT IS NOT NULL, t3.BJZT, '-'),\n" +
                "                      '\",\"生产厂家\":\"', IF(t3.SCCJ IS NOT NULL, t3.SCCJ, '-'),\n" +
                "                      '\",\"检修段\":\"', IF(t3.JXD IS NOT NULL, t3.JXD, '-'),\n" +
                "                      '\",\"日期\":\"', IF(t3.RQ IS NOT NULL, t3.RQ, '-'),\n" +
                "                      '\",\"本次走行公里\":\"', IF(t3.BCZXGL IS NOT NULL, t3.BCZXGL, '-'),\n" +
                "                      '\",\"下车型号\":\"', IF(t3.XCXH IS NOT NULL, t3.XCXH, '-'),\n" +
                "                      '\",\"下车位置\":\"', IF(t3.XCWZ IS NOT NULL, t3.XCWZ, '-'),\n" +
                "                      '\",\"上车型号\":\"', IF(t3.SCXH IS NOT NULL, t3.SCXH, '-'),\n" +
                "                      '\",\"上车位置\":\"', IF(t3.SCWZ IS NOT NULL, t3.SCWZ, '-'),\n" +
                "                      '\"}') data_jxxx\n" +
                "       FROM (\n" +
                "              SELECT t2.LBJBM,\n" +
                "                     t2.BJZT,\n" +
                "                     t2.SCCJ,\n" +
                "                     t2.JXD,\n" +
                "                     t2.RQ,\n" +
                "                     t2.BCZXGL,\n" +
                "                     t2.XCXH,\n" +
                "                     t2.XCWZ,\n" +
                "                     t2.SCXH,\n" +
                "                     t2.SCWZ,\n" +
                "                     ROW_NUMBER() OVER (PARTITION BY t2.SCXH,t2.LBJBM,t2.SCWZ,t2.BJZT ORDER BY t2.RQ DESC) AS rk\n" +
                "              FROM (\n" +
                "                     SELECT t1.LBJBM,\n" +
                "                            CONCAT(t1.RT, t1.RC) AS BJZT,\n" +
                "                            t1.ZZCJ AS SCCJ,\n" +
                "                            t1.CXDW AS JXD,\n" +
                "                            t1.CREATE_TIME AS RQ,\n" +
                "                            t1.BCZXGL,\n" +
                "                            CONCAT(t1.XCCX, '-', t1.XCCH) AS XCXH,\n" +
                "                            t1.XCWZ,\n" +
                "                            DATE(t1.JXRQ_JS) AS XCRQ,\n" +
                "                            CONCAT(t1.SCCX, '-', t1.SCCH) AS SCXH,\n" +
                "                            t1.SCWZ,\n" +
                "                            DATE(t1.JXRQ_JS) AS SCRQ\n" +
                "                     FROM dim_jmis_t_jxgl_jxjld_jbxx t1\n" +
                "                     WHERE t1.RECORD_STATUS = '0' " +
                "AND t1.SCCX IN ('HXD3', 'HXD3C', 'HXD1D', 'HXD1C') AND t1.LBJBM IN ('PJ00177', 'PJ00033')\n" +
                "                   ) t2\n" +
                "            ) t3\n" +
                "       WHERE t3.rk = 1\n" +
                "     ) a2\n" +
                "GROUP BY a2.SCXH, a2.LBJBM, a2.SCWZ").createOrReplaceTempView("jxxx2");
        // 临时表表名: ps_gxzz_jcll_zx_jxxx_all
        // ps + gzzd + jcll_1 + zx1 + zx2 + zx3 + jxxx 几张表进行关联,关联后的字段数据
        // s_train_id,cxm,ch,lj_id,lj,ljjc,psd_id,psd,psdjc,ljpxm,jcdllxmc,wz,sysfl,sysflcode1,sysflcode2,
        // pjflm,pjflbm,gxmc,pjxlh,pjdjm,zzrq,sccj,gzxh,changetime,ljzxgl,gjxhzx,zjxhzx,jcll_jxxx,jxjld_jxxx,JXXX
        spark.sql("" +
                "select distinct a1.*,a2.jxjld_jxxx,jxxxOrderGroupUDF(a1.jcll_jxxx, a2.jxjld_jxxx) AS JXXX " +
                "from ps_gxzd_jcll_zx_jxxx a1 left join jxxx2 a2 on a1.S_TRAIN_ID = a2.SCXH AND a1.PJFLBM = a2.LBJBM AND a1.WZ = a2.SCWZ").createOrReplaceTempView("ps_gxzz_jcll_zx_jxxx_all");
        //读取配置文件中的kafka相关参数
        HashMap<String, Object> kafkaParams = InitKafka.init("TransFault111111111");

        //获取配置文件topic名称
        String tcds_tcms_fault_topic = getProperties.getPropertiesKey("tcds_tcms_bd_topic1", "config.properties");
        final HashSet<String> topics = new HashSet<>(Arrays.asList(tcds_tcms_fault_topic.split(",")));
        //消费kafka中topic数据
        JavaInputDStream<ConsumerRecord<String, String>> directStream = KafkaUtils.createDirectStream(
                jsc,
                LocationStrategies.PreferConsistent(),
                ConsumerStrategies.<String, String>Subscribe(topics, kafkaParams)
        );
        JavaDStream<String> lineValue = directStream.map(new Function<ConsumerRecord<String, String>, String>() {
            @Override
            public String call(ConsumerRecord<String, String> stringStringConsumerRecord) throws Exception {
                String value = stringStringConsumerRecord.value();
                return value;
            }
        });

        lineValue.foreachRDD(new VoidFunction<JavaRDD<String>>() {
            @Override
            public void call(JavaRDD<String> stringJavaRDD) throws Exception {
                if (!stringJavaRDD.isEmpty()) {
                    // 每个批次有数据处理之前,需要先判断当前车是否回库整备,如果整备,则刷新`parts_label`这个表中字段 `pgcszb`中的 "模型结果":"A","模型转义":"正常"
                    // 获取aei表中每个车的运行状态,这里获取 每个车 TRAIN_STATUS = 0 的数据,状态为0,则对应的时间就是出库时间
                    MysqlUtils_3.loadTrainStatusFromPhoenix(spark, "in ('HXD1D','HXD1C','HXD3','HXD3C')", "aei_run");

                    /*
                    MysqlUtils_3.readMysqlToTempTable(spark, "train_status");
                    spark.sql("select s_train_id,jccx as loco_type,ch as loco_no,train_status_time as cksj,train_status " +
                            " from train_status" +
                            " where train_status = '0' and jccx in('HXD1D','HXD1C','HXD3','HXD3C')").createOrReplaceTempView("aei_run");
                            */

                    // 临时表: tcmsEvent kafka 实时数据字段
                    //CX,CH,S_TRAIN_ID,SSXT,XTBM,XTPJM,SSBJ,BJBM,BJPJM,cmd_gzm,GZDM,GZDMSHOW,phm_gzm,phm_bjwz,phm_lb,phm_mc,GZMC,phm_zd,
                    //IDX,GZKSSJ_UP,GZKSSJ,LJ_ID,lj,ljjc,PSD_ID,psd,psdjc,YYD_ID,yyd,yydjc,zzc,zzrq,psrq,JCDLLXMC,ljpxm,
                    //SOURCE,ZJ_MS,ZJ_FA,JXRQ,RC,RT,CXDMC,ljzxgl_tcms,rjzx_tcms,xhzxgl_tcms,yyzl,zdlb,gzm
                    spark.read().json(stringJavaRDD)
//                            .filter(expr("bjpjm in ('PJ00033','PJ00177') and phm_lb != '-' and phm_lb is not null and phm_lb != '' "))
                            .filter(expr("bjpjm in ('PJ00033','PJ00177') "))
                            .selectExpr("CX", "CH", "S_TRAIN_ID", "SSXT", "XTBM", "XTPJM", "SSBJ", "BJBM", "BJPJM", "cmd_gzm", "GZDM", "GZDMSHOW", "phm_gzm", "phm_bjwz", "phm_lb", "phm_mc", "GZMC", "phm_zd",
                                    "IDX", "GZKSSJ_UP", "GZKSSJ", "LJ_ID", "lj", "ljjc", "PSD_ID", "psd", "psdjc", "YYD_ID", "yyd", "yydjc", "zzc", "zzrq", "psrq", "JCDLLXMC", "ljpxm",
                                    "SOURCE", "ZJ_MS", "ZJ_FA", "JXRQ", "RC", "RT", "CXDMC", "ljzxgl as ljzxgl_tcms", "rjzx as rjzx_tcms", "xhzxgl as xhzxgl_tcms")
                            .withColumn("yyzl", lit("-"))
                            .withColumn("zdlb", lit("模型诊断信息"))
                            .withColumn("gzm", expr("CASE WHEN cmd_gzm <> '' AND cmd_gzm IS NOT NULL THEN cmd_gzm WHEN GZDM <> '' AND GZDM IS NOT NULL THEN GZDM WHEN GZDMSHOW <> '' AND GZDMSHOW IS NOT NULL THEN GZDMSHOW ELSE phm_gzm END"))
                            .createOrReplaceTempView("tcmsEvent");

                    // 读取模型详情表字典表中的数据
                    spark.read()
                            .jdbc(getProperties.getPropertiesKey("mysql-url", CONFIGPATH), "t_phm_mxxq", mysqlProperties_new)
                            .createOrReplaceTempView("t_phm_mxxq");
                    spark.sql("" +
                            "select mx_id,syjx,mxmc,mxjg,bjpjm,jgms,xxms,czjy,zbjy,jxjy,yfyy,phm_gzm\n" +
                            "from t_phm_mxxq\n" +
                            "where syjx in ('HXD1D', 'HXD1C', 'HXD3', 'HXD3C') and bjpjm in ('PJ00033', 'PJ00177')").createOrReplaceTempView("mxxq");
                    // MysqlUtils_3.readMysqlToTempTable(spark, "t_phm_mxxq");


                    // 获取在线上跑的车的故障信息 关联 车的基本信息以及检修信息,获取后面用到的所有字段,为了防止消费到车在整备阶段报的故障信息
                    // System.out.println("实时数据 + aei + ps_jcll_jxxx_all 所有字段 = = = = =  =");
                    Dataset<Row> tcms_jcll_mxxq_DS = spark.sql("" +
                                    "select t1.*,t2.cksj,t2.train_status,t1.GZKSSJ as GZKSSJ1," +
                                    "t3.cx_id,t3.sysfl,t3.sysflcode1,t3.sysflcode2,t3.pjflm,t3.pjflbm,t3.gxmc,t3.pjxlh,t3.pjdjm,t3.sccj,t3.gzxh as ggxh_th,t3.changetime,t3.ljzxgl,t3.gjxhzx,t3.zjxhzx,t3.jcll_jxxx,t3.jxjld_jxxx,t3.JXXX," +
                                    "t4.mx_id,t4.mxmc,t4.mxjg,t4.jgms,t4.xxms,t4.czjy,t4.zbjy,t4.jxjy,t4.yfyy " +
                                    "from tcmsEvent t1  " +
                                    "join aei_run t2 on t1.s_train_id = concat(t2.loco_type, '-', t2.loco_no) " +
                                    "join ps_gxzz_jcll_zx_jxxx_all t3 on t1.s_train_id = t3.S_TRAIN_ID and t1.phm_bjwz = t3.WZ and t1.BJPJM = t3.PJFLBM  " +
                                    "join mxxq t4 on t1.gzm = t4.phm_gzm and t1.cx = t4.syjx and t1.BJPJM = t4.bjpjm")
                            .dropDuplicates()
                            // 这里需要通过: 故障发生时间 >= 当前车的aei中出库时间 并且 当前出库时间 > 入库时间的数据
                            .filter(expr("gzkssj > cksj"));
                    Dataset<Row> adsPhmFaultDS = tcms_jcll_mxxq_DS.selectExpr(
                                    "s_train_id",
                                    "idx AS s_fault_id",
                                    "concat_ws('-',train_status,s_train_id, phm_bjwz, mx_id, date_format(cksj, 'yyyyMMddHHmmss')) AS mx_id",
                                    "s_train_id AS s_train_name",
                                    "cx AS s_train_type_code",
                                    "source AS s_fault_source",
                                    "gzkssj_up AS s_fault_time",
                                    // "SSXT", "XTBM", "XTPJM", "SSBJ", "BJBM", "BJPJM",
                                    //  "t3.sysfl,t3.sysflcode1,t3.sysflcode2,t3.pjflm,t3.pjflbm,t3.gxmc,t3.pjxlh,t3.pjdjm,
                                    "case when ssxt <> '' and ssxt is not null then ssxt else sysfl end as s_fault_sys",
                                    "case when xtbm <> '' and xtbm is not null then xtbm else sysflcode1 end AS s_fault_sys_code1",
                                    "case when xtpjm <> '' and xtpjm is not null then xtpjm else sysflcode2 end  AS s_fault_sys_code2",
                                    "case when ssbj <> '' and ssbj is not null then ssbj else pjflm end as s_fault_bw",
                                    "case when bjbm <> '' and bjbm is not null then bjbm else pjflbm end as s_fault_bw_code1",
                                    "bjpjm as s_fault_bw_code2",
                                    "gzm AS s_fault_code",
                                    "gzmc AS s_fault_name",
                                    "phm_lb",
                                    "mxjg",
                                    "phm_mc AS phm_gzmc",
                                    "phm_bjwz",
                                    "zj_ms",
                                    "zj_fa",
                                    "pjxlh",
                                    "pjdjm",
                                    "1 AS s_htzt",
                                    "0 AS s_hkzt",
                                    "1 AS s_xfzt"
                            )
                            // TODO 2023-10-12 ads_phm_fault 表中 添加入库时间字段
                            .withColumn("create_time", current_timestamp())
                            .dropDuplicates("s_fault_id");
                    // 测试通过,入 ads_phm_fault 表中的数据没有问题.
                    MysqlUtils_3.writeDataToMysqlTable(adsPhmFaultDS, "ads_phm_fault", SaveMode.Append);
                    // 拉取当前批次中入 t_phm_sqwx 表中的字段. 这里需要进行聚合操作,
                    // TODO 2023-05-28: 修改入 t_phm_sqwx 这个表中 mxmc 字段的取值: 由以前的 jgms 字段 --> mxmc 字段.
                    Dataset<Row> sqwxLocalBatchDS = tcms_jcll_mxxq_DS.selectExpr("concat_ws('-',train_status,s_train_id, phm_bjwz, mx_id, date_format(cksj, 'yyyyMMddHHmmss')) AS ID", "s_train_id",
                            "cx",
                            "cx_id",
                            "ch",
                            "lj",
                            "ljjc",
                            "lj_id AS ljm",
                            "psd",
                            "psdjc",
                            "psd_id AS psddm",
                            // ssxt、xtpjm 、ssbj、bjpjm 这几个字段取的是 tcms实时数据中的字段,如果要去 ps_gxzd_jcll_1_zx1 这几张表关联好的,应该取的字段:  sysfl,sysflcode2,pjflm,这里先取 tcms的字段,如果没有取到值,则取 ps_gxzd_jcll_1_zx1
                            "case when ssxt <> '' and ssxt is not null then ssxt else sysfl end as ssxt",
                            "case when xtpjm <> '' and xtpjm is not null then xtpjm else sysflcode2 end as xtpjm",
                            "case when ssbj <> '' and ssbj is not null then ssbj else pjflm end as ssbj",
                            "bjpjm",
                            "phm_bjwz AS bjwz",
                            "pjxlh AS bjxlh",
                            "pjdjm AS bjdjm",
                            "mxjg AS mx",
                            // 2023-05-28 修改 表事情维修表中 mxmc 这个字段的取值,由以前的取模型详情表中的 jgms 这个字段,修改为:MXMC 这个字典的值.
                            //"jgms AS mxmc",
                            "mxmc",
                            "xxms AS mxms",
                            "current_timestamp AS create_time",
                            "czjy",
                            "1 AS xfzt",
                            "1 AS htzt",
                            "0 AS hkzt",
                            // 这里有点问题,以前定的是: 0-变压器 1-变流器,其他部件呢 ? 确认后先空着,这里先用 '-' 占位
                            "'1' AS status",
                            "jcdllxmc AS dllx",
                            "rc",
                            "'-' AS yyzl",
                            "zzc AS sccj",
                            "zzrq",
                            "ljzxgl AS ljzx",
                            "gzm",
                            "SOURCE AS gzly",
                            "zdlb",
                            // 这里的发生时间,上次最后代码ch要求修改过,取的时间是 故障发生的最小时间.
                            "gzkssj_up as fssj",
                            "yfyy",
                            "zbjy AS hkzbjy",
                            "JXJY AS hkjxjy"
                    );

                    // 获取当前趟中 t_phm_sqwx 历史数据,和上面的当前批次中的数据进行 union,再进行关键字段的聚合,最终将当前执行结果更新写操作入表.
                    spark.read()
                            .jdbc(getProperties.getPropertiesKey("mysql-url", CONFIGPATH), "t_phm_sqwx", mysqlProperties_new)
                            .createOrReplaceTempView("t_phm_sqwx");
                    // MysqlUtils_3.readMysqlToTempTable(spark, "t_phm_sqwx");
                    Dataset<Row> sqwxHistoryDS = spark.sql("select id, s_train_id, cx, cx_id,ch, lj, ljjc, ljm, psd, psdjc, psddm, ssxt, xtpjm, ssbj, bjpjm, bjwz, bjxlh, bjdjm, " +
                            "mx, mxmc, mxms, create_time, czjy, xfzt, htzt,hkzt, status, dllx, rc, yyzl, sccj, zzrq, ljzx, gzm, gzly, zdlb, fssj, yfyy, hkzbjy, hkjxjy " +
                            "from t_phm_sqwx where HKZT = '0' and BJPJM IN ('PJ00033', 'PJ00177') and CX IN ('HXD1D','HXD1C','HXD3C','HXD3')");

                    Dataset<Row> sqwxUpdataDS = sqwxLocalBatchDS.union(sqwxHistoryDS)
                            .groupBy("id", "s_train_id", "cx", "cx_id", "ch", "lj", "ljjc", "ljm", "psd", "psdjc", "psddm", "ssxt", "xtpjm", "ssbj", "bjpjm", "bjwz", "bjxlh", "bjdjm", "xfzt", "htzt", "hkzt", "status", "dllx", "rc", "yyzl", "sccj", "zzrq", "ljzx", "gzly", "zdlb")
                            .agg(
                                    concat_ws(",", collect_set(col("mx"))).alias("mx"),
                                    concat_ws(",", collect_set(col("mxmc"))).alias("mxmc"),
                                    concat_ws(",", collect_set(col("mxms"))).alias("mxms"),
                                    concat_ws(",", collect_set(col("czjy"))).alias("czjy"),
                                    concat_ws(",", collect_set(col("yfyy"))).alias("yfyy"),
                                    concat_ws(",", collect_set(col("gzm"))).alias("gzm"),
                                    concat_ws(",", collect_set(col("hkzbjy"))).alias("hkzbjy"),
                                    concat_ws(",", collect_set(col("hkjxjy"))).alias("hkjxjy"),
                                    min("fssj").alias("fssj")
                            )
                            .withColumn("create_time", current_timestamp())
                            .withColumn("mx", expr("distinctString(mx)"))
                            .withColumn("mxmc", expr("distinctString(mxmc)"))
                            .withColumn("mxms", expr("distinctString(mxms)"))
                            .withColumn("czjy", expr("distinctString(czjy)"))
                            .withColumn("yfyy", expr("distinctString(yfyy)"))
                            .withColumn("gzm", expr("distinctString(gzm)"))
                            .withColumn("hkzbjy", expr("distinctString(hkzbjy)"))
                            .withColumn("hkjxjy", expr("distinctString(hkjxjy)"));
                    // 写入 t_phm_sqwx 中数据
                    MysqlUtils_3.upsertDatasetToMySQL(sqwxUpdataDS, "t_phm_sqwx");

                    Dataset<Row> tcms_jkpgDs = tcms_jcll_mxxq_DS.selectExpr("s_train_id", "bjpjm", "phm_bjwz", "mx_id", "MXJG", "xxms");
                    spark.read()
                            .jdbc(getProperties.getPropertiesKey("mysql-url", CONFIGPATH), "ads_phm_fault", mysqlProperties_new)
                            .createOrReplaceTempView("ads_phm_fault");
                    // MysqlUtils_3.readMysqlToTempTable(spark, "ads_phm_fault");
                    // 拉取 ads_phm_fault 表中当前趟中的历史数据,用来做 模型粒度的健康评估. ads_phm_fault
                    Dataset<Row> ads_history_jkpgDs = spark.sql("select s_train_id,\n" +
                            "       S_FAULT_BW_CODE2                                         as bjpjm,\n" +
                            "       phm_bjwz,\n" +
                            "       substring_index(substring_index(MX_ID, '-', 5), '-', -1) as mx_id,\n" +
                            "       MXJG,    \n" +
                            "       phm_gzmc                                                 as xxms\n" +
                            "from ads_phm_fault\n" +
                            "where S_HKZT = '0' and S_FAULT_BW_CODE2 IN ('PJ00033','PJ00177') and s_train_type_code IN ('HXD1D','HXD1C','HXD3','HXD3C')");
                    tcms_jkpgDs.union(ads_history_jkpgDs).groupBy("s_train_id", "bjpjm", "phm_bjwz", "mx_id", "MXJG")
                            .agg(
                                    collect_set(col("xxms")).as("xxms")

                            )
                            .withColumn("cx", split(col("s_train_id"), "-").getItem(0))
                            //.withColumn("pgjg", when(size(col("xxms")).geq(2), "C").otherwise("B"))
                            //.withColumn("pgjg", when(col("MXJG").rlike("冷却塔").or(col("MXJG").rlike("牵引风机")), lit("C")).otherwise("A"))
                            .withColumn("pgjg", lit("D"))
                            .withColumn("ZHPJ_ZTZL", when(col("pgjg").equalTo("D"), lit("D")).otherwise("A"))
                            .withColumn("xxms", concat_ws(",", col("xxms")))
                            .withColumn("mxzy", when(col("pgjg").equalTo("D"), "故障").otherwise("正常"))
                            .withColumn("PGCSZB", expr("CONCAT('{\"模型ID\":', mx_id, ',\"模型简称\":\"', mxjg, '\",\"模型结果\":\"', pgjg, '\",\"模型结果描述\":\"', xxms,'\",\"模型转义\":\"', mxzy, '\"}')"))
                            .withColumn("PGCSZB", expr(" case\n" +
                                    "           when cx = 'HXD3' and BJPJM = 'PJ00033' then concat(PGCSZB,'&{\"模型ID\":10059,\"模型简称\":\"人工检查牵引通风机故障\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}&{\"模型ID\":10060,\"模型简称\":\"设备检测数据异常\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}')\n" +
                                    "           when cx = 'HXD3' and BJPJM = 'PJ00177' then concat(PGCSZB,'&{\"模型ID\":10047,\"模型简称\":\"人工检查冷却塔风机故障\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}&{\"模型ID\":10048,\"模型简称\":\"设备检测数据异常\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}')\n" +
                                    "           when cx = 'HXD3C' and BJPJM = 'PJ00033' then concat(PGCSZB,'&{\"模型ID\":10056,\"模型简称\":\"人工检查牵引通风机故障\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}&{\"模型ID\":10057,\"模型简称\":\"设备检测数据异常\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}')\n" +
                                    "           when cx = 'HXD3C' and BJPJM = 'PJ00177' then concat(PGCSZB,'&{\"模型ID\":10044,\"模型简称\":\"人工检查冷却塔风机故障\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}&{\"模型ID\":10045,\"模型简称\":\"设备检测数据异常\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}')\n" +
                                    "           when cx = 'HXD1D' and BJPJM = 'PJ00033' then concat(PGCSZB,'&{\"模型ID\":10156,\"模型简称\":\"人工检查牵引通风机故障\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}&{\"模型ID\":10157,\"模型简称\":\"设备检测数据异常\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}')\n" +
                                    "           when cx = 'HXD1D' and BJPJM = 'PJ00177' then concat(PGCSZB,'&{\"模型ID\":10144,\"模型简称\":\"人工检查冷却塔风机故障\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}&{\"模型ID\":10145,\"模型简称\":\"设备检测数据异常\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}')\n" +
                                    "           when cx = 'HXD1C' and BJPJM = 'PJ00033' then concat(PGCSZB,'&{\"模型ID\":10256,\"模型简称\":\"人工检查牵引通风机故障\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}&{\"模型ID\":10257,\"模型简称\":\"设备检测数据异常\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}')\n" +
                                    "           when cx = 'HXD1C' and BJPJM = 'PJ00177' then concat(PGCSZB,'&{\"模型ID\":10244,\"模型简称\":\"人工检查冷却塔风机故障\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}&{\"模型ID\":10245,\"模型简称\":\"设备检测数据异常\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}')\n" +
                                    "else '' end PGCSZB"))
                            .dropDuplicates()
                            .createOrReplaceTempView("pgjg_temp");


                    Dataset<Row> tcmsPgDS = spark.sql("select t2.*, t1.PGCSZB,t1.ZHPJ_ZTZL " +
                                    "from pgjg_temp t1 join ps_gxzz_jcll_zx_jxxx_all t2 on t1.s_train_id = t2.S_TRAIN_ID and t1.bjpjm = t2.PJFLBM and t1.phm_bjwz = t2.WZ ")
                            // 2023-06-08 : 添加 "lj_id","lj","ljjc","psd_id","psd","psdjc",
                            .selectExpr("CXM as cx", "cx_id", "CH",
                                    "pjflm",
                                    "WZ", "PJXLH", "PJDJM",
                                    "ZZRQ",
                                    "SCCJ",
                                    "LJ_ID", "LJ", "ljjc", "PSD_ID", "PSD", "psdjc", "ljpxm",
                                    "ggxh_th", "S_TRAIN_ID", "PJFLBM", "LJZXGL", "GJXHZX", "ZJXHZX",
                                    "SysFL",
                                    "SysFLCode1",
                                    "sysflcode2",
                                    "JXXX", "PGCSZB", "ZHPJ_ZTZL");

                    // 所有车的所有信息
                    Dataset<Row> rowDataset = spark.sql("select t1.*,t3.BJPJM,concat(t3.PGCSZB,t3.PGCSZB_1) as PGCSZB,'A' as ZHPJ_ZTZL" +
                            " from ps_gxzz_jcll_zx_jxxx_all  t1 " +
                            " left join pgjg_temp t2 on t1.s_train_id = t2.S_TRAIN_ID and t2.bjpjm = t1.PJFLBM and t2.phm_bjwz = t1.WZ " +
                            " left join ( " +
                            "    select distinct MX_ID,\n" +
                            "                    SYJX,\n" +
                            // 所属系统、系统编码、系统PJM、所属部件、部件编码、部件PJM
                            "                    SSXT,\n" +
                            "                    XTBM,\n" +
                            "                    XTPJM,\n" +
                            "                    SSBJ,\n" +
                            "                    BJBM,\n" +
                            "                    BJPJM,\n" +
                            "                    MXMC,\n" +
                            "                    MXJG,\n" +
                            "                    BJPJM,\n" +
                            "                    '' JGMS,\n" +
                            "                    CONCAT('{\"模型ID\":', mx_id, ',\"模型简称\":\"', MXJG, '\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}') as PGCSZB,\n" +
                            "           case\n" +
                            "           when SYJX = 'HXD3' and BJPJM = 'PJ00033'  then '&{\"模型ID\":\"10059\",\"模型简称\":\"人工检查牵引通风机故障\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}&{\"模型ID\":\"10060\",\"模型简称\":\"设备检测数据异常\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}'\n" +
                            "           when SYJX = 'HXD3' and BJPJM = 'PJ00177'  then '&{\"模型ID\":\"10047\",\"模型简称\":\"人工检查冷却塔风机故障\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}&{\"模型ID\":\"10048\",\"模型简称\":\"设备检测数据异常\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}'\n" +
                            "           when SYJX = 'HXD3C' and BJPJM = 'PJ00033' then '&{\"模型ID\":\"10056\",\"模型简称\":\"人工检查牵引通风机故障\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}&{\"模型ID\":\"10057\",\"模型简称\":\"设备检测数据异常\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}'\n" +
                            "           when SYJX = 'HXD3C' and BJPJM = 'PJ00177' then '&{\"模型ID\":\"10044\",\"模型简称\":\"人工检查冷却塔风机故障\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}&{\"模型ID\":\"10045\",\"模型简称\":\"设备检测数据异常\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}'\n" +
                            "           when SYJX = 'HXD1D' and BJPJM = 'PJ00033' then '&{\"模型ID\":\"10156\",\"模型简称\":\"人工检查牵引通风机故障\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}&{\"模型ID\":\"10157\",\"模型简称\":\"设备检测数据异常\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}'\n" +
                            "           when SYJX = 'HXD1D' and BJPJM = 'PJ00177' then '&{\"模型ID\":\"10144\",\"模型简称\":\"人工检查冷却塔风机故障\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}&{\"模型ID\":\"10145\",\"模型简称\":\"设备检测数据异常\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}'\n" +
                            "           when SYJX = 'HXD1C' and BJPJM = 'PJ00033' then '&{\"模型ID\":\"10256\",\"模型简称\":\"人工检查牵引通风机故障\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}&{\"模型ID\":\"10257\",\"模型简称\":\"设备检测数据异常\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}'\n" +
                            "           when SYJX = 'HXD1C' and BJPJM = 'PJ00177' then '&{\"模型ID\":\"10244\",\"模型简称\":\"人工检查冷却塔风机故障\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}&{\"模型ID\":\"10245\",\"模型简称\":\"设备检测数据异常\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}'\n" +
                            "           else ''\n" +
                            "           end PGCSZB_1" +
                            "    from t_phm_mxxq\n" +
                            "    where SYJX in ('HXD3', 'HXD3C','HXD1D','HXD1C') and BJPJM in ('PJ00033', 'PJ00177') and MXMC like '%TCMS预报警故障%'" +
                            ") t3 on t1.CXM = t3.syjx and t1.PJFLBM = t3.bjpjm " +
                            "where t2.s_train_id is null " +
                            "order by S_TRAIN_ID,wz,BJPJM");
                    Dataset<Row> finalDs = rowDataset
                            // 2023-06-08 : 添加 "lj_id","lj","ljjc","psd_id","psd","psdjc",
                            .selectExpr("CXM as cx", "cx_id", "CH", "PJFLM", "WZ", "PJXLH", "PJDJM", "ZZRQ", "SCCJ", "LJ_ID", "LJ", "ljjc", "PSD_ID", "PSD",
                                    "psdjc", "ljpxm", "GGXH_TH", "S_TRAIN_ID", "PJFLBM", "LJZXGL", "GJXHZX", "ZJXHZX", "SysFL", "SysFLCode1", "SysFLCode2", "JXXX", "PGCSZB", "ZHPJ_ZTZL")
                            .union(tcmsPgDS)
                            .withColumn("JWDJC", col("psdjc"))
                            .withColumn("id", expr("uuid()"))
                            .withColumn("UPDATATIME", date_format(expr("now()"), "yyyy-MM-dd HH:mm:ss"))
                            // 2023-06-08 添加 "lj_id","lj","ljjc","psd_id","psd","psdjc",
                            .select("ID", "cx", "cx_id", "S_TRAIN_ID",
                                    // 所属部件(PJFLM)、部件编码(BJBM)、部件PJM(PJFLBM),部件单件码(PJDJM)、部件序列号(PJXLH)、构型序号(GGXH_TH)
                                    "PJFLM", "PJFLBM", "PJDJM", "PJXLH", "GGXH_TH", "ZZRQ", "SCCJ",
                                    "LJ_ID", "LJ", "LJJC", "PSD_ID", "PSD", "JWDJC", "LJPXM",
                                    "LJZXGL", "GJXHZX", "ZJXHZX", "JXXX", "PGCSZB", "UPDATATIME", "WZ",
                                    // 所属系统、系统编码、系统PJM
                                    "SYSFL", "SYSFLCODE1", "SYSFLCODE2", "ZHPJ_ZTZL");
                    // .withColumn("PJFLM", when(col("PJFLM").isNull().and(col("PJFLBM").equalTo("PJ00033")), "牵引通风机").otherwise("冷却塔风机")),
                    Dataset<Row> finDs = finalDs.withColumn("SYSFL", expr("case when  PJFLBM = 'PJ00033' then '牵引通风机' when PJFLBM = 'PJ00177' then '冷却塔风机' else '' end "))
                            .withColumn("PJFLM", expr("case when PJFLBM = 'PJ00033' then '牵引通风机' when PJFLBM = 'PJ00177' then '冷却塔风机' else '' end "))
                            // 部件编码
                            .withColumn("BJBM", expr("case when PJFLBM = 'PJ00033' then '0122070101' when PJFLBM = 'PJ00177' then '01220702' else '' end "))
                            // 系统编码
                            .withColumn("sysfl", lit("通风系统"))
                            // 系统分类码
                            .withColumn("sysflcode1", expr("case when PJFLBM = 'PJ00033' then '01220701' when PJFLBM = 'PJ00177' then '012207' else '' end "))
                            // 部件编码
                            .withColumn("sysflcode2", expr("case when PJFLBM = 'PJ00033' then 'PJ02353' when PJFLBM = 'PJ00177' then 'PJ02353' else '' end "))
                            .withColumn("ZZRQ", col("ZZRQ").cast("string"))
                            .withColumn("UPDATATIME", col("UPDATATIME").cast("string"));
                    finDs.write().mode("OverWrite").format("org.apache.phoenix.spark")
                            .option("dirver", "org.apache.phoenix.jdbc.PhoenixDriver")
                            .option("zkUrl", "jdbc:phoenix:JWGL-YY-T141149,JWGL-YY-T141150,JWGL-YY-T141151,JWGL-YY-T141152,JWGL-YY-V139150,JWGL-YY-V139151,JWGL-YY-V139152,JWGL-YY-V139153:2181:/hbase-unsecure")
                            .option("table", "LOCOMOTIVE_ADS.PARTS_LABEL").save();
                }
            }
        });

        jsc.start();
        try {
            jsc.awaitTermination();
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
        spark.stop();
    }

    /**
     * Before each batch is processed, reset stale health-assessment results for
     * locomotives that have returned to the depot for servicing: for every
     * {@code parts_label} row belonging to a locomotive that is NOT currently
     * running on the line, rewrite the first JSON fragment of the {@code pgcszb}
     * column so that "模型结果" becomes "A" and "模型转义" becomes "正常".
     *
     * <p>Reads MySQL tables {@code train_status} and {@code parts_label} via
     * {@link MysqlUtils_3}, builds the AEI running-locomotive view via
     * {@link PrepareDimTable#perfectAEI}, and writes the reset rows back to the
     * Phoenix table {@code LOCOMOTIVE_ADS.PARTS_LABEL}.
     *
     * @throws IOException propagated from the MySQL/property helpers
     */
    public static void flushPartsLabelNotA() throws IOException {
        // Each batch first checks every locomotive's AEI depot in/out times and
        // in/out-of-depot status. An alarm time later than the departure time means
        // the locomotive is running and joins the current aggregation; an alarm time
        // earlier than the arrival time means the locomotive is being serviced
        // outside the line, so that data is filtered out.

        // TRAIN_STATUS = 0 marks departure from the depot, so its timestamp is the
        // departure time (cksj).
        MysqlUtils_3.readMysqlToTempTable(spark, "train_status");
        // Columns: loco_type, loco_no, cksj, rksj, train_dir, zxsj, status.
        // NOTE(review): this .show() looks like leftover debug output — confirm it
        // is still wanted in production.
        spark.sql("select S_TRAIN_ID,jccx,train_status_time as cksj from train_status where TRAIN_STATUS = 0").show(20, false);
        PrepareDimTable.perfectAEI(spark);
        // Locomotives currently running on the line (train_dir = '0') for the four
        // supported locomotive types.
        spark.sql("select loco_type,loco_no,cksj,rksj,train_dir,zxsj,status from perAEI" +
                " where train_dir = '0' and loco_type in('HXD1D','HXD1C','HXD3','HXD3C')").createOrReplaceTempView("aei_run");

        MysqlUtils_3.readMysqlToTempTable(spark, "parts_label");
        // Left-join all parts_label rows against the running locomotives; rows whose
        // AEI side is null belong to locomotives not running on the line — these are
        // the ones whose non-"A" assessment results must be reset.
        Dataset<Row> resetDs = spark.sql("select id,s_train_id,pgcszb from parts_label p left join aei_run a on concat_ws('-',loco_type,loco_no) = p.s_train_id where a.loco_type is null");

        // Regexes matching the quoted "模型结果" / "模型转义" JSON fields inside
        // pgcszb, and the replacement values ("A" / "正常" = normal).
        String regexPatternForResult = "\\\"模型结果\\\"\\:\\\"[^\\\"]*\\\"";
        String replacementForResult = "\"模型结果\":\"A\"";
        String regexPatternForTranslate = "\\\"模型转义\\\"\\:\\\"[^\\\"]*\\\"";
        String replacementForTranslate = "\"模型转义\":\"正常\"";

        // pgcszb holds up to three '&'-separated JSON fragments; only the first one
        // carries the model result that may need resetting. concat_ws silently skips
        // null fragments, so rows with fewer than three fragments keep their shape.
        Dataset<Row> resetPGJGDs = resetDs.orderBy("s_train_id")
                .withColumn("field1", split(col("pgcszb"), "&").getItem(0))
                .withColumn("field2", split(col("pgcszb"), "&").getItem(1))
                .withColumn("field3", split(col("pgcszb"), "&").getItem(2))
                // Rows already marked "A" need no reset.
                .filter(expr("field1 not like '%\"模型结果\":\"A\"%'"))
                .withColumn(
                        "field1_new",
                        regexp_replace(
                                regexp_replace(
                                        col("field1"),
                                        regexPatternForResult,
                                        replacementForResult
                                ),
                                regexPatternForTranslate,
                                replacementForTranslate
                        )
                )
                .withColumn("pgcszb", concat_ws("&", col("field1_new"), col("field2"), col("field3")))
                .select("id", "s_train_id", "pgcszb");
        resetPGJGDs.show(20, false);
        // Write the reset rows back to Phoenix (upsert keyed by id).
        // Fix: the option key was misspelled "dirver", so the JDBC driver class was
        // never actually passed to the connector. Also normalized the save-mode
        // string to the conventional lowercase form (matching is case-insensitive).
        resetPGJGDs.write().mode("overwrite").format("org.apache.phoenix.spark")
                .option("driver", "org.apache.phoenix.jdbc.PhoenixDriver")
                .option("zkUrl", "jdbc:phoenix:JWGL-YY-T141149,JWGL-YY-T141150,JWGL-YY-T141151,JWGL-YY-T141152,JWGL-YY-V139150,JWGL-YY-V139151,JWGL-YY-V139152,JWGL-YY-V139153:2181:/hbase-unsecure")
                .option("table", "LOCOMOTIVE_ADS.PARTS_LABEL").save();
    }
}
