package org.example.dev;


import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.VoidFunction;
import org.apache.spark.sql.*;
import org.apache.spark.sql.api.java.UDF1;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka010.ConsumerStrategies;
import org.apache.spark.streaming.kafka010.KafkaUtils;
import org.apache.spark.streaming.kafka010.LocationStrategies;
import org.example.ch.InitKafka;
import org.example.ch.MyPropertiesUtil;
import org.example.ch.PrepareDimTable;
import org.example.ch.getProperties;
import org.example.develop.StringUtilsUDF;
import org.example.develop.jxxxOrderGroupUDF;
import org.example.utils.MysqlUtils_2;

import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Properties;

import static org.apache.spark.sql.functions.*;

/**
 * HXD3 HXD3C 机型 牵引通风机视情维修模型 + 冷却塔风机视情维修模型.
 * <p>
 * 冷却塔风机 PJ00177  | 牵引通风机 PJ00033
 */
public class Hxd3AndHxd3c_7_8 {
    private static final String CONFIGPATHS = "config.properties";
    public static SparkSession spark = null;
    public static JavaSparkContext sc = null;

    /**
     * Entry point for the HXD3 / HXD3C traction-blower (PJ00033) and cooling-tower-fan (PJ00177)
     * condition-based-maintenance job.
     * <p>
     * Pipeline:
     * <ol>
     *   <li>Load MySQL dimension tables (part ledger, repair records, mileage, model details) as temp views.</li>
     *   <li>Build the per-part history view {@code jxjld_jcll_jxxx_temp} by joining allocation, ledger
     *       and repair data.</li>
     *   <li>Consume TCMS alarm JSON from Kafka in 300-second micro-batches.</li>
     *   <li>Per batch: persist alarms to {@code ads_phm_fault} / {@code t_phm_sqwx} (via
     *       {@link #extracted}), assemble part-label rows and overwrite them into the Phoenix table
     *       {@code LOCOMOTIVE_ADS.PARTS_LABEL}.</li>
     * </ol>
     *
     * @param args unused
     * @throws AnalysisException if a temp view or SQL statement fails Spark analysis
     */
    public static void main(String[] args) throws AnalysisException {
        SparkConf conf = new SparkConf()
                .setAppName("hxd3_hxd3c_qytfj_lqtfj_mx")
                .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
                .set("spark.driver.allowMultipleContexts", "true")
                .set("spark.hadoop.validateOutputSpecs", "false")
                .set("hive.mapred.supports.subdirectories", "true")
                .set("spark.streaming.backpressure.enabled", "true")
                .set("spark.streaming.kafka.maxRatePerPartition", "60")
                .set("spark.default.parallelism", "100")
                .set("spark.sql.shuffle.partitions", "20")
                .set("mapreduce.input.fileinputformat.input.dir.recursive", "true")
                .set("spark.scheduler.listenerbus.eventqueue.capacity", "100000")
                .set("spark.streaming.kafka.consumer.cache.enabled", "false")
                .set("spark.debug.maxToStringFields", "100");
        sc = new JavaSparkContext(conf);
        sc.setLogLevel("ERROR");
        // 300-second micro-batch interval.
        JavaStreamingContext jsc = new JavaStreamingContext(sc, Durations.seconds(300));
        spark = SparkSession.builder().appName("hxd3andhxd3c_7_8").getOrCreate();


        // UDF used later to de-duplicate comma-separated aggregate strings (see StringUtilsUDF).
        spark.udf().register("distinctString", new UDF1<String, String>() {
            @Override
            public String call(String input) {
                return StringUtilsUDF.distinctString(input);
            }
        }, DataTypes.StringType);

        // Credentials for the PHM result database (keys: mysql_user / mysql_pwd / mysql_driver).
        Properties mysqlProperties_main2 = new Properties();
        mysqlProperties_main2.setProperty("user", MyPropertiesUtil.getPropertiesKey("mysql_user", CONFIGPATHS));
        mysqlProperties_main2.setProperty("password", MyPropertiesUtil.getPropertiesKey("mysql_pwd", CONFIGPATHS));
        mysqlProperties_main2.setProperty("driver", MyPropertiesUtil.getPropertiesKey("mysql_driver", CONFIGPATHS));

        // Credentials for the JMIS dimension database (keys: jmis_mysql_user / jmis_mysql_pwd).
        Properties mysqlProperties_main = new Properties();
        mysqlProperties_main.setProperty("user", MyPropertiesUtil.getPropertiesKey("jmis_mysql_user", CONFIGPATHS));
        mysqlProperties_main.setProperty("password", MyPropertiesUtil.getPropertiesKey("jmis_mysql_pwd", CONFIGPATHS));
        mysqlProperties_main.setProperty("driver", MyPropertiesUtil.getPropertiesKey("mysql_driver", CONFIGPATHS));


        // Load the MySQL dictionary/dimension tables and register them as temp views.
        spark.read()
                .jdbc(getProperties.getPropertiesKey("jmis_mysql_url", CONFIGPATHS), "dim_jmis_jcda_jcll_1", mysqlProperties_main)
                .createOrReplaceTempView("dim_jmis_jcda_jcll_1");

        spark.read()
                .jdbc(getProperties.getPropertiesKey("jmis_mysql_url", CONFIGPATHS), "dim_jmis_t_jxgl_jxjld_jbxx", mysqlProperties_main)
                .createOrReplaceTempView("jxjld_jbxx");
        // Configuration info per loco type; columns used: SYCX, LBJBM, WZMC.
        spark.read()
                .jdbc(getProperties.getPropertiesKey("jmis_mysql_url", CONFIGPATHS), "dim_jmis_t_jcbm_jcgxzd", mysqlProperties_main)
                .createOrReplaceTempView("dim_jmis_t_jcbm_jcgxzd");
        spark.read()
                .jdbc(getProperties.getPropertiesKey("mysql-url", CONFIGPATHS), "T_PHM_MXXQ", mysqlProperties_main2)
                .createOrReplaceTempView("t_phm_mxxq");
        spark.read()
                .jdbc(getProperties.getPropertiesKey("mysql-url", CONFIGPATHS), "ads_phm_fault", mysqlProperties_main2)
                .createOrReplaceTempView("ads_phm_fault");
        spark.read()
                .jdbc(getProperties.getPropertiesKey("mysql-url", CONFIGPATHS), "t_PHM_SQWX", mysqlProperties_main2)
                .createOrReplaceTempView("t_phm_sqwx");
        // Mileage-related temp views.
        // Latest repair level (zjxc).
        spark.read()
                .jdbc(getProperties.getPropertiesKey("jmis_mysql_url", CONFIGPATHS), "dim_jmis_t_jcbm_zjxc", mysqlProperties_main)
                .createOrReplaceTempView("dim_jmis_t_jcbm_zjxc");
        PrepareDimTable.Init_ZJXC(spark);
        spark.read()
                .jdbc(getProperties.getPropertiesKey("jmis_mysql_url", CONFIGPATHS), "dim_jmis_jcda_zxgl_day", mysqlProperties_main)
                .createOrReplaceTempView("dim_jmis_jcda_zxgl_day");
        spark.read()
                .jdbc(getProperties.getPropertiesKey("jmis_mysql_url", CONFIGPATHS), "dim_jmis_jcda_t_jxglzxgl_v", mysqlProperties_main)
                .createOrReplaceTempView("dim_jmis_jcda_t_jxglzxgl_v");
        spark.read()
                .jdbc(getProperties.getPropertiesKey("jmis_mysql_url", CONFIGPATHS), "dim_jmis_jxgl_t_jxgl_jxdt", mysqlProperties_main)
                .createOrReplaceTempView("dim_jmis_jxgl_t_jxgl_jxdt");

        // Compute running kilometres and register the zx1/zx2/zx3 temp views.
        calculateTheMilesTraveled();
        // Cooling tower fan PJ00177 | traction blower PJ00033.
        // All locomotives with allocation info: ps_cx_dllx_lj_jwd.
        //|s_train_id|CX  |CX_ID|CH  |LJ_ID|LJ       |ljjc|PSD_ID|PSD      |psdjc |YYD_ID|YYD     |yydjc |zzc                  |zzrq      |psrq      |JCDLLXMC    |ljpxm|S_TRAIN_TYPE|
        //|HXD3-0229 |HXD3|233  |0229|05   |郑州铁路局|郑   |0504  |洛阳机务段|洛    |0504  |洛阳机务段|洛    |中车北京二七机车有限公司|2007-06-27|2020-04-22|电力交流机车  |6    |0           |
        // TODO lj_id,lj,ljjc,psd_id,psd,psdjc,ljpxm
        spark.sql("select s_train_id,cx,cx_id,ch,lj_id,lj,ljjc,psd_id,psd,psdjc,yyd_id,yyd,yydjc,zzc,zzrq,psrq,jcdllxmc,ljpxm,s_train_type from ps_cx_dllx_lj_jwd").filter("cx in ('HXD3','HXD3C')").createOrReplaceTempView("ps_cx_dllx_lj_jwd");
        // Refined AEI data -> view perAEI.
        PrepareDimTable.perfectAEI(spark);
        // Post-processed AEI data; joined later against the live TCMS records from Kafka.
        spark.sql("select loco_type,loco_no,train_dir,rksj as cksj  from perAEI  where loco_type in ('HXD3','HXD3C' )and train_dir = '0'").createOrReplaceTempView("aei");

        // Latest ledger row per (loco, part code, position).
        spark.sql("" +
                "SELECT CXM, CH, GXMC, PJFLBM, WZ, PJXLH, PJDJM, S_TRAIN_ID, ZZRQ, SCCJ, GZXH, CHANGETIME\n" +
                "FROM (\n" +
                "         SELECT CXM, CH, GXMC, PJFLBM, WZ, PJXLH, PJDJM, CONCAT(CXM, '-', CH) AS S_TRAIN_ID, ZZRQ, SCCJ, GZXH,\n" +
                "                CHANGETIME, row_number() OVER (PARTITION BY CXM, CH, PJFLBM, WZ ORDER BY CHANGETIME DESC) AS rank_jcll\n" +
                "         FROM dim_jmis_jcda_jcll_1\n" +
                "         WHERE CXM IN ('HXD3C', 'HXD3') AND PJFLBM IN ('PJ00033', 'PJ00177')\n" +
                "     ) jcll_temp\n" +
                "WHERE rank_jcll = '1'\n" +
                "order by CXM desc").createOrReplaceTempView("dim_jmis_jcda_jcll_temp");

        spark.udf().register("jxxxOrderGroupUDF", new jxxxOrderGroupUDF(), DataTypes.StringType);

        // Left-join allocation (ps_cx_dllx_lj_jwd) with ledger (dim_jmis_jcda_jcll_temp) and mileage views.
        spark.sql("" +
                "SELECT ps.s_train_id AS S_TRAIN_ID, ps.CX AS CXM, ps.CH AS CH," +
                // 2023-06-08: railway-bureau / depot fields added for parts_label_1: ps.lj_id, ps.lj, ps.ljjc, ps.psd_id, ps.psd, ps.psdjc
                "       ps.LJ_ID,ps.LJ,ps.ljjc,ps.PSD_ID,ps.PSD,ps.psdjc,ps.ljpxm, gxzd.WZMC AS WZ, gxzd.SysFL, gxzd.SysFLCode1,\n" +
                "       gxzd.SysFLCode2, jcll.GXMC AS GXMC, if(jcll.PJFLBM is null, gxzd.LBJBM, jcll.PJFLBM) AS PJFLBM,\n" +
                "       jcll.PJXLH AS PJXLH, jcll.PJDJM AS PJDJM, jcll.zzrq AS ZZRQ, jcll.SCCJ, jcll.GZXH AS GZXH,\n" +
                "       jcll.CHANGETIME AS CHANGETIME, cast(round(z1.LJZX) as string) LJZXGL, " +
                "cast(round(z2.LJZX) as string) GJXHZX, cast(round(z3.LJZX) as string) ZJXHZX\n" +
                "FROM ps_cx_dllx_lj_jwd ps LEFT JOIN (\n" +
                "    select SYCX, LBJBM, WZMC, FLJC as SysFL, FLBM as SysFLCode1, LBJBM as SysFLCode2\n" +
                "    from dim_jmis_t_jcbm_jcgxzd\n" +
                "    where SYCX in ('HXD3', 'HXD3C') and LBJBM in ('PJ00033', 'PJ00177')\n" +
                ") gxzd ON ps.cx = gxzd.SYCX\n" +
                "     LEFT JOIN dim_jmis_jcda_jcll_temp jcll ON ps.s_train_id = jcll.s_train_id and gxzd.WZMC = jcll.wz\n" +
                "     LEFT JOIN zx1 z1 ON ps.s_train_id = z1.S_TRAIN_ID\n" +
                "     LEFT JOIN zx2 z2 ON ps.s_train_id = z2.S_TRAIN_ID\n" +
                "     LEFT JOIN zx3 z3 ON ps.s_train_id = z3.S_TRAIN_ID").createOrReplaceTempView("dim_jmis_jcda_ps_jcll");

        // Merge ledger "new part" JSON with repair-record JSON per (loco, part code, position).
        spark.sql("" +
                "SELECT distinct a1.CXM, a1.CH, a1.GXMC AS PJFLM, a1.WZ, a1.PJXLH, a1.PJDJM, a1.ZZRQ, a1.SCCJ, a1.GZXH AS GGXH_TH,\n" +
                // TODO 2023-06
                "                a1.CHANGETIME, a1.S_TRAIN_ID, a1.PJFLBM, a1.LJ_ID,a1.LJ,a1.ljjc,a1.PSD_ID,a1.PSD,a1.psdjc,a1.ljpxm," +
                "LJZXGL, GJXHZX, ZJXHZX, SysFL, SysFLCode1, SysFLCode2,\n" +
                "                jxxxOrderGroupUDF(a1.jcll_jxxx, a2.jxjld_jxxx) AS JXXX\n" +
                "FROM (\n" +
                "         -- 从dim_jmis_jcda_ps_jcll表中选择需要的字段，并添加一个名为jcll_jxxx的新字段，该字段是拼接后的JSON格式数据\n" +
                "         SELECT *,\n" +
                "            CONCAT('{\"部件状态\":\"新造\",',\n" +
                "                   '\"生产厂家\":\"',IF(SCCJ IS NOT NULL,SCCJ,'-'),'\",',\n" +
                "                   '\"检修段\":\"-\",',\n" +
                "                   '\"日期\":\"',IF(CHANGETIME IS NOT NULL,CHANGETIME,'-'),'\",'\n" +
                "                   '\"本次走行公里\":\"-\",',\n" +
                "                   '\"下车型号\":\"-\",',\n" +
                "                   '\"下车位置\":\"-\",',\n" +
                "                   '\"上车型号\":\"',IF(S_TRAIN_ID IS NOT NULL,S_TRAIN_ID,'-'),'\",'\n" +
                "                   '\"上车位置\":\"1\"}')jcll_jxxx\n" +
                "         FROM dim_jmis_jcda_ps_jcll\n" +
                "     ) a1 LEFT JOIN(\n" +
                "    -- 对SCXH, LBJBM, SCWZ进行分组，并使用jxxxOrderGroupUDF函数将jcll_jxxx与jxjld_jxxx进行拼接\n" +
                "    SELECT SCXH, LBJBM, SCWZ, CONCAT_WS('&', COLLECT_SET(data_jxxx)) AS jxjld_jxxx\n" +
                "    FROM (\n" +
                "             -- 从子查询中选择需要的字段，并拼接成JSON格式数据\n" +
                "             SELECT LBJBM, SCXH, SCWZ,\n" +
                "                     CONCAT('{\"部件状态\":\"',IF(BJZT IS NOT NULL,BJZT,'-'),\n" +
                "                     '\",\"生产厂家\":\"',IF(SCCJ IS NOT NULL,SCCJ,'-'),\n" +
                "                     '\",\"检修段\":\"',IF(JXD IS NOT NULL,JXD,'-'),\n" +
                "                     '\",\"日期\":\"',IF(RQ IS NOT NULL,RQ,'-'),\n" +
                "                     '\",\"本次走行公里\":\"',IF(BCZXGL IS NOT NULL,BCZXGL,'-'),\n" +
                "                     '\",\"下车型号\":\"',IF(XCXH IS NOT NULL,XCXH,'-'),\n" +
                "                     '\",\"下车位置\":\"',IF(XCWZ IS NOT NULL,XCWZ,'-'),\n" +
                "                     '\",\"上车型号\":\"',IF(SCXH IS NOT NULL,SCXH,'-'),\n" +
                "                     '\",\"上车位置\":\"',IF(SCWZ IS NOT NULL,SCWZ,'-'),\n" +
                "                     '\"}')data_jxxx\n" +
                "             FROM (\n" +
                "                      -- 从子查询中选择需要的字段，并根据SCXH和BJZT进行排序，以获取最新的数据\n" +
                "                      SELECT *, row_number() over (PARTITION BY SCXH, BJZT ORDER BY RQ DESC) AS `rank`\n" +
                "                      FROM (\n" +
                "                               SELECT LBJBM, CONCAT(RT, RC) AS BJZT, ZZCJ AS SCCJ, CXDW AS JXD, CREATE_TIME AS RQ,\n" +
                "                                      BCZXGL, CONCAT(XCCX, '-', XCCH) AS XCXH, XCWZ, DATE(JXRQ_JS) AS XCRQ,\n" +
                "                                      CONCAT(SCCX, '-', SCCH) AS SCXH, SCWZ, DATE(JXRQ_JS) AS SCRQ\n" +
                "                               FROM jxjld_jbxx\n" +
                "                               WHERE RECORD_STATUS = '0' AND (SCCX IN ('HXD3', 'HXD3C') AND LBJBM in ('PJ00177', 'PJ00033'))\n" +
                "                           ) t1\n" +
                "                  ) t2\n" +
                "             WHERE `rank` = '1'\n" +
                "         ) t3\n" +
                "    GROUP BY SCXH, LBJBM, SCWZ\n" +
                ") a2 ON a1.S_TRAIN_ID = a2.SCXH AND a1.PJFLBM = a2.LBJBM AND a1.WZ = a2.SCWZ").createOrReplaceTempView("jxjld_jcll_jxxx_temp");

        // Kafka consumer parameters from the config file (group id "TransFault111111111").
        HashMap<String, Object> kafkaParams = InitKafka.init("TransFault111111111");

        // Comma-separated topic list from config.
        String tcds_tcms_fault_topic = getProperties.getPropertiesKey("tcds_tcms_bd_topic", CONFIGPATHS);
        final HashSet<String> topics = new HashSet<>(Arrays.asList(tcds_tcms_fault_topic.split(",")));
        // Direct stream over the subscribed topics.
        JavaInputDStream<ConsumerRecord<String, String>> directStream = KafkaUtils.createDirectStream(
                jsc,
                LocationStrategies.PreferConsistent(),
                ConsumerStrategies.<String, String>Subscribe(topics, kafkaParams)
        );
        // Keep only the record value (a JSON string per TCMS event).
        JavaDStream<String> map = directStream.map(new Function<ConsumerRecord<String, String>, String>() {
            @Override
            public String call(ConsumerRecord<String, String> stringStringConsumerRecord) throws Exception {
                return stringStringConsumerRecord.value();
            }
        });

        map.foreachRDD(new VoidFunction<JavaRDD<String>>() {
            @Override
            public void call(JavaRDD<String> stringJavaRDD) throws Exception {
                if (!stringJavaRDD.isEmpty()) {
                    // Expected JSON fields: JCDLLXMC,LJZXGL,RC,ZZC,ZZRQ,bjbm,bjpjm,cx,gzdm_wm,gzkssj_up,gzmc,idx,lj,lj_id,ljjc,phm_bjwz,phm_lb,phm_mc,psd,psd_id,psdjc,s_train_id,source,ssbj,ssxt,xtbm,xtpjm,zj_fa,zj_ms
                    spark.read().json(stringJavaRDD).filter(col("bjpjm").isin("PJ00033", "PJ00177")).createOrReplaceTempView("tcmsEvent");
                    // Refresh the mileage temp views for this batch.
                    calculateTheMilesTraveled();
                    // Persist into ads_phm_fault / t_phm_sqwx and build the health-assessment view pgjg_temp.
                    extracted("'HXD3','HXD3C'", "'PJ00033', 'PJ00177'", "'10043','10044','10045','10046','10047','10048','10055','10056','10057','10058','10059','10060'", "when 'PJ00033' then '牵引风机故障' when 'PJ00177' then '冷却塔风机故障'");
                    // Label rows for parts that raised an alarm in this batch.
                    Dataset<Row> tcmsPgDS = spark.sql("select t2.*, t1.PGCSZB,t1.ZHPJ_ZTZL from pgjg_temp t1 join jxjld_jcll_jxxx_temp t2 on t1.s_train_id = t2.S_TRAIN_ID and t1.bjpjm = t2.PJFLBM and t1.phm_bjwz = t2.WZ ")
                            // 2023-06-08: added "lj_id","lj","ljjc","psd_id","psd","psdjc"
                            .select("CXM", "CH", "PJFLM", "WZ", "PJXLH", "PJDJM", "ZZRQ", "SCCJ",
                                    "LJ_ID", "LJ", "ljjc", "PSD_ID", "PSD", "psdjc", "ljpxm",
                                    "GGXH_TH", "S_TRAIN_ID", "PJFLBM", "LJZXGL", "GJXHZX", "ZJXHZX", "SysFL", "SysFLCode1", "SysFLCode2", "JXXX", "PGCSZB", "ZHPJ_ZTZL");
                    // Label rows for all remaining parts (no alarm this batch -> default "A"/normal result).
                    Dataset<Row> rowDataset = spark.sql("select t1.*,t3.BJPJM,concat(t3.PGCSZB,t3.PGCSZB_1) as PGCSZB,'A' as ZHPJ_ZTZL" +
                            " from jxjld_jcll_jxxx_temp  t1 left join pgjg_temp t2 on t1.s_train_id = t2.S_TRAIN_ID and t2.bjpjm = t1.PJFLBM and t2.phm_bjwz = t1.WZ " +
                            " left join ( " +
                            "    select distinct MX_ID,\n" +
                            "                    SYJX,\n" +
                            "                    MXMC,\n" +
                            "                    MXJG,\n" +
                            "                    BJPJM,\n" +
                            "                    '' JGMS,\n" +
                            "                    CONCAT('{\"模型ID\":', mx_id, ',\"模型简称\":\"', MXJG, '\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}') as PGCSZB,\n" +
                            "           case\n" +
                            "           when SYJX = 'HXD3' and BJPJM = 'PJ00033'  then '&{\"模型ID\":\"10059\",\"模型简称\":\"人工检查牵引通风机故障\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}&{\"模型ID\":\"10060\",\"模型简称\":\"设备检测数据异常\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}'\n" +
                            "           when SYJX = 'HXD3' and BJPJM = 'PJ00177'  then '&{\"模型ID\":\"10059\",\"模型简称\":\"人工检查冷却塔风机故障\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}&{\"模型ID\":\"10060\",\"模型简称\":\"设备检测数据异常\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}'\n" +
                            "           when SYJX = 'HXD3C' and BJPJM = 'PJ00033' then '&{\"模型ID\":\"10056\",\"模型简称\":\"人工检查牵引通风机故障\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}&{\"模型ID\":\"10057\",\"模型简称\":\"设备检测数据异常\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}'\n" +
                            "           when SYJX = 'HXD3C' and BJPJM = 'PJ00177' then '&{\"模型ID\":\"10044\",\"模型简称\":\"人工检查冷却塔风机故障\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}&{\"模型ID\":\"10045\",\"模型简称\":\"设备检测数据异常\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}'\n" +
                            "           else ''\n" +
                            "           end PGCSZB_1" +
                            "    from t_phm_mxxq\n" +
                            "    where SYJX in ('HXD3', 'HXD3C') and BJPJM in ('PJ00033', 'PJ00177') and MXMC like '%TCMS预报警故障%'" +
                            ") t3 on t1.CXM = t3.syjx and t1.PJFLBM = t3.bjpjm " +
                            "where t2.s_train_id is null " +
                            "order by S_TRAIN_ID,wz,BJPJM");
                    Dataset<Row> finalDs = rowDataset
                            // 2023-06-08: added "lj_id","lj","ljjc","psd_id","psd","psdjc"
                            .select("CXM", "CH", "PJFLM", "WZ", "PJXLH", "PJDJM", "ZZRQ", "SCCJ", "LJ_ID", "LJ", "ljjc", "PSD_ID", "PSD", "psdjc", "ljpxm", "GGXH_TH", "S_TRAIN_ID", "PJFLBM", "LJZXGL", "GJXHZX", "ZJXHZX", "SysFL", "SysFLCode1", "SysFLCode2", "JXXX", "PGCSZB", "ZHPJ_ZTZL")
                            .union(tcmsPgDS)
                            .withColumn("JWDJC", col("psdjc"))
                            .withColumn("id", expr("uuid()"))
                            .withColumn("UPDATATIME", date_format(expr("now()"), "yyyy-MM-dd HH:mm:ss"))
                            .select("ID", "S_TRAIN_ID", "PJDJM", "PJFLBM", "PJFLM", "PJXLH", "GGXH_TH", "ZZRQ", "SCCJ",
                                    "LJ_ID", "LJ", "LJJC", "PSD_ID", "PSD", "JWDJC", "LJPXM",
                                    "LJZXGL", "GJXHZX", "ZJXHZX", "JXXX", "PGCSZB", "UPDATATIME", "WZ", "SYSFL", "SYSFLCODE1", "SYSFLCODE2", "ZHPJ_ZTZL");
                    // FIX: the component codes were lowercase ('pJ00033'/'pJ00177'), so SYSFL never matched
                    // and was always '' — codes are uppercase everywhere else in this pipeline.
                    Dataset<Row> finDs = finalDs.withColumn("SYSFL", expr("case when  PJFLBM = 'PJ00033' then '牵引通风机' when PJFLBM = 'PJ00177' then '冷却塔风机' else '' end "))
                            .withColumn("PJFLM", expr("case when PJFLBM = 'PJ00033' then '牵引通风机' when PJFLBM = 'PJ00177' then '冷却塔风机' else '' end "));

                    // Overwrite the Phoenix label table each batch.
                    finDs.write().mode("OverWrite").format("org.apache.phoenix.spark")
                            // FIX: option key was misspelled "dirver".
                            .option("driver", "org.apache.phoenix.jdbc.PhoenixDriver")
                            .option("zkUrl", "jdbc:phoenix:JWGL-YY-T141149,JWGL-YY-T141150,JWGL-YY-T141151,JWGL-YY-T141152,JWGL-YY-V139150,JWGL-YY-V139151,JWGL-YY-V139152,JWGL-YY-V139153:2181:/hbase-unsecure")
                            .option("table", "LOCOMOTIVE_ADS.PARTS_LABEL").save();
                }
            }
        });
        jsc.start();
        try {
            jsc.awaitTermination();
        } catch (InterruptedException e) {
            // Restore the interrupt status instead of swallowing it.
            Thread.currentThread().interrupt();
        }
        spark.stop();
    }


    // 计算走行公里并生成临时表
    private static void calculateTheMilesTraveled() {
        //1、计算累计走行（取整车累计走行）
        spark.sql("select concat(cx,'-',ch) as S_TRAIN_ID,round(LJZX) as LJZX from dim_jmis_jcda_t_jxglzxgl_v")
                .createOrReplaceTempView("zx1");

        //2、计算高级修后走行（C5/C6都算高级修）
        String sqlgj
                = "SELECT A.S_TRAIN_ID,ROUND(SUM(A.ZXGL_DAY), 0) AS LJZX\n" +
                "  FROM\n" +
                "    (SELECT jxdt_mid.S_TRAIN_ID AS S_TRAIN_ID,zx.ZXGL_DAY AS ZXGL_DAY\n" +
                "    FROM\n" +
                "      (SELECT CONCAT(cxjc, '-', ch) AS S_TRAIN_ID,MAX(JGRQ) AS JXRQ\n" +
                "      FROM\n" +
                "        (SELECT *\n" +
                "        FROM dim_jmis_jxgl_t_jxgl_jxdt\n" +
                "        WHERE RC IN ('C4修', 'C5修', 'C6修'))\n" +
                "      GROUP BY cxjc,ch) jxdt_mid\n" +
                "      LEFT JOIN dim_jmis_jcda_zxgl_day zx\n" +
                "        ON jxdt_mid.S_TRAIN_ID = CONCAT(zx.CX, '-', zx.CH) AND zx.REPOT_DATE > jxdt_mid.JXRQ) A\n" +
                "  GROUP BY S_TRAIN_ID";
        spark.sql(sqlgj).createOrReplaceTempView("zx2");

        //3、计算最近修后走行
        String sqlxh
                = "SELECT A.S_TRAIN_ID,ROUND(SUM(A.ZXGL_DAY), 0) AS LJZX\n" +
                "  FROM\n" +
                "    (SELECT CONCAT(zjxc.cxjc, '-', zjxc.ch) AS S_TRAIN_ID,zx.ZXGL_DAY AS ZXGL_DAY\n" +
                "    FROM\n" +
                "      dim_jmis_t_jcbm_zjxc zjxc\n" +
                "      LEFT JOIN dim_jmis_jcda_zxgl_day zx\n" +
                "        ON CONCAT(zx.CX, '-', zx.CH) = CONCAT(zjxc.cxjc, '-', zjxc.ch) AND zx.REPOT_DATE > zjxc.KCRQ) A\n" +
                "  GROUP BY S_TRAIN_ID";
        spark.sql(sqlxh).createOrReplaceTempView("zx3");
    }

    /**
     * Persists the current batch of alarms into {@code ads_phm_fault} and {@code t_phm_sqwx},
     * and builds the per-model health-assessment temp view used downstream.
     * @param cx_ch_in  SQL IN-list of loco types, e.g. {@code "'HXD3','HXD3C'"}
     * @param bjpjm_in  SQL IN-list of component codes, e.g. {@code "'PJ00033', 'PJ00177'"}
     * @param mx_id_in  SQL IN-list of model IDs to match in t_phm_mxxq
     * @param mxmcSql   CASE-WHEN fragment mapping component code to fault name; appears unused in the
     *                  active SQL (its only usage is commented out) — candidate for removal
     * @throws IOException if writing to MySQL fails
     */
    public static void extracted(String cx_ch_in, String bjpjm_in, String mx_id_in, String mxmcSql) throws IOException {
        // tcms 实时数据 + jcll_1 + aei 表中数据进行关联,获取实时+当前趟中实时报警数据集信息
        String sql1 = "SELECT te.*,regexp_replace(te.phm_mc,' ','') as phm_mc_1,\n" +
                "       jcll.PJXLH,\n" +
                "       jcll.PJDJM,\n" +
                "       aei1.cksj\n" +
                "FROM (\n" +
                "         SELECT *,\n" +
                "                'TCMS'         AS gzly,\n" +
                "                '-'            AS yyzl,\n" +
                "                '模型诊断信息' AS zdlb,\n" +
                "                gzdm            AS gzm\n" +
                "         FROM tcmsEvent\n" +
                "         where cx in (" + cx_ch_in + ")\n" +
                "           and phm_lb != '-'\n" +
                "           and bjpjm in (" + bjpjm_in + ")\n" +
                "     ) te\n" +
                "         LEFT JOIN dim_jmis_jcda_jcll_temp jcll\n" +
                "                   ON te.s_train_id = jcll.S_TRAIN_ID\n" +
                "                       AND te.phm_bjwz = jcll.WZ\n" +
                "                       AND te.BJPJM = jcll.PJFLBM\n" +
                "         LEFT JOIN (\n" +
                "    SELECT *\n" +
                "    FROM aei\n" +
                ") aei1\n" +
                "                   ON te.s_train_id = CONCAT(aei1.loco_type, '-', aei1.loco_no)";
        // TODO 2023-06-16
        spark.sql(sql1).filter(expr("cksj is not null or cksj !=''")).createOrReplaceTempView("tcms_jcll_1_temp");

        // tcms + jcll_1 + aei +  mxxq  表关联结果.
        Dataset<Row> tcms_jcll_mxxq_DS = spark.sql("" +
                "select JCDLLXMC, LJZXGL, RC, ZZC, ZZRQ, bjbm, t1.bjpjm, cx, ch, gzdm, gzkssj_up, gzmc, idx, lj, lj_id, ljjc,\n" +
                "       phm_bjwz,\n" +
                "       phm_lb, phm_mc_1 as phm_mc, psd, psd_id, psdjc, s_train_id, source, ssbj, ssxt, xtbm, xtpjm, zj_fa, zj_ms, gzly, yyzl, yfyy,\n" +
                "       zdlb, gzm, PJXLH, PJDJM, cksj,phm_mc_1,\n" +
                "       MX_ID, SYJX, MXMC, MXJG, JGMS, XXMS, CZJY, ZBJY, JXJY, YFYY\n" +
                "from tcms_jcll_1_temp t1 join (\n" +
                "    select MX_ID,\n" +
                "           SYJX,\n" +
                "           MXMC,\n" +
                "           MXJG,\n" +
                "           BJPJM,\n" +
                "           JGMS,\n" +
                "           XXMS,\n" +
                "           CZJY,\n" +
                "           ZBJY,\n" +
                "           JXJY,\n" +
                "           YFYY\n" +
                "    from t_phm_mxxq\n" +
                "    where SYJX in (" + cx_ch_in + ") and MX_ID in (" + mx_id_in + ")\n" +
                // and t1.bjpjm = t2.BJPJM
                ") t2 on  regexp_replace(t1.phm_mc_1, ' ', '') = regexp_replace(t2.xxms, ' ', '') and t1.bjpjm = t2.BJPJM and substring_index(t1.s_train_id, '-', 1) = t2.SYJX").dropDuplicates();

        ///入 ads_phm_fault 表中的逻辑: 来一条报警数据就入一条. 这里的操作: 将 tcms实时数据 + jcll表中的数据 + mxxq 表中的数据 关联后的结果,选取 ads_phm_fault 这个表中所需要的字段,改名并落表.
        Dataset<Row> adsPhmFaultDS = tcms_jcll_mxxq_DS.selectExpr(
                "s_train_id",
                "idx AS s_fault_id",
                "concat_ws('-', s_train_id, phm_bjwz, mx_id, date_format(cksj, 'yyyyMMddHHmmss')) AS mx_id",
                "s_train_id AS s_train_name",
                "cx AS s_train_type_code",
                "source AS s_fault_source",
                "gzkssj_up AS s_fault_time",
                "ssxt AS s_fault_sys",
                "xtbm AS s_fault_sys_code1",
                "xtpjm AS s_fault_sys_code2",
                "ssbj AS s_fault_bw",
                "bjbm AS s_fault_bw_code1",
                "bjpjm AS s_fault_bw_code2",
                "gzdm AS s_fault_code",
                "gzmc AS s_fault_name",
                "phm_lb",
                "mxjg",
                "phm_mc_1 AS phm_gzmc",
                "phm_bjwz",
                "zj_ms",
                "zj_fa",
                "pjxlh",
                "pjdjm",
                "1 AS s_htzt",
                "0 AS s_hkzt",
                "1 AS s_xfzt"
        ).dropDuplicates("s_fault_id");
        // 测试通过,入 ads_phm_fault 表中的数据没有问题.
        MysqlUtils_2.writeDataToMysqlTable(adsPhmFaultDS, "ads_phm_fault", SaveMode.Append);
        // 拉取当前批次中入 t_phm_sqwx 表中的字段. 这里需要进行聚合操作,
        // TODO 2023-05-28: 修改入 t_phm_sqwx 这个表中 mxmc 字段的取值: 由以前的 jgms 字段 --> mxmc 字段.
        Dataset<Row> sqwxLocalBatchDS = tcms_jcll_mxxq_DS.selectExpr("concat_ws('-', s_train_id, phm_bjwz, mx_id, date_format(cksj, 'yyyyMMddHHmmss')) AS ID", "s_train_id",
                "cx",
                // 生产上kafka传过来的数据是有这个字段的,上次最终测试后我修改过.这里直接加上
                "ch",
                "lj",
                "ljjc",
                "lj_id AS ljm",
                "psd",
                "psdjc",
                "psd_id AS psddm",
                "ssxt",
                "xtpjm",
                "ssbj",
                "bjpjm",
                "phm_bjwz AS bjwz",
                "pjxlh AS bjxlh",
                "pjdjm AS bjdjm",
                "mxjg AS mx",
                // 2023-05-28 修改 表事情维修表中 mxmc 这个字段的取值,由以前的取模型详情表中的 jgms 这个字段,修改为:MXMC 这个字典的值.
                //"jgms AS mxmc",
                "mxmc",
                "xxms AS mxms",
                "current_timestamp AS create_time",
                "czjy",
                "1 AS xfzt",
                "1 AS htzt",
                "0 AS hkzt",
                // 这里有点问题,以前定的是: 0-变压器 1-变流器,其他部件呢 ? 确认后先空着,这里先用 '-' 占位
                "1 AS status",
                "jcdllxmc AS dllx",
                "rc",
                "'-' AS yyzl",
                "zzc AS sccj",
                "zzrq",
                "ljzxgl AS ljzx",
                "gzdm AS gzm",
                "'TCMS' AS gzly",
                "'-' AS zdlb",
                // 这里的发生时间,上次最后代码ch要求修改过,取的时间是 故障发生的最小时间.
                "gzkssj_up as fssj",
                "yfyy",
                "zbjy AS hkzbjy",
                "JXJY AS hkjxjy"
        );
        // 获取当前趟中 t_phm_sqwx 历史数据,和上面的当前批次中的数据进行 union,再进行关键字段的聚合,最终将当前执行结果更新写操作入表.
        Dataset<Row> sqwxHistoryDS = spark.sql("select id, s_train_id, cx, ch, lj, ljjc, ljm, psd, psdjc, psddm, ssxt, xtpjm, ssbj, bjpjm, bjwz, bjxlh, bjdjm, mx, mxmc, mxms, create_time, czjy, xfzt, htzt,hkzt, status, dllx, rc, yyzl, sccj, zzrq, ljzx, gzm, gzly, zdlb, fssj, yfyy, hkzbjy, hkjxjy from t_phm_sqwx where HKZT = '0' and BJPJM IN (" + bjpjm_in + ") and CX IN (" + cx_ch_in + ")");

        Dataset<Row> sqwxUpdataDS = sqwxLocalBatchDS.union(sqwxHistoryDS)
                .groupBy("id", "s_train_id", "cx", "ch", "lj", "ljjc", "ljm", "psd", "psdjc", "psddm", "ssxt", "xtpjm", "ssbj", "bjpjm", "bjwz", "bjxlh", "bjdjm", "xfzt", "htzt", "hkzt", "status", "dllx", "rc", "yyzl", "sccj", "zzrq", "ljzx", "gzly", "zdlb")
                .agg(
                        concat_ws(",", collect_set(col("mx"))).alias("mx"),
                        concat_ws(",", collect_set(col("mxmc"))).alias("mxmc"),
                        concat_ws(",", collect_set(col("mxms"))).alias("mxms"),
                        concat_ws(",", collect_set(col("czjy"))).alias("czjy"),
                        concat_ws(",", collect_set(col("yfyy"))).alias("yfyy"),
                        concat_ws(",", collect_set(col("gzm"))).alias("gzm"),
                        concat_ws(",", collect_set(col("hkzbjy"))).alias("hkzbjy"),
                        concat_ws(",", collect_set(col("hkjxjy"))).alias("hkjxjy"),
                        min("fssj").alias("fssj")
                )
                .withColumn("create_time", current_timestamp())
                .withColumn("mx", expr("distinctString(mx)"))
                .withColumn("mxmc", expr("distinctString(mxmc)"))
                .withColumn("mxms", expr("distinctString(mxms)"))
                .withColumn("czjy", expr("distinctString(czjy)"))
                .withColumn("yfyy", expr("distinctString(yfyy)"))
                .withColumn("gzm", expr("distinctString(gzm)"))
                .withColumn("hkzbjy", expr("distinctString(hkzbjy)"))
                .withColumn("hkjxjy", expr("distinctString(hkjxjy)"));
        // 写入 t_phm_sqwx 中数据
        MysqlUtils_2.upsertDatasetToMySQL(sqwxUpdataDS, "t_phm_sqwx");

        // Current-trip evaluation rows: one row per (train, component, position, model).
        Dataset<Row> tcms_jkpgDs = tcms_jcll_mxxq_DS.selectExpr("s_train_id", "bjpjm", "phm_bjwz", "mx_id", "MXJG", "xxms");

        // Pull the historical fault rows of the current trip from ads_phm_fault so the
        // health evaluation is computed at model granularity over current + history.
        // MXJG is read directly from the table (the former hard-coded case-when
        // mapping was removed as dead code).
        // NOTE(review): bjpjm_in / cx_ch_in are concatenated straight into the SQL
        // text; they are built internally in this job, but parameterize them if they
        // can ever carry external input.
        Dataset<Row> ads_history_jkpgDs = spark.sql("select s_train_id,\n" +
                "       S_FAULT_BW_CODE2                                         as bjpjm,\n" +
                "       phm_bjwz,\n" +
                "       substring_index(substring_index(MX_ID, '-', 4), '-', -1) as mx_id,\n" +
                "    MXJG,    \n" +
                "       phm_gzmc                                                 as xxms\n" +
                "from ads_phm_fault\n" +
                "where S_HKZT = '0' and S_FAULT_BW_CODE2 IN (" + bjpjm_in + ") and s_train_type_code IN (" + cx_ch_in + ")");

        // Merge current and historical rows (union is positional — both sides select
        // the same six columns in the same order), aggregate the fault descriptions
        // per model, derive the evaluation columns and expose the result as the temp
        // view pgjg_temp for downstream SQL.
        tcms_jkpgDs.union(ads_history_jkpgDs).groupBy("s_train_id", "bjpjm", "phm_bjwz", "mx_id", "MXJG")
                .agg(
                        collect_set(col("xxms")).as("xxms")
                )
                // Train type is the prefix of s_train_id, e.g. "HXD3-xxxx" -> "HXD3".
                .withColumn("cx", split(col("s_train_id"), "-").getItem(0))
                // NOTE(review): pgjg is forced to "D" (fault) for every row that
                // reaches this point, so ZHPJ_ZTZL is always "D" and mxzy always the
                // fault label — confirm this blanket grading is intentional.
                .withColumn("pgjg", lit("D"))
                .withColumn("ZHPJ_ZTZL", when(col("pgjg").equalTo("D"), lit("D")).otherwise("A"))
                .withColumn("xxms", concat_ws(",", col("xxms")))
                .withColumn("mxzy", when(col("pgjg").equalTo("D"), "故障").otherwise("正常"))
                // Serialise the per-model evaluation as a JSON fragment.
                .withColumn("PGCSZB", expr("CONCAT('{\"模型ID\":', mx_id, ',\"模型简称\":\"', mxjg, '\",\"模型结果\":\"', pgjg, '\",\"模型结果描述\":\"', xxms,'\",\"模型转义\":\"', mxzy, '\"}')"))
                // Append the fixed companion-model entries for each known train-type /
                // component combination.
                // NOTE(review): the ELSE branch replaces PGCSZB with the empty string
                // for any (cx, BJPJM) pair not listed below — confirm rows outside
                // these four combinations are really meant to lose their payload.
                .withColumn("PGCSZB", expr(" case\n" +
                        "           when cx = 'HXD3' and BJPJM = 'PJ00033' then concat(PGCSZB,'&{\"模型ID\":10059,\"模型简称\":\"人工检查牵引通风机故障\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}&{\"模型ID\":10060,\"模型简称\":\"设备检测数据异常\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}')\n" +
                        "           when cx = 'HXD3' and BJPJM = 'PJ00177' then concat(PGCSZB,'&{\"模型ID\":10059,\"模型简称\":\"人工检查冷却塔风机故障\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}&{\"模型ID\":10060,\"模型简称\":\"设备检测数据异常\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}')\n" +
                        "           when cx = 'HXD3C' and BJPJM = 'PJ00033' then concat(PGCSZB,'&{\"模型ID\":10056,\"模型简称\":\"人工检查牵引通风机故障\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}&{\"模型ID\":10057,\"模型简称\":\"设备检测数据异常\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}')\n" +
                        "           when cx = 'HXD3C' and BJPJM = 'PJ00177' then concat(PGCSZB,'&{\"模型ID\":10044,\"模型简称\":\"人工检查冷却塔风机故障\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}&{\"模型ID\":10045,\"模型简称\":\"设备检测数据异常\",\"模型结果\":\"A\",\"模型转义\":\"正常\"}')\n" +
                        "else '' end PGCSZB"))
                .dropDuplicates()
                .createOrReplaceTempView("pgjg_temp");
    }
}
