package zjs.dc.controller;

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

/**
 * @Class: TJudgementDetail
 * @Author: xyl
 * @Description: Data-mapping job between Qichacha's t_judgement_detail table and Zhongshu's t_getexecutejudgmentinfo table (judgement-document details).
 * @Date: 2021/12/17 16:30
 */
public class TJudgementDetail {

    /**
     * Entry point. Maps Qichacha's a_dc_ep_dwi.t_judgement_detail rows against
     * Zhongshu's a_dc_ep_ods.zs_t_getexecutejudgmentinfo_* tables and loads the
     * result into a_dc_ep_incr.t_judgement_detail:
     * <ul>
     *   <li>If the Zhongshu base table has rows this batch: first overwrite the
     *       target with delete markers (isadd = '-1'), then append update rows
     *       (isadd = '0').</li>
     *   <li>Otherwise truncate the target table.</li>
     * </ul>
     */
    public static void main(String[] args) {
        SparkSession spark = SparkSession.builder()
                .appName("t_judgement_detail")
                .enableHiveSupport()
                .getOrCreate();
        spark.sparkContext().setLogLevel("ERROR");
        try {
            Dataset<Row> zsData = spark.sql("select * from a_dc_ep_ods.zs_t_getexecutejudgmentinfo_base");
            if (zsData.count() > 0) {
                overwriteWithDeletes(spark);
                appendUpdates(spark);
            } else {
                // No Zhongshu data for this batch: clear the incremental table.
                spark.sql("TRUNCATE TABLE a_dc_ep_incr.t_judgement_detail");
                System.out.println("中数本期数据涉及该表的数据为空。。。。。。");
            }
        } finally {
            // Bug fix: spark.stop() used to be unreachable when any query threw,
            // leaking the session. Always release it.
            spark.stop();
        }
    }

    /**
     * Stage 1 — delete markers for t_judgement_detail (裁判文书详情).
     * Joins the DWI detail table with Zhongshu's delete table on the case
     * number ("案号"), normalized by translating full-width parentheses （）
     * to ASCII, trimming, and upper-casing; the matches overwrite the
     * incremental table with isadd = '-1'.
     */
    private static void overwriteWithDeletes(SparkSession spark) {
        Dataset<Row> deleteData = spark.sql("select \n" +
                "   a.case_id,a.case_name,a.case_no,a.case_type,a.content,a.court,a.create_date,a.submit_date,a.update_date,\n" +
                "   a.appellor,a.judge_date,a.case_reason,a.trial_round,a.defendant_list,a.prosecutor_list,a.is_valid,a.content_clear,a.fmt_content, \n" +
                "   a.court_notice_list,a.related_companies,b.JOBID,'-1' as isadd  \n" +
                "from (select * from a_dc_ep_dwi.t_judgement_detail where case_no is not null and case_no !='')a  " +
                "inner join (select * from a_dc_ep_ods.zs_t_getexecutejudgmentinfo_base_del where CASECODE is not null and CASECODE !='') b  " +
                "on upper(trim(translate(translate(a.case_no,\"（\",\"(\"),\"）\",\")\"))) = upper(trim(translate(translate(b.CASECODE,\"（\",\"(\"),\"）\",\")\"))) ");
        deleteData.createOrReplaceTempView("tmp_t_judgement_detail");
        spark.sql("insert overwrite table a_dc_ep_incr.t_judgement_detail select * from tmp_t_judgement_detail");
    }

    /**
     * Stage 2 — update rows keyed by case number ("案号").
     * Builds four per-docid party aggregates (all parties, plaintiffs 原告,
     * defendants 被告, and names longer than 3 characters), joins them plus a
     * court-notice summary onto the Zhongshu base table, and appends the
     * result to the incremental table with isadd = '0'.
     */
    private static void appendUpdates(SparkSession spark) {
        // All parties per docid -> appellor column.
        Dataset<Row> allParties = spark.sql("select \n" +
                "  docid,\n" +
                "  concat_ws(',',collect_list(name)) as  appellor \n" +
                " FROM a_dc_ep_ods.zs_t_getexecutejudgmentinfo_party group by docid");
        allParties.createOrReplaceTempView("linshi_data_temp");

        // Plaintiffs (原告) per docid -> prosecutor_list column.
        Dataset<Row> plaintiffs = spark.sql("select \n" +
                "  t.docid,\n" +
                "  concat_ws(',',collect_list(t.name)) as  yg \n" +
                " FROM (select * from a_dc_ep_ods.zs_t_getexecutejudgmentinfo_party where type_name='原告')t group by t.docid");
        plaintiffs.createOrReplaceTempView("linshi_data_temp1");

        // Defendants (被告) per docid -> defendant_list column.
        Dataset<Row> defendants = spark.sql("select \n" +
                "  t.docid,\n" +
                "  concat_ws(',',collect_list(t.name)) as  bg \n" +
                " FROM (select * from a_dc_ep_ods.zs_t_getexecutejudgmentinfo_party where type_name='被告')t group by t.docid");
        defendants.createOrReplaceTempView("linshi_data_temp2");

        // Distinct party names longer than 3 chars -> related_companies column.
        Dataset<Row> companies = spark.sql("select \n" +
                "  t.docid,\n" +
                "  concat_ws(',',collect_list(distinct t.name)) as eps \n" +
                " FROM (select * from a_dc_ep_ods.zs_t_getexecutejudgmentinfo_party where length(name)>3)t group by t.docid");
        companies.createOrReplaceTempView("linshi_data_temp3");

        // Assemble the target-schema rows; t3 builds a small JSON summary of
        // court-notice years per normalized case number.
        Dataset<Row> updateData = spark.sql("SELECT " +
                "       a.DOCID,\n" +
                "       a.CASE_TITLE,\n" +
                "       a.CASECODE,\n" +
                "       a.CASE_DOC_NAME,\n" +
                "       \"\",\n" +
                "       a.COURT_NAME,\n" +
                "       \"\",\n" +
                "       \"\",\n" +
                "       \"\",\n" +
                "       b.appellor,\n" +
                "       a.JUDGMENT_DATE,\n" +
                "       a.CASE_REASON,\n" +
                "       \"\",\n" +
                "       e.bg,\n" +
                "       d.yg,\n" +
                "       \"1\",\n" +
                "       \"\",\n" +
                "       a.FFL_CONTENT,\n" +
                "       if(t3.pj2 is not null,concat_ws('',t3.pj1,t3.pj2,t3.pj3),'{\"TotalNum\":0,\"CourtNoticeInfo\":[]}'),\n" +
                "       if(c.eps is not null,concat_ws(',',c.eps,a.COURT_NAME),a.COURT_NAME),\n" +
                "       a.JOBID,\n" +
                "       \"0\" as isadd  " +
                "FROM " +
                        "(select * from a_dc_ep_ods.zs_t_getexecutejudgmentinfo_base WHERE casecode IS NOT NULL AND casecode !='' and length(jobid)<10) a "+
                "left JOIN linshi_data_temp b on a.docid=b.docid " +
                "left JOIN linshi_data_temp3 c on a.docid=c.docid " +
                "left JOIN linshi_data_temp1 d on a.docid=d.docid " +
                "left JOIN linshi_data_temp2 e on a.docid=e.docid " +
                "left join (select upper(trim(translate(translate(tc1.case_no,\"（\",\"(\"),\"）\",\")\"))) as case_no,'{\"TotalNum\":' as pj1,cast(count(distinct substr(tc1.lian_date,1,4)) as string) as pj2,'\\,\"CourtNoticeInfo\":[]}' as pj3 from (select * from a_dc_ep_dwi.t_courtnotice where lian_date is not null)tc1 group by upper(trim(translate(translate(tc1.case_no,\"（\",\"(\"),\"）\",\")\"))) )t3 on upper(trim(translate(translate(a.CASECODE,\"（\",\"(\"),\"）\",\")\"))) = upper(trim(translate(translate(t3.case_no,\"（\",\"(\"),\"）\",\")\")))  "
                );
        updateData.createOrReplaceTempView("tmp_t_judgement_detail1");
        spark.sql("insert into table a_dc_ep_incr.t_judgement_detail select * from tmp_t_judgement_detail1");
    }
}