package zjs.dc.controller;

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;


//******************** NOTE: remark (备注) and penalty_amount (行政处罚金额) — these two target fields are not yet mapped from the source *******************************

/**
 * Spark batch job that maintains the incremental administrative-penalty table
 * {@code a_dc_ep_incr.t_eci_penalty} from the ODS case-base tables.
 *
 * <p>Flow:
 * <ol>
 *   <li>If the current ODS batch ({@code zs_t_casebaseinfo}) has rows:
 *     <ul>
 *       <li><b>UPDATE</b> — join case records against {@code t_eci_company} on the
 *           normalized credit code and overwrite the incremental table
 *           (ISADD = '0').</li>
 *       <li><b>DELETE</b> — find credit codes present only in the delete table
 *           ({@code zs_t_casebaseinfo_del}), join them to existing DWI penalty
 *           rows, and append tombstone records (ISADD = '-1').</li>
 *     </ul>
 *   </li>
 *   <li>Otherwise the incremental table is truncated.</li>
 * </ol>
 *
 * <p>The row ID is {@code md5(CREDITCODE + PENAUTH_CN + PENDECNO)} after
 * upper-casing and trimming, so it is stable across batches for the same case.
 */
public class TEciPenalty {
    public static void main(String[] args) {
        SparkSession spark = SparkSession.builder()
                .appName("t_eci_penalty")
                .enableHiveSupport()
                .getOrCreate();
        try {
            spark.sparkContext().setLogLevel("ERROR");

            // NOTE: count() scans the whole table just to test emptiness;
            // Dataset.isEmpty() (Spark 2.4+) would be cheaper — confirm cluster version.
            Dataset<Row> zs_data = spark.sql("select * from a_dc_ep_ods.zs_t_casebaseinfo");
            long zs_count = zs_data.count();
            if (zs_count > 0) {

                // Processing steps: 1. UPDATE  2. DELETE
                // 1. UPDATE — rebuild current penalty rows for this batch.
                Dataset<Row> update_data = spark.sql("SELECT \n" +
                        // Stable surrogate key: credit code + authority + decision number.
                        "md5(CONCAT(UPPER(TRIM(A.CREDITCODE)),UPPER(TRIM(A.PENAUTH_CN)),UPPER(TRIM(A.PENDECNO)))) AS ID,\n" +
                        "B.KEY_NO,\n" +
                        "B.COMPANY_ID,\n" +
                        "B.COMPANY_NAME,\n" +
                        "A.PENDECNO AS DOC_NO,\n" +
                        "A.ILLEGACTTYPE AS PENALTY_TYPE,\n" +
                        "A.PENAUTH_CN AS OFFICE_NAME,\n" +
                        "A.PENCONTENT AS CONTENT,\n" +
                        "DATE_FORMAT(A.PENDECISSDATE,'yyyy-MM-dd') AS PENALTY_DATE,\n" +
                        "DATE_FORMAT(A.PUBLICDATE,'yyyy-MM-dd') AS PUBLIC_DATE,\n" +
                        // REMARK / PENALTY_AMOUNT have no source mapping yet (see file header).
                        "'' AS REMARK,\n" +
                        "'' AS PENALTY_AMOUNT,\n" +
                        "A.JOBID, '0' AS ISADD \n" +
                        "FROM \n" +
                        "(\n" +
                        "    select del.creditcode,c.* from a_dc_ep_ods.zs_t_casebaseinfo c\n" +
                        "    inner join a_dc_ep_ods.zs_t_casebaseinfo_del del on upper(trim(del.entid)) = upper(trim(c.entid))\n" +
                        "    where del.creditcode is not null and del.creditcode != '' \n" +
                        ") A INNER JOIN \n" +
                        "a_dc_ep_ods.t_eci_company B \n" +
                        "ON UPPER(TRIM(B.CREDIT_CODE)) = UPPER(TRIM(A.CREDITCODE))");
                update_data.createOrReplaceTempView("tmp_t_eci_penalty");
                // Overwrite: UPDATE rows fully replace the incremental table content.
                spark.sql("insert overwrite table a_dc_ep_incr.t_eci_penalty select distinct * from tmp_t_eci_penalty");

                // 2. DELETE — credit codes that appear only in the del table
                // (no matching entid in the base table) identify removed cases.
                spark.sql("select distinct del.creditcode,del.jobid from a_dc_ep_ods.zs_t_casebaseinfo_del del " +
                        "left join a_dc_ep_ods.zs_t_casebaseinfo c " +
                        "on upper(trim(c.entid)) = upper(trim(del.entid)) " +
                        "where c.entid is null and del.creditcode is not null and del.creditcode != ''").createOrReplaceTempView("tmp_del_code");
                // Attach credit_code to existing DWI penalty rows so they can be
                // matched against the delete list.
                spark.sql("select a.* ,b.credit_code from a_dc_ep_dwi.t_eci_penalty a " +
                        "inner join a_dc_ep_ods.t_eci_company b on a.company_id = b.company_id").createOrReplaceTempView("tmp_eci_penalty");
                Dataset<Row> delete_data = spark.sql("select \n" +
                        "p.id,\n" +
                        "p.key_no,\n" +
                        "p.company_id,\n" +
                        "p.company_name,\n" +
                        "p.doc_no,\n" +
                        "p.penalty_type,\n" +
                        "p.office_name,\n" +
                        "p.content,\n" +
                        "p.penalty_date,\n" +
                        "p.public_date,\n" +
                        "p.remark,\n" +
                        "p.penalty_amount,\n" +
                        // ISADD = '-1' marks a tombstone (deletion) record.
                        "del.jobid,'-1' as isadd from\n" +
                        "tmp_del_code del \n" +
                        "inner join tmp_eci_penalty p on upper(trim(del.creditcode)) = upper(trim(p.credit_code))");
                delete_data.createOrReplaceTempView("tmp_t_eci_penalty2");
                // Append (not overwrite): tombstones are added after the UPDATE rows.
                spark.sql("insert into table a_dc_ep_incr.t_eci_penalty select distinct a.* from tmp_t_eci_penalty2 a");
            } else {
                // Empty source batch: clear the incremental table instead.
                spark.sql("TRUNCATE TABLE a_dc_ep_incr.t_eci_penalty");
                System.out.println("中数本期数据涉及该表的数据为空。。。。。。");
            }
        } finally {
            // Always release the SparkSession, even when a query above throws;
            // the original code leaked the session on failure.
            spark.stop();
        }
    }
}
