# Copyright (c) 2015 Shiye Inc.
# All rights reserved.
# Author: lqp <liuqingpeng@shiyedata.com>
# Date:   2022/12/20

import math
from util.datetime_util import DatetimeUtil
from pyspark.sql import SparkSession
from pyspark.sql import Row, functions as F
from base.task_base import TaskBase


# BOC Suzhou Branch data customization - industry-chain operating-index table
# (dedicated to Bank of China, Suzhou Branch)
# 中行苏州分行数据定制-产业链经营指标表（中行苏州分行专用）

class JSAPPCHINInfo(TaskBase):
    """BOC Suzhou Branch task: build the industry-chain operating-index table.

    Target table: ``app_bocsz_ic_operate_index_info``.

    Pipeline (see :meth:`start`):
      1. ``init_bs``      - chain/company mapping from the graph tables.
      2. ``init_sk``      - listed-company universe; ``p_out_type`` splits the
                            base companies into listed (sk) / unlisted (gs).
      3. ``ex_sk``/``ex_gs`` - per-chain financial indicators for the listed
                            and unlisted populations respectively.
      4. ``ex_ss``        - per-chain lawsuit (risk) counters.
      5. ``final_union``  - join everything per quarter, compute YoY increases,
                            attach finger ids and save incrementally to Hive.
    """

    def __init__(self):
        super().__init__()
        # Spark session tuned for this job's large shuffles and broadcasts.
        self.spark = SparkSession.builder \
            .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer") \
            .config("spark.kryoserializer.buffer.max", "1024m") \
            .config("spark.kryoserializer.buffer", "512m") \
            .config("spark.yarn.executor.memoryOverhead", "2048") \
            .config("spark.storage.memoryFraction", "0.4") \
            .config("hive.exec.dynamic.partition", True) \
            .config("hive.exec.dynamic.partition.mode", "nonstrict") \
            .config("spark.sql.sources.partitionOverwriteMode", "DYNAMIC") \
            .config("spark.sql.broadcastTimeout", "36000") \
            .config("spark.driver.maxResultSize", "32G") \
            .config("spark.executor.memory", "16G") \
            .config("spark.driver.memory", "32G") \
            .getOrCreate()

        # Run dates (presumably yyyymmdd strings — TODO confirm DatetimeUtil formats).
        self.file_date = str(DatetimeUtil.get_today_str())
        self.modify_time = str(DatetimeUtil.get_datetime_now())
        self.yesterday = str(DatetimeUtil.get_n_day(-1, str_format="%Y%m%d"))
        self.three_day_ago = str(DatetimeUtil.get_n_day(-3, str_format="%Y%m%d"))
        # Platform selector ("cdh" / "emr"); decides which Hive databases are read.
        self.pt = "cdh"
        self.oth = {
            "cdh": "seeyii_data_house",
            "emr": "seeyii_emr_bus",
        }
        # First year of produced output rows.
        self.begin_year = 2020
        # First year of raw financial data (one extra year for YoY comparisons).
        self.start_year = 2019
        # Quarter end / begin dates, Q1..Q4 (zero-padded, matching yyyy-MM-dd).
        self.q_last_list = ["03-31", "06-30", "09-30", "12-31"]
        self.q_begin_list = ["01-01", "04-01", "07-01", "10-01"]
        # Current quarter number (1..4) and current year.
        self.cur_q = math.ceil(int(DatetimeUtil.get_today_str(str_format="%m")) / 3)
        self.cur_year = int(DatetimeUtil.get_today_str(str_format="%Y"))
        self.tb_name = "app_bocsz_ic_operate_index_info"
        self.is_debug = True

    def process(self, *args, **kwargs):
        """Task entry point: resolve the target database/table, then run."""
        self.bs_db = "seeyii_data_spc" if self.pt == "cdh" else "seeyii_emr_tert"
        self.tb_fall_name = f"{self.bs_db}.{self.tb_name}"
        self.start()

    def start(self):
        """Run the full pipeline: base mapping, sk/gs split, aggregation, save."""
        base_df = self.init_bs()
        self.__show("count -  show  ", " base_df  ", base_df)
        sk_df = self.init_sk()
        sk_part, gs_part = self.p_out_type(base_df, sk_df)
        self.__show("count -  show  ", " sk_part  ", sk_part)
        self.__show("count -  show  ", " gs_part  ", gs_part)
        sk_res = self.ex_sk(sk_part)
        self.__show("count -  show  ", " sk_res  ", sk_res)
        gs_res = self.ex_gs(gs_part)
        self.__show("count -  show  ", " gs_res  ", gs_res)
        ss_res = self.ex_ss(base_df)
        self.__show("count -  show  ", " ss_res  ", ss_res)
        self.final_union(base_df, gs_res, sk_res, ss_res)

    def ex_gs(self, df):
        """Aggregate yearly financial indicators per chain for UNLISTED companies.

        Sums tax/profit/revenue (in units of 10k) and headcount per chain-year;
        per-employee ratios divide by the flag sums so that companies with a
        missing numerator/denominator are excluded from the average.
        Returns one unioned DataFrame across all years.
        """
        self.init_fin_gscw(df).createOrReplaceTempView("fin_res")
        ls = []
        res_bk = []
        for i in range(self.start_year, self.cur_year + 1):
            sql = f"""
            select chainCode,chainName,"{i}-12-31" as endDate,
                sum(double(operate_tax))/10000 as operate_tax,
                sum(double(operate_profit))/10000 as operate_profit,
                sum(double(operate_revenue))/10000 as operate_revenue,
                sum(int(employment_num)) as employment_num,
                sum(double(operate_assets)) / abs(sum(operate_assets_flag)) as operate_assets,
                sum(double(operate_liabilities)) / abs(sum(operate_liabilities_flag)) as operate_liabilities,
                sum(double(operate_revenue)) / abs(sum(operate_revenue_employee_flag)) as employment_revenue,
                sum(double(totprofit)) / abs(sum(totprofit_employee_flag)) as employment_profit,
                sum(double(operate_tax)) / abs(sum(operate_tax_flag)) as employment_tax
            from fin_res where reportyear = "{i}"
            group by chainCode,chainName,reportyear
            """
            df_res = self.spark.sql(sql).distinct().cache()

            ls.append(df_res)

        self.merge_dfs(ls, 0, len(ls) - 1, res_bk)
        return res_bk[0]

    def ex_sk(self, df):
        """Aggregate quarterly financial indicators per chain for LISTED companies.

        Joins the annual-report data (tax, headcount) with the quarterly main
        financial index, then sums per chain and quarter end; per-employee
        ratios use headcount-weighted flags as denominators.
        Returns one unioned DataFrame across all years.
        """
        self.init_nb(df).createOrReplaceTempView("nb_table")
        self.init_fin_main_index(df).createOrReplaceTempView("fin_main_table")
        sql = """ select chainCode,chainName,compcode,operate_tax,
        if(employment_num is null, 0,employment_num) as employment_num,fiscalyear from nb_table """
        nb_base_df = self.spark.sql(sql).distinct().cache()
        sql = """
        select chainCode,chainName,compcode,enddate,operate_profit,operate_revenue,operate_assets,operate_liabilities,totalProfitb,
        if(operate_assets is null, 0, 1) as operate_assets_flag, 
        if(operate_liabilities is null, 0, 1) as operate_liabilities_flag
        from fin_main_table
        """
        fin_main_base_df = self.spark.sql(sql).distinct().cache()
        df.join(nb_base_df, ["chainCode", "chainName", "compcode"], "inner")\
            .join(fin_main_base_df, ["chainCode", "chainName", "compcode"], "inner").distinct().cache()\
            .createOrReplaceTempView("union_base_table")

        sql = """
        select chainCode,chainName,compcode,enddate,fiscalyear,operate_tax,operate_profit,operate_revenue,totalProfitb,
            operate_assets,operate_liabilities,operate_assets_flag,operate_liabilities_flag,employment_num,
            if(totalProfitb is null, 0, employment_num) as employment_profit_flag,
            if(operate_revenue is null, 0, employment_num) as operate_revenue_employee_flag,
            if(operate_tax is null, 0, employment_num) as operate_tax_flag
        from union_base_table
        """
        self.spark.sql(sql).distinct().cache().createOrReplaceTempView("ss_base_table")
        ls = []
        res_bk = []
        for i in range(self.start_year, self.cur_year + 1):
            # NOTE: the quarter-end literals must be zero-padded ("03-31"), because
            # enddate is normalized to yyyy-MM-dd in init_fin_main_index; the
            # previous "{i}-3-31" never matched and silently dropped Q1 rows.
            sql = f"""
            select chainCode,chainName,endDate,
                sum(double(operate_tax))/10000 as operate_tax,
                sum(double(operate_profit))/10000 as operate_profit,
                sum(double(operate_revenue))/10000 as operate_revenue,
                sum(int(employment_num)) as employment_num,
                sum(double(operate_assets)) / abs(sum(operate_assets_flag)) as operate_assets,
                sum(double(operate_liabilities)) / abs(sum(operate_liabilities_flag)) as operate_liabilities,
                sum(double(operate_revenue)) / abs(sum(operate_revenue_employee_flag)) as employment_revenue,
                sum(double(totalProfitb)) / abs(sum(employment_profit_flag)) as employment_profit,
                sum(double(operate_tax)) / abs(sum(operate_tax_flag)) as employment_tax
            from ss_base_table where string(enddate) in ( "{i}-03-31","{i}-06-30","{i}-09-30","{i}-12-31")
            group by chainCode,chainName,enddate
            """
            df_res = self.spark.sql(sql).distinct().cache()
            ls.append(df_res)

        self.merge_dfs(ls, 0, len(ls) - 1, res_bk)
        return res_bk[0]

    def ex_ss(self, df):
        """Compute per-chain lawsuit counters and their YoY increase.

        For each year: quarterly lawsuit counts (Q4 / "12-31" rows carry the
        full-year cumulative sum), joined against the same quarter of the
        previous year to derive ``risk_law_suit_num_increase`` (100 when the
        previous-year base is zero).
        Returns one unioned DataFrame across all years.
        """
        aaa = self.init_ss(df)
        self.__show("ex_ss -  show  ", " aaa  ", aaa)
        aaa.createOrReplaceTempView("ss_res")
        ls = []
        res_bk = []
        for i in range(self.begin_year, self.cur_year + 1):
            # Current year: per-quarter counts; the 12-31 row is the yearly total.
            sql = f"""
            with
                base_tt as (
            select chainCode,chainName,count(id) as risk_law_suit_num, count(compcode) as risk_law_suit_ent_num, 
            enddate as reportyear
            from ss_res where substring(string(enddate),0,4) = "{i}"
            group by chainCode,chainName,enddate
            ), sum_tt as (
            select chainCode,chainName,risk_law_suit_num,risk_law_suit_ent_num,reportyear from base_tt where substring(reportyear,-5) != "12-31"
            union
            select chainCode,chainName,sum(risk_law_suit_num) as risk_law_suit_num,sum(risk_law_suit_ent_num) as risk_law_suit_ent_num,
            concat("{i}","-12-31") as reportyear from base_tt group by chainCode,chainName
            ) select chainCode,chainName,risk_law_suit_num,risk_law_suit_ent_num,reportyear from sum_tt
            """
            self.spark.sql(sql).distinct().cache().createOrReplaceTempView("ls_tb")

            # Previous year: same shape, used as the YoY comparison base.
            sql = f"""
            with
                base_tt as (
            select chainCode,chainName,count(id) as risk_law_suit_num,enddate as reportyear
            from ss_res where substring(string(enddate),0,4) = "{i-1}"
            group by chainCode,chainName,enddate
            ), sum_tt as (
            select chainCode,chainName,risk_law_suit_num,reportyear from base_tt where substring(reportyear,-5) != "12-31"
            union
            select chainCode,chainName,sum(risk_law_suit_num) as risk_law_suit_num,concat("{i-1}","-12-31") as reportyear from base_tt group by chainCode,chainName
            ) select chainCode,chainName,risk_law_suit_num,reportyear from sum_tt
                    """
            self.spark.sql(sql).distinct().cache().createOrReplaceTempView("bf_tb")
            sql = """
            select a.chainCode,a.chainName,
            case when substring(a.reportyear,-5) == "03-31" then concat(substring(a.reportyear,0,4),"-01-01") 
             when substring(a.reportyear,-5) == "06-30" then concat(substring(a.reportyear,0,4),"-04-01") 
             when substring(a.reportyear,-5) == "09-30" then concat(substring(a.reportyear,0,4),"-07-01") 
             else concat(substring(a.reportyear,0,4),"-01-01") 
             end as beginDate,
            a.reportyear as endDate
            ,a.risk_law_suit_num,risk_law_suit_ent_num,
            if(int(abs(b.risk_law_suit_num)) == 0,100,(a.risk_law_suit_num-b.risk_law_suit_num)/abs(b.risk_law_suit_num) * 100) as risk_law_suit_num_increase
            from ls_tb as a left join bf_tb as b 
            on a.chainCode=b.chainCode and a.chainName=b.chainName 
            and int(substring(a.reportyear,0,4))=int(substring(b.reportyear,0,4))+1
            and substring(a.reportyear,-5) == substring(b.reportyear,-5)
            """
            df_res = self.spark.sql(sql).distinct().cache()
            ls.append(df_res)

        self.merge_dfs(ls, 0, len(ls) - 1, res_bk)
        return res_bk[0]

    def init_bs(self):
        """Return (chainCode, chainName, compcode) base mapping.

        Takes the latest valid snapshot (row_number over fingerid by filedate)
        of the chain graph table and joins it to the chain-company relation.
        """
        sql = f"""
        select chainCode,chainName,labelCode from(
            select chainCode,chainName,labelCode,dataStatus,row_number() over (partition by fingerid order by filedate DESC) num 
            from {self.bs_db}.app_bocsz_ic_graph_info
        ) t where t.num=1 and dataStatus!=3 
        """
        self.spark.sql(sql).distinct().cache().createOrReplaceTempView("bocsz_ic_graph_info_tb")
        sql = f"""select compcode,labelid
            from(
                select compcode,labelid,dataStatus,row_number() over (partition by fingerid order by filedate DESC) num 
                from {self.bs_db}.app_bocsz_ic_ent_rela
            ) t where t.num=1 and dataStatus!=3 and compCode is not NULL"""
        self.spark.sql(sql).distinct().cache().createOrReplaceTempView("bocsz_ic_ent_rela_tb")
        sql = """
        select chainCode,chainName,compcode from bocsz_ic_graph_info_tb as a left join bocsz_ic_ent_rela_tb as b on a.labelCode=b.labelid
        """
        return self.spark.sql(sql).distinct().cache()

    def init_sk(self):
        """Return the distinct compcodes of listed companies (latest filedate)."""
        sql = f"""select distinct compcode from {self.oth.get(self.pt)}.dwd_ms_base_sk_stock where filedate in (
        select max(filedate) as filedate from {self.oth.get(self.pt)}.dwd_ms_base_sk_stock
    ) and dataStatus != 3 and compcode is not NULL"""
        return self.spark.sql(sql).distinct().cache()

    def p_out_type(self, tt_df, sk_df):
        """Split the base companies into (listed, unlisted) DataFrames.

        A left join against the listed universe: matched rows are listed (sk),
        unmatched rows are unlisted (gs).
        """
        tt_df.createOrReplaceTempView("bocsz_cmp_tb")
        sk_df.createOrReplaceTempView("base_sk_stock_tb")
        sql = """
        select chainCode,chainName,a.compcode
    from bocsz_cmp_tb as a left join base_sk_stock_tb as b on a.compcode =b.compcode where b.compcode is not null"""
        sk_res = self.spark.sql(sql).distinct().cache()

        sql = """
        select chainCode,chainName,a.compcode
    from bocsz_cmp_tb as a left join base_sk_stock_tb as b on a.compcode =b.compcode where b.compcode is null"""
        gs_res = self.spark.sql(sql).distinct().cache()
        return sk_res, gs_res

    def init_nb(self, df):
        """Return annual-report tax and headcount per company, joined to *df*.

        Keeps only the latest record per sourceid; numeric fields are extracted
        with a regex because the raw columns are free text.
        """
        sql = f"""
        select compCode, if(totaltax is null, 0, totaltax) as operate_tax, employeesum as employment_num, fiscalyear 
        from(
            select compCode,reportid,
                    regexp_extract(totaltax, '[0-9]+', 0) as totaltax,    -- 纳税总额
                    regexp_extract(employeesum, '[0-9]+', 0) as employeesum,   -- 从业人数  同期
                    datastatus, fiscalyear, sourceid, row_number() over (partition by sourceid order by filedate desc, modifyTime desc) num
            from {self.oth.get(self.pt)}.dwd_me_news_comp_annual_report 
        ) t where t.num=1 and t.datastatus != 3 and sourceid is not null """
        return self.spark.sql(sql).join(df, ["compcode"], "inner").distinct().cache()

    def init_fin_main_index(self, df):
        """Return quarterly financial main-index rows (since 2019) joined to *df*.

        Normalizes the yyyyMMdd enddate to yyyy-MM-dd and keeps only quarter-end
        dates; monetary values are converted to units of 10k.
        """
        sql = f""" with
        base_df as (
            select compcode,concat(substring(string(enddate),0,4),"-",substring(string(enddate),5,2),"-",substring(string(enddate),-2)) as enddate,
                netProfit / 10000 as operate_profit,  -- 净利润
                bizIncome / 10000 as operate_revenue, -- 营业收入
                totAsset / 10000 as  operate_assets,   -- 资产总额
                totDebt / 10000 as operate_liabilities,  -- 负债总额
                totalProfitb / 10000 as totalProfitb
            from {self.oth.get(self.pt)}.ads_ms_fin_mainindex where filedate in (
                select max(filedate) as filedate from {self.oth.get(self.pt)}.ads_ms_fin_mainindex
            ) and dataStatus != 3 and compcode is not NULL 
        ) select compcode,enddate,operate_profit,operate_revenue,operate_assets,operate_liabilities,totalProfitb 
        from base_df where substring(string(enddate),0,4) >= "2019" and substring(enddate,-5) in ("03-31","06-30","09-30","12-31")
"""
        return self.spark.sql(sql).join(df, ["compcode"], "inner").distinct().cache()

    def init_ss(self, df):
        """Return lawsuit rows (id, compcode, quarter-end enddate) joined to *df*.

        Lawsuits are bucketed into the quarter containing their judgetime.
        """
        sql = f"""select id,submittime,judgetime
        from(
            select id,caseId,date_format(submittime, 'yyyy-MM-dd') as submittime,
            date_format(judgetime, 'yyyy-MM-dd') as judgetime
                ,datastatus,isvalid
                ,row_number() over (partition by id order by filedate desc) num 
                from {self.oth.get(self.pt)}.dwd_me_lega_suso
        ) t where t.num=1 AND t.datastatus !=3 AND t.isvalid= '1' 
        AND submittime <= date_format(current_timestamp(),'yyyy-MM-dd') 
        AND submittime >= '1970-01-01'"""
        self.spark.sql(sql).distinct().cache().createOrReplaceTempView("ss_tb")

        sql = f"""select pkid, compcode from(
        select pkid,compcode,datastatus,row_number() over (partition by fingerid order by filedate desc) num2 
            from {self.oth.get(self.pt)}.dwd_me_lega_suso_se 
    ) t where t.num2=1 and t.compcode is not null AND t.datastatus !=3
"""
        self.spark.sql(sql).distinct().cache().createOrReplaceTempView("ssse_tb")
        # The final branch is a plain ELSE: every remaining judgetime is after
        # 09-30, so it belongs to Q4. (The previous "< 09-30" condition was
        # unreachable and left those rows with a NULL enddate.)
        sql = """
        SELECT id, compcode, 
            case when substring(judgetime,-5) <= "03-31" then concat(substring(judgetime,0,4),"-03-31")
                 when substring(judgetime,-5) > "03-31" and substring(judgetime,-5) <= "06-30" then concat(substring(judgetime,0,4),"-06-30")
                 when substring(judgetime,-5) > "06-30" and substring(judgetime,-5) <= "09-30" then concat(substring(judgetime,0,4),"-09-30")
                 else concat(substring(judgetime,0,4),"-12-31") end as enddate
        FROM ss_tb AS tb1 JOIN ssse_tb AS tb2 
        ON tb1.id = tb2.pkid
        """
        return self.spark.sql(sql).join(df, ["compcode"], "inner").distinct().cache()

    def init_fin_gscw(self, df):
        """Return yearly industrial/commercial financials joined to *df*.

        The *_flag columns encode whether the matching value was present (used
        downstream as denominators so NULLs don't skew averages); per-employee
        flags carry the headcount so ratios are headcount-weighted.
        """
        sql = f"""
        select compcode, totaltaxpayer as operate_tax, netprofit as operate_profit, operincome as operate_revenue,
            if(employee is null, 0, employee) as employment_num, if(totprofit is not null, totprofit, 0) as totprofit,
            reportyear,
            if(totalassets is null, 0, totalassets) as operate_assets,
            if(totalliability is null, 0, totalliability) as operate_liabilities,
            if(totalassets is null, 0, 1) as operate_assets_flag,if(totalliability is null, 0, 1) as operate_liabilities_flag,
            if(operincome is not null,if(employee is null, 0, employee),0) as operate_revenue_employee_flag,
            if(totprofit is not null,if(employee is null, 0, employee),0) as totprofit_employee_flag,
            if(totaltaxpayer is null, 0,if(employee is null, 0, employee)) as operate_tax_flag
        from(
            select *,row_number() over (partition by fingerid order by filedate DESC) num from {self.oth.get(self.pt)}.dws_ms_fin_gscw
        ) t where t.num=1 and dataStatus!=3 and compCode is not NULL and reportyear >= "{self.start_year}" 
        """
        return self.spark.sql(sql).join(df, ["compcode"], "inner").distinct().cache()

    def merge_double_df(self, df1, df2):
        """Union two DataFrames by column name and cache the result."""
        return df1.unionByName(df2).cache()

    def merge_dfs(self, dfs, low, high, res_back):
        """Union ``dfs[low..high]`` pairwise (balanced) into ``res_back[0]``.

        The balanced pairing keeps the union plan shallow instead of building a
        linear left-fold lineage. A single-element list is returned as-is (the
        previous version unioned it with itself, duplicating every row).
        """
        if not dfs:
            return
        if len(dfs) == 1:
            res_back.append(dfs[0])
            return
        mid = math.ceil((low + high) / 2)
        merged = [self.merge_double_df(dfs[i], dfs[i + mid]) for i in range(mid)]
        if len(dfs) - mid * 2 != 0:
            # Odd count: carry the unpaired tail DataFrame forward.
            merged.append(dfs[high])
        if len(merged) == 1:
            res_back.append(merged[0])
            return
        self.merge_dfs(merged, 0, len(merged) - 1, res_back)

    @staticmethod
    def add_finger_id(row):
        """Attach a deterministic fingerid computed over the business columns.

        ``gen_graph_id`` is presumably provided by TaskBase — TODO confirm.
        """
        row_dict = row.asDict()
        row_dict["fingerid"] = JSAPPCHINInfo.gen_graph_id([
            row_dict.get("chainCode"),
            row_dict.get("chainName"),
            row_dict.get("beginDate"),
            row_dict.get("endDate"),
            row_dict.get("operate_tax"),
            row_dict.get("operate_profit"),
            row_dict.get("operate_profit_increase"),
            row_dict.get("operate_revenue"),
            row_dict.get("operate_revenue_increase"),
            row_dict.get("operate_assets"),
            row_dict.get("operate_liabilities"),
            row_dict.get("credit_ent_num"),
            row_dict.get("credit_ent_increase"),
            row_dict.get("credit_loan_balance"),
            row_dict.get("credit_loan_balance_increase"),
            row_dict.get("employment_num"),
            row_dict.get("employment_num_increase"),
            row_dict.get("employment_revenue"),
            row_dict.get("employment_profit"),
            row_dict.get("employment_tax"),
            row_dict.get("risk_law_suit_ent_num"),
            row_dict.get("risk_law_suit_num"),
            row_dict.get("risk_law_suit_num_increase")
        ])
        return Row(**row_dict)

    def pz_base(self):
        """Return the skeleton rows: every chain crossed with every quarter.

        One row per (chain, year, quarter) from begin_year onward, carrying
        beginDate/endDate from the quarter lists.
        """
        res_ls = []
        res_bk = []
        for i in range(self.begin_year, self.cur_year + 1):
            for j in range(4):
                sql = f"""
                select chainCode,chainName,concat("{i}","-", "{self.q_begin_list[j]}") as beginDate,concat("{i}","-", "{self.q_last_list[j]}") as endDate from(
                    select chainCode,chainName,labelCode,dataStatus,row_number() over (partition by fingerid order by filedate DESC) num 
                    from {self.bs_db}.app_bocsz_ic_graph_info
                ) t where t.num=1 and dataStatus!=3 
                """
                res_ls.append(self.spark.sql(sql).distinct().cache())

        self.merge_dfs(res_ls, 0, len(res_ls) - 1, res_bk)
        self.__show("pz_base  count -  show  ", " res_df  ", res_bk[0], flag=True, num=100)
        return res_bk[0]

    def final_gen_inc(self, gsdf, skdf):
        """Merge listed + unlisted indicators and compute YoY increases.

        Sums the two populations per chain/quarter, then joins each quarter to
        the same quarter of the previous year to derive profit/revenue/headcount
        increase percentages (100 when the previous-year base is zero).
        """
        gsdf.unionByName(skdf).distinct().cache().createOrReplaceTempView("nor_df")
        sql = """
        select chainCode,chainName,endDate,
        sum(double(operate_tax)) as operate_tax,sum(double(operate_profit)) as operate_profit,sum(double(operate_revenue)) as operate_revenue,sum(double(employment_num)) as employment_num,sum(double(operate_assets)) as operate_assets,sum(double(operate_liabilities)) as operate_liabilities,sum(double(employment_revenue)) as employment_revenue,sum(double(employment_profit)) as employment_profit,sum(double(employment_tax)) as employment_tax
        from nor_df group by chainCode,chainName,endDate
        """
        self.spark.sql(sql).distinct().cache().createOrReplaceTempView("fin_res")
        ls = []
        res_bk = []
        for i in range(self.begin_year, self.cur_year + 1):
            for j in range(4):
                sql = f"""
                select chainCode,chainName,endDate,
                    operate_tax,operate_profit,operate_revenue,employment_num,operate_assets,operate_liabilities,employment_revenue,employment_profit,employment_tax
                from fin_res where endDate = "{i}-{self.q_last_list[j]}"
                """
                self.spark.sql(sql).distinct().cache().createOrReplaceTempView("ls_tb")

                sql = f"""
                select chainCode,chainName,operate_profit,operate_revenue,employment_num
                from fin_res 
                where endDate = "{i-1}-{self.q_last_list[j]}"
                """
                self.spark.sql(sql).distinct().cache().createOrReplaceTempView("bf_tb")

                sql = """
                select a.chainCode,a.chainName,endDate,operate_tax,a.operate_profit,
                if(int(abs(b.operate_profit)) == 0,100,(a.operate_profit-b.operate_profit)/abs(b.operate_profit) * 100) as operate_profit_increase,
                a.operate_revenue,
                if(int(abs(b.operate_revenue)) == 0,100,(a.operate_revenue-b.operate_revenue)/abs(b.operate_revenue) * 100) as operate_revenue_increase,
                operate_assets,operate_liabilities,a.employment_num,
                if(int(abs(b.employment_num)) == 0,100,(a.employment_num-b.employment_num)/b.employment_num * 100) as employment_num_increase,
                employment_revenue,
                employment_profit,employment_tax
                from ls_tb as a left join bf_tb as b
                on a.chainCode=b.chainCode and a.chainName=b.chainName
                """
                df_res = self.spark.sql(sql).distinct().cache()
                ls.append(df_res)

        self.merge_dfs(ls, 0, len(ls) - 1, res_bk)
        self.__show("final_gen_inc    ", " res_bk[0]  ", res_bk[0], 1000, True)
        return res_bk[0]

    def final_union(self, base_df, gs_res, sk_res, ss_res):
        """Assemble the final output rows and save them incrementally.

        Joins the chain/quarter skeleton with the financial and lawsuit
        indicators, rounds values, attaches fingerid and audit columns, and
        normalizes the Q4 beginDate to 01-01 (annual cumulative semantics).
        """
        self.pz_base().createOrReplaceTempView("base_df")
        self.final_gen_inc(gs_res, sk_res).createOrReplaceTempView("nor_df")
        ss_res.createOrReplaceTempView("ss_res")

        sql = """
        select chainCode,chainName,endDate,ROUND(operate_tax, 4) as operate_tax,ROUND(operate_profit, 4) as operate_profit,
            ROUND(operate_profit_increase, 4) as operate_profit_increase,ROUND(operate_revenue, 4) as operate_revenue,
            ROUND(operate_revenue_increase, 4) as operate_revenue_increase,ROUND(operate_assets, 4) as operate_assets,
            ROUND(operate_liabilities, 4) as operate_liabilities,int(employment_num) as employment_num,
            ROUND(employment_num_increase, 4) as employment_num_increase,ROUND(employment_revenue, 4) as employment_revenue,
            ROUND(employment_profit, 4) as employment_profit,ROUND(employment_tax, 4) as employment_tax
        from nor_df
        """
        normal_df = self.spark.sql(sql).distinct().cache()

        sql = """
        select chainCode,chainName,endDate,
            risk_law_suit_ent_num,risk_law_suit_num,ROUND(risk_law_suit_num_increase, 4) as risk_law_suit_num_increase
        from ss_res
        """
        ss_df = self.spark.sql(sql).distinct().cache()
        aaa = normal_df.join(ss_df, ["chainCode", "chainName", "endDate"], "full").distinct().cache()
        aaa.createOrReplaceTempView("ss_res1")
        self.__show("aaa  count -  show  ", " cur_df  ", aaa, num=1000)

        # Credit columns are not available in this build; emitted as NULL
        # placeholders to keep the target-table schema complete.
        sql = f"""
        select a.*,operate_tax,operate_profit,operate_profit_increase,
            operate_revenue,operate_revenue_increase,operate_assets,operate_liabilities,
            null as credit_ent_num,null as credit_ent_increase,null as credit_loan_balance,null as credit_loan_balance_increase,employment_num,
            employment_num_increase,employment_revenue,employment_profit,employment_tax,
            risk_law_suit_ent_num,risk_law_suit_num,risk_law_suit_num_increase,1 as dataStatus
        from base_df as a join ss_res1 as d
        on a.chainCode=d.chainCode and a.chainName=d.chainName and a.endDate=d.endDate
        where replace(a.endDate,"-", "") <= "{self.file_date}" 
        """
        cur_df = self.spark.sql(sql).distinct().cache()
        self.__show("final_union  count -  show  ", " cur_df  ", cur_df, num=1000)
        f_df = self.spark.createDataFrame(cur_df.rdd.map(self.add_finger_id), samplingRatio=1) \
            .distinct() \
            .withColumn("filedate", F.lit(self.file_date)) \
            .withColumn("datastatus", F.lit(1)) \
            .withColumn("createTime", F.lit(self.modify_time)) \
            .withColumn("modifytime", F.lit(self.modify_time))

        f_df.createOrReplaceTempView("f_df_tb")
        sql = """ 
        select chainCode,chainName,if(substring(beginDate,-5) == "10-01", concat(substring(string(beginDate),0,4),"-", "01-01"),beginDate) as beginDate,
        endDate,operate_tax,operate_profit,operate_profit_increase,operate_revenue,operate_revenue_increase,
        operate_assets,operate_liabilities,credit_ent_num,credit_ent_increase,credit_loan_balance,credit_loan_balance_increase,employment_num,
        employment_num_increase,employment_revenue,employment_profit,employment_tax,risk_law_suit_ent_num,risk_law_suit_num,risk_law_suit_num_increase,
        fingerid,dataStatus,createTime,modifyTime,filedate from f_df_tb
        """
        self.compare_data_save_inc(self.spark.sql(sql).distinct().cache(), self.tb_fall_name)

    def compare_data_save_inc(self, cur_df, tb, partition_by="fingerid", unique_id="fingerid"):
        """Incrementally persist *cur_df* into Hive table *tb*.

        Rows present in Hive but absent from *cur_df* are re-written with
        datastatus=3 (soft delete); rows new in *cur_df* are appended. Uses the
        inherited ``df_is_empty``/``hive_tb_clear`` helpers from TaskBase.
        """
        sql = f"""select * from {tb}"""
        old_data = self.hive_tb_clear(self.spark.sql(sql), [partition_by]).where("datastatus!=3").cache()
        inc_df = cur_df.join(old_data, [unique_id], how="left_anti").distinct().cache()
        if not self.df_is_empty(old_data):
            dec_df = old_data.join(cur_df, [unique_id], how="left_anti").distinct().cache()
            if not self.df_is_empty(dec_df):
                final_df = dec_df.drop("datastatus").withColumn("filedate", F.lit(self.file_date)) \
                    .withColumn("datastatus", F.lit(3)).distinct().cache()

                final_df.write.partitionBy("filedate").format("hive").mode("append").saveAsTable(tb)

        if not self.df_is_empty(inc_df):
            inc_df.distinct().write.partitionBy("filedate").format("hive").mode("append").saveAsTable(tb)
        else:
            print("无增量", tb)

    def __show(self, name="", msg="", df=None, num=100, flag=False):
        """Debug helper: print *df*'s count and first *num* rows when debugging
        is enabled (or *flag* forces output)."""
        if self.is_debug or flag:
            print(f"""-{name} ------>  {msg}""")
            if df is not None:
                print("count---->  ", df.count())
                df.show(num, truncate=False)


if __name__ == '__main__':
    # Script entry point: run the full extraction/aggregation/save pipeline.
    JSAPPCHINInfo().process()