#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time   :2024/3/25 09:57
# @Author :王凯
# @File   :__init__.py.py
# @Project:scrapy_spider
import concurrent.futures
import datetime
import os
import re
import sys
import warnings
from pathlib import Path

import numpy as np
import pandas as pd
import parsel
from bs4 import BeautifulSoup
from dateutil.parser import parse
from dateutil.relativedelta import relativedelta
from loguru import logger

sys.path.append(Path(__file__).parent.parent.parent.parent.as_posix())
from components.config import WFQ_SOURCE_MYSQL_CONFIG
from utils.db.mysqldb import MysqlDB
from utils.tools import (
    unescape,
    replace_str,
    TimeMixIn,
    ReMixIn,
    table_json,
)

warnings.filterwarnings("ignore")
os.environ["OPENAI_API_KEY"] = "9ffe46756ca6484eab69f8b8a1bb4e0d"
os.environ["OPENAI_API_BASE"] = "https://wfq-azure.openai.azure.com/"
os.environ["OPENAI_API_VERSION"] = "2023-03-15-preview"
os.environ["OPENAI_API_TYPE"] = "azure"
os.environ["OPENAI_GPT_DEPLOYMENT_NAME"] = "WF1-gpt-35-turbo-0301"


class BaseCleanModel:
    """Base class for the tax-policy cleaning pipeline.

    Provides MySQL access, keyword-rule evaluation (four rule types applied in
    the order 1 -> 2 -> 4 -> 3) and batched save helpers shared by concrete
    cleaning models.
    """

    # Shared holder for a label-rule DataFrame; stays None until a subclass loads it.
    label_df: pd.DataFrame = None

    def __init__(self):
        # Connection to the configured source MySQL instance.
        self.db = MysqlDB(
            ip=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_IP"],
            port=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_PORT"],
            db=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_DB"],
            user_name=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_USER_NAME"],
            user_pass=WFQ_SOURCE_MYSQL_CONFIG["MYSQL_USER_PASS"],
        )
        # Table names used across the pipeline (raw net_* -> clean_* -> result_*).
        self.net_tax_policy = "net_tax_policy"
        self.clean_tax_policy = "clean_tax_policy"
        self.clean_tax_policy_project = "clean_tax_policy_project"
        self.clean_tax_policy_department_mapping = "clean_tax_policy_department_mapping"
        self.clean_tax_policy_department = "clean_tax_policy_department"
        self.label_tax_policy = "label_tax_policy"
        self.label_tax_policy_rule = "label_tax_policy_rule"
        self.result_tax_policy = "result_tax_policy"
        self.result_tax_policy_project = "result_tax_policy_project"
        self.result_tax_policy_department = "result_tax_policy_department"
        self.result_tax_policy_department_mapping = (
            "result_tax_policy_department_mapping"
        )

    def get_data(
        self,
        table: str = "label_tax_policy",
        field: str = "*",
        conditions: str = "1=1",
        sql: str = None,
    ):
        """Run a SELECT and return rows as a list of dicts.

        If ``sql`` is given it is executed verbatim; otherwise the statement is
        built from ``table``/``field``/``conditions``.
        NOTE(review): the query is assembled via f-string interpolation, so
        callers must only pass trusted values (SQL-injection risk otherwise).
        """
        if sql is None:
            sql = f"select {field} from {table} where {conditions}"
        logger.info(sql)
        return self.db.find(sql, to_json=True)

    def _init_label_rule_data(self, field: str = "*", conditions: str = "1=1"):
        """Load labelling rules from the rule table; return (rows, DataFrame)."""
        label_data = self.get_data(
            table=self.label_tax_policy_rule, field=field, conditions=conditions
        )
        label_df = pd.DataFrame(label_data)
        return label_data, label_df

    @staticmethod
    def str_contains(txt, reg):
        # `reg` is a '|'-separated list of regexes; True if any of them matches txt.
        return bool(ReMixIn.reg_one(reg.split("|"), txt))

    def label_function_row(self, series, origin_field, target_field, rules: list):
        """Apply keyword rules to one row, writing a boolean into ``target_field``.

        Rule types (evaluated 1 -> 2 -> 4 -> 3, with early exit):
        1. negative keywords only (contains)
        2. exclude on positive match combined with a negative hit
        3. positive keywords only (contains)
        4. positive match AND negative keywords excluded
        """
        origin = series[origin_field]
        ret = False
        # Type-1 and type-2 rules can only veto (False short-circuits).
        rules_1 = [rule for rule in rules if str(rule["type"]) == "1"]
        ret_1 = self.deal_rule_type_1(rules_1, origin)
        if ret_1 is False:
            series[target_field] = ret_1
            return series

        rules_2 = [rule for rule in rules if str(rule["type"]) == "2"]
        ret_2 = self.deal_rule_type_2(rules_2, origin)
        if ret_2 is False:
            series[target_field] = ret_2
            return series

        # Type-4 and type-3 rules can only approve (True short-circuits).
        rules_4 = [rule for rule in rules if str(rule["type"]) == "4"]
        ret_4 = self.deal_rule_type_4(rules_4, origin)
        if ret_4 is True:
            series[target_field] = ret_4
            return series

        rules_3 = [rule for rule in rules if str(rule["type"]) == "3"]
        ret_3 = self.deal_rule_type_3(rules_3, origin)
        if ret_3 is True:
            series[target_field] = ret_3
            return series

        # No rule decided: default to False.
        series[target_field] = ret
        return series

    def deal_rule_type_1(self, rules, origin):
        """Type-1 rules: reject (return False) when ALL negative keywords match."""
        ret = True
        for rule in rules:
            negative_keyword = rule["negative_keyword"] or ""
            # '&' joins keywords that must ALL be present.
            if "&" in negative_keyword:
                negative_keyword = [i for i in negative_keyword.split("&") if i.strip()]
            else:
                negative_keyword = [negative_keyword] or []
            if str(rule["type"]) == "1":  # negative keywords only (contains)
                result = not (
                    all([self.str_contains(origin, i) for i in negative_keyword])
                )
            else:
                raise ValueError(f"出现新型规则类型 `{rule['type']}`，请及时更新代码")
            # logger.info(f'rule: {rule["type"],origin, positive_keyword, negative_keyword, result}')
            ret = ret and result
            if ret is False:
                return ret
        return ret

    def deal_rule_type_2(self, rules, origin):
        """Type-2 rules: reject when positives all match AND a negative also hits."""
        ret = True
        for rule in rules:
            positive_keyword = rule["positive_keyword"] or ""
            if "&" in positive_keyword:
                positive_keyword = [i for i in positive_keyword.split("&") if i.strip()]
            else:
                positive_keyword = [positive_keyword] or []
            negative_keyword = rule["negative_keyword"] or ""
            # NOTE(review): unlike type 1, negative_keyword is NOT split on '&'
            # here, so the comprehension below iterates the string character by
            # character — confirm this is intended and not a copy-paste slip.
            # Also note the bitwise '&' between boolean sub-expressions; Python
            # precedence makes this `not (not all(neg) & all(pos))`.
            if str(rule["type"]) == "2":  # exclude positive match + negative hit
                result = not (
                    not all([self.str_contains(origin, i) for i in negative_keyword])
                    & all([self.str_contains(origin, i) for i in positive_keyword])
                )
            else:
                raise ValueError(f"出现新型规则类型 `{rule['type']}`，请及时更新代码")
            # logger.info(f'rule: {rule["type"],origin, positive_keyword, negative_keyword, result}')
            ret = ret and result
            if ret is False:
                return ret
        return ret

    def deal_rule_type_3(self, rules, origin):
        """Type-3 rules: accept (return True) when ALL positive keywords match."""
        ret = True
        for rule in rules:
            positive_keyword = rule["positive_keyword"] or ""
            if "&" in positive_keyword:
                positive_keyword = [i for i in positive_keyword.split("&") if i.strip()]
            else:
                positive_keyword = [positive_keyword] or []
            # negative_keyword is read but never used for type-3 rules.
            negative_keyword = rule["negative_keyword"] or ""
            if str(rule["type"]) == "3":  # positive keywords only (contains)
                result = all([self.str_contains(origin, i) for i in positive_keyword])
            else:
                raise ValueError(f"出现新型规则类型 `{rule['type']}`，请及时更新代码")
            # logger.info(f'rule: {rule["type"],origin, positive_keyword, negative_keyword, result}')
            if result is True:
                return result
            ret = ret and result
        return ret

    def deal_rule_type_4(self, rules, origin):
        """Type-4 rules: accept when positives all match and negatives do not."""
        ret = True
        for rule in rules:
            positive_keyword = rule["positive_keyword"] or ""
            if "&" in positive_keyword:
                positive_keyword = [i for i in positive_keyword.split("&") if i.strip()]
            else:
                positive_keyword = [positive_keyword] or []
            negative_keyword = rule["negative_keyword"] or ""
            # NOTE(review): as in type 2, negative_keyword is iterated as a raw
            # string (per character, never split on '&') — verify intent.
            if str(rule["type"]) == "4":  # positive match, negatives excluded
                result = all(
                    [self.str_contains(origin, i) for i in positive_keyword]
                ) & (not all([self.str_contains(origin, i) for i in negative_keyword]))
            else:
                raise ValueError(f"出现新型规则类型 `{rule['type']}`，请及时更新代码")
            # logger.info(f'rule: {rule["type"],origin, positive_keyword, negative_keyword, result}')
            if result is True:
                return result
            ret = ret and result
        return ret

    def save(
        self,
        df,
        skip_fix=True,
        ship_fix_pk="document_id",
        skip_fix_condition="mark=1",
        save_table=None,
        batch_size=1000,
        **kwargs,
    ):
        """Persist ``df`` into ``save_table`` in batches.

        When ``skip_fix`` is True, rows whose ``ship_fix_pk`` already exists in
        the table under ``skip_fix_condition`` (manually "fixed" rows) are
        dropped before saving.  ``kwargs`` are forwarded to
        ``db.add_batch_smart`` (e.g. ``update_columns``).
        """
        if save_table is None:
            save_table = self.clean_tax_policy
        # Normalise pandas NaN to SQL NULL before building row dicts.
        df = df.replace({np.NaN: None})
        data = []
        if skip_fix:
            document_id = df[ship_fix_pk].unique().tolist()
            document_id_str = ",".join([f'"{i}"' for i in document_id])
            if document_id_str:
                db_data = self.db.find(
                    f"select {ship_fix_pk} from {save_table} where {skip_fix_condition} and {ship_fix_pk} in ({document_id_str})",
                    to_json=True,
                )
                frozen_document_id = [i[ship_fix_pk] for i in db_data]
                df = df[~df[ship_fix_pk].isin(frozen_document_id)]
                data = df.to_dict("records")
        else:
            data = df.to_dict("records")
        if data:
            for i in range(len(data) // batch_size + 1):
                tmp_save = data[i * batch_size:(i + 1) * batch_size]
                row_count = self.db.add_batch_smart(save_table, tmp_save, **kwargs)
                logger.info(
                    f"保存 {save_table} 数量「{len(tmp_save)}」影响「{row_count}」拓展字段 {kwargs}"
                )
        else:
            logger.info(f"没有需要保存的数据，表「{save_table}」")

    def save_mapping(
        self,
        df,
        skip_fix_condition="state=1",
        field="src_department,std_department,short_department,province,city,county,park,state",
        save_table=None,
        batch_size=1000,
        **kwargs,
    ):
        """Persist department-mapping rows, skipping those already present.

        For each batch a UNION ALL of per-row SELECTs probes the table; rows
        found there (under ``skip_fix_condition``) are removed via an
        anti-join before inserting the remainder.
        """
        if save_table is None:
            save_table = self.clean_tax_policy_department_mapping
        fields = field.split(",")
        data = df.to_dict("records")
        if data:
            for i in range(len(data) // batch_size + 1):
                tmp_save = data[i * batch_size:(i + 1) * batch_size]

                sub_sql = (
                    f"select {field} from {save_table} where {skip_fix_condition} and "
                )
                # One existence-probe SELECT per row; 'state' is excluded from
                # the match because it is the value being maintained.
                sub_sql_list = [
                    sub_sql
                    + " and ".join(
                        [
                            f"{i.strip()} = '{('' if d.get(i.strip()) is None else d.get(i.strip()))}' "
                            for i in fields
                            if i != "state"
                        ]
                    )
                    for d in tmp_save
                ]
                df_sub = pd.DataFrame(tmp_save)
                query_sql = " union all ".join(sub_sql_list)

                db_data = self.db.find(
                    query_sql,
                    to_json=True,
                )
                db_df = pd.DataFrame(db_data)
                if not db_df.empty:
                    # Anti-join: keep only rows not already in the DB.
                    diff_df = pd.merge(
                        df_sub, db_df, how="outer", indicator=True, on=fields
                    ).loc[lambda x: x["_merge"] == "left_only"]
                    tmp_save = df_sub[
                        df_sub["src_department"].isin(diff_df["src_department"])
                    ].to_dict("records")
                if tmp_save:
                    row_count = self.db.add_batch_smart(save_table, tmp_save, **kwargs)
                    logger.info(
                        f"保存 {save_table} 数量「{len(tmp_save)}」影响「{row_count}」拓展字段 {kwargs}"
                    )
        else:
            logger.info(f"没有需要保存的数据，表「{save_table}」")


class FitterKeywords(BaseCleanModel):
    """Cleans raw ``net_tax_policy`` rows into ``result_tax_policy``.

    Pipeline: normalise HTML and extract metadata (title, publish no., state,
    department), filter by label rules, map validity states, compute effective
    begin/end dates, de-duplicate by title and derive the admin hierarchy.
    """

    @staticmethod
    def delete_blank(text):
        """Unescape HTML entities and strip zero-width and whitespace chars."""
        text = unescape(text)
        text = replace_str(text, r"\u200b", "")
        text = replace_str(text, r"\s+", "")
        return text

    @staticmethod
    def _state_mapping(state):
        """Map a free-text validity label to 1 (valid) / 0 (partial) / -1 (void)."""
        if state:
            if any(c in state for c in ["有效", "长期有效", "全文有效", "1"]):
                result = 1
            elif any(
                c in state
                for c in [
                    "部分有效",
                    "条款失效",
                    "需要修改",
                    "暂时保留",
                    "已修改",
                    "修改",
                ]
            ):
                result = 0
            elif any(c in state for c in ["失效", "无效", "废止", "全文废止", "废弃"]):
                result = -1
            else:
                # raise ValueError(f"是否有效出现新状态:「{state}」")
                # Unknown label: log it and fall back to 0 rather than failing.
                logger.error(f"是否有效出现新状态:「{state}」")
                result = 0
        else:
            result = 0
        return result

    @staticmethod
    def _process_start_or_end_date(content, publish_date=None):
        """Extract effective begin/end dates from the policy HTML.

        Falls back to ``publish_date`` for the begin date and ``None`` for the
        end date when nothing parseable is found.
        """
        response = parsel.Selector(content)
        text = response.xpath("string()").get()

        this_year = parse(publish_date).year

        begin_date, end_date = publish_date, None

        # "自...起施行": explicit effective-from clause.
        begin_date = TimeMixIn.format_date(
            ReMixIn.reg_one([r"自(.{1,20})起施行"], text, reverse=True)
        )
        if not begin_date:
            # "自发布之日起N日后施行": N days after publication.
            after_days = ReMixIn.reg_one(
                [r"自发布之日起(.\d+)日后施行"], text, reverse=True
            )
            if after_days:
                begin_date = (
                    parse(TimeMixIn.format_date(publish_date))
                    + relativedelta(days=+int(after_days))
                ).strftime("%Y-%m-%d")
        if begin_date:
            if not re.findall(r"\d{4}-\d{2}-\d{2}", begin_date):
                new_begin_date = f"{this_year}年" + begin_date
                logger.debug(
                    f"日期格式化出错 自动转换为 「{begin_date}」-> 「{new_begin_date}」"
                )
                begin_date = TimeMixIn.format_date(new_begin_date)
                # NOTE(review): the value computed on the line above is discarded
                # immediately — begin_date is unconditionally reset to None, so
                # malformed dates always fall back to publish_date. Confirm intent.
                begin_date = None
        # "有效期至...": explicit expiry clause.
        end_date = TimeMixIn.format_date(
            ReMixIn.reg_one([r"有效期至(.{1,20})"], text, reverse=True)
        )
        return begin_date or publish_date, end_date or None

    def _sub_process_time(self, series):
        # Per-row wrapper: compute begin/end dates and attach them to the row.
        publish_date, content, source_url = series[
            ["publish_date", "content", "source_url"]
        ]
        begin_date, end_date = self._process_start_or_end_date(content, publish_date)
        # Debug output for dates that still are not in YYYY-MM-DD form.
        if begin_date and not re.findall(r"\d{4}-\d{2}-\d{2}", begin_date):
            print(begin_date, source_url)
        if end_date and not re.findall(r"\d{4}-\d{2}-\d{2}", end_date):
            print(end_date, source_url)
        series["begin_date"] = begin_date
        series["end_date"] = end_date
        return series

    def process_time(self, df):
        """Fill begin_date/end_date for every row."""
        logger.info("开始处理 开始结束时间")
        df = df.apply(self._sub_process_time, axis=1)
        return df

    @staticmethod
    def process_mark(df):
        # mark=0 flags the row as machine-cleaned (not manually fixed).
        df["mark"] = 0
        return df

    def process_state(self, df):
        """Map free-text validity labels to the numeric state column."""
        logger.info("开始处理 有效性")
        df["state"] = df["state"].map(self._state_mapping)
        return df

    @staticmethod
    def process_set_dup_by_title(df_pro: pd.DataFrame):
        """Soft de-duplication by title: older duplicates get state=-1.

        Rows are NOT dropped; duplicates other than the latest (by
        publish_date) are marked invalid instead.
        """
        logger.info(f"开始处理 根据标题去重 去重前 {df_pro.shape[0]}")
        df_pro = df_pro.sort_values(by="publish_date", ascending=True)
        df_condition = df_pro.duplicated(subset="title", keep="last")
        df_pro["state"] = df_pro["state"].mask(df_condition, -1)
        logger.info(f"根据标题去重 去重后 {df_pro.shape[0]}")
        logger.info(f"根据标题去重 去重前 {df_pro[['document_id', 'title']].to_dict('records')}")
        return df_pro

    @staticmethod
    def __parse_header_table(response):
        # Parse the first metadata table into a dict, normalising keys by
        # stripping trailing ":"/"："/">" suffixes and all whitespace.
        item = table_json(response)
        new_item = {}
        for k, v in item.items():
            tmp_k = replace_str(k, "([:：>].*)", "")
            if tmp_k:
                k = tmp_k
            k = "".join(k.split())
            new_item.update({k: v})
        return new_item

    def __process_content(self, content):
        """Sanitise article HTML: drop comments/img/script/style, strip
        style/class/id attributes, keep (and eventually rewrite) hrefs."""
        content = replace_str(content, "<!--(.|\n)*?-->")
        soup = BeautifulSoup(content, "html.parser")
        for tag in soup.find_all():
            if tag.name in ["img", "script", "style"]:
                tag.decompose()
            else:
                if not tag.attrs:
                    continue
                for j in ["style", "class", "id"]:
                    if j in tag.attrs:
                        del tag[j]
                if tag.name == "a":
                    if "href" in tag.attrs:
                        href = tag["href"]
                        # new_href = self.replace_oss_url(href)
                        new_href = href  # todo: rewrite to OSS-hosted URL
                        tag["href"] = new_href
        content = soup.decode()
        return content

    def _process_html_std(self, series):
        """Per-row HTML normalisation and metadata extraction."""
        content, source_url = series[["content", "source_url"]]
        content = str(content)
        response = parsel.Selector(content)
        series["content"] = self.__process_content(content)
        table_dict = self.__parse_header_table(response.xpath("//table[1]"))
        # Prefer <title> / meta-title over the scraped title, unless it is a
        # generic placeholder page title.
        title = self.delete_blank(
            response.xpath("string(//title)").get()
            or response.xpath("//meta[contains(@name, 'itle')]/@content").get()
            or series["title"]
        )
        series["title"] = (
            series["title"] if any(c in title for c in ["政策详情页"]) else title
        )
        # Header-table keys that map onto our metadata fields.
        rules = {
            "publish_no": ["文号", "备注/文号", "文件编号"],
            "state": ["有效性"],
            "department": ["发布机构", "责任部门", "发布单位"],
        }
        # if table_dict:
        #     logger.debug(f"{table_dict} {source_url}")
        for k, v in table_dict.items():
            for key, values in rules.items():
                if any(c in k for c in values):
                    # Existing value wins; table value only fills gaps.
                    tmp_v = series[key] or v
                    if isinstance(tmp_v, str):
                        series[key] = tmp_v.replace("\\", "").replace("\u200b", "")
                    else:
                        series[key] = tmp_v
        # Keep publish_no only if it looks like a real document number.
        if "publish_no" in series:
            publish_no = series["publish_no"]
            if publish_no:
                publish_no = self.delete_blank(publish_no)
                if any(c in publish_no for c in ["号"] + list(map(str, range(10)))):
                    if any(c in publish_no for c in ["编号"]):
                        series["publish_no"] = ""
                    else:
                        series["publish_no"] = publish_no
                else:
                    series["publish_no"] = ""
        return series

    def process_html(self, df_pro: pd.DataFrame) -> pd.DataFrame:
        """Normalise HTML and standardise metadata fields for every row."""
        logger.info("开始处理 html 并标准化字段")
        df_pro = df_pro.apply(self._process_html_std, axis=1)
        # NOTE(review): np.NAN is a legacy alias removed in NumPy 2.0.
        df_pro = df_pro.replace({np.NAN: None})
        return df_pro

    def filter_is_policy(
        self, df_pro: pd.DataFrame, label_name="是否规范性文件"
    ) -> pd.DataFrame:
        """Label rows via the rule table for ``label_name``.

        Rules are grouped by (origin, origin_field, target, target_field) and
        each group is applied in rule-type order via ``label_function_row``.
        """
        logger.info(f"开始过滤规范性政策文件 {df_pro.shape[0]}")
        label_data, label_df = self._init_label_rule_data(
            field="*",
            conditions=f"origin='net_tax_policy' and obj = '文件' and label_name = '{label_name}' and state=1",
        )
        rules_list_mapping = {}
        for i in label_data:
            list_mapping_key = (
                f'{i["origin"]}{i["origin_field"]}{i["target"]}{i["target_field"]}'
            )
            if list_mapping_key in rules_list_mapping:
                rules_list_mapping[list_mapping_key].append(i)
            else:
                rules_list_mapping[list_mapping_key] = [i]
        for k, tmp_rules in rules_list_mapping.items():
            # logger.debug(f"rules {tmp_rules}")
            # for r in tmp_rules:
            #     logger.debug(f"{r['type'],r['positive_keyword'], r['negative_keyword']}")
            tmp_label_df = pd.DataFrame(tmp_rules)
            tmp_label_df = tmp_label_df.sort_values(
                by="type", ascending=True
            )  # sort so rules are applied in type order
            df_pro = self._process_is_policy(label_df=tmp_label_df, df_pro=df_pro)
        return df_pro

    def _process_is_policy(self, label_df, df_pro):
        # Apply one rule group row-by-row; writes into the group's target_field.
        rules = label_df.to_dict(orient="records")
        df_pro_copy = df_pro.copy()
        if rules:
            df_pro_copy = df_pro_copy.apply(
                self.label_function_row,
                axis=1,
                args=(rules[0]["origin_field"], rules[0]["target_field"], rules),
            )
        return df_pro_copy

    @staticmethod
    def _department_split(department_text, to_list=False):
        """Strip parenthesised text and split a department string on 、/space/|.

        Returns the list when ``to_list`` is True, otherwise the first part.
        """
        tmp_department = re.sub(r"[（(](.*?)[）)]", "", department_text or "")
        if tmp_department:
            tmp_department_list = re.split(r"[、\s|]", tmp_department)
            if tmp_department_list:
                return tmp_department_list if to_list else tmp_department_list[0]
        return [tmp_department] if to_list else tmp_department

    def _fill_department(self, series):
        """Prefix bare 省/市/区/县 department names with the row's region name."""
        department, province, city, county = series[
            ["department", "province", "city", "county"]
        ]
        new_department = department
        if department:
            tmp_new_department_list = self._department_split(
                new_department, to_list=True
            )
            tmp_result = []
            for i in tmp_new_department_list:
                # NOTE(review): only the first branch tests the split part `i`;
                # the others test the whole `department` string — looks like it
                # should be `i.startswith(...)` throughout. Confirm.
                if i.startswith("省"):
                    tmp_result.append(province.replace("省", "") + i)
                elif department.startswith("市"):
                    tmp_result.append(city.replace("市", "") + i)
                elif department.startswith("区"):
                    tmp_result.append(county.replace("区", "") + i)
                elif department.startswith("县"):
                    tmp_result.append(county.replace("县", "") + i)
                else:
                    tmp_result.append(i)
            new_department = "|".join(tmp_result)
        series["new_department"] = new_department
        return series

    def _fill_department_by_database(self, df):
        """Re-map region/hierarchy columns from the department mapping table."""
        sub_set = ["new_department", "province", "city", "county", "park"]
        df["new_department"] = df["new_department"].map(self._department_split)
        df_pro = df[sub_set].drop_duplicates(subset=sub_set)
        logger.info(
            f"匹配数据库映射表数据 「{self.result_tax_policy_department_mapping}」{len(df_pro)}"
        )
        df_pro = df_pro.rename(columns={"new_department": "std_department"})
        all_infos = df_pro.to_dict("records")
        # One SELECT per distinct department tuple, unioned into a single query.
        conditions = [
            "(select {columns} from {table} where {cond})".format(
                columns=",".join(df_pro.columns.to_list() + ["hierarchy"]),
                table=self.result_tax_policy_department_mapping,
                cond=" and ".join(['{k}="{v}"'.format(k=k, v=v) for k, v in i.items()]),
            )
            for i in all_infos
        ]
        conditions_sql = " union all ".join(conditions)
        database_data = self.db.find(conditions_sql, to_json=True)

        # For each target column, fall back to the department string itself
        # when the mapping table has no entry.
        database_province_mapping = {
            i["std_department"]: i["province"] for i in database_data
        }
        df["province"] = df["new_department"].map(
            lambda x: database_province_mapping.get(x) or x
        )

        database_city_mapping = {i["std_department"]: i["city"] for i in database_data}
        df["city"] = df["new_department"].map(
            lambda x: database_city_mapping.get(x) or x
        )

        database_county_mapping = {
            i["std_department"]: i["county"] for i in database_data
        }
        df["county"] = df["new_department"].map(
            lambda x: database_county_mapping.get(x) or x
        )

        database_park_mapping = {i["std_department"]: i["park"] for i in database_data}
        df["park"] = df["new_department"].map(
            lambda x: database_park_mapping.get(x) or x
        )

        database_hierarchy_mapping = {
            i["std_department"]: i["hierarchy"] for i in database_data
        }
        df["hierarchy"] = df["new_department"].map(
            lambda x: database_hierarchy_mapping.get(x) or x
        )

        if "department" in df:
            df = df.drop(columns=["department"])
        df = df.rename(columns={"new_department": "department"})

        return df

    def _add_new_department(self, df):
        """Insert department mappings not yet present in the mapping table
        (state=0, i.e. awaiting manual review)."""
        sub_set = ["new_department", "department", "province", "city", "county", "park"]
        df_pro = df[sub_set].drop_duplicates(subset=sub_set)
        df_pro = df_pro.rename(
            columns={"new_department": "std_department", "department": "src_department"}
        )
        all_infos = df_pro.to_dict("records")
        conditions = [
            "(select {columns} from {table} where {cond})".format(
                columns=",".join(df_pro.columns.to_list()),
                table=self.result_tax_policy_department_mapping,
                cond=" and ".join(
                    [
                        "{}='{}'".format(k, (v or ""))
                        for k, v in i.items()
                        # NOTE(review): substring test, not equality — any key
                        # that is a substring of "std_department" is skipped.
                        if k not in "std_department"
                    ]
                ),
            )
            for i in all_infos
        ]
        conditions_sql = " union all ".join(conditions)
        database_data = self.db.find(conditions_sql, to_json=True)
        # Set difference on sorted item-tuples: rows not already in the DB.
        database_data_set = set(tuple(sorted(d.items())) for d in database_data)
        all_infos_set = set(tuple(sorted(d.items())) for d in all_infos)
        result = []
        for item in all_infos_set - database_data_set:
            tmp_dict = {**dict(item), **{"state": 0}}
            # A '|'-joined std_department means multiple issuers: one row each.
            if "|" in (tmp_dict.get("std_department") or ""):
                for i in self._department_split(
                    tmp_dict.get("std_department"), to_list=True
                ):
                    if i:
                        result.append(
                            {**tmp_dict, **{"std_department": i, "src_department": i}}
                        )
            else:
                if tmp_dict.get("std_department") and tmp_dict.get("src_department"):
                    result.append(tmp_dict)
        if result:
            # todo 含多个发布部门，可拆维表（政策id、标准发布部门作唯一标识）
            # insert ignore
            row_count = self.db.add_batch_smart(
                table=self.result_tax_policy_department_mapping, datas=result
            )
            logger.info(
                f"department「{len(result)}」新增 {self.result_tax_policy_department_mapping}「{row_count}」"
            )

    def process_department(self, df):
        """Full department pass: standardise, register new mappings, re-map."""
        logger.info("开始处理 发布部门")
        df_pro = df.copy()
        df_pro = df_pro.apply(self._fill_department, axis=1)  # fill std department
        self._add_new_department(df_pro)  # persist unseen mappings
        df = self._fill_department_by_database(df_pro)  # re-map from the DB
        return df

    @staticmethod
    def process_hierarchy(series):
        """Derive the admin hierarchy from which region fields are filled:
        province+city+county -> 区级, province+city -> 市级, province -> 省级."""
        department, province, city, county = series[
            ["department", "province", "city", "county"]
        ]
        if all([province, city, county]):
            series["hierarchy"] = "区级"
        elif all([province, city]):
            series["hierarchy"] = "市级"
        elif all([province]):
            series["hierarchy"] = "省级"
        return series

    def sub_run(self, df_pro, data_type="subsidy"):
        """Run the whole cleaning pipeline over ``df_pro`` and return it.

        ``data_type='subsidy'`` additionally filters through the rule-based
        "is policy" labels; other types keep every row (is_policy=1).
        """
        # subsidy policy
        df_pro = self.process_html(df_pro)
        df_pro["data_type"] = data_type  # tag the data type
        if data_type == "subsidy":
            if not df_pro.empty:
                df_pro = self.filter_is_policy(df_pro, label_name="是否规范性文件")
                df_pro = df_pro[df_pro["is_policy"]]
                if not df_pro.empty:
                    df_pro = self.filter_is_policy(df_pro, label_name="扶持补贴")
            # if not df_pro[~df_pro['is_policy']].empty:
            #     df_pro_copy = self.filter_is_policy(df_pro[~df_pro['is_policy']], label_name='扶持补贴')
            #     df_pro = pd.concat([df_pro[df_pro['is_policy']], df_pro_copy[df_pro_copy['is_policy']]])
            #     df_pro.drop_duplicates(inplace=True)
            if not df_pro.empty:
                df_pro = df_pro[df_pro["is_policy"]]
        else:
            df_pro["is_policy"] = 1
        logger.info(f"文件过滤完成 数量{df_pro.shape[0]}")
        if df_pro.empty:
            logger.info("数据被过滤完了！")
        else:
            df_pro = self.process_state(df_pro)
            df_pro = df_pro[(df_pro["state"] >= 0)]
            # if data_type == 'subsidy':
            #     df_pro = df_pro[(df_pro['publish_date'] > f"{(datetime.datetime.now().year - 5)}-01-01")]
            df_pro = self.process_mark(df_pro)
            df_pro = self.process_time(df_pro)
            df_pro = self.process_set_dup_by_title(df_pro)
            # df_pro = self.process_department(df_pro)  # would overwrite province/city/county
            df_pro = df_pro.apply(self.process_hierarchy, axis=1)
        return df_pro

    def run(self):
        """Single-document debug entry point: clean one fixed document_id."""
        # NOTE(review): province/city are set but unused here — confirm leftovers.
        province = "广东省"
        city = "广州市"
        src_data = self.get_data(
            table=self.net_tax_policy,
            field="document_id,publish_no,title,publish_date,department,content,source_url,source,province,city,county,park,state",
            # conditions="document_id in ('c451b15114a62bbcfc956a3950434b42', '135e84b89764d24b5d9e3f38a72852de', '5c1531f0ba6411e017179adeb7de2f81') and source!='国家税务总局'",
            conditions="document_id = '410ac1d2910825a5a503f4ba968adf85'",
            # conditions=f"update_time > '2023-07-01' limit 20000,48553",
            # conditions="source='国家税务总局'",
        )
        logger.info(f"{len(src_data)} rows found")
        df_pro = pd.DataFrame(src_data)
        df_pro = self.sub_run(df_pro, data_type="subsidy")
        self.save(
            df_pro,
            save_table=self.result_tax_policy,
            skip_fix_condition="mark=1",
            update_columns=[
                "hierarchy",
                "department",
                "title",
                "state",
                "publish_no",
                "is_policy",
                "data_type",
                "content",
            ],
            batch_size=100,
        )

    def run_one(self, sub_id_str):
        """Clean and save the rows whose ids are in ``sub_id_str`` (CSV of ids)."""
        src_data = self.get_data(
            table=self.net_tax_policy,
            field="document_id,publish_no,title,publish_date,department,content,source_url,source,province,city,county,park,state",
            # conditions="document_id in ('c451b15114a62bbcfc956a3950434b42', '135e84b89764d24b5d9e3f38a72852de', '5c1531f0ba6411e017179adeb7de2f81') and source!='国家税务总局'",
            # conditions=f"update_time > '2023-07-01' limit 20000",
            # conditions=f"update_time > '2023-07-01' limit 20000,48553",
            conditions=f" id in ({sub_id_str})",
            # conditions="source='国家税务总局'",
        )
        logger.info(f"{len(src_data)} rows found")
        df_pro = pd.DataFrame(src_data)
        df_pro = self.sub_run(df_pro, data_type="subsidy")
        df_pro = df_pro.replace({np.NAN: None})
        self.save(
            df_pro,
            save_table=self.result_tax_policy,
            skip_fix_condition="mark=1",
            update_columns=[
                "hierarchy",
                "department",
                "title",
                "state",
                "publish_no",
                "is_policy",
                "data_type",
                "content",
            ],
            batch_size=100,
        )

    def run_more(self, time_flag):
        """Fan out ``run_one`` over all rows updated after ``time_flag``,
        in id batches of 1000 on a 5-worker thread pool."""
        all_id = self.db.find(
            f"select id from net_tax_policy where update_time > '{time_flag}'",
            to_json=True,
        )
        batch_size = 1000
        logger.info(f"{len(all_id)} task start")
        with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
            for i in range(0, len(all_id) // batch_size + 1):
                sub_id = all_id[i * batch_size:(i + 1) * batch_size]
                sub_id_str = ",".join([f"'{i['id']}'" for i in sub_id])
                if sub_id_str:
                    executor.submit(self.run_one, sub_id_str)


if __name__ == "__main__":
    # Re-clean every record updated within the last seven days.
    cutoff_date = (datetime.datetime.now() - datetime.timedelta(days=7)).date()
    FitterKeywords().run_more(str(cutoff_date))
