import datetime
import json
import time
import math
import re
import traceback
import urllib
from urllib import parse
import base64

from parsel import Selector
from re_common.baselibrary.database.mysql import json_update
from re_common.baselibrary.utils.basedict import BaseDicts
from re_common.baselibrary.utils.basetime import BaseTime
from re_common.baselibrary.utils.baseurl import BaseUrl
from re_common.vip.baseencodeid import BaseLngid

from apps.crawler_platform.core_platform.core_sql import CoreSqlValue
from apps.crawler_platform.core_platform.g_model import DealModel, CallBackModel, DealInsertModel, DealUpdateModel, \
    OperatorSqlModel, DealItemModel, \
    EtlDealModel, PolicyListModel, PolicyArticleModel

# Public API of this callbacks module.  The crawler platform resolves these
# callback names dynamically (by string), so every list/article/etl callback
# that should be reachable from task configuration must be listed here.
# NOTE(review): grouping below is inferred from the name prefixes — confirm
# against the task configuration tables.
__all__ = [
    # --- central government (www.gov.cn) and ministry-level sources ---
    "policy_govlist_callback",
    "policy_govlist1_callback",
    "policy_govlist2_callback",
    "policy_govlist3_callback",
    "policy_govlist4_callback",
    "policy_govlist5_callback",
    "policy_govlist6_callback",
    "policy_govarticle_callback",
    "policy_govarticle_etl_callback",
    "policy_ndrclist_callback",
    "policy_ndrclist1_callback",
    "policy_ndrclist2_callback",
    "policy_ndrcarticle_callback",
    "policy_ndrcarticle_etl_callback",
    "policy_mostlist_callback",
    "policy_mostlist1_callback",
    "policy_mostarticle_callback",
    "policy_mostarticle_etl_callback",
    "policy_mohrsslist_callback",
    "policy_mohrsslist1_callback",
    "policy_mohrssarticle_callback",
    "policy_mohrssarticle_etl_callback",
    "policy_meelist_callback",
    "policy_meelist1_callback",
    "policy_meearticle_callback",
    "policy_meearticle_etl_callback",
    "policy_motlist_callback",
    "policy_motlist1_callback",
    "policy_motarticle_callback",
    "policy_motarticle_etl_callback",
    "policy_moalist_callback",
    "policy_moalist1_callback",
    "policy_moaarticle_callback",
    "policy_moaarticle_etl_callback",
    "policy_mctlist_callback",
    "policy_mctarticle_callback",
    "policy_mctarticle_etl_callback",
    "policy_moelist_callback",
    "policy_moearticle_callback",
    "policy_moearticle_etl_callback",
    "policy_mcalist_callback",
    "policy_mcaarticle_callback",
    "policy_mcaarticle_etl_callback",
    "policy_mcalist1_callback",
    "policy_mcaarticle1_callback",
    "policy_mcaarticle1_etl_callback",
    "policy_moflist_callback",
    "policy_moflist1_callback",
    "policy_mofarticle_callback",
    "policy_mofarticle_etl_callback",
    "policy_mnrlist_callback",
    "policy_mnrlist1_callback",
    "policy_mnrarticle_callback",
    "policy_mnrarticle_etl_callback",
    "policy_mohurdlist_callback",
    "policy_mohurdlist1_callback",
    "policy_mohurdlist2_callback",
    "policy_mohurdlist3_callback",
    "policy_mohurdarticle_callback",
    "policy_mohurdarticle_etl_callback",
    "policy_mwrlist_callback",
    "policy_mwrarticle_callback",
    "policy_mwrarticle_etl_callback",
    "policy_mofcomlist_callback",
    "policy_mofcomarticle_callback",
    "policy_mofcomarticle_etl_callback",
    "policy_nhclist_callback",
    "policy_nhcarticle_callback",
    "policy_memlist_callback",
    "policy_memarticle_callback",
    "policy_memarticle_etl_callback",
    "policy_sasaclist_callback",
    "policy_sasacarticle_callback",
    "policy_sasacarticle_etl_callback",
    "policy_sasaclist1_callback",
    "policy_sasacarticle1_callback",
    "policy_sasacarticle1_etl_callback",
    "policy_samrlist_callback",
    "policy_samrlist1_callback",
    "policy_samrarticle_callback",
    "policy_samrarticle_etl_callback",
    "policy_chinataxlist_callback",
    "policy_chinataxlist1_callback",
    "policy_chinataxlist2_callback",
    "policy_chinataxlist3_callback",
    "policy_chinataxarticle_callback",
    "policy_chinataxarticle_etl_callback",
    "policy_nhsalist_callback",
    "policy_nhsaarticle_callback",
    "policy_nhsaarticle_etl_callback",
    "policy_ggjlist_callback",
    "policy_ggjarticle_callback",
    "policy_ggjarticle_etl_callback",
    "policy_cbirclist_callback",
    "policy_cbirclist1_callback",
    "policy_cbircarticle_callback",
    "policy_cbircarticle_etl_callback",
    "policy_neriscsrclist_callback",
    "policy_neriscsrcarticle_callback",
    "policy_neriscsrcarticle_etl_callback",
    "policy_neriscsrclist1_callback",
    "policy_neriscsrcarticle1_callback",
    "policy_neriscsrcarticle1_etl_callback",
    "policy_xzfgmojlist_callback",
    "policy_xzfgmojarticle_callback",
    "policy_xzfgmojarticle_etl_callback",

    # --- provincial / municipal government sources ---
    "policy_beijinglist_callback",
    "policy_beijinglist1_callback",
    "policy_beijinglist2_callback",
    "policy_beijingarticle_callback",
    "policy_beijingarticle_etl_callback",
    "policy_tianjinlist_callback",
    "policy_tianjinlist1_callback",
    "policy_tianjinarticle_callback",
    "policy_tianjinarticle_etl_callback",
    "policy_shanghailist_callback",
    "policy_shanghailist1_callback",
    "policy_shanghaiarticle_callback",
    "policy_shanghaiarticle_etl_callback",
    "policy_chongqinglist_callback",
    "policy_chongqinglist1_callback",
    "policy_chongqingarticle_callback",
    "policy_chongqingarticle_etl_callback",
    "policy_chongqingarticle1_callback",
    "policy_chongqingarticle1_etl_callback",
    "policy_hebeilist_callback",
    "policy_hebeiarticle_callback",
    "policy_hebeiarticle_etl_callback",
    "policy_liaoninglist_callback",
    "policy_liaoninglist1_callback",
    "policy_liaoningarticle_callback",
    "policy_liaoningarticle_etl_callback",
    "policy_jilinlist_callback",
    "policy_jilinlist1_callback",
    "policy_jilinarticle_callback",
    "policy_jilinarticle_etl_callback",
    "policy_heilongjianglist_callback",
    "policy_heilongjianglist1_callback",
    "policy_heilongjiangarticle_callback",
    "policy_heilongjiangarticle_etl_callback",
    "policy_jiangsulist_callback",
    "policy_jiangsuarticle_callback",
    "policy_jiangsuarticle_etl_callback",
    "policy_zhejianglist_callback",
    "policy_zhejianglist1_callback",
    "policy_zhejianglist2_callback",
    "policy_zhejiangarticle_callback",
    "policy_zhejiangarticle_etl_callback",
    "policy_anhuilist_callback",
    "policy_anhuiarticle_callback",
    "policy_anhuiarticle_etl_callback",
    "policy_fujianlist_callback",
    "policy_fujianarticle_callback",
    "policy_fujianarticle_etl_callback",
    "policy_jiangxilist_callback",
    "policy_jiangxiarticle_callback",
    "policy_jiangxiarticle_etl_callback",
    "policy_shandonglist_callback",
    "policy_shandonglist1_callback",
    "policy_shandonglist2_callback",
    "policy_shandongarticle_callback",
    "policy_shandongarticle_etl_callback",
    "policy_henanlist_callback",
    "policy_henanlist1_callback",
    "policy_henanlist2_callback",
    "policy_henanarticle_callback",
    "policy_henanarticle_etl_callback",
    # Hubei list/article callbacks are intentionally disabled; only the ETL
    # step remains exported.
    # "policy_hubeilist_callback",
    # "policy_hubeiarticle_callback",
    "policy_hubeiarticle_etl_callback",
    "policy_hunanlist_callback",
    "policy_hunanarticle_callback",
    "policy_hunanarticle_etl_callback",
    "policy_guangdonglist_callback",
    "policy_guangdongarticle_callback",
    "policy_guangdongarticle_etl_callback",
    "policy_hainanlist_callback",
    "policy_hainanarticle_callback",
    "policy_hainanarticle_etl_callback",
    "policy_sichuanlist_callback",
    "policy_sichuanlist1_callback",
    "policy_sichuanarticle_callback",
    "policy_sichuanarticle_etl_callback",
    "policy_guizhoulist_callback",
    "policy_guizhouarticle_callback",
    "policy_guizhouarticle_etl_callback",
    "policy_yunnanlist_callback",
    "policy_yunnanarticle_callback",
    "policy_yunnanarticle_etl_callback",
    "policy_shaanxilist_callback",
    "policy_shaanxiarticle_callback",
    "policy_shaanxiarticle_etl_callback",
    "policy_qinghailist_callback",
    "policy_qinghaiarticle_callback",
    "policy_qinghaiarticle_etl_callback",
    "policy_neimenglist_callback",
    "policy_neimenglist1_callback",
    "policy_neimengarticle_callback",
    "policy_neimengarticle_etl_callback",
    "policy_guangxilist_callback",
    "policy_guangxilist1_callback",
    "policy_guangxiarticle_callback",
    "policy_guangxiarticle_etl_callback",
    "policy_xizanglist_callback",
    "policy_xizangarticle_callback",
    "policy_xizangarticle_etl_callback",
    "policy_ningxialist_callback",
    "policy_ningxiaarticle_callback",
    "policy_ningxiaarticle_etl_callback",
    "policy_shanxilist_callback",
    "policy_shanxiarticle_callback",
    "policy_shanxiarticle_etl_callback",
    "policy_xinjianglist_callback",
    "policy_xinjianglist1_callback",
    "policy_xinjiangarticle_callback",
    "policy_xinjiangarticle_etl_callback",
    "policy_fzggwzjlist_callback",
    "policy_fzggwzjarticle_callback",
    "policy_fzggwzjarticle_etl_callback",
    "policy_jxtzjlist_callback",
    "policy_jxtzjarticle_callback",
    "policy_jxtzjarticle_etl_callback",
    "policy_kjtzjlist_callback",
    "policy_kjtzjarticle_callback",
    "policy_kjtzjarticle_etl_callback",
    "policy_jytzjlist_callback",
    "policy_jytzjarticle_callback",
    "policy_jytzjarticle_etl_callback",
    "policy_mztzjlist_callback",
    "policy_mztzjarticle_callback",
    "policy_mztzjarticle_etl_callback",
    "policy_cztzjlist_callback",
    "policy_cztzjarticle_callback",
    "policy_cztzjarticle_etl_callback",
    "policy_rlsbtzjlist_callback",
    "policy_rlsbtzjarticle_callback",
    "policy_rlsbtzjarticle_etl_callback",
    "policy_nynctzjlist_callback",
    "policy_nynctzjarticle_callback",
    "policy_nynctzjarticle_etl_callback",
    "policy_jstzjlist_callback",
    "policy_jstzjarticle_callback",
    "policy_jstzjarticle_etl_callback",
    "policy_wsjkwzjlist_callback",
    "policy_wsjkwzjarticle_callback",
    "policy_wsjkwzjarticle_etl_callback",
    "policy_hangzhoulist_callback",
    "policy_hangzhouarticle_callback",
    "policy_hangzhouarticle_etl_callback",
    "policy_ningbolist_callback",
    "policy_ningboarticle_callback",
    "policy_ningboarticle_etl_callback",
    "policy_wenzhoulist_callback",
    "policy_wenzhouarticle_callback",
    "policy_wenzhouarticle_etl_callback",
    "policy_huzhoulist_callback",
    "policy_huzhouarticle_callback",
    "policy_huzhouarticle_etl_callback",
    "policy_jiaxinglist_callback",
    "policy_jiaxingarticle_callback",
    "policy_jiaxingarticle_etl_callback",
    "policy_sxlist_callback",
    "policy_sxarticle_callback",
    "policy_sxarticle_etl_callback",
    "policy_jinhualist_callback",
    "policy_jinhua1list_callback",
    "policy_jinhuaarticle_callback",
    "policy_jinhuaarticle_etl_callback",
    "policy_qzlist_callback",
    "policy_qzlist1_callback",
    "policy_qzarticle_callback",
    "policy_qzarticle_etl_callback",
    "policy_zhoushanlist_callback",
    "policy_zhoushanarticle_callback",
    "policy_zhoushanarticle_etl_callback",
    "policy_zjtzlist_callback",
    "policy_zjtzarticle_callback",
    "policy_zjtzarticle_etl_callback",
    "policy_lishuilist_callback",
    "policy_lishuiarticle_callback",
    "policy_lishuiarticle_etl_callback",

    # --- later additions / assorted ETL-only callbacks ---
    "policy_pbcarticle_etl_callback",
    "policy_miitlist_callback",
    "policy_miitlist1_callback",
    "policy_miitarticle_callback",
    "policy_miitarticle_etl_callback",
    "policy_nhcarticle_etl_callback",
    "policy_customsarticle_etl_callback",
    "policy_gansuarticle_etl_callback",
]



def clean_pubdate(value):
    """Normalize a free-form date string to an 8-digit ``YYYYMMDD`` string.

    All non-digit characters are stripped, the result is truncated/zero-padded
    to 8 characters, and an implausible month (>12) or day (>31) is zeroed out
    together with the digits after it.

    :param value: raw date text (may be ``None`` or empty).
    :return: 8-digit string, or ``''`` when *value* is falsy.
    """
    if not value:
        return ''
    # Raw string for the regex: the original '\D' relies on a deprecated
    # invalid-escape fallback in non-raw strings.
    digits = re.sub(r'\D', '', value)
    digits = digits[:8].ljust(8, '0')
    if int(digits[4:6]) > 12:
        # Month out of range -> keep only the year part.
        digits = digits[:4] + '0000'
    if int(digits[6:]) > 31:
        # Day out of range -> keep year and month only.
        digits = digits[:6] + '00'
    return digits


def cleaned(value):
    """Collapse a scraped value (string or list of strings) to stripped text.

    :param value: a string, a list of strings, or any falsy value.
    :return: stripped string; list items are stripped and space-joined.
             Falsy input yields ``''``.
    """
    if not value:
        return ""
    # isinstance() is the idiomatic type check (the original used
    # ``type(value) is list``) and also accepts list subclasses.
    if isinstance(value, list):
        return ' '.join(part.strip() for part in value).strip()
    return value.strip()


def judge_url(url):
    """Return ``True`` when *url* should be rejected as an attachment link.

    A URL is rejected when it is overlong, has no path component, contains a
    blacklisted fragment (mail/js/search-engine/social links, full-width
    punctuation from mis-parsed Chinese text, ...), or ends with a suffix that
    marks it as a page rather than a downloadable file.
    """
    if len(url) > 500:
        return True
    # No '/' left after dropping the scheme separator -> no real path.
    if '/' not in url.replace('//', ''):
        return True
    blacklisted_fragments = (
        'mailt', 'data:image/', 'javascript:', '#', 'weixin.qq',
        '.baidu', '。', '@163', '.cn/）', '8080）', 'cn）',
        'cn，', 'com，', 'cn,', 'haosou.', 'www.so.', 'file://',
        'C:', 'baike.soso', 'weibo.com', 'baike.sogou', 'html）',
        'shtml）', 'phtml）', 'wx.qq.', 'bing.com',
    )
    if any(fragment in url for fragment in blacklisted_fragments):
        return True
    # endswith() accepts a tuple of alternatives.
    if url.endswith(('/', '.net', '.asp', '.shtml', '/share', '.exe', '.xml',
                     'pdf}', 'jpg}')):
        return True
    last_segment = url.split('/')[-1].lower()
    if not last_segment:
        return True
    # Suffixes that identify an HTML page / site root, not a file attachment.
    # NOTE: 'xhtml' intentionally has no leading dot (matches original).
    page_suffixes = ('.htm', '.shtml', '.jhtml', '.org', 'xhtml', '.phtml',
                     '.cn', '.com', '.html', '.mht', '.html%20')
    if last_segment.endswith(page_suffixes):
        return True
    # Short '.jsp...' tails (e.g. '.jsp', '.jsp?x=1' is long enough to pass).
    if '.jsp' in last_segment and len(last_segment.split('.', 1)[1]) < 7:
        return True

    return False


def get_file_info(data, res, xpath):
    """Collect de-duplicated attachment descriptors from an article body.

    Scans ``<a href>`` links and ``src`` attributes under *xpath* in the
    parsel selector *res*, resolves them against ``data['provider_url']``,
    filters them through :func:`judge_url`, and returns one dict per unique
    URL with ``url``/``name``/``pub_year``/``keyid`` keys.
    """
    base_url = data['provider_url']
    pub_year = data['pub_year']
    keyid = data['keyid']
    seen_urls = list()
    files = list()

    # Anchor tags: name is the anchor's visible text.
    for anchor in res.xpath(f'{xpath}//a'):
        raw_href = anchor.xpath('@href').extract_first()
        if not raw_href or not raw_href.strip():
            continue
        raw_href = raw_href.strip()
        try:
            abs_url = parse.urljoin(base_url, raw_href)
        except:
            continue
        if judge_url(abs_url) or abs_url in seen_urls:
            continue
        seen_urls.append(abs_url)
        link_text = ''.join(anchor.xpath('.//text()').extract()).strip()
        files.append({'url': abs_url, 'name': link_text,
                      'pub_year': pub_year, 'keyid': keyid})

    # Embedded resources (images etc.): name is the raw src value.
    for raw_src in res.xpath(f'{xpath}//*/@src').extract():
        if not raw_src.strip():
            continue
        raw_src = raw_src.strip()
        abs_url = parse.urljoin(base_url, raw_src)
        if judge_url(abs_url) or abs_url in seen_urls:
            continue
        seen_urls.append(abs_url)
        files.append({'url': abs_url, 'name': raw_src,
                      'pub_year': pub_year, 'keyid': keyid})
    return files


def deal_sql_dict(sql_dict):
    """Strip bookkeeping columns from a task row dict, in place.

    Removes the auto-generated / runtime columns so the remaining fields can
    be re-inserted as a fresh task row.

    :param sql_dict: dict produced by ``callmodel.sql_model.dict()``;
                     mutated in place.
    :return: the same dict object, for call-chaining convenience.
    """
    for key in ("id", "update_time", "create_time", "null_dicts",
                "err_msg", "other_dicts", "state", "failcount"):
        # pop with default: the original raised KeyError when a column was
        # absent; tolerate partial dicts instead.
        sql_dict.pop(key, None)
    return sql_dict


def init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider):
    """Build the base metadata record shared by every policy article.

    :param rawid: source-site record id.
    :param lngid: platform-wide long id (also used as keyid).
    :param sub_db_id: sub-database id string.
    :param down_date_str: download timestamp string; first 8 chars are the date.
    :param product: product code.
    :param zt_provider: provider code for the ZT channel.
    :return: dict of constant and derived metadata fields.
    """
    return {
        'rawid': rawid,
        'rawid_mysql': rawid,
        'lngid': lngid,
        'keyid': lngid,
        'product': product,
        'sub_db': 'POLICY',
        'sub_db_id': sub_db_id,
        'provider': 'CNGOV',
        'zt_provider': zt_provider,
        'source_type': '16',
        'latest_date': down_date_str[:8],
        'batch': down_date_str,
        'vision': '1',
        'is_deprecated': '0',
        'country': 'CN',
        'language': 'ZH',
    }


def init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year):
    """Build the full-text record that accompanies an article's metadata.

    :param lngid: platform-wide long id (also used as keyid and filename stem).
    :param sub_db_id: sub-database id string.
    :param down_date_str: download timestamp string; first 8 chars are the date.
    :param fulltext: extracted HTML full text.
    :param pub_year: publication year string.
    :return: dict describing one HTML full-text entry.
    """
    return {
        'lngid': lngid,
        'keyid': lngid,
        'sub_db_id': sub_db_id,
        'source_type': '16',
        'latest_date': down_date_str[:8],
        'batch': down_date_str,
        'is_deprecated': '0',
        'filename': f"{lngid}.html",
        'fulltext_type': "html",
        'fulltext_addr': '',
        'fulltext_size': '',
        'fulltext_txt': fulltext,
        'page_cnt': "1",
        'pub_year': pub_year,
    }


#   中央人民政府网
def policy_govlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the central government site (www.gov.cn) search API.

    Parses the JSON payload stored under ``para_dicts["data"]["1_1"]``, derives
    the total page count from the largest of the three result buckets, schedules
    the remaining list pages (``befor_dicts``), and emits one article-stage task
    per list item (``next_dicts``).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        data = para_dicts["data"]["1_1"]
        # Paging follows the largest bucket; 5 items per page.
        max_count = max([
            data['searchVO']['catMap']['gongwen']['totalCount'],
            data['searchVO']['catMap']['zhongyangfile']['totalCount'],
            data['searchVO']['catMap']['otherfile']['totalCount'],
        ])
        total_page = max_count // 5 + 1
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        if (turn_page == 7 and page_index < 1) or (turn_page == 8 and page_index < 2):
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Consistency: reuse deal_sql_dict() instead of repeating the same
            # eight pop() calls inline.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())

            for page in range(page_index, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        items = list()
        items.extend(data['searchVO']['catMap']['gongwen']['listVO'])
        items.extend(data['searchVO']['catMap']['zhongyangfile']['listVO'])
        items.extend(data['searchVO']['catMap']['otherfile']['listVO'])
        if items:
            # Hoisted out of the loop: the payload is loop-invariant (the
            # original re-applied it once per item).
            result.befor_dicts.update.update({'page': total_page})
        for item in items:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = item['url']
            # Raw string + escaped dot: the original's bare '.' matched any
            # character before 'htm'.
            rawid = re.findall(r'content_(.*?)\.htm', url)[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99001'
            article_json["url"] = url
            article_json["title"] = item.get('title', '')
            article_json["pubtimeStr"] = item.get('pubtimeStr', '')
            article_json["pcode"] = item.get('pcode', '')
            article_json["childtype"] = item.get('childtype', '')
            article_json["index"] = item.get('index', '')
            article_json["puborg"] = item.get('puborg', '')
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_govlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the gov.cn column listing (sousuo.gov.cn/column/30469).

    Parses the HTML list page, derives the page count from the '<li>共N' pager
    text (falling back to 79 when the pager is absent), schedules the remaining
    list pages, and emits one article-stage task per list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw-string pattern: the original's '\d' in a plain string relies on a
        # deprecated invalid-escape fallback.
        max_count = re.findall(r'<li>共(\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 79
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        if (turn_page == 7 and page_index < 1) or (turn_page == 8 and page_index < 2):
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Consistency: reuse deal_sql_dict() instead of repeating the same
            # eight pop() calls inline.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())

            for page in range(1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = '{}'
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="listTxt"]/li')
        if li_list:
            # Hoisted out of the loop: the payload is loop-invariant (the
            # original re-applied it once per item).
            result.befor_dicts.update.update({'page': total_page})
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('h4/a/@href').extract_first()
            base_url = 'http://sousuo.gov.cn/column/30469/1.htm'
            url = parse.urljoin(base_url, href)
            if 'content' not in url:
                continue
            rawid = re.findall(r'_(.*?)\.htm', url)[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99001'
            article_json["url"] = url
            article_json["title"] = li.xpath('h4/a/text()').extract_first()
            article_json["pubtimeStr"] = li.xpath('h4/span/text()').extract_first()
            article_json["pcode"] = ''
            article_json["childtype"] = ''
            article_json["index"] = ''
            article_json["puborg"] = ''
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_govlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback handling several gov.cn list variants.

    The variant is selected by substrings of ``sql_model.list_rawid``:
    'search-zhengce' (JSON search API), 'zhengce/xxgk/ghxx' (plan index page),
    or a plain sousuo.gov.cn column id. Page count comes either from the
    '>共N页<' pager text or from the JSON 'zhutitag_total' item count
    (10 items per page). Schedules remaining pages on page 0 and emits one
    article task per list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Pager variant 1: '>共N页<' is already a page count.
        max_count = re.findall('>共(\d+)页<', para_dicts["data"]["1_1"]['html'])
        if not max_count:
            # Pager variant 2: 'zhutitag_total' is an item count, 10 per page.
            max_count = re.findall('"zhutitag_total":(\d+)', para_dicts["data"]["1_1"]['html'])
            max_count = int(max_count[0]) if max_count else 1
            total_page = math.ceil(max_count/10)
        else:
            max_count = int(max_count[0]) if max_count else 1
            total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only the first page fans out tasks for the remaining pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Each source family encodes paging differently in list_json.
                if 'zhengce/xxgk/ghxx' in callmodel.sql_model.list_rawid:
                    dic = {"url_part": f"{list_json['url_part']}","page_info": f"{list_json['page_info']}"}
                elif 'search-zhengce' in callmodel.sql_model.list_rawid:
                    dic = {"url_part": f"{list_json['url_part']}","page_info": f"page_index={page+1}"}
                else:
                    dic = {"url_part": f"{list_json['url_part']}","page_info": f"/{page}.htm"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        if 'search-zhengce' in callmodel.sql_model.list_rawid:
            # JSON API variant: response body is a JSON document, items under 'data'.
            html_json = json.loads(para_dicts["data"]["1_1"]['html'])
            li_list = html_json['data']
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li['url']
                base_url = f'http://www.gov.cn/zhengce/xxgk/index.htm'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall('_(.*?)\.htm', url)[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99001'
                article_json["url"] = url
                article_json["title"] = li['title']
                article_json["pubtimeStr"] = li['pubtime']
                article_json["pcode"] = li['tagno']
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # HTML variant: two known list layouts share the extraction logic.
            res = Selector(text=para_dicts["data"]["1_1"]['html'])
            li_list = res.xpath('//ul[@class="listTxt"]/li|//div[@class="list list_1 list_2"]/ul/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('h4/a/@href').extract_first()
                if not href:
                    continue
                if 'xxgk/ghxx' in callmodel.sql_model.list_rawid:
                    base_url = f'http://www.gov.cn/zhengce/xxgk/ghxx/gjfzzlhgh.htm'
                else:
                    base_url = f'http://sousuo.gov.cn/column/{callmodel.sql_model.list_rawid}/0.htm'
                url = parse.urljoin(base_url, href)
                if 'content' not in url:
                    continue
                rawid = re.findall('_(.*?)\.htm', url)[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99001'
                article_json["url"] = url
                article_json["title"] = li.xpath('h4/a/text()').extract_first().strip()
                if 'zhengce/xxgk/ghxx' in callmodel.sql_model.list_rawid:
                    # The plan index page has no per-item date.
                    article_json["pubtimeStr"] = ''
                else:
                    article_json["pubtimeStr"] = li.xpath('h4/span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_govlist3_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the gov.cn open-information JSON API (gjgzk/ssgz).

    Parses the JSON response, reads the total page count from
    ``result.data.pager.pageCount`` on the first page, fans out the remaining
    pages, and emits one article-stage task per result row.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            total_page = int(html_json['result']['data']['pager']['pageCount'])
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            # Removed an unused `list_json = json.loads(...)` local that was
            # left over from a commented-out per-page payload rewrite.
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Every page reuses the original list_json payload unchanged.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = html_json['result']['data']['list']
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            # Prefer the canonical publication URL; fall back to the CMS field.
            href = li.get('doc_pub_url', "")
            if not href:
                href = li.get('f_2022916202501', "")
            if not href:
                continue
            base_url = f'http://www.gov.cn/zhengce/xxgk/gjgzk/ssgz.htm?=undefined&dataTypeId=92'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # The CMS document id is stable across URL changes, so use it as
            # the rawid instead of parsing the URL.
            rawid = li['_id']
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99001'
            article_json["url"] = url
            article_json["title"] = li['f_202289693314']
            organ = li.get('f_2022916396656', "")
            if not organ:
                organ = li['f_20221110328821']
            article_json["organ"] = organ
            article_json["pub_date"] = ''
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_govlist4_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the gov.cn 'zhengce/xxgk' JSON API.

    Parses the JSON response, reads the total page count from
    ``result.data.pager.pageCount`` on the first page, fans out the remaining
    pages, and emits one article-stage task per result row.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            total_page = int(html_json['result']['data']['pager']['pageCount'])
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            # Removed an unused `list_json = json.loads(...)` local that was
            # left over from a commented-out per-page payload rewrite.
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Every page reuses the original list_json payload unchanged.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = html_json['result']['data']['list']
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.get('pub_url', "")
            if not href:
                continue
            base_url = f'https://www.gov.cn/zhengce/xxgk/'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid is parsed from the last path segment, e.g.
            # 'content_12345.htm' -> '12345' (raw string + escaped dot; the
            # original's bare '.' matched any character).
            rawid = re.findall(r'_(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99001'
            article_json["url"] = url
            article_json["title"] = li['maintitle']
            article_json["organ"] = '国务院办公厅'
            article_json["pub_date"] = li['publish_time']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_govlist5_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse the gov.cn "zuixin" policy list page.

    Reads the total page count from the embedded ``nPageCount`` script
    variable, schedules the remaining list pages on the first pass
    (``page_index == 0``) and emits one next-stage task row per article
    link found in the news box.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'nPageCount = (\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # NOTE(review): the range stops at total_page - 1; follow-up pages
            # appear to be home_1.htm .. home_{n-1}.htm with the entry page
            # counted as page 0 — confirm against the site's pager.
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"home_{page}.htm"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="news_box"]/div/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('h4/a/@href').extract_first()
            if not href:
                # No link in this item — skip before building the URL.
                continue
            base_url = 'https://www.gov.cn/zhengce/zuixin/home_1.htm'
            url = parse.urljoin(base_url, href)
            if '.htm' not in url:
                continue
            # rawid is the trailing "_<id>." token of the page file name.
            rawid = re.findall(r'_(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99001'
            article_json["url"] = url
            article_json["title"] = li.xpath('h4/a/text()').extract_first().strip()
            pub_date = li.xpath('h4/span[@class="date"]/text()').extract_first()
            article_json["pub_date"] = pub_date.replace('[', '').replace(']', '')

            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result

def policy_govlist6_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse the nppa.gov.cn regulation list (table layout).

    Emits one next-stage task row per article link; this list variant
    performs no pagination here.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//table/tr')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('td[1]/a/@href').extract_first()
            if not href:
                # Header rows / rows without a link — skip before urljoin.
                continue
            base_url = 'https://www.nppa.gov.cn/xxfb/zcfg/gfxwj/index.html'
            url = parse.urljoin(base_url, href)
            if '.htm' not in url:
                continue
            # rawid is the document file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99001'
            article_json["url"] = url
            article_json["title"] = li.xpath('td[1]/a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('td[2]/text()').extract_first().strip()

            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_govarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage hook for gov.cn; no parse work happens at this stage."""
    return DealModel()


def policy_govarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for gov.cn (and nppa.gov.cn) policy article pages.

    Locates the fulltext container across the many site templates, recovers
    the publication date (from list metadata, from Chinese-numeral dates in
    the fulltext, or from the URL), scrapes the structured metadata fields
    and emits rows for ``policy_latest`` / ``policy_fulltext_latest`` plus
    an update carrying attachment info back to the task row.

    Raises:
        Exception: when no fulltext container or no publication date can be
            found — the task is left for retry/inspection.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title'].strip()
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json.get('pubtimeStr', ''))
    pub_year = pub_date[:4]
    res = Selector(text=html)
    if 'www.nppa.gov.cn' in provider_url:
        # nppa list rows carry the date in "pub_date".
        pub_date = clean_pubdate(article_json.get('pub_date', ''))
        pub_year = pub_date[:4]
        fulltext_xpath = '//div[@class="tpl-content"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    elif article_json.get('organ', ''):
        # Known fulltext containers across gov.cn site templates, tried in
        # order; the first that matches wins.  If none match,
        # fulltext_xpath is left at the last candidate (as before).
        candidate_xpaths = [
            '//div[@id="UCAP-CONTENT"]',
            '//div[@class="detail-con"]',
            '//div[@class="fgk_text"]',
            '//div[@class="gzk_conentdwc_nr"]',
            '//div[@class="gzk-xl-article"]',
            '//div[@class="gzk-con-c"]',
            '//div[@class="gzk-content"]',
            '//div[@class="gzk-article-con"]',
            '//div[@class="gzk-detail-content"]',
            '//div[@class="gzkMainCon pr"]',
            '//div[@class="gzkwz_con"]',
            '//div[@class="gzxltx"]',
            '//div[@class="gz-zw"]',
            '//div[@class="gz-detail-con"]',
            '//div[@class="gznr-cont"]',
            '//div[@id="gz"]',
            '//div[@class="g24 content"]',
            '//div[@class="hdsrmzf-count clearfix"]',
            '//div[contains(@class,"j-fontContent ")]',
            '//div[@id="info_content"]',
            '//div[@id="kr-body"]',
            '//div[@id="main_article"]',
            '//div[@class="m-txt-article"]',
            '//div[@class="mt60 pl35 pr35 pb35"]',
            '//div[@class="mc ohd clearfix"]',
            '//div[@class="m-gk-artcle gz-article"]',
            '//div[@class="nxgz-detail-con"]',
            '//div[@class="newsconParse"]',
            '//div[@class="ruleTextC"]',
            '//div[@class="rule_main"]',
            '//div[@class="section_zc"]',
            '//div[@class="sxgzk-detail-inn"]',
            '//div[@id="xxgkgzwjk-xqy-nr"]',
            '//div[@class="xxgk-container"]',
            '//div[@class="ycszfgz-detail-con"]',
            '//div[@id="zoom"]',
            '//div[@id="zoomcon"]',
            '//div[@class="zc_article_con"]',
            '//div[@class="zfxxgk_content"]',
            '//div[@class="zfxxgk_gzk_xl_articleCon"]',
            '//div[@class="zwnr"]',
            '//div[@class="u-con"]',
            '//div[@class="view zfxxgk_zn"]',
            '//div[@class="wjgk detailCont"]',
            '//div[@class="wz_zhuti"]',
            '//div[@class="wz3"]',
            '//div[@class="wenZhang"]',
            '//div[@class="y-content"]',
            '//div[@class="article-body"]',
            '//div[@class="articleCon"]',
            '//div[@class="article no_border"]',
            '//div[@class="article"]',
            '//div[@class="body"]',
            '//div[@class="content-body"]',
            '//div[@class="content-wrap"]',
            '//div[@class="content"]|//div[@id="content"]|//div[@id="content_div"]',
        ]
        fulltext = None
        for fulltext_xpath in candidate_xpaths:
            fulltext = res.xpath(fulltext_xpath).extract_first()
            if fulltext:
                break
        # Site-specific overrides keyed on a substring of the URL; applied
        # in order, each matching entry replaces the result (last wins).
        url_specific = [
            (('huangshi.',),
             '//div[@id="artTit"]|//div[@id="artCon"]'),
            (('.dt.',),
             '//h3[@id="contetntitle"]|//h4/ucaptitle/parent::h4|//div[@class="articlecontent"]'),
            (('anshan.', 'dandong.', 'chaoyang.'),
             '//div[@class="text01"]|//div[@class="text02"]|//div[@class="text"]'),
            (('yingtan.',),
             '//div[@class="zw_title"]|//div[@class="wzzw"]'),
            (('xianning.',),
             '//div[@class="gz_header_titbg"]|//div[@class="article-con"]'),
            (('chengde.', 'cangzhou.'),
             '//div[@class="gz_title"]|//div[@class="gz_two_title"]|//div[@class="gz_info"]'),
            (('ganzhou.',),
             '//div[@class="gz-title"]|//div[@class="gz-subtitle"]|//div[@class="gz-article"]'),
            (('yichun.',),
             '//div[@class="gz_title"]|//div[@class="show_content"]'),
            (('weifang.', 'hanzhong.'),
             '//div[@class="rule_up"]/following::div[1]'),
        ]
        for needles, xpath in url_specific:
            if any(needle in provider_url for needle in needles):
                fulltext_xpath = xpath
                fulltext = ' '.join(res.xpath(xpath).extract())
        if not fulltext:
            raise Exception('no fulltext container matched: ' + provider_url)
        # Chinese-numeral digit map used to normalize dates found in the
        # fulltext (e.g. 二〇二〇年十二月三十一日).
        date_dict = {"O": "0", "〇": "0", "○": "0", "Ｏ": "0", "0": "0", "零": "0", "一": "1", "二": "2", "三": "3",
                     "四": "4", "五": "5", "六": "6", "七": "7", "八": "8", "九": "9", "十": "10", "十一": "11",
                     "十二": "12", "十三": "13", "十四": "14", "十五": "15", "十六": "16", "十七": "17",
                     "十八": "18", "十九": "19", "一十": "10", "一十一": "11", "一十二": "12", "一十三": "13",
                     "一十四": "14", "一十五": "15", "一十六": "16", "一十七": "17", "一十八": "18", "一十九": "19",
                     "二十": "20", "二十一": "21", "二十二": "22", "二十三": "23", "二十四": "24", "二十五": "25",
                     # 二十八/廿八 are 28 (the original "18" was a copy-paste bug).
                     "二十六": "26", "二十七": "27", "二十八": "28", "二十九": "29", "廿": "20", "廿一": "21",
                     "廿二": "22", "廿三": "23", "廿四": "24", "廿五": "25", "廿六": "26", "廿七": "27",
                     "廿八": "28", "廿九": "29", "三十": "30", "三十一": "31", }
        fulltext_str_info = ''.join(res.xpath(f'({fulltext_xpath})//text()').extract())
        fulltext_str = re.sub(r'\s', '', fulltext_str_info)
        re_str = r'[O〇○Ｏ0零一二三四五六七八九十廿]{4}年[O〇○Ｏ0零一二三四五六七八九十廿]{1,3}月[O〇○Ｏ0零一二三四五六七八九十廿]{1,3}[日,号]|\d{4}年\d{1,2}月\d{1,2}[日,号]|\d{4}[\-,/]\d{1,2}[\-,/]\d{1,2}'
        pub_date_info = re.findall(re_str, fulltext_str)
        if not pub_date_info:
            if 'www.gov.cn/zhengce' in provider_url:
                # zhengce URLs embed the date (e.g. .../2020-01/02/...).
                pub_date = clean_pubdate(provider_url)
                pub_year = pub_date[:4]
            else:
                raise Exception('no publication date found in fulltext')
        else:
            date_list = list()
            for raw_date in pub_date_info:
                parts = re.split(r'[年,月, 日,号,\-,/]', raw_date)
                year = ''.join([date_dict.get(ch, ch) for ch in parts[0]])
                month = date_dict.get(parts[1], parts[1]).rjust(2, '0')
                day = date_dict.get(parts[2], parts[2]).rjust(2, '0')
                date_list.append(year + month + day)
            now_date = time.strftime('%Y%m%d', time.localtime())
            # Pick the first candidate that is not in the future.
            pub_date = ''
            for candidate in date_list:
                if candidate > now_date:
                    continue
                pub_date = str(int(candidate))
                pub_year = pub_date[:4]
                break
        if not pub_date:
            pub_date_info = ''.join(res.xpath('//span[@class="date"]/b/text()|//span[@class="time"]/text()').extract()).strip()
            pub_date = clean_pubdate(pub_date_info)
            pub_year = pub_date[:4]
        if not pub_date:
            raise Exception('publication date missing: ' + provider_url)
    else:
        # Older templates without organ metadata.
        fulltext_xpath = '//td[@class="b12c"]|//div[@class="pages_content"]|//td[@class="p1"]|//div[@id="UCAP-CONTENT"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if not fulltext:
            raise Exception('no fulltext container matched: ' + provider_url)
        title = cleaned(res.xpath('//b[contains(text(),"标　　题")]/parent::td/following::td[1]/text()').extract_first())
        if not title:
            title = cleaned(res.xpath('//h1/text()').extract_first())
        if not title:
            title = article_json['title'].strip()
        if not pub_date:
            pub_date_info = ''.join(res.xpath('//div[@class="pages-date"]/text()').extract()).strip()
            pub_date = clean_pubdate(pub_date_info)
            pub_year = pub_date[:4]
    if not pub_date:
        pub_date_info = ''.join(res.xpath('//b[contains(text(),"发布日期：")]/parent::td/following::td[1]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]

    # Structured metadata from the document-info table (spacing in the
    # labels matches the pages' markup and must not be normalized).
    pub_no = ''.join(res.xpath('//b[contains(text(),"发文字号：")]/parent::td/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//b[contains(text(),"索  引  号：")]/parent::td/following::td[1]/text()').extract()).strip()
    if not index_no:
        index_no = ''.join(res.xpath('//b[contains(text(),"索 引 号：")]/parent::td/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//b[contains(text(),"发文机关：")]/parent::td/following::td[1]/text()').extract()).strip()
    if article_json.get('organ', ''):
        # List-stage organ (if present) takes precedence over the page.
        organ = article_json.get('organ', '')
    written_date = cleaned(res.xpath('//b[contains(text(),"成文日期")]/parent::td/following::td[1]/text()').extract_first())
    subject_word = cleaned(res.xpath('//b[contains(text(),"主  题  词：")]/parent::td/following::td[1]/text()').extract_first())
    subject = ''.join(res.xpath('//b[contains(text(),"主题分类：")]/parent::td/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//b[contains(text(),"时　　效：")]/parent::td/following::td[1]//text()').extract()).strip()

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99001'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CNGOV"
    zt_provider = "cngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # debug trace of the generated record id

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment info (if any) is written back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   中华人民共和国国家发展和改革委员会
def policy_ndrclist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse the NDRC search-API list response (JSON, 20 hits per page).

    Publishes the discovered page count via ``code_dicts``, schedules the
    follow-up page tasks on the early passes (gated by ``turn_page``) and
    emits one next-stage task row per HTML article hit on ndrc.gov.cn.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        data = para_dicts["data"]["1_1"]
        max_count = data['data']['totalHits']
        total_page = math.ceil(max_count / 20)  # the API serves 20 hits per page
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        if (turn_page == 7 and page_index < 1) or (turn_page == 8 and page_index < 2):
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Strip bookkeeping columns before re-inserting list tasks.
            # NOTE(review): sibling callbacks use deal_sql_dict for this —
            # presumably it removes the same keys; confirm and unify.
            for key in ("id", "update_time", "create_time", "null_dicts",
                        "err_msg", "other_dicts", "state", "failcount"):
                sql_dict.pop(key)

            for page in range(page_index, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        items = data['data'].get('resultList', [])
        for item in items:
            # Record the discovered page total on the current task row
            # (only happens when at least one hit is present, as before).
            result.befor_dicts.update.update({'page': total_page})
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = item['url']
            # Only HTML detail pages hosted on ndrc are crawlable downstream.
            if '.pdf' in url or '.doc' in url or "ndrc" not in url:
                continue
            rawid = re.findall(r'_(.*?)\.htm', url)[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99002'
            article_json["url"] = url
            article_json["title"] = item.get('title', '')
            article_json["docDate"] = item.get('docDate', '')
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_ndrclist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse an NDRC zfxxgk directory list page (table layout).

    Reads the total page count from the "共N页" pager text, schedules the
    remaining pages on the first pass (``page_index == 1``) and emits one
    next-stage task row per directory entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'>共(\d+)页，', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # The follow-up pages reuse the original list_json unchanged.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="zwxxkg-result"]/table/tbody/tr')[1:]  # [0] is the header row
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('td[2]/a/@href').extract_first()
            base_url = 'https://zfxxgk.ndrc.gov.cn/web/dirlist.jsp'
            url = parse.urljoin(base_url, href)

            # rawid is the query-string id of the directory item.
            rawid = url.split('id=')[-1]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99002'
            article_json["url"] = url
            article_json["title"] = li.xpath('td[2]/a/text()').extract_first().strip()
            article_json["docDate"] = ''.join(li.xpath('td[3]/text()').extract()).strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_ndrclist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse an ndrc.gov.cn static list page (u-list layout).

    Reads the page count from the ``createPageHTML(N`` pager script,
    schedules the remaining pages on the first pass (``page_index == 0``)
    and emits one next-stage task row per article link.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'createPageHTML\((\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # NOTE(review): the range stops at total_page - 1; follow-up pages
            # appear to be <page_info>_1 .. _{n-1} with the entry page counted
            # as page 0 — confirm against the site's pager.
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="u-list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                continue
            base_url = f'https://www.ndrc.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid is the trailing "_<id>.htm" token of the page URL.
            rawid = re.findall(r'_(.*?)\.htm', url)[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99002'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["docDate"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_ndrcarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage hook for NDRC; no parse work happens at this stage."""
    return DealModel()


def policy_ndrcarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for NDRC (www.ndrc.gov.cn) policy article pages.

    Parses the fetched article HTML into one `policy_latest` metadata row and
    one `policy_fulltext_latest` row, and writes any attachment links found on
    the page back onto the crawl record's `other_dicts` column.

    :param callmodel: callback model carrying the fetched HTML (`para_dicts`)
        and the originating crawl row (`sql_model`).
    :raises Exception: when the article body cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['docDate'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: try each known page template in turn, fall back to the list-page title.
    title = ''.join(res.xpath('//h2[@class="article_title"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//table[@class="info-table"]//td[contains(text(),"事项名称：")]/following::td[1]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//b/font[@size="5"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    index_no = ''.join(res.xpath('//table[@class="info-table"]//td[contains(text(),"索引号：")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="info-table"]//td[contains(text(),"主办单位：")]/following::td[1]/text()').extract()).strip()

    # The article body lives in one of several containers depending on template age.
    fulltext_xpath = '//div[@class="article"]|//div[@class="zhengwen"]|//div[contains(@class,"article_con")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found for {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99002'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "NDRC"
    zt_provider = "ndrccngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['organ'] = organ
    data['index_no'] = index_no

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Collect attachments from the body plus the dedicated attachment containers.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, '(//div[@class="attachment"])')
    file_info3 = get_file_info(data, res, '(//table[@class="enclosure"])')
    file_info = file_info1 + file_info2 + file_info3
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Ministry of Science and Technology of the People's Republic of China (MOST)
def policy_mostlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Entry list-page callback for MOST (www.most.gov.cn) policy documents.

    On the first fetch it fans out one crawl row per remaining list page
    (`index_<page>`), and for every entry on the current page it queues an
    article-stage row keyed by the rawid parsed from the article URL.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string so `\d` is a regex escape, not a Python escape warning.
        max_count = re.findall(r'countPage = (\d+);', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 46  # fallback: last observed page count
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        if (turn_page == 7 and page_index < 1) or (turn_page == 8 and page_index < 2):
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Strip bookkeeping columns before re-inserting rows for the other pages.
            for key in ("id", "update_time", "create_time", "null_dicts",
                        "err_msg", "other_dicts", "state", "failcount"):
                sql_dict.pop(key)

            for page in range(1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"index_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@id="data_list"]/li')
        if li_list:
            # Loop-invariant: record the page total once instead of per entry.
            result.befor_dicts.update.update({'page': total_page})
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.most.gov.cn/xxgk/xinxifenlei/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)

            rawid = re.findall(r'_(.*?)\.htm', url)[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99003'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/@title').extract_first()
            article_json["file_num"] = li.xpath('div[2]/text()').extract_first()
            article_json["create_date"] = li.xpath('div[3]/text()').extract_first()
            article_json["effect"] = li.xpath('div[4]/text()').extract_first()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_mostlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Secondary list-page callback for MOST sub-channels.

    On page 0 it fans out rows for the remaining pages (`page_info` gets a
    `_<page>` suffix); every list entry on the current page is queued as an
    article-stage row keyed by the rawid parsed from the article URL.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string so `\d` is a regex escape, not a Python escape warning.
        max_count = re.findall(r'countPage = (\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="list-main"]/ul/li|//ul[@id="data_list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                continue
            base_url = f'https://www.most.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            # Only article pages (…_<rawid>.htm) are queued.
            if 'htm' not in url:
                continue
            rawid = re.findall(r'_(.*?)\.htm', url)[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99003'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["create_date"] = ''.join(li.xpath('span/text()|div[contains(@class,"w_list_rq")]/text()').extract()).strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_mostarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """MOST article-stage callback: no extra processing, just hand back an empty DealModel."""
    return DealModel()


def policy_mostarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for MOST policy article pages.

    Extracts metadata (title, dates, issuing organ, document number, validity)
    from the info table or the page header, builds `policy_latest` /
    `policy_fulltext_latest` rows, and records attachment info on the crawl row.

    :raises Exception: when no publish date or no article body can be found.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']

    res = Selector(text=html)
    # Title: info-table layout first, then mobile layout, then the list-page title.
    title = "".join(res.xpath('//b[contains(text(),"标　　题")]/parent::td/following::td[1]//text()').extract()).strip()
    if not title:
        title = "".join(res.xpath('//div[@class="xxgk_title"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_date = clean_pubdate(res.xpath('//b[contains(text(),"发布日期")]/parent::td/following::td[1]/text()').extract_first())
    pub_year = pub_date[:4]
    organ = cleaned(res.xpath('//b[contains(text(),"发文机构")]/parent::td/following::td[1]/text()').extract_first())
    pub_no = cleaned(res.xpath('//b[contains(text(),"发文字号")]/parent::td/following::td[1]/text()').extract_first())
    written_date = cleaned(res.xpath('//b[contains(text(),"成文日期")]/parent::td/following::td[1]/text()').extract_first())
    legal_status = cleaned(res.xpath('//b[contains(text(),"有 效 性")]/parent::td/following::td[1]/text()').extract_first())
    index_no = cleaned(res.xpath('//b[contains(text(),"索  引  号：")]/parent::td/following::td[1]/text()').extract_first())
    # Older templates carry the date in the page header instead of the info table.
    if not pub_date:
        pub_date = clean_pubdate(res.xpath('//div[@class="pages-date"]/text()').extract_first())
        pub_year = pub_date[:4]
    if not pub_date:
        pub_date = clean_pubdate(''.join(res.xpath('//div[@class="gray12 lh22"]/text()').extract()))
        pub_year = pub_date[:4]
    if not pub_date:
        raise Exception(f'pub_date not found for {provider_url}')
    if not organ:
        organ_info = res.xpath('//table[@class="bd1"]/tbody/tr[2]/td[2]/text()|//div[@class="pages-date"]/span/text()').extract_first()
        organ = cleaned(organ_info).split('：')[-1].strip()

    fulltext_xpath = '//div[@id="Zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found for {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99003'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "MOST"
    zt_provider = "mostcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment links (or an empty marker) on the originating crawl row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Ministry of Human Resources and Social Security of the PRC (MOHRSS)
def policy_mohrsslist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Entry list-page callback for the MOHRSS policy document library.

    The list is a table whose cells come in groups of three per entry:
    date cell, title/link cell, file-number cell. On the first fetch it fans
    out one crawl row per page (20 records per page); each entry is queued as
    an article-stage row.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string so `\d` is a regex escape, not a Python escape warning.
        max_count = re.findall(r'm_nRecordCount = (\d+);', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 931  # fallback: last observed record count
        total_page = math.ceil(max_count/20)
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        if (turn_page == 7 and page_index < 1) or (turn_page == 8 and page_index < 2):
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Strip bookkeeping columns before re-inserting rows for the other pages.
            for key in ("id", "update_time", "create_time", "null_dicts",
                        "err_msg", "other_dicts", "state", "failcount"):
                sql_dict.pop(key)

            for page in range(1, total_page+1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="iframe-list"]//td')
        if li_list:
            # Loop-invariant: record the page total once instead of per entry.
            result.befor_dicts.update.update({'page': total_page})
        # Cells come in triples: [date, title/link, file number].
        for index in range(0, len(li_list), 3):
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li_list[index+1].xpath('a/@href').extract_first()
            url = parse.urljoin('http://www.mohrss.gov.cn/xxgk2020/zcwjk/', href)
            rawid = re.findall(r'_(.*?)\.htm', url)[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99004'
            article_json["url"] = url
            article_json["title"] = li_list[index+1].xpath('a/text()').extract_first()
            article_json["file_num"] = li_list[index+2].xpath('text()').extract_first()
            article_json["create_date"] = li_list[index].xpath('span/text()').extract_first()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_mohrsslist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Secondary list-page callback for MOHRSS sub-channels.

    Handles two paging schemes: the `countPage` scheme (one page per index)
    and the `m_nRecordCount` scheme (20 records per page, used by the
    `govsearch` document library). On page 0 it fans out rows for the
    remaining pages; each list entry is queued as an article-stage row.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw strings so `\d` is a regex escape, not a Python escape warning.
        max_count = re.findall(r'countPage = (\d+)', para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall(r'm_nRecordCount = (\d+)', para_dicts["data"]["1_1"]['html'])
            max_count = int(max_count[0]) if max_count else 1
            total_page = math.ceil(max_count/20)
        else:
            max_count = int(max_count[0]) if max_count else 1
            total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # govsearch paginates via a query param; other channels via the file name.
                if 'govsearch' in callmodel.sql_model.list_rawid:
                    dic = {"page_info": f"page={page+1}"}
                else:
                    dic = {"page_info": list_json['page_info'].replace('.', f"_{page}.")}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="serviceMainListConType"]/div|//div[@id="iframe-list"]/table/tr|//ul[@class="rsb_con_rightUl"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('div[2]/span/a/@href|td[2]/a/@href|a/@href').extract_first()
            if not href:
                continue
            if 'govsearch' in callmodel.sql_model.list_rawid:
                base_url = f'http://www.mohrss.gov.cn/xxgk2020/zcwjk/'
            else:
                base_url = f'http://www.mohrss.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            # Only article pages (…_<rawid>.htm) are queued.
            if 'htm' not in url:
                continue
            rawid = re.findall(r'_(.*?)\.htm', url)[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99004'
            article_json["url"] = url
            article_json["title"] = li.xpath('div[2]/span/a/text()|td[2]/a/text()|a/text()').extract_first().strip()
            article_json["create_date"] = li.xpath('div[3]/span/text()|td[1]/span/text()|span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_mohrssarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """MOHRSS article-stage callback: no extra processing, just hand back an empty DealModel."""
    return DealModel()


def policy_mohrssarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for MOHRSS policy article pages.

    Extracts metadata from the page's `info_table`, builds `policy_latest` /
    `policy_fulltext_latest` rows, and records attachment info on the crawl row.

    :raises Exception: when the article body cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['create_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: page header first, fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="artT"]//text()|//div[@class="insMainConTitle_b"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="info_table"]//div[contains(text(),"发文字号")]/following::div[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="info_table"]//div[contains(text(),"索") and contains(text(),"号")]/following::div[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="info_table"]//div[contains(text(),"分") and contains(text(),"类")]/following::div[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="info_table"]//div[contains(text(),"是否有效")]/following::div[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="info_table"]//div[contains(text(),"发布单位")]/following::div[1]/text()').extract()).strip()

    # The article body lives in one of several containers depending on template.
    fulltext_xpath = '//div[@class="gz_content"]|//div[@id="insMainConTxt"]|//div[contains(@class,"art_p")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found for {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99004'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "MOHRSS"
    zt_provider = "mohrsscngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment links (or an empty marker) on the originating crawl row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Ministry of Ecology and Environment of the People's Republic of China (MEE)
def policy_meelist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Entry list-page callback for the MEE policy document library.

    The list is a table whose cells come in groups of three per entry:
    date cell, title/link cell, file-number cell. On the first fetch it fans
    out one crawl row per page (20 records per page); each entry is queued as
    an article-stage row.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string so `\d` is a regex escape, not a Python escape warning.
        max_count = re.findall(r'm_nRecordCount = (\d+);', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 14558  # fallback: last observed record count
        total_page = max_count//20 + 1
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        if (turn_page == 7 and page_index < 1) or (turn_page == 8 and page_index < 2):
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Strip bookkeeping columns before re-inserting rows for the other pages.
            for key in ("id", "update_time", "create_time", "null_dicts",
                        "err_msg", "other_dicts", "state", "failcount"):
                sql_dict.pop(key)

            for page in range(1, total_page+1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="iframe-list"]//td')
        if li_list:
            # Loop-invariant: record the page total once instead of per entry.
            result.befor_dicts.update.update({'page': total_page})
        # Cells come in triples: [date, title/link, file number].
        for index in range(0, len(li_list), 3):
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li_list[index+1].xpath('a/@href').extract_first()
            url = parse.urljoin('https://www.mee.gov.cn/govsearch/wenjiankujs.jsp', href)
            rawid = re.findall(r'_(.*?)\.', url)[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99005'
            article_json["url"] = url
            article_json["title"] = li_list[index+1].xpath('a/text()').extract_first()
            article_json["file_num"] = li_list[index+2].xpath('text()').extract_first()
            article_json["create_date"] = li_list[index].xpath('span/text()').extract_first()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_meelist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Secondary list-page callback for MEE sub-channels.

    On page 0 it fans out rows for the remaining pages (`page_info` gets a
    `_<page>` suffix); every list entry on the current page is queued as an
    article-stage row keyed by the rawid parsed from the article URL.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string so `\d` is a regex escape, not a Python escape warning.
        max_count = re.findall(r'countPage = (\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@id="div"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                continue
            base_url = f'https://www.mee.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            # Only article pages (…_<rawid>.htm) are queued.
            if 'htm' not in url:
                continue
            rawid = re.findall(r'_(.*?)\.', url)[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99005'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["create_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_meearticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """MEE article-stage callback: no extra processing, just hand back an empty DealModel."""
    return DealModel()


def policy_meearticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for MEE policy article pages.

    Extracts metadata from the `content_top_box` header, builds `policy_latest`
    / `policy_fulltext_latest` rows, and records attachment info (minus search
    redirects and signed links) on the crawl row.

    :raises Exception: when the article body cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['create_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: desktop header first, then mobile layout, then the list-page title.
    title = "".join(
        res.xpath('//div[@class="content_top_box"]//span[contains(text(),"名　　称")]/parent::div[1]/p/text()').extract()).strip()
    if not title:
        title = "".join(res.xpath('//h1[@class="cjcs_phone_title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath(
        '//div[@class="content_top_box"]//span[contains(text(),"文　　号")]/parent::div[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath(
        '//div[@class="content_top_box"]//span[contains(text(),"索 引 号")]/parent::div[1]/text()').extract()).strip()
    subject = ''.join(res.xpath(
        '//div[@class="content_top_box"]//span[contains(text(),"分　　类")]/parent::div[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="content_top_box"]//span[contains(text(),"生成日期")]/parent::div[1]/text()').extract()).strip()
    organ = ''.join(res.xpath(
        '//div[@class="content_top_box"]//span[contains(text(),"发布机关")]/parent::div[1]/i/text()').extract()).strip()

    fulltext_xpath = '//div[@class="content_body_box"]|//div[@class="neiright_JPZ_GK_CP"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found for {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99005'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "MEE"
    zt_provider = "meecngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    # Drop search-redirect and signed download links; keep only real attachments.
    new_file = [i for i in file_info
                if '&sign=' not in i['url'] and 'html?keywords=' not in i['url']]
    if new_file:
        di_model_bef.update.update({"other_dicts": json.dumps(new_file, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   中华人民共和国交通运输部
def policy_motlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the MOT (交通运输部) policy crawler.

    Derives the total page count from the ``createPageHTML`` pagination
    script, queues the remaining list pages (depending on the turn-page
    mode), and queues one article task per visible list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # raw string: '\(' / '\d' are regex escapes, not string escapes
        max_count = re.findall(r'createPageHTML\((\d+),', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 67  # fallback: last known page count
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        if (turn_page == 7 and page_index < 1) or (turn_page == 8 and page_index < 2):
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # strip auto-managed / per-row columns before re-inserting
            for key in ("id", "update_time", "create_time", "null_dicts",
                        "err_msg", "other_dicts", "state", "failcount"):
                sql_dict.pop(key)

            for page in range(1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"list_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="overview"]/ul/li')
        if li_list:
            # loop-invariant hoisted: record the discovered page count once
            result.befor_dicts.update.update({'page': total_page})
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            url = parse.urljoin('https://xxgk.mot.gov.cn/2020/jigou/', href)
            # rawid: token between the last '_' and the file extension
            rawid = re.findall(r'_(.*?)\.', url)[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99006'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/@title').extract_first()
            article_json["create_date"] = li.xpath('a/font/text()').extract_first()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_motlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Secondary list-page callback for MOT sub-channel lists.

    Reads the total page count from the ``createPageHTML`` pagination
    script, queues the remaining pages when on the first page, and queues
    one article task per list entry. The '2022zhengcejd' channel has a
    different base URL and rawid layout and is special-cased.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # raw string: '\(' / '\d' are regex escapes, not string escapes
        max_count = re.findall(r'createPageHTML\((\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}", "url_part": list_json['url_part']}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="fl w100 right_list"]/li|//div[@class="list-group"]/a')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href|@href').extract_first()
            if not href:
                continue
            if '2022zhengcejd' in callmodel.sql_model.list_rawid:
                # this channel has one fixed entry page
                base_url = 'https://www.mot.gov.cn/2022zhengcejd/index.html'
            else:
                base_url = f'https://xxgk.mot.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if '2022zhengcejd' in callmodel.sql_model.list_rawid:
                # rawid is the parent directory name in this channel's URLs
                rawid = url.split('/')[-2]
            else:
                rawid = re.findall(r'_(.*?)\.', url)[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99006'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/span/text()|span[1]/text()').extract_first().strip()
            if '2022zhengcejd' in callmodel.sql_model.list_rawid:
                article_json["create_date"] = ''
            else:
                article_json["create_date"] = li.xpath('a/font/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_motarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for MOT: no extra scheduling work is needed,
    so an empty DealModel is returned (parsing happens in the ETL step)."""
    return DealModel()


def policy_motarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for MOT article pages: extract metadata and full text.

    Builds rows for ``policy_latest`` / ``policy_fulltext_latest`` and
    writes attachment info back into the row's ``other_dicts`` column.

    Raises:
        Exception: when no full-text container node can be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['create_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # title: page header -> ArticleTitle meta -> list-page title, in that order
    title = ''.join(res.xpath('//h1[@class="fl w100 gkzn_right_tit"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    if not pub_date:
        pub_date_info = ''.join(res.xpath('//div[@class="container"]//h4//text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    pub_no = ''.join(res.xpath(
        '//div[@class="fl w100 main_xl_header hidden-xs"]//label[contains(text(),"文号：")]/following::div[1]/p/text()').extract()).strip()
    index_no = ''.join(res.xpath(
        '//div[@class="fl w100 main_xl_header hidden-xs"]//label[contains(text(),"索引号：")]/following::div[1]/p/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="fl w100 main_xl_header hidden-xs"]//label[contains(text(),"主题分类：")]/following::div[1]/p/text()').extract()).strip()
    subject_word = ''.join(res.xpath('//div[@class="fl w100 main_xl_header hidden-xs"]//label[contains(text(),"主题词：")]/following::div[1]/p/text()').extract()).strip()
    trade_name = ''.join(res.xpath('//div[@class="fl w100 main_xl_header hidden-xs"]//label[contains(text(),"行业分类：")]/following::div[1]/p/text()').extract()).strip()
    organ = ''.join(res.xpath(
        '//div[@class="fl w100 main_xl_header hidden-xs"]//label[contains(text(),"机构分类：")]/following::div[1]/p/text()').extract()).strip()

    fulltext_xpath = '//div[@id="Zoom"]|//div[@class="list-group"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("MOT article full text not found")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99006'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "MOT"
    zt_provider = "motcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['subject_word'] = subject_word
    data['trade_name'] = trade_name

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # attachments: body container plus the dedicated attachment ("fj") boxes
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, '(//div[contains(@class,"fj")])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   中华人民共和国农业农村部 — Ministry of Agriculture and Rural Affairs of the PRC
def policy_moalist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the MOA (农业农村部) policy crawler.

    Derives the total page count from the ``m_nRecordCount`` script
    variable (20 records per page), queues the remaining list pages
    (depending on the turn-page mode), and queues one article task per
    list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # raw string keeps '\d' a regex escape
        max_count = re.findall(r'm_nRecordCount = (\d+);', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 36628  # fallback: last known record count
        # NOTE(review): overshoots by one page when max_count is an exact
        # multiple of 20 — kept as-is to preserve existing paging behavior.
        total_page = max_count//20 + 1
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        if (turn_page == 7 and page_index < 1) or (turn_page == 8 and page_index < 2):
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # strip auto-managed / per-row columns before re-inserting
            for key in ("id", "update_time", "create_time", "null_dicts",
                        "err_msg", "other_dicts", "state", "failcount"):
                sql_dict.pop(key)

            for page in range(1, total_page+1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="commonlist"]/li')
        if li_list:
            # loop-invariant hoisted: record the discovered page count once
            result.befor_dicts.update.update({'page': total_page})
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            url = parse.urljoin('http://www.moa.gov.cn/govpublic/', href)
            if '.htm' not in url:
                continue
            rawid = re.findall(r'_(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99007'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first()
            article_json["create_date"] = li.xpath('span/text()').extract_first()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_moalist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Secondary list-page callback for MOA sub-channel lists.

    Reads the total page count from the ``countPage`` script variable,
    queues the remaining pages when on the first page, and queues one
    article task per list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # raw string keeps '\d' a regex escape
        max_count = re.findall(r'countPage = (\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="commonlist"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                continue
            base_url = f'http://www.moa.gov.cn/{callmodel.sql_model.list_rawid}/index.htm'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            rawid = re.findall(r'_(.*?)\.', url)[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99007'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/font/text()|a/text()').extract_first().strip()
            article_json["create_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_moaarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for MOA: no extra scheduling work is needed,
    so an empty DealModel is returned (parsing happens in the ETL step)."""
    return DealModel()


def policy_moaarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for MOA article pages: extract metadata and full text.

    Builds rows for ``policy_latest`` / ``policy_fulltext_latest`` and
    writes attachment info back into the row's ``other_dicts`` column.

    Raises:
        Exception: when no full-text container node can be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['create_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # title: "信息名称" field -> page header -> list-page title, in that order
    title = cleaned(res.xpath('//dt[contains(text(),"信息名称")]/following::dd[1]/text()').extract_first())
    if not title:
        title = "".join(res.xpath('//h2[@class="xxgk_title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # publish date: prefer the on-page date, then fall back through
    # several alternate page layouts
    new_date = cleaned(res.xpath('//*[@class="pubtime"]/text()').extract_first())
    if new_date:
        pub_date = clean_pubdate(new_date)
        pub_year = pub_date[:4]
    if not pub_date:
        pub_date = clean_pubdate(res.xpath('//dt[contains(text(),"发布日期")]/following::dd[1]/text()').extract_first())
        pub_year = pub_date[:4]
    if not pub_date:
        pub_date_info = res.xpath('//p[@class="pubtime"]/text()|//span[contains(text(),"日期：")]/text()').extract_first()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    if not pub_date:
        pub_date = clean_pubdate(res.xpath('//div[contains(text(),"发布日期")]/following::div[1]/text()').extract_first())
        pub_year = pub_date[:4]
    pub_no = ''.join(res.xpath(
        '//div[@class="content_head mhide"]//dt[contains(text(),"文　　号")]/following::dd[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath(
        '//div[@class="content_head mhide"]//dt[contains(text(),"索 引  号")]/following::dd[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="content_head mhide"]//dt[contains(text(),"生效日期")]/following::dd[1]/text()').extract()).strip()
    organ = ''.join(res.xpath(
        '//div[@class="content_head mhide"]//dt[contains(text(),"所属单位")]/following::dd[1]/text()').extract()).strip()

    fulltext_xpath = '//div[contains(@class,"arc_body")]|//div[@class="txtbox gknb"]|//div[@class="gsj_htmlcon_bot"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("MOA article full text not found")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99007'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "MOA"
    zt_provider = "moacngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # attachments: body container plus the dedicated attachment boxes
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, '(//div[@class="nyb_fj nyb_fj1"]|//div[@class="nyb_fj"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   中华人民共和国文化和旅游部 — Ministry of Culture and Tourism of the PRC
def policy_mctlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the MCT (文化和旅游部) policy crawler.

    Reads the total page count from the ``countPage`` script variable,
    queues the remaining list pages when on the first page, and queues
    one article task per list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # raw string keeps '\d' a regex escape
        max_count = re.findall(r'countPage = (\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 15  # fallback: last known page count
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        list_json = json.loads(callmodel.sql_model.list_json)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # strip auto-managed / per-row columns before re-inserting
            for key in ("id", "update_time", "create_time", "null_dicts",
                        "err_msg", "other_dicts", "state", "failcount"):
                sql_dict.pop(key)

            for page in range(1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="mesgopen2"]/li')
        if li_list:
            # loop-invariant hoisted: record the discovered page count once
            result.befor_dicts.update.update({'page': total_page})
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://zwgk.mct.gov.cn/{callmodel.sql_model.list_rawid}/index_3081.html'
            url = parse.urljoin(base_url, href)

            rawid = re.findall(r'_(.*?)\.htm', url)[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99008'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first()
            article_json["create_date"] = li.xpath('span/text()').extract_first()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_mctarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for MCT: no extra scheduling work is needed,
    so an empty DealModel is returned (parsing happens in the ETL step)."""
    return DealModel()


def policy_mctarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for MCT article pages: extract metadata and full text.

    Builds rows for ``policy_latest`` / ``policy_fulltext_latest`` and
    writes attachment info back into the row's ``other_dicts`` column.

    Raises:
        Exception: when no full-text container node can be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['create_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    title = "".join(res.xpath('//h2[@class="xxgk_title"]//text()').extract()).strip()
    # an over-long "title" means the header xpath grabbed body text — discard it
    if len(title) > 100:
        title = ''
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath(
        '//div[@class="content_head mhide"]//dl/dt[text()="文"]/following::dd[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath(
        '//div[@class="content_head mhide"]//dl/dt[text()="索"]/following::dd[1]/text()').extract()).strip()
    subject = ''.join(res.xpath(
        '//div[@class="content_head mhide"]//dl/dt[text()="分"]/following::dd[1]/text()').extract()).strip()
    subject_word = ''.join(res.xpath('//div[@class="content_head mhide"]//dl/dt[text()="主"]/following::dd[1]/text()').extract()).strip()
    # normalize whitespace-separated keywords into ';'-separated form
    subject_word = re.sub(' +', ';', subject_word)
    organ = ''.join(res.xpath(
        '//div[@class="content_head mhide"]//dl/dt[text()="发文机关："]/following::dd[1]/text()').extract()).strip()

    fulltext_xpath = '//div[@class="gsj_htmlcon_bot"]|//div[@class="main_htmlcon"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("MCT article full text not found")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99008'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "MCT"
    zt_provider = "mctcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['subject_word'] = subject_word

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # attachments are collected from the full-text container only
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   中华人民共和国教育部 — Ministry of Education of the PRC
def policy_moelist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the MOE (教育部) policy crawler.

    Handles two pagination-script variants (``recordCount=N`` at 15
    records per page and ``recordCount = N;`` at 20 per page), queues the
    remaining list pages when on the first page, and queues one article
    task per list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # raw strings keep '\d' a regex escape
        max_count = re.findall(r'recordCount=(\d+)', para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall(r'recordCount = (\d+);', para_dicts["data"]["1_1"]['html'])
            max_count = int(max_count[0]) if max_count else 1
            total_page = math.ceil(max_count / 20)
        else:
            max_count = int(max_count[0]) if max_count else 1
            total_page = math.ceil(max_count / 15)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # 'search' channels page via a query parameter, the rest
                # via index_<n>.html files
                if 'search' in callmodel.sql_model.list_rawid:
                    dic = {"page_info": f"page={page+1}"}
                else:
                    dic = {"page_info": f"index_{page}.html"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@id="list"]/li|//dl[@id="list"]/dd|//div[@class="scy_lbsj-right-nr"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                continue
            if 'search' in callmodel.sql_model.list_rawid:
                base_url = 'http://www.moe.gov.cn/was5/web/search?channelid=239993&page=1'
            else:
                base_url = f'http://www.moe.gov.cn/{callmodel.sql_model.list_rawid}index.htm'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid: trailing token of the filename, with or without a '_'
            if '_' in url.split('/')[-1]:
                rawid = re.findall(r'_(.*?)\.htm', url.split('/')[-1])[0]
            else:
                rawid = url.split('/')[-1].split('.')[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99010'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["create_date"] = ''.join(li.xpath('a/span/text()|span/text()').extract()).strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_moearticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for MOE: no extra scheduling work is needed,
    so an empty DealModel is returned (parsing happens in the ETL step)."""
    return DealModel()


def policy_moearticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for MOE article pages: extract metadata and full text.

    Tries several full-text container layouts in turn before giving up;
    builds rows for ``policy_latest`` / ``policy_fulltext_latest`` and
    writes attachment info back into the row's ``other_dicts`` column.

    Raises:
        Exception: when no full-text container node can be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['create_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # title: ArticleTitle meta -> list-page title
    title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    if not pub_date:
        pub_date = clean_pubdate(res.xpath('//td[contains(text(),"生成日期")]/following-sibling::td[1]/text()').extract_first())
        pub_year = pub_date[:4]
    pub_no = ''.join(res.xpath(
        '//td[contains(text(),"发文字号")]/following-sibling::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath(
        '//td[contains(text(),"信息索引")]/following-sibling::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath(
        '//td[contains(text(),"信息类别")]/following-sibling::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//td[contains(text(),"生成日期")]/following-sibling::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath(
        '//td[contains(text(),"发文机构")]/following-sibling::td[1]/text()').extract()).strip()

    # full text: try each known page layout in turn
    fulltext_xpath = '//div[@id="xxgk_content_div"]|//div[@class="TRS_Editor"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@id="moe-detail-box"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@id="downloadContent"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("MOE article full text not found")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99010'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "MOE"
    zt_provider = "moecngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # attachments are collected from whichever container matched above
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   中华人民共和国工业和信息化部 — Ministry of Industry and Information Technology of the PRC
def policy_miitlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for MIIT (www.miit.gov.cn) search results.

    Parses the JSON search response, fans out tasks for all list pages
    (only when handling page 1), and queues one article-stage task per
    search hit.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Base fields copied onto every queued article task.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        # Total hit count; the site serves 10 results per page.
        tcount = html_json['data']['searchResult']['total']
        total_page = math.ceil(tcount/10)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list pages, so
            # the fan-out happens exactly once per listing.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # NOTE(review): list_json is only needed by the commented-out
            # page_info variant below; currently unused.
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # dic = {"page_info": list_json['page_info'].replace(':0', f':{page}')}
                # sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                sql_dict["list_json"] = callmodel.sql_model.list_json
                # .copy() so each queued row keeps its own page_index.
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = html_json['data']['searchResult']['dataResults']
        for li in li_list:
            temp = info_dicts.copy()
            # Promote the next-stage tag: the queued row belongs to the
            # article stage, so task_tag_next becomes its task_tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li['groupData'][0]['data']['url']
            base_url = f'https://www.miit.gov.cn'
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                # Skip links that are not .htm article pages.
                continue
            # rawid is the token between the last "_" and the "." of the
            # final path segment, e.g. ".../art_123_456.html" -> "456".
            rawid = re.findall('_(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99011'
            article_json["url"] = url
            article_json["title"] = li['groupData'][0]['data']['title']
            article_json["pub_date"] = li['groupData'][0]['data'].get('publishtime', '')
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_miitlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for MIIT column pages served through the
    jpaas-publish gateway.

    Reads the page/row counts out of the JSON-escaped HTML payload,
    schedules the remaining list pages when on page 1, and queues an
    article-stage task per list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Base fields copied onto every queued article task.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # The payload is JSON-escaped HTML, so attribute quotes appear as
        # \" in the raw string; the double-escaped pattern matches that.
        max_count = re.findall('count=\\\\"(\d+)', para_dicts["data"]["1_1"]['html'])
        num = re.findall('rows=\\\\"(\d+)', para_dicts["data"]["1_1"]['html'])
        num = int(num[0]) if num else 10
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count/num)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Fan out tasks for pages 2..total_page exactly once.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # NOTE(review): list_json is only needed by the commented-out
            # page_info variant below; currently unused.
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page+1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # dic = {"page_info": f"{list_json['page_info']}_{page}"}
                # sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        res = Selector(text=html_json['data']['html'])
        # if 'jdhy/zcjd' in callmodel.sql_model.list_rawid:
        li_list = res.xpath('//div[@class="page-content"]//ul/li|//div[@class="page-content"]/div/p')
        for li in li_list:
            temp = info_dicts.copy()
            # Promote the next-stage tag onto the queued article row.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                continue
            # base_url = f'http://fgw.guizhou.gov.cn/{callmodel.sql_model.list_rawid}.html'
            base_url = f'https://www.miit.gov.cn/api-gateway/jpaas-publish-server/front/page/build/unit'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid: token between the last "_" and "." of the file name.
            rawid = re.findall('_(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99011'
            article_json["url"] = url
            # NOTE(review): extract_first() may return None and raise
            # AttributeError on .strip(); presumably that failure marks the
            # task for retry — confirm before hardening.
            article_json["title"] = li.xpath('a/@title').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_miitarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for MIIT; parsing is deferred to the ETL
    step, so this acknowledges the task with an empty deal model."""
    return DealModel()


def policy_miitarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for MIIT article pages.

    Parses the article HTML for title, dates and xxgk-box metadata, builds
    rows for ``policy_latest`` and ``policy_fulltext_latest``, and writes
    attachment info back onto the task row through ``other_dicts``.

    Raises:
        Exception: when the full-text container cannot be located, so the
            task is marked failed and can be retried.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    # Defaults taken from the list page; overridden below when the article
    # page itself carries the field.
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    title = "".join(res.xpath('//span[contains(text(),"标　　题")]/following::span[1]/text()').extract()).strip()
    if not title:
        # BUGFIX: was '//h1[id="con_title"]', which tests a child <id>
        # element (never matches); the attribute test requires '@id'.
        title = "".join(res.xpath('//h1[@id="con_title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_date = clean_pubdate(res.xpath('//span[contains(text(),"发布日期")]/following::span[1]/text()').extract_first())
    pub_year = pub_date[:4]
    if not pub_date:
        # Fallback layout: date embedded in a "发布时间：YYYY-MM-DD" span.
        pub_date_info = cleaned(res.xpath('//span[contains(text(),"发布时间")]/text()').extract_first())
        pub_date = clean_pubdate(pub_date_info.split('发布时间：')[-1][:10])
        pub_year = pub_date[:4]
    # Document metadata out of the public-information (xxgk) box.
    pub_no = ''.join(res.xpath(
        '//div[@class="xxgk-box"]//span[contains(text(),"发文字号")]/following::span[1]/text()').extract()).strip()
    subject = ''.join(res.xpath(
        '//div[@class="xxgk-box"]//span[contains(text(),"分　　类：")]/following::span[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="xxgk-box"]//span[contains(text(),"成文日期")]/following::span[1]/text()').extract()).strip()
    organ = ''.join(res.xpath(
        '//div[@class="xxgk-box"]//span[contains(text(),"发文机关")]/following::span[1]/text()').extract()).strip()

    fulltext_xpath = '//div[@id="con_con"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Give the failure a message instead of a bare Exception.
        raise Exception(f"MIIT fulltext container not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99011'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "MIIT"
    zt_provider = "miitcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment scan inside the full-text container; persist the result
    # on the task row so downloads can be scheduled.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   中华人民共和国民政部
def policy_mcalist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for MCA (xxgk.mca.gov.cn) JSONP search results.

    Unwraps the JSONP payload, reports the page count via ``code_dicts``,
    fans out the remaining list pages when on page 1, and queues one
    article-stage task per result row.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Base fields copied onto every queued article task.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Strip the JSONP wrapper "callback(...);" and parse the body.
        data = json.loads(re.findall(r'\((.*)\);', para_dicts["data"]["1_1"]['html'])[0])

        total_page = data['totalPageNum']
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Drop the DB-managed / per-run columns before re-inserting.
            # NOTE(review): sibling callbacks use deal_sql_dict() for this;
            # verify equivalence before consolidating.
            sql_dict.pop("id")
            sql_dict.pop("update_time")
            sql_dict.pop("create_time")
            sql_dict.pop("null_dicts")
            sql_dict.pop("err_msg")
            sql_dict.pop("other_dicts")
            sql_dict.pop("state")
            sql_dict.pop("failcount")

            for page in range(page_index, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        items = data['resultMap']
        if items:
            # Hoisted out of the loop below: the value never changes per
            # item, and the guard preserves the original "only set when at
            # least one item exists" behaviour.
            result.befor_dicts.update.update({'page': total_page})
        for item in items:
            temp = info_dicts.copy()
            # Promote the next-stage tag onto the queued article row.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = f"http://xxgk.mca.gov.cn:8011/gdnps/pc/content.jsp?id={item['id']}&mtype=1"
            temp["rawid"] = item['id']
            temp["sub_db_id"] = '99012'
            article_json["url"] = url
            article_json["title"] = item['title']
            article_json["wh"] = item.get('wh', "")
            article_json["publishTime"] = item.get('publishTime', "")
            article_json["syh"] = item.get('syh', "")

            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_mcaarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for MCA; all parsing happens in the ETL
    step, so an empty deal model is returned unchanged."""
    return DealModel()


def policy_mcaarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for MCA article pages.

    The article endpoint returns JSONP; metadata and the HTML full text
    are read from the embedded ``resultMap`` record instead of being
    scraped from markup.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title'].strip()
    provider_url = article_json['url']

    # Keep only the leading date portion of publishTime.
    # NOTE(review): assumes publishTime starts with YYYYMMDD — confirm.
    pub_date = cleaned(article_json['publishTime'][:8])
    # Unwrap the JSONP wrapper "callback({...});" and parse the JSON.
    text = re.findall('\((\{.*?\})\);', html)[0]
    json_info = json.loads(text)['resultMap'][0]

    pub_year = pub_date[:4]
    # NOTE(review): key meanings inferred from usage — 'wh' document
    # number, 'xxly' issuing organ, 'syh' index number, 'ztc' subject
    # words; confirm against the site's schema.
    pub_no = json_info.get('wh', '')
    organ = json_info.get('xxly', '')
    index_no = json_info.get('syh', '')
    subject = json_info.get('subjectName', '')
    subject_word = json_info.get('ztc', '')
    # Normalise runs of spaces to the ';' separator convention.
    subject_word = re.sub(' +', ';', subject_word)
    fulltext = json_info.get('htmlContent', '')

    # Build the policy_latest row by hand.
    # NOTE(review): this duplicates the fields init_data() builds for the
    # sibling callbacks; verify equivalence before consolidating.
    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99012'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    print(lngid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'MCA'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'mcacngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['subject_word'] = subject_word
    save_data.append({'table': 'policy_latest', 'data': data})

    # Companion full-text row for policy_fulltext_latest.
    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'

    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Scan the JSON-provided HTML body for attachments and persist the
    # result on the task row via other_dicts.
    res = Selector(text=fulltext)
    file_info = get_file_info(data, res, '')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_mcalist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for MCA HTML notice listings (www.mca.gov.cn).

    Reads the total page count out of a "totalpage" script variable,
    schedules the remaining list pages when on page 1, and queues an
    article-stage task per table row.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Base fields copied onto every queued article task.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall('totalpage = "(\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Fan out tasks for pages 2..total_page exactly once.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # NOTE(review): list_json appears unused here — confirm.
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page +1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # The page suffix the fetcher appends to the list URL.
                dic = {"page_info": f"?{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if 'xxgkzcjd' in callmodel.sql_model.list_rawid:
        li_list = res.xpath('//table[@class="article"]/tr')
        for li in li_list:
            temp = info_dicts.copy()
            # Promote the next-stage tag onto the queued article row.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('td[2]/a/@href').extract_first()
            if not href:
                continue
            base_url = f'https://www.mca.gov.cn/article/xw/tzgg/'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid is the file name without its extension.
            rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99012'
            article_json["url"] = url
            # NOTE(review): extract_first() may return None and raise on
            # .strip(); presumably the failure marks the task for retry.
            article_json["title"] = li.xpath('td[2]/a/text()').extract_first().strip()
            article_json["create_date"] = li.xpath('td[3]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_mcaarticle1_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for MCA notice pages; parsing happens in
    the ETL step, so this just returns an empty deal model."""
    return DealModel()


def policy_mcaarticle1_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for MCA notice article pages.

    Extracts the title from the page (falling back to the list-page
    title), grabs the full text from the ``#zoom`` container, and emits
    ``policy_latest`` / ``policy_fulltext_latest`` rows plus attachment
    info on the task row.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    # Defaults from the list page.
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['create_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    title = ''.join(res.xpath('//h3[@class="mtitle"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # No full-text container: fail the task so it can be retried.
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99012'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "MCA"
    zt_provider = "mcacngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    # data['pub_no'] = pub_no
    # data['organ'] = organ
    # data['index_no'] = index_no
    # data['written_date'] = clean_pubdate(written_date)
    # data['impl_date'] = clean_pubdate(impl_date)
    # data['invalid_date'] = clean_pubdate(invalid_date)
    # data['subject'] = subject
    # data['subject_word'] = subject_word
    # data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment scan inside the full-text container; persist the result
    # on the task row so downloads can be scheduled.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   中华人民共和国财政部
def policy_moflist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for MOF (fgk.mof.gov.cn) law-database search
    results.

    Reads the paged JSON response, reports the page count via
    ``code_dicts``, fans out the remaining list pages on the first page of
    a crawl (gated by ``turn_page``), and queues one article-stage task
    per record.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Base fields copied onto every queued article task.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        data = para_dicts["data"]["1_1"]['data']

        total_page = int(data['pages'])
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        # NOTE(review): turn_page 7/8 appear to distinguish 0-based vs
        # 1-based paging schemes — confirm against the task config.
        if (turn_page == 7 and page_index < 1) or (turn_page == 8 and page_index < 2):
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Drop the DB-managed / per-run columns before re-inserting.
            # NOTE(review): sibling callbacks use deal_sql_dict() for this;
            # verify equivalence before consolidating.
            sql_dict.pop("id")
            sql_dict.pop("update_time")
            sql_dict.pop("create_time")
            sql_dict.pop("null_dicts")
            sql_dict.pop("err_msg")
            sql_dict.pop("other_dicts")
            sql_dict.pop("state")
            sql_dict.pop("failcount")

            for page in range(page_index, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        items = data['records']
        if items:
            # Hoisted out of the loop below: the value never changes per
            # item, and the guard preserves the original "only set when at
            # least one record exists" behaviour.
            result.befor_dicts.update.update({'page': total_page})
        for item in items:
            temp = info_dicts.copy()
            # Promote the next-stage tag onto the queued article row.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = f"http://fgk.mof.gov.cn/ui/src/views/law_html/{item['id']}.html"
            temp["rawid"] = item['id']
            temp["sub_db_id"] = '99013'
            article_json["url"] = url
            article_json["title"] = item['title']
            article_json["lwh"] = item.get('lwh', "")
            article_json["lBuDate"] = item.get('lBuDate', "")

            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_moflist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for MOF HTML news listings (www.mof.gov.cn).

    Reads the page count from a "countPage" script variable, schedules
    the remaining list pages when on page 0, and queues an article-stage
    task per list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Base fields copied onto every queued article task.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall('countPage = (\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Paging is 0-based here: page 0 is the index page itself.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # NOTE(review): range stops at total_page - 1; presumably the
            # pages are named index, index_1 .. index_{n-1} — confirm.
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if 'xxgkzcjd' in callmodel.sql_model.list_rawid:
        li_list = res.xpath('//ul[@class="xwbd_lianbolistfrcon"]/li|//ul[@class="xwfb_listbox"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            # Promote the next-stage tag onto the queued article row.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                continue
            base_url = f'http://www.mof.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid is the file name without its extension.
            rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99013'
            article_json["url"] = url
            # NOTE(review): extract_first() may return None and raise on
            # .strip(); presumably the failure marks the task for retry.
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["lBuDate"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_mofarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for MOF; parsing is deferred to the ETL
    step, so an empty deal model is returned unchanged."""
    return DealModel()


def policy_mofarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for MOF article pages.

    Handles two page layouts (publicForms and my_conboxzw), falls back to
    the ArticleTitle meta tag when the visible title spans multiple lines,
    and emits ``policy_latest`` / ``policy_fulltext_latest`` rows plus
    attachment info on the task row.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    # Defaults from the list page.
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['lBuDate'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    title = ''.join(
        res.xpath('//h1[@class="publicForms_title"]//text()|//h2[@class="title_con"]//text()').extract()).strip()
    if '\n' in title:
        # A newline means the heading concatenated multiple nodes; the
        # meta tag carries the clean single-line title.
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = cleaned(article_json.get('lwh', ''))

    # Two alternative full-text containers, one per layout.
    fulltext_xpath = '//div[@class="publicForms_box"]|//div[@class="my_conboxzw"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # No full-text container: fail the task so it can be retried.
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99013'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "MOF"
    zt_provider = "mofcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    # data['organ'] = organ
    # data['index_no'] = index_no
    # data['written_date'] = clean_pubdate(written_date)
    # data['impl_date'] = clean_pubdate(impl_date)
    # data['invalid_date'] = clean_pubdate(invalid_date)
    # data['subject'] = subject
    # data['subject_word'] = subject_word
    # data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment scan inside the full-text container; persist the result
    # on the task row so downloads can be scheduled.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   中华人民共和国自然资源部
def policy_mnrlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for MNR standard listings (f.mnr.gov.cn).

    Reads the page count from a "countPage" script variable, fans out the
    remaining list pages on the first page of a crawl (gated by
    ``turn_page``), and queues one article-stage task per list item.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Base fields copied onto every queued article task.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'countPage = (\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 70
        total_page = max_count
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        # NOTE(review): turn_page 7/8 appear to distinguish 0-based vs
        # 1-based paging schemes — confirm against the task config.
        if (turn_page == 7 and page_index < 1) or (turn_page == 8 and page_index < 2):
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Drop the DB-managed / per-run columns before re-inserting.
            # NOTE(review): sibling callbacks use deal_sql_dict() for this;
            # verify equivalence before consolidating.
            sql_dict.pop("id")
            sql_dict.pop("update_time")
            sql_dict.pop("create_time")
            sql_dict.pop("null_dicts")
            sql_dict.pop("err_msg")
            sql_dict.pop("other_dicts")
            sql_dict.pop("state")
            sql_dict.pop("failcount")

            # NOTE(review): range(1, total_page) skips page == total_page;
            # presumably page 0 is the index page already fetched — confirm.
            for page in range(1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"index_3553_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@id="ul"]/li')
        if li_list:
            # Hoisted out of the loop below: the value never changes per
            # item, and the guard preserves the original "only set when at
            # least one item exists" behaviour.
            result.befor_dicts.update.update({'page': total_page})
        for li in li_list:
            temp = info_dicts.copy()
            # Promote the next-stage tag onto the queued article row.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('div/a[2]/@href').extract_first()
            base_url = 'http://f.mnr.gov.cn/'
            url = parse.urljoin(base_url, href)
            if 'mnr' not in url:
                continue
            rawid = re.findall(r'_(.*?)\.htm', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99014'
            article_json["url"] = url
            article_json["title"] = li.xpath('div/a[2]/text()').extract_first()
            # BUGFIX: extract_first() may return None (TypeError on 'in'),
            # and re.findall returns a list while the else-branch stored a
            # string — normalise create_date to the first match or ''.
            date_info = li.xpath('a/text()').extract_first() or ''
            if '发布' in date_info:
                matches = re.findall(r'\d{4}年\d{2}月\d{2}日发布', date_info)
            elif '实施' in date_info:
                matches = re.findall(r'\d{4}年\d{2}月\d{2}日实施', date_info)
            else:
                matches = []
            article_json["create_date"] = matches[0] if matches else ''
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_mnrlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for MNR (Ministry of Natural Resources) sub-lists.

    On the first page (page_index == 0) it schedules list tasks for the
    remaining pages, then extracts every article link on the current page
    and queues it for the article stage (task_tag_next).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page's JS as "countPage = N".
        max_count = re.findall(r'countPage = (\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: fan out one list task per remaining page.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="ky_open_list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                continue
            base_url = f'https://www.mnr.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid is the trailing token of the file name,
            # e.g. ".../t20230101_123456.htm" -> "123456".
            if '_' in url.split('/')[-1]:
                rawid = re.findall(r'_(.*?)\.htm', url.split('/')[-1])[0]
            else:
                rawid = re.findall(r'(.*?)\.htm', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99014'
            article_json["url"] = url
            # NOTE(review): assumes the <a> node always has text; extract_first()
            # returning None would raise AttributeError here — confirm source HTML.
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["create_date"] = ''.join(li.xpath('span/text()').extract()).strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_mnrarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for MNR: performs no work, returns an empty DealModel."""
    return DealModel()


def policy_mnrarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for MNR article pages.

    Parses the article HTML into a policy metadata row and a fulltext row
    (two known page layouts), and writes attachment info back to the task row.

    Raises:
        Exception: when the fulltext container cannot be located, so the
            platform marks the task as failed.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title'].strip()
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json.get('create_date', ''))
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Fall back to on-page publish-date locations when the list stage had none.
    if not pub_date:
        pub_date_info = ''.join(res.xpath('//div[@class="mid-4"]/span[1]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    if not pub_date:
        pub_date_info = ''.join(res.xpath('//table//p[contains(text(),"生成日期")]/following::td[1]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    if '="country"' in html:
        # Newer layout with "mid-*" metadata blocks.
        organ = cleaned(res.xpath('//div[@class="mid-2"]/span[2]/text()').extract_first())
        pub_no = cleaned(res.xpath('//div[@class="mid-2"]/span[1]/text()').extract_first())
        index_no = ''
        written_date = ''
        legal_status = cleaned(res.xpath('//div[@class="mid-4"]/span[4]/text()').extract_first())
        subject = cleaned(res.xpath('//div[@class="mid-2"]/span[3]/text()').extract_first())
    else:
        # Older layout with a metadata table.
        pub_no = ''.join(res.xpath('//table//p[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath('//table//p[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
        subject = ''.join(res.xpath('//table//p[contains(text(),"题")]/following::td[1]/text()').extract()).strip()
        written_date = ''.join(res.xpath('//table//p[contains(text(),"生成日期")]/following::td[1]/text()').extract()).strip()
        legal_status = ''
        organ = ''.join(res.xpath('//table//p[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
        if organ == '其他':
            # BUGFIX: original read `organ == ''` — a no-op comparison.
            # The placeholder organ "其他" ("other") is normalized to empty.
            organ = ''

    fulltext_xpath = '//div[@id="content"]|//div[@id="content1"]|//div[contains(@class,"art_p")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("MNR article fulltext container not found")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99014'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "MNR"
    zt_provider = "mnrcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record any attachment links found inside the fulltext on the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   中华人民共和国住房和城乡建设部 (Ministry of Housing and Urban-Rural Development of the PRC, MOHURD)
def policy_mohurdlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the MOHURD JSON list API.

    On the first pass it schedules tasks for every result page, then queues
    each article entry of the current page for the article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result

    data = para_dicts["data"]["1_1"]['data']
    total_page = int(data['pages'])
    result.code_dicts = {"1_1": {"max_page": total_page}}
    page_index = int(callmodel.sql_model.page_index)
    turn_page = task_info.turn_page
    if (turn_page == 7 and page_index < 1) or (turn_page == 8 and page_index < 2):
        sql_dict = callmodel.sql_model.dict()
        di_model_bef = DealInsertModel()
        di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
        # Strip DB-managed / transient columns before re-inserting rows.
        for field in ("id", "update_time", "create_time", "null_dicts",
                      "err_msg", "other_dicts", "state", "failcount"):
            sql_dict.pop(field)

        for page in range(page_index, total_page + 1):
            sql_dict["page"] = total_page
            sql_dict["page_index"] = page
            di_model_bef.lists.append(sql_dict.copy())
        result.befor_dicts.insert.append(di_model_bef)

    di_model_next = DealInsertModel()
    di_model_next.insert_pre = CoreSqlValue.insert_ig_it
    for item in data['list']:
        result.befor_dicts.update.update({'page': total_page})
        if 'mohurd' not in item['url']:
            continue
        temp = info_dicts.copy()
        temp["task_tag"] = temp.pop("task_tag_next")
        temp["rawid"] = item['id']
        temp["sub_db_id"] = '99015'
        article_json = {
            "url": item['url'],
            "title": item['title'],
            "ofNo": item.get('ofNo', ""),
            "ofDispatchDate": item.get('ofDispatchDate', ""),
            "ofIndex": item.get('ofIndex', ""),
        }
        temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
        di_model_next.lists.append(temp)

    result.next_dicts.insert.append(di_model_next)

    return result


def policy_mohurdlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for MOHURD static column pages.

    On page 1 it schedules list tasks for the remaining pages (page count is
    scraped from the pager text), then queues every article link found in
    the current page's link list.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Pager text looks like "第 x / N 页".
        max_count = re.findall(r'第.*?/ (\d+) 页<', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="linkList-section-wrapper"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                continue
            base_url = f'https://www.mohurd.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid is the trailing token of the file name before ".htm".
            if '_' in url.split('/')[-1]:
                rawid = re.findall(r'_(.*?)\.htm', url.split('/')[-1])[0]
            else:
                rawid = re.findall(r'(.*?)\.htm', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99015'
            article_json["url"] = url
            # NOTE(review): assumes the <a> node always has text — confirm source HTML.
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["create_date"] = ''.join(li.xpath('span/text()').extract()).strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_mohurdlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the MOHURD standards API (JSON response body).

    On page 1 it fans out one list task per result page, then queues each
    article entry of the current page for the article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        total_page = html_json['data']['pages']
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Page tasks carry no extra list context.
                sql_dict["list_json"] = json.dumps({}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        base_url = 'https://www.mohurd.gov.cn/dynamic/admin/standard'
        for li in html_json['data']['list']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = parse.urljoin(base_url, li['url'])
            if 'htm' not in url:
                continue
            # rawid is the token between the last "_" and the extension.
            rawid = re.findall(r'_(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99015'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["create_date"] = ''
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_mohurdlist3_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the MOHURD standards API (JSON response body).

    Same flow as policy_mohurdlist2_callback: fan out page tasks on page 1,
    then queue each article entry for the article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        total_page = html_json['data']['pages']
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Page tasks carry no extra list context.
                sql_dict["list_json"] = json.dumps({}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        base_url = 'https://www.mohurd.gov.cn/dynamic/admin/standard'
        for li in html_json['data']['list']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = parse.urljoin(base_url, li['url'])
            if 'htm' not in url:
                continue
            # rawid is the token between the last "_" and the extension.
            rawid = re.findall(r'_(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99015'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["create_date"] = ''
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_mohurdarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for MOHURD: performs no work, returns an empty DealModel."""
    return DealModel()


def policy_mohurdarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for MOHURD article pages.

    Extracts title / publish date / document-number metadata from the two
    known page layouts ("info-list" table vs "centralInfo-box"), then emits
    a policy row, a fulltext row, and attachment info for the task row.

    Raises:
        Exception: when the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    res = Selector(text=html)

    # Title: try the metadata blocks first, then the share heading,
    # finally fall back to the title captured at list stage.
    title = "".join(res.xpath('//span[contains(text(),"文件名称")]/following::p[1]/text()').extract()).strip()
    if not title:
        title = "".join(res.xpath('//td[contains(text(),"公文名称：")]/following::td[1]/text()').extract()).strip()
    if not title:
        title = "".join(res.xpath('//h3[@id="share-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Publish date: several fallbacks across the page layouts.
    pub_date = clean_pubdate(res.xpath('//span[contains(text(),"发文日期：")]/following::p[1]/text()').extract_first())
    pub_year = pub_date[:4]
    if not pub_date:
        pub_date_info = clean_pubdate(res.xpath('//td[contains(text(),"发文日期：")]/following::td[1]/text()').extract_first())
        pub_date = pub_date_info.split('发文日期：')[-1]
        pub_year = pub_date[:4]
    if not pub_date:
        pub_date_info = clean_pubdate(res.xpath('//span[contains(text(),"发布时间")]/text()').extract_first())
        pub_date = pub_date_info.split('发布时间：')[-1]
        pub_year = pub_date[:4]
    if not pub_date:
        # Last resort: a Chinese "yyyy年mm月dd日" date in the content title.
        # An IndexError here (no date found at all) fails the task, matching
        # the original behavior.
        pub_date_info = ''.join(res.xpath('//div[@class="content-title"]/p//text()').extract())
        pub_date = re.findall(r'\d+年\d+月\d+日', pub_date_info.replace('号', '日'))[0]
        year = re.findall(r'(\d+)年', pub_date)
        year = year[0].rjust(4, '0') if year else '0000'
        month = re.findall(r'(\d+)月', pub_date)
        month = month[0].rjust(2, '0') if month else '00'
        day = re.findall(r'(\d+)日', pub_date)
        day = day[0].rjust(2, '0') if day else '00'
        pub_date = year + month + day
        pub_year = pub_date[:4]
    if 'class="info-list"' in html:
        # Table-based metadata layout.
        pub_no = ''.join(res.xpath(
            '//td[contains(text(),"文") and contains(text(),"号：")]/following::td[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath(
            '//td[contains(text(),"索") and contains(text(),"号：")]/following::td[1]/text()').extract()).strip()
        subject = ''.join(res.xpath(
            '//td[contains(text(),"分") and contains(text(),"类：")]/following::td[1]/text()').extract()).strip()
        subject_word = ''.join(res.xpath(
            '//td[contains(text(),"主") and contains(text(),"词：")]/following::td[1]/text()').extract()).strip()
        invalid_date = ''.join(res.xpath(
            '//td[contains(text(),"废") and contains(text(),"期：")]/following::td[1]/text()').extract()).strip()
        organ = ''.join(res.xpath(
            '//td[contains(text(),"发文单位：")]/following::td[1]/text()').extract()).strip()
    else:
        # "centralInfo-box" metadata layout.
        pub_no = ''.join(res.xpath(
            '//div[@class="centralInfo-box"]//span[contains(text(),"文") and contains(text(),"号")]/following::p[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath(
            '//div[@class="centralInfo-box"]//span[contains(text(),"索") and contains(text(),"号")]/following::p[1]/text()').extract()).strip()
        subject = ''.join(res.xpath(
            '//div[@class="centralInfo-box"]//span[contains(text(),"主题信息")]/following::p[1]/text()').extract()).strip()
        subject_word = ''.join(res.xpath(
            '//div[@class="centralInfo-box"]//span[contains(text(),"主") and contains(text(),"词")]/following::p[1]/text()').extract()).strip()
        invalid_date = ''.join(res.xpath(
            '//div[@class="centralInfo-box"]//span[contains(text(),"有") and contains(text(),"期")]/following::p[1]/text()').extract()).strip()
        organ = ''.join(res.xpath(
            '//div[@class="centralInfo-box"]//span[contains(text(),"发文单位")]/following::p[1]/text()').extract()).strip()

    fulltext_xpath = '//div[@class="editor-content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("MOHURD article fulltext container not found")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99015'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "MOHURD"
    zt_provider = "mohurdcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    data['subject_word'] = subject_word

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments can live in the body or in a dedicated download block.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, f'(//div[@class="editorContent-download"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   中华人民共和国水利部 (Ministry of Water Resources of the PRC, MWR)
def policy_mwrlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for MWR (Ministry of Water Resources) columns.

    Schedules the remaining list pages on the first pass and queues each
    article link from the current page for the article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    # Each list_rawid column resolves relative hrefs against its own index page.
    base_url_map = {
        'zcjd': 'http://www.mwr.gov.cn/zw/zcjd/index.html',
        'zcfg/fl': 'http://www.mwr.gov.cn/zw/zcfg/fl/index.html',
        'zcfg/xzfghfgxwj': 'http://www.mwr.gov.cn/zw/zcfg/xzfghfgxwj/index.html',
        'zcfg/bmgz': 'http://www.mwr.gov.cn/zw/zcfg/bmgz/index_1.html',
        'zcfg/gfxwj': 'http://www.mwr.gov.cn/zw/zcfg/gfxwj/index_1.html',
        'tzgg/tzgs': 'http://www.mwr.gov.cn/zw/tzgg/tzgs/index_1.html',
    }
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page's JS as "countPage = N".
        max_count = re.findall(r'countPage = (\d+)', para_dicts["data"]["1_1"]['html'])
        # NOTE(review): default of 11 pages when the count is missing is kept
        # from the original — confirm it matches the site layout.
        total_page = int(max_count[0]) if max_count else 11
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        if (turn_page == 7 and page_index < 1) or (turn_page == 8 and page_index < 2):
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Strip DB-managed / transient columns before re-inserting rows.
            for field in ("id", "update_time", "create_time", "null_dicts",
                          "err_msg", "other_dicts", "state", "failcount"):
                sql_dict.pop(field)

            for page in range(1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"index_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        base_url = base_url_map.get(callmodel.sql_model.list_rawid,
                                    'http://www.mwr.gov.cn/zw/tzgg/zbgg/index_1.html')
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="slnewsconlist"]/li')
        for li in li_list:
            result.befor_dicts.update.update({'page': total_page})
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            url = parse.urljoin(base_url, href)
            if '.htm' not in url or '_' not in url:
                continue
            rawid = re.findall(r'_(.*?)\.htm', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99016'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first()
            article_json["pub_date"] = li.xpath('span/text()').extract_first()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_mwrarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for MWR: performs no work, returns an empty DealModel."""
    return DealModel()


def policy_mwrarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for MWR article pages.

    Builds a policy metadata row and a fulltext row from the article HTML,
    then records any attachment info back onto the task row.
    """
    result = EtlDealModel()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    # List-stage title as an initial value; re-resolved from the page below.
    title = article_json['title'].strip()
    provider_url = article_json['url']

    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title: prefer the metadata table, then the page heading, then the list title.
    title = "".join(res.xpath('//strong[contains(text(),"名　　称：")]/parent::td/following::td[1]/text()').extract()).strip()
    if not title:
        title = "".join(res.xpath('//div[@id="slywxl2"]/h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = cleaned(res.xpath('//strong[contains(text(),"文　　号：")]/parent::td/following::td[1]/text()').extract_first())
    index_no = cleaned(res.xpath('//strong[contains(text(),"索  引  号：")]/parent::td/following::td[1]/text()').extract_first())
    organ = cleaned(res.xpath('//strong[contains(text(),"发布机构：")]/parent::td/following::td[1]/text()').extract_first())
    written_date = cleaned(res.xpath('//strong[contains(text(),"成文日期")]/parent::td/text()').extract_first())

    fulltext_xpath = '//div[@class="gknb_content"]|//div[@class="xlcontainer"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99016'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    print(lngid)

    # Metadata row for policy_latest.
    data = {
        'rawid': rawid,
        'rawid_mysql': rawid,
        'lngid': lngid,
        'keyid': lngid,
        'product': 'MWR',
        'sub_db': 'POLICY',
        'sub_db_id': sub_db_id,
        'provider': 'CNGOV',
        'zt_provider': 'mwrcngovpolicy',
        'source_type': '16',
        'latest_date': down_date_str[:8],
        'batch': down_date_str,
        'vision': '1',
        'is_deprecated': '0',
        'country': 'CN',
        'language': 'ZH',
        'title': title,
        'provider_url': provider_url,
        'pub_date': clean_pubdate(pub_date),
        'pub_year': pub_year,
        'pub_no': pub_no,
        'organ': organ,
        'index_no': index_no,
        'written_date': written_date,
    }

    # Fulltext row for policy_fulltext_latest.
    full_text_data = {
        'lngid': lngid,
        'keyid': lngid,
        'sub_db_id': sub_db_id,
        'source_type': '16',
        'latest_date': down_date_str[:8],
        'batch': down_date_str,
        'is_deprecated': '0',
        'filename': f"{lngid}.html",
        'fulltext_type': "html",
        'fulltext_addr': '',
        'fulltext_size': '',
        'fulltext_txt': fulltext,
        'page_cnt': "1",
        'pub_year': pub_year,
    }
    result.save_data = [
        {'table': 'policy_latest', 'data': data},
        {'table': 'policy_fulltext_latest', 'data': full_text_data},
    ]

    # Record any attachment links found inside the fulltext on the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   中华人民共和国商务部 (Ministry of Commerce of the PRC, MOFCOM)
def policy_mofcomlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for MOFCOM policy list pages.

    Reads the total page count from the embedded pager script, fans out
    tasks for the remaining list pages on the first pass
    (``page_index == 0``), and queues one article task per list entry
    under the next task tag.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid-escape-sequence warning for \d.
        max_count = re.findall(r'totalpage = "(\d+)"', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 5
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Drop bookkeeping columns that must not be re-inserted.
            for key in ("id", "update_time", "create_time", "null_dicts",
                        "err_msg", "other_dicts", "state", "failcount"):
                sql_dict.pop(key)

            for page in range(1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="txtList_01"]/li')
        for li in li_list:
            result.befor_dicts.update.update({'page': total_page})
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                continue
            base_url = 'http://www.mofcom.gov.cn'
            url = base_url + href
            # rawid is the last path segment without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99017'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/@title').extract_first()
            article_json["pub_date"] = li.xpath('span/text()').extract_first()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_mofcomarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for MOFCOM; all parsing happens in the ETL step."""
    return DealModel()


def policy_mofcomarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for a MOFCOM (sub_db_id 99017) policy article page.

    Parses title, document number, issuing organ, subject, keywords and
    full text from the article HTML, appends rows for the
    ``policy_latest`` and ``policy_fulltext_latest`` tables, and stores
    attachment info into the source row's ``other_dicts``.

    Raises:
        Exception: if the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']

    # The list page delivers the date wrapped in brackets: "[YYYY-MM-DD]".
    pub_date = article_json['pub_date'].replace('[', '').replace(']', '')
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured on the list page.
    title = "".join(res.xpath('//div[@class="art-title"]//text()').extract()).strip()
    if not title:
        title = "".join(res.xpath('//h1[@id="artitle"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no_info = ''.join(res.xpath('//div[@class="art-con art-con-bottonmLine"]//text()|//div[@id="zoom"]//text()').extract())
    pub_no_info = re.findall('【发布文号】(.*?)[【]', pub_no_info)
    pub_no_info = pub_no_info[0].strip().replace('&nbsp;', '').replace('<BR>', '') if pub_no_info else ''
    # A plausible document number is short; anything longer is a mis-parse.
    pub_no = pub_no_info if len(pub_no_info) < 20 else ''
    organ_info = re.findall('source = "(.*?)"', html)
    organ = organ_info[0] if organ_info else ''
    subject_info = re.findall('contype = "(.*?)"', html)
    subject = subject_info[0] if subject_info else ''
    keyword = cleaned(res.xpath('//p[@class="biaoqian"]/a/text()').extract())

    fulltext = res.xpath('//div[@class="art-con art-con-bottonmLine"]|//div[@id="zoom"]').extract()
    if fulltext:
        fulltext = ' '.join(fulltext)
    else:
        # An article without a recognizable body is a hard failure.
        raise Exception("mofcom article: full-text container not found")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99017'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    print(lngid)

    data = dict()
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'MOFCOM'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'mofcomcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['subject'] = subject
    data['keyword'] = keyword
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'

    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    file_info = get_file_info(data, res, '(//div[@class="art-con art-con-bottonmLine"]|//div[@id="zoom"])')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   中华人民共和国国家卫生健康委员会 (National Health Commission of the PRC, NHC)
def policy_nhclist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for NHC policy list pages.

    Reads the total page count from the ``createPageHTML`` pager script,
    fans out tasks for the remaining list pages on the first pass, and
    queues one article task per list entry under the next task tag.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid-escape-sequence warning for \( and \d.
        max_count = re.findall(r"createPageHTML\('page_div','(\d+)'", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 558
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        if (turn_page == 7 and page_index < 1) or (turn_page == 8 and page_index < 2):
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Drop bookkeeping columns that must not be re-inserted.
            for key in ("id", "update_time", "create_time", "null_dicts",
                        "err_msg", "other_dicts", "state", "failcount"):
                sql_dict.pop(key)

            for page in range(1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="wgblist"]/li')
        for li in li_list:
            result.befor_dicts.update.update({'page': total_page})
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                # Skip <li> rows without a link; previously a missing href
                # raised TypeError on string concatenation below (the
                # sibling list callbacks guard the same way).
                continue
            base_url = 'http://www.nhc.gov.cn/cms-search/xxgk/'
            url = base_url + href
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99018'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/@title').extract_first()
            article_json["org"] = li.xpath('div[@class="xxgkorg"]/text()').extract_first()
            article_json["wenhao"] = li.xpath('div[@class="xxgkwenhao"]/text()').extract_first()
            article_json["time"] = li.xpath('div[@class="xxgktime"]/text()').extract_first()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_nhcarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for NHC; all parsing happens in the ETL step."""
    return DealModel()


def policy_nhcarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for an NHC (sub_db_id 99018) policy article page.

    Extracts metadata from the document-info table, builds rows for
    ``policy_latest`` / ``policy_fulltext_latest``, and records
    attachment info into the source row's ``other_dicts``.

    Raises:
        Exception: if the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']

    # policy_nhclist_callback stores the publish date under "time", not
    # "pub_date" — the plain ['pub_date'] lookup used to raise KeyError
    # for rows it produced.  Accept either key for compatibility.
    pub_date = clean_pubdate(article_json.get('pub_date') or article_json.get('time') or '')
    res = Selector(text=html)
    # Prefer the on-page title; fall back to the list-page title.
    title = "".join(res.xpath('//div[@class="title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    if not pub_date:
        # Last resort: read the date off the metadata table on the page.
        pub_date = clean_pubdate(res.xpath('//td[contains(text(),"发布日期")]/following::td[1]/text()').extract_first())
    pub_year = pub_date[:4]
    pub_no = cleaned(res.xpath('//td[contains(text(),"文号")]/following::td[1]/text()').extract_first())
    organ = cleaned(res.xpath('//td[contains(text(),"发布机构")]/following::td[1]/text()').extract_first())
    subject = cleaned(res.xpath('//td[contains(text(),"主题分类")]/following::td[1]/text()').extract_first())
    subject_word = cleaned(res.xpath('//td[contains(text(),"主 题 词")]/following::td[1]/text()').extract_first())
    index_no = cleaned(res.xpath('//td[contains(text(),"索")]/following::td[1]/text()').extract_first())

    fulltext_xpath = '//div[@class="con_font"]|//div[@id="xw_box"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("nhc article: full-text container not found")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99018'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "NHC"
    zt_provider = "nhccngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['subject_word'] = subject_word

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   中华人民共和国应急管理部 (Ministry of Emergency Management, MEM — the code below crawls mem.gov.cn; the previous label 自然资源部 / Ministry of Natural Resources was incorrect)
def policy_memlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for MEM (mem.gov.cn) policy list pages.

    Reads the total page count from the pager script, fans out tasks for
    the remaining list pages on the first pass, and queues one article
    task per list row under the next task tag.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid-escape-sequence warning for \d.
        max_count = re.findall(r'countPage = (\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Two list layouts exist: a plain <ul> and a table-based one.
        li_list = res.xpath('//div[@class="cont"]/ul/li|//div[@class="scy_main_V2_list"]/table/tr')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('td[2]/div/a/@href|a/@href').extract_first()
            if not href:
                continue
            base_url = f'https://www.mem.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid: last path segment without extension; prefer the part
            # after "_" when present (e.g. "t20200101_123456.htm").
            if '_' in url.split('/')[-1]:
                rawid = re.findall(r'_(.*?)\.', url.split('/')[-1])[0]
            else:
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99019'
            article_json["url"] = url
            # NOTE(review): extract_first() may return None for a malformed
            # row, which raises AttributeError on .strip() — unchanged from
            # the original; confirm list rows always carry text here.
            article_json["title"] = li.xpath('td[2]/div/a/text()|a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('td[3]/div/text()|a/span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_memarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for MEM; all parsing happens in the ETL step."""
    return DealModel()


def policy_memarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for a MEM (sub_db_id 99019) policy article page.

    Extracts metadata from the document-info table, builds rows for
    ``policy_latest`` / ``policy_fulltext_latest``, and records
    attachment info (body links plus the "gk-file" box) into the source
    row's ``other_dicts``.

    Raises:
        Exception: if the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the metadata-table title, then the body heading, then the
    # title captured on the list page.
    title = "".join(res.xpath('//div[@class="scy_detail_top"]//td[contains(text(),"标题：")]/following::td[1]/text()').extract()).strip()
    if not title:
        title = "".join(res.xpath('//div[@class="zhenwen"]//h2//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath(
        '//div[@class="scy_detail_top"]//td[contains(text(),"发文字号：")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath(
        '//div[@class="scy_detail_top"]//td[contains(text(),"索引号：")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath(
        '//div[@class="scy_detail_top"]//td[contains(text(),"主题分类：")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="scy_detail_top"]//td[contains(text(),"成文日期：")]/following::td[1]/text()').extract()).strip()
    raw_nature = ''.join(res.xpath('//div[@class="scy_detail_top"]//td[contains(text(),"公文种类：")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath(
        '//div[@class="scy_detail_top"]//td[contains(text(),"效力：")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath(
        '//div[@class="scy_detail_top"]//td[contains(text(),"发文单位：")]/following::td[1]/text()').extract()).strip()

    fulltext_xpath = '//div[@id="content"]|//div[@class="zhenwen_neir"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("mem article: full-text container not found")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99019'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "MEM"
    zt_provider = "memcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['raw_nature'] = raw_nature
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments can live both in the body and in the "gk-file" box.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, '(//div[@class="gk-file"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   国务院国有资产监督管理委员会 (State-owned Assets Supervision and Administration Commission, SASAC)
def policy_sasaclist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for SASAC policy list pages.

    Reads the total page count from the pager script, fans out tasks for
    the remaining list pages on the first pass, and queues one article
    task per list entry under the next task tag.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid-escape-sequence warning for \d.
        max_count = re.findall(r'maxPageNum = (\d+);', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 17
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        if (turn_page == 7 and page_index < 1) or (turn_page == 8 and page_index < 2):
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Drop bookkeeping columns that must not be re-inserted.
            for key in ("id", "update_time", "create_time", "null_dicts",
                        "err_msg", "other_dicts", "state", "failcount"):
                sql_dict.pop(key)

            for page in range(1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"index_2603340_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        if callmodel.sql_model.page_index == 0:
            li_list = res.xpath('//div[@class="zsy_conlist"]/ul//li')
        else:
            li_list = res.xpath('//li')
        for li in li_list:
            # Inline-styled <li> rows are decorative separators, not entries.
            if 'style=' in li.extract():
                continue
            result.befor_dicts.update.update({'page': total_page})
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                # Skip rows without a link; previously a missing href raised
                # TypeError on the "in" membership test below (the sibling
                # list callbacks guard the same way).
                continue
            base_url = 'http://www.sasac.gov.cn/'
            if '../../../' in href:
                url = base_url + href.replace('../../../', '')
            else:
                url = href
            # rawid is the parent directory segment of the article URL.
            rawid = url.split('/')[-2]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99020'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/@title').extract_first()
            date_info = li.xpath('span/text()').extract_first()
            article_json["pub_date"] = date_info.replace('[', '').replace(']', '') if date_info else ''
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_sasacarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for SASAC; all parsing happens in the ETL step."""
    return DealModel()


def policy_sasacarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for a SASAC (sub_db_id 99020) policy article page.

    Parses title, issuing organ and full text from the article HTML,
    builds rows for ``policy_latest`` / ``policy_fulltext_latest``, and
    records attachment info into the source row's ``other_dicts``.

    Raises:
        Exception: if the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']

    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the on-page title; fall back to the title from the list page.
    title = "".join(res.xpath('//div[@class="zsy_cotitle"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    organ_info = res.xpath('//p[contains(text(),"文章来源：")]/text()').extract_first()
    if organ_info:
        organ_info = re.findall('文章来源：(.*?)发布时间', organ_info)
    organ = organ_info[0].strip() if organ_info else ''

    fulltext = res.xpath('//div[@class="zsy_comain"]').extract()
    if fulltext:
        fulltext = ' '.join(fulltext)
    else:
        raise Exception("sasac article: full-text container not found")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99020'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    print(lngid)

    data = dict()
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'SASAC'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'sasaccngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['organ'] = organ
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'

    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    file_info = get_file_info(data, res, '(//div[@class="zsy_comain"])')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_sasaclist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for SASAC's JSONP document-index API (xxgk.sasac.gov.cn).

    The response is a JSONP payload; its JSON body carries the total
    page count (``totalPageNum``) and a ``resultMap`` list of documents.
    Fans out tasks for all pages on the first pass (``page_index == 1``)
    and queues one article task per document.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Strip the JSONP wrapper "callback(...);" before parsing the JSON.
        # Raw string avoids the invalid-escape-sequence warning for \(.
        data = json.loads(re.findall(r'\((.*)\);', para_dicts["data"]["1_1"]['html'])[0])
        total_page = data['totalPageNum']
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Drop bookkeeping columns that must not be re-inserted.
            for key in ("id", "update_time", "create_time", "null_dicts",
                        "err_msg", "other_dicts", "state", "failcount"):
                sql_dict.pop(key)

            for page in range(page_index, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        items = data['resultMap']
        for item in items:
            result.befor_dicts.update.update({'page': total_page})
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = f"http://xxgk.sasac.gov.cn:8080/gdnps/pc/content.jsp?id={item['id']}"
            temp["rawid"] = item['id']
            temp["sub_db_id"] = '99020'
            article_json["url"] = url
            article_json["title"] = item['title']
            article_json["wh"] = item.get('wh', "")
            article_json["publishTime"] = item.get('publishTime', "")
            article_json["syh"] = item.get('syh', "")

            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_sasacarticle1_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article callback for the SASAC JSONP API; parsing happens in the ETL step."""
    return DealModel()


def policy_sasacarticle1_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for a SASAC JSONP-API (sub_db_id 99020) article.

    The article payload is JSONP; metadata and the HTML body come from
    the first element of its ``resultMap``.  Builds rows for
    ``policy_latest`` / ``policy_fulltext_latest`` and records
    attachment info into the source row's ``other_dicts``.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title'].strip()
    provider_url = article_json['url']

    # publishTime is a long timestamp string; the first 8 chars are YYYYMMDD.
    pub_date = cleaned(article_json['publishTime'][:8])
    # Strip the JSONP wrapper "callback({...});" before parsing the JSON.
    # Raw string avoids the invalid-escape-sequence warning for \( and \{.
    text = re.findall(r'\((\{.*?\})\);', html)[0]
    json_info = json.loads(text)['resultMap'][0]

    pub_year = pub_date[:4]
    pub_no = json_info.get('wh', '')
    organ = json_info.get('xxly', '')
    index_no = json_info.get('syh', '')
    subject = json_info.get('subjectName', '')
    subject_word = json_info.get('ztc', '')
    # Subject words arrive space-separated; normalize to semicolons.
    subject_word = re.sub(' +', ';', subject_word)
    fulltext = json_info.get('htmlContent', '')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99020'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    print(lngid)

    data = dict()
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'SASAC'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'sasaccngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['subject_word'] = subject_word
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'

    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Attachment scan runs over the embedded HTML body, not the raw JSONP.
    res = Selector(text=fulltext)
    file_info = get_file_info(data, res, '')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   国家市场监督管理总局 (State Administration for Market Regulation, SAMR)
def policy_samrlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a SAMR (国家市场监督管理总局) list page.

    On the first crawl pass this fans out one task row per remaining list
    page (insert-ignore keeps re-runs idempotent), then extracts every
    article link on the current page into next-stage task rows.

    Args:
        callmodel: wraps the fetched page (``para_dicts["data"]["1_1"]["html"]``),
            the originating task row (``sql_model``) and redis task config.

    Returns:
        DealModel carrying the page fan-out inserts, the next-stage article
        rows, and the ``max_page`` hint in ``code_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        if callmodel.sql_model.list_rawid == 'gz':
            # 'gz' pages expose the page count directly (countPage = N).
            max_count = re.findall(r'countPage = (\d+)', html)
            total_page = int(max_count[0]) if max_count else 26
        else:
            # Other columns expose a record count; 25 records per page.
            max_count = re.findall(r'm_nRecordCount = "(\d+)"', html)
            record_count = int(max_count[0]) if max_count else 1062
            total_page = math.ceil(record_count / 25)
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        if (turn_page == 7 and page_index < 1) or (turn_page == 8 and page_index < 2):
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Strip row-local / bookkeeping columns before re-inserting.
            for key in ("id", "update_time", "create_time", "null_dicts",
                        "err_msg", "other_dicts", "state", "failcount"):
                sql_dict.pop(key)

            # Per-column list URL templates; unknown columns fall back to
            # the 2140/2173 listing (same mapping as the original if/elif chain).
            url_templates = {
                'gz': "https://www.samr.gov.cn/zw/gz/index_{}.html",
                'zjwj': "https://gkml.samr.gov.cn/2140/2167/list_{}.html",
                'tyta': "https://gkml.samr.gov.cn/2140/2184/list_{}.html",
                'zcjd': "https://gkml.samr.gov.cn/2140/2221/list_{}.html",
            }
            template = url_templates.get(callmodel.sql_model.list_rawid,
                                         "https://gkml.samr.gov.cn/2140/2173/list_{}.html")
            # NOTE(review): upper bound excludes total_page itself, matching
            # the original paging scheme (index.html is the page in hand).
            for page in range(1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": template.format(page)},
                                                   ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=html)
        if callmodel.sql_model.list_rawid == 'gz':
            li_list = res.xpath('//div[@class="lisccont"]/ul')
        else:
            li_list = res.xpath('//div[@class="row"]')
        for li in li_list:
            result.befor_dicts.update.update({'page': total_page})
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            article_json = dict()

            if callmodel.sql_model.list_rawid == 'gz':
                href = li.xpath('.//div[@class="gzbt"]/a/@href').extract_first()
                url = parse.urljoin('https://gkml.samr.gov.cn/', href)
                article_json["url"] = url
                article_json["title"] = li.xpath('.//div[@class="gzbt"]/a/text()').extract_first()
            else:
                href = li.xpath('li[@class="mc"]/div/a/@href').extract_first()
                url = 'https://gkml.samr.gov.cn/' + href.replace('../../', '')
                article_json["url"] = url
                article_json["title"] = li.xpath('li[@class="mc"]/div/a/text()').extract_first()
                article_json["pub_date"] = li.xpath('li[@class="fbrq"]/text()').extract_first()
                article_json["wh"] = li.xpath('li[@class="wh"]/text()').extract_first()
            # rawid is the trailing id in ..._<id>.htm(l) style filenames.
            temp["rawid"] = re.findall(r'_(.*?)\.htm', url.split('/')[-1])[0]
            temp["sub_db_id"] = '99022'
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_samrlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a SAMR AJAX list response (JSON wrapping an HTML fragment).

    The payload is JSON whose ``data.html`` field holds the rendered list;
    the record total is embedded as a ``count="N"`` attribute (20 records
    per page).  On page 1 the remaining pages are fanned out; the same
    list_json is reused for every page since the page number travels in
    ``page_index``.  Article links on the fragment become next-stage rows.

    Returns:
        DealModel with the page fan-out inserts and next-stage article rows.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        html = html_json['data']['html']
        max_count = re.findall(r'count="(\d+)', html)
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 20)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out the rest (insert-ignore dedupes).
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=html)
        li_list = res.xpath('//div[@class="page-content"]/ul/div/ul')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            href = li.xpath('li/a/@href').extract_first()
            url = parse.urljoin('https://www.samr.gov.cn/zw/zjwj/index.html', href)
            if 'htm' not in url:
                continue
            article_json = {
                "url": url,
                "title": li.xpath('li/a/text()').extract_first(),
                "pub_date": li.xpath('li[contains(@class,"time")]/text()').extract_first(),
            }
            # rawid is the trailing id in ..._<id>.htm(l) style filenames.
            temp["rawid"] = re.findall(r'_(.*?)\.htm', url.split('/')[-1])[0]
            temp["sub_db_id"] = '99022'
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_samrarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op SAMR article callback; returns an empty DealModel."""
    return DealModel()


def policy_samrarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL a SAMR article page into policy_latest / policy_fulltext_latest rows.

    Reads the fetched article HTML plus the list-stage article_json
    (title/url), extracts policy metadata fields and the full-text
    fragment, and queues the two save rows on the result.  Attachment
    info found in the full text (minus ``sign=`` links) is written back
    to the task row's other_dicts.

    Raises:
        Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']

    res = Selector(text=html)
    # Prefer the on-page title; fall back to the list-stage title.
    title = "".join(res.xpath('//li[contains(text(),"题：")]/following::li[1]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_date = cleaned(res.xpath('//li[contains(text(),"发布日期")]/following::li[1]/text()').extract_first())
    if not pub_date:
        pub_date = cleaned(res.xpath('//li[contains(text(),"公布日期")]/following::li[1]/text()').extract_first())
    pub_no = cleaned(res.xpath('//li/span[contains(text(),"文")]/following::li[1]/text()').extract_first())
    organ = cleaned(res.xpath('//li[contains(text(),"所属机构")]/following::li[1]/text()').extract_first())
    index_no = cleaned(res.xpath('//li/span[contains(text(),"索")]/following::li[1]/text()').extract_first())
    written_date = cleaned(res.xpath('//li[contains(text(),"成文日期")]/following::li[1]/text()').extract_first())
    subject = cleaned(res.xpath('//li[contains(text(),"主题分类")]/following::li[1]/text()').extract_first())
    fulltext = res.xpath('//div[@class="Three_xilan_07"]').extract_first()
    if not fulltext:
        raise Exception('fulltext container //div[@class="Three_xilan_07"] not found')
    if not organ:
        # Some layouts only carry "XXX发布" in a separate banner div.
        organ_info = res.xpath('//div[@class="gzpub"]/text()').extract_first()
        organ = organ_info.replace('发布', '') if organ_info else ''
    if not pub_date:
        # Last resort: a date like 2021年1月1日 in the paragraph after the title.
        pub_date_info = res.xpath('//strong/ancestor::p/following::p[1]/font/text()').extract_first()
        pub_date = re.findall(r'\d+年\d+月\d+日', pub_date_info)[0]
    # Computed once after all fallbacks so pub_year always tracks pub_date.
    pub_year = pub_date[:4]

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99022'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    print(lngid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'SAMR'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'samrcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['organ'] = organ
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['written_date'] = written_date
    data['subject'] = subject
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'

    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    file_info = get_file_info(data, res, '(//div[@class="Three_xilan_07"])')
    di_model_bef = DealUpdateModel()
    # Skip attachment links carrying a sign= query parameter
    # (NOTE(review): presumably signed, expiring URLs — confirm).
    new_file = [item for item in file_info if 'sign=' not in item['url']]
    if new_file:
        di_model_bef.update.update({"other_dicts": json.dumps(new_file, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   国家税务总局
def policy_chinataxlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a chinatax (国家税务总局) search-API list response.

    Fans out one task row per page on the first crawl pass (insert-ignore
    keeps re-runs idempotent) and turns each result item into a
    next-stage article task row.
    """
    result = DealModel()
    payload = callmodel.para_dicts["data"]
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_info = {"task_name": callmodel.sql_model.task_name,
                 "task_tag": callmodel.sql_model.task_tag,
                 "task_tag_next": task_info.task_tag_next}
    if "1_1" not in payload:
        return result

    data = payload["1_1"]
    total_page = math.ceil(data['totalHits'] / 10)  # 10 hits per page
    result.code_dicts = {
        "1_1": {"max_page": total_page}
    }
    page_index = int(callmodel.sql_model.page_index)
    turn_page = task_info.turn_page
    if (turn_page == 7 and page_index < 1) or (turn_page == 8 and page_index < 2):
        row = callmodel.sql_model.dict()
        insert_model = DealInsertModel()
        insert_model.insert_pre = CoreSqlValue.insert_ig_it
        # Drop row-local / bookkeeping columns before re-inserting.
        for key in ("id", "update_time", "create_time", "null_dicts",
                    "err_msg", "other_dicts", "state", "failcount"):
            row.pop(key)

        for page in range(page_index, total_page + 1):
            row["page"] = total_page
            row["page_index"] = page
            insert_model.lists.append(row.copy())
        result.befor_dicts.insert.append(insert_model)

    next_model = DealInsertModel()
    next_model.insert_pre = CoreSqlValue.insert_ig_it
    for item in data['resultList']:
        result.befor_dicts.update.update({'page': total_page})
        rec = base_info.copy()
        rec["task_tag"] = rec.pop("task_tag_next")
        url = item['url']
        rec["rawid"] = url.split('/')[-2]
        rec["sub_db_id"] = '99023'
        article = {
            "url": url,
            "title": item.get('dreTitle', ''),
            "DOCNOVAL": item['customHs'].get('DOCNOVAL', ''),
            "publishTime": item.get('publishTime', ''),
        }
        rec["article_json"] = json.dumps(article, ensure_ascii=False)
        next_model.lists.append(rec)

    result.next_dicts.insert.append(next_model)
    return result


def policy_chinataxlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a chinatax (国家税务总局) JSON search-list response.

    Reads the total from ``results.data.total`` (20 records per page),
    fans out one task row per page on page 1 (insert-ignore dedupes;
    the page number travels in ``page_index``), and turns each result
    into a next-stage article row.

    Returns:
        DealModel with the page fan-out inserts and next-stage article rows.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        tcount = html_json['results']['data']['total']
        total_page = math.ceil(tcount / 20)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['results']['data']['results']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            url = parse.urljoin('http://www.chinatax.gov.cn/chinatax/n810214/n810641/index.html',
                                li['redirectUrl'])
            if 'htm' not in url:
                continue
            article_json = {
                "url": url,
                "title": li['title'],
                "publishTime": li['publishedTimeStr'],
            }
            # rawid is the parent directory segment of the article URL.
            temp["rawid"] = url.split('/')[-2]
            temp["sub_db_id"] = '99023'
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_chinataxlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a chinatax HTML column list page (共N页 style pager).

    The page count comes from the ``>共N页<`` pager text.  On page 1 the
    remaining pages (2..N) are fanned out (insert-ignore dedupes; the
    page number travels in ``page_index``), and each ``ul.list li`` link
    becomes a next-stage article row.

    Returns:
        DealModel with the page fan-out inserts and next-stage article rows.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'>共(\d+)页<', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        for li in res.xpath('//ul[@class="list"]/li'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            href = li.xpath('a/@href').extract_first()
            if not href:
                continue
            url = parse.urljoin(f'http://www.chinatax.gov.cn/{callmodel.sql_model.list_rawid}', href)
            if 'htm' not in url:
                continue
            article_json = {
                "url": url,
                "title": li.xpath('a/text()').extract_first().strip(),
                "publishTime": li.xpath('span/text()').extract_first().strip(),
            }
            # rawid is the parent directory segment of the article URL.
            temp["rawid"] = url.split('/')[-2]
            temp["sub_db_id"] = '99023'
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_chinataxlist3_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a chinatax JSON search response (``searchTotal`` list).

    The total comes from a ``"total":N`` fragment (10 hits per page).  On
    page 0 the pages 0..N-1 are fanned out (insert-ignore dedupes; the
    page number travels in ``page_index``), and each searchTotal entry
    becomes a next-stage article row.

    Returns:
        DealModel with the page fan-out inserts and next-stage article rows.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'"total":(\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 10)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res_json = json.loads(para_dicts["data"]["1_1"]['html'])
        for li in res_json['searchTotal']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            href = li['url']
            if not href:
                continue
            url = parse.urljoin('https://www.chinatax.gov.cn/chinatax/n810341/n810825/index.html?title=',
                                href)
            if 'htm' not in url:
                continue
            article_json = {
                "url": url,
                "title": li['title'],
                "publishTime": li['pubDate'],
            }
            # rawid is the parent directory segment of the article URL.
            temp["rawid"] = url.split('/')[-2]
            temp["sub_db_id"] = '99023'
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_chinataxarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op chinatax article callback; returns an empty DealModel."""
    return DealModel()


def policy_chinataxarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL a chinatax article page into policy_latest / policy_fulltext_latest rows.

    Uses the list-stage article_json for url/publishTime, scrapes title,
    pub_no, index_no and subject from the page, and queues the two save
    rows.  Attachment info found in the full text is written back to the
    task row's other_dicts.

    Raises:
        Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['publishTime'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the list-stage title.
    title = ''.join(
        res.xpath('//span[@class="dhgao title1"]//text()|//span[@class="dhgao"]//text()|//div[@class="title sv_texth1"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath(
        '//span[@class="hao1"]/text()').extract()).strip()
    index_no = ''.join(res.xpath(
        '//ul[@class="xxgkxx"]//span[contains(text(),"索引号")]/parent::li[1]/text()').extract()).strip()
    subject = ''.join(res.xpath(
        '//ul[@class="xxgkxx"]//span[contains(text(),"主题分类")]/following::span[1]/text()').extract()).strip()

    fulltext_xpath = '//div[@id="fontzoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext container {fulltext_xpath} not found')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99023'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, "CHINATAX", "chinataxcngovpolicy")
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   国家医疗保障局
def policy_nhsalist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse an NHSA (国家医疗保障局) XML-style list response.

    The response carries ``<record>`` entries and a ``<totalrecord>``
    count (15 records per page).  On page 1 the pages are fanned out
    three per task row, each carrying a start/end record window in its
    list_json; on-site article links become next-stage rows.

    Returns:
        DealModel with the windowed page fan-out inserts and next-stage
        article rows.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 15)

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            list_json = json.loads(callmodel.sql_model.list_json)
            # One task row covers a window of three pages (45 records),
            # clamped to the real record count.
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 15 + 1
                end = min((page + 2) * 15, max_count)
                dic = {"start": start, "end": end, "page_info": list_json['page_info']}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        for li in res.xpath('//record'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            href = li.xpath('span[2]/a/@href').extract_first()
            url = parse.urljoin('http://www.nhsa.gov.cn/col/col109/index.html', href)
            # Keep only on-site article pages.
            if 'htm' not in url or 'nhsa' not in url:
                continue
            article_json = {
                "url": url,
                "title": li.xpath('span[2]/a/text()').extract_first().strip(),
                "pub_date": li.xpath('span[4]/text()').extract_first().strip(),
            }
            # rawid is the trailing id of art_<col>_<id>.html filenames.
            temp["rawid"] = re.findall(r'art_.*?_(.*?)\.', url.split('/')[-1])[0]
            temp["sub_db_id"] = '99024'
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_nhsaarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op NHSA article callback; returns an empty DealModel."""
    return DealModel()


def policy_nhsaarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL an NHSA article page into policy_latest / policy_fulltext_latest rows.

    Uses the list-stage article_json for url/pub_date, scrapes title,
    pub_no, index_no and organ from the mu-table metadata block, and
    queues the two save rows.  Attachment info found in the full text is
    written back to the task row's other_dicts.

    Raises:
        Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the list-stage title.
    title = ''.join(
        res.xpath('//div[@class="atricle-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath(
        '//div[@class="mu-table"]//span[contains(text(),"发文字号：")]/following::span[1]/text()').extract()).strip()
    # NOTE(review): drops '-' placeholders, but also strips hyphens inside
    # genuine document numbers — mirrors the original behavior; confirm.
    pub_no = pub_no.replace('-', '')
    index_no = ''.join(res.xpath(
        '//div[@class="mu-table"]//span[contains(text(),"索引号：")]/following::span[1]/text()').extract()).strip()
    organ = ''.join(res.xpath(
        '//div[@class="mu-table"]//span[contains(text(),"发布机构：")]/following::span[1]/text()').extract()).strip()

    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext container {fulltext_xpath} not found')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99024'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, "NHSA", "nhsacngovpolicy")
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   国家机关事务管理局
def policy_ggjlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for ggj.gov.cn (National Government Offices Administration).

    Two listing formats are handled: the 'zcfg'/'zcjd2' channels return a JSON
    payload with a ``records`` array, every other channel returns a plain HTML
    list page. On the first page (page_index == 0) one pagination task per
    remaining page is queued; every call also queues one article task per link.

    :param callmodel: callback model; ``para_dicts['data']['1_1']['html']``
        carries the fetched page body.
    :return: DealModel with pagination rows in ``befor_dicts`` and article
        rows in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Common fields cloned into every queued row.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count: either from the createPageHTML(...) JS call embedded in
        # the HTML listing, or from the "pageTotal" field of the JSON listing.
        max_count = re.findall('createPageHTML\(".*?",(\d+)', para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall('"pageTotal":(\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page only: fan out the remaining pagination tasks.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # NOTE(review): the upper bound excludes total_page — the site
            # appears to number sub-pages index_1.htm .. index_{N-1}.htm with
            # index.htm itself as page 0; confirm the last page is not skipped.
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"url_part": f"{list_json['url_part']}", "page_info": list_json['page_info'].replace('index', f'index_{page}')}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        if 'zcfg' == callmodel.sql_model.list_rawid or 'zcjd2' == callmodel.sql_model.list_rawid:
            # JSON listing: one record per article.
            html_json = json.loads(para_dicts["data"]["1_1"]['html'])
            li_list = html_json['records']
            for li in li_list:
                temp = info_dicts.copy()
                # Promote the queued row to the next pipeline stage.
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li['url']
                base_url = f'https://gkml.ggj.gov.cn/index_423.htm'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url or 'ggj' not in url:
                    # Skip off-site or non-page links.
                    continue
                rawid = re.findall('_(.*?)\.htm', url)[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99025'
                article_json["url"] = url
                article_json["title"] = li['title']
                article_json["pub_date"] = li['date']
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # HTML listing: one anchor plus a date per <dt> row.
            res = Selector(text=para_dicts["data"]["1_1"]['html'])
            li_list = res.xpath('//dl[@class="listbox boxcenter"]/dt')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                if not href:
                    continue
                base_url = f'http://www.ggj.gov.cn/{callmodel.sql_model.list_rawid}index.htm'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall('_(.*?)\.htm', url)[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99025'
                article_json["url"] = url
                title_info = ''.join(li.extract()).strip()
                # The title is embedded in an inline showTitle('...') JS call.
                article_json["title"] = re.findall("showTitle\('(.*?)'\);", title_info)[0]
                article_json["pub_date"] = li.xpath('i/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_ggjarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-download stage for ggj.gov.cn; parsing is deferred to the ETL callback."""
    return DealModel()


def policy_ggjarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for ggj.gov.cn article pages.

    Extracts the title and the fulltext container from the downloaded HTML,
    queues rows for ``policy_latest`` / ``policy_fulltext_latest``, and writes
    any attachment info back onto the source row via ``other_dicts``.

    :param callmodel: callback model; ``para_dicts['data']['1_1']['html']``
        holds the fetched page and ``sql_model.article_json`` the list-stage
        metadata (url/title/pub_date).
    :raises Exception: when no fulltext container matches the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the list-stage title.
    title = ''.join(res.xpath('//h1[@class="titlebox boxcenter"]//text()|//div[@class="news_title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Two page templates are in use; match either fulltext container.
    fulltext_xpath = '//div[@class="news_content"]|//div[@class="conbox boxcenter"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("policy_ggjarticle_etl_callback: fulltext container not found")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99025'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "GGJ"
    zt_provider = "ggjcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments may sit inside the body or in the dedicated appendix div.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, '(//div[@id="idappendix"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   中国银行保险监督管理委员会 (China Banking and Insurance Regulatory Commission, CBIRC)
def policy_cbirclist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for the CBIRC document API (18 rows per page).

    On page 1 one pagination row is queued per page of the result set; every
    call also queues one next-stage row per document in ``data.rows``.

    :param callmodel: callback model; ``para_dicts['data']['1_1']['html']``
        holds the raw JSON response.
    :return: DealModel with pagination rows in ``befor_dicts`` and article
        rows in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Common fields cloned into every queued row.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        tcount = html_json['data']['total']
        # 18 documents per API page.
        total_page = math.ceil(tcount / 18)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out pagination tasks for all pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # Parsed but currently unused; kept for the commented page_info logic.
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # dic = {"page_info": list_json['page_info'].replace(':0', f':{page}')}
                # sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = html_json['data']['rows']
        for li in li_list:
            temp = info_dicts.copy()
            # Promote the queued row to the next pipeline stage.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            # docId may come back as an int; stringify before use.
            rawid = str(li['docId'])
            url = f'https://www.cbirc.gov.cn/cn/view/pages/ItemDetail.html?docId={rawid}&generaltype=1'

            temp["rawid"] = rawid
            temp["sub_db_id"] = '99026'
            article_json["url"] = url
            article_json["title"] = li['docSubtitle']
            article_json["publishDate"] = li['publishDate']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cbirclist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for the CBIRC search API variant (15 rows per page).

    Same flow as :func:`policy_cbirclist_callback` but the documents sit under
    ``data.lists``. On page 1 one pagination row is queued per page; every call
    queues one next-stage row per document.

    :param callmodel: callback model; ``para_dicts['data']['1_1']['html']``
        holds the raw JSON response.
    :return: DealModel with pagination rows in ``befor_dicts`` and article
        rows in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Common fields cloned into every queued row.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        tcount = html_json['data']['total']
        # 15 documents per API page.
        total_page = math.ceil(tcount / 15)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out pagination tasks for all pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = html_json['data']['lists']
        for li in li_list:
            temp = info_dicts.copy()
            # Promote the queued row to the next pipeline stage.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            # Cast docId to str for consistency with policy_cbirclist_callback
            # (the API may return it as an int).
            rawid = str(li['docId'])
            url = f'https://www.cbirc.gov.cn/cn/view/pages/ItemDetail.html?docId={rawid}&generaltype=1'

            temp["rawid"] = rawid
            temp["sub_db_id"] = '99026'
            article_json["url"] = url
            article_json["title"] = li['docSubtitle']
            article_json["publishDate"] = li['publishDate']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cbircarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-download stage for CBIRC; parsing is deferred to the ETL callback."""
    return DealModel()


def policy_cbircarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for CBIRC ItemDetail API responses.

    The payload is JSON; the document body lives in ``data.docClob`` and can be
    either a full Word-exported HTML document or a bare fragment. Builds
    ``policy_latest`` / ``policy_fulltext_latest`` rows and collects attachment
    links from both the body and the explicit attachment list.

    :param callmodel: callback model; ``para_dicts['data']['1_1']['html']``
        holds the raw JSON response.
    """
    result = EtlDealModel()
    save_data = list()

    html_json = json.loads(callmodel.para_dicts['data']['1_1']['html'])
    html = html_json['data']['docClob']
    if not html:
        html = ''
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['publishDate'])
    pub_year = pub_date[:4]

    # Prefer the subtitle from the detail payload; fall back to the list title.
    title = html_json['data']['docSubtitle']
    if not title:
        title = article_json['title'].strip()
    pub_no = cleaned(html_json['data'].get('documentNo', ''))
    index_no = cleaned(html_json['data'].get('indexNo', ''))
    subject = cleaned(html_json['data'].get('interviewTypeName', ''))
    organ = cleaned(html_json['data'].get('docSource', ''))

    res = Selector(text=html)
    if '<head>' in html:
        # Full HTML document: keep only the Word-export body section.
        # NOTE(review): extract_first() can return None when none of the
        # section classes match — confirm downstream tolerates that.
        fulltext = res.xpath(
            '//div[@class="WordSection1"]|//div[@class="Section0"]|//div[@class="Section1"]').extract_first()
    else:
        fulltext = html

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99026'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CBIRC"
    zt_provider = "cbirccngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    if '<head>' in html:
        file_info = get_file_info(data, res, '(//div[@class="WordSection1"]|//div[@class="Section0"]|//div[@class="Section1"])')
    else:
        file_info = get_file_info(data, res, '')
    # Explicit attachment list carried in the API payload (may be missing or None).
    attachmentInfoVOList = html_json['data'].get('attachmentInfoVOList', '')
    file_info2 = list()
    if isinstance(attachmentInfoVOList, list):
        for attachment in attachmentInfoVOList:
            purl = 'https://www.cbirc.gov.cn' + attachment["urlOtherName"]
            dic = {'url': purl, 'name': attachment["title"], 'pub_year': pub_year, 'keyid': lngid}
            file_info2.append(dic)
    file_info = file_info + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   中国证券监督管理委员会 (China Securities Regulatory Commission, CSRC)
def policy_neriscsrclist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for the CSRC law database (neris.csrc.gov.cn, 20 rows/page).

    Queues pagination tasks on the first run (gated by ``turn_page``) and one
    next-stage row per law in ``pageUtil.pageList``.

    :param callmodel: callback model; ``para_dicts['data']['1_1']`` is the
        already-parsed API response dict.
    :return: DealModel with pagination rows in ``befor_dicts`` and article
        rows in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Common fields cloned into every queued row.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        data = para_dicts["data"]["1_1"]
        max_count = data['pageUtil']['rowCount']
        # 20 rows per API page.
        total_page = math.ceil(max_count/20)
        # Report the discovered page count back to the scheduler.
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        # NOTE(review): turn_page 7/8 appear to encode pagination modes with
        # different fan-out thresholds — confirm against the scheduler config.
        if (turn_page == 7 and page_index < 1) or (turn_page == 8 and page_index < 2):
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Strip runtime/bookkeeping columns before re-inserting the row.
            sql_dict.pop("id")
            sql_dict.pop("update_time")
            sql_dict.pop("create_time")
            sql_dict.pop("null_dicts")
            sql_dict.pop("err_msg")
            sql_dict.pop("other_dicts")
            sql_dict.pop("state")
            sql_dict.pop("failcount")

            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        items = data['pageUtil']['pageList']
        for item in items:
            # Record the page total on the current row (same value every pass).
            result.befor_dicts.update.update({'page': total_page})
            temp = info_dicts.copy()
            # Promote the queued row to the next pipeline stage.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = f'https://neris.csrc.gov.cn/falvfagui/rdqsHeader/mainbody?navbarId=1&secFutrsLawId={item["secFutrsLawId"]}'
            rawid = item["secFutrsLawId"]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99027'
            article_json["url"] = url
            article_json["title"] = item.get('secFutrsLawName', '')
            article_json["fileno"] = item.get('fileno', '')
            article_json["lawPubOrgName"] = item.get('lawPubOrgName', '')
            article_json["pub_date"] = item.get('secFutrsLawVersion', '')
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_neriscsrcarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-download stage for the CSRC law database; parsing happens in the ETL callback."""
    return DealModel()


def policy_neriscsrcarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for the CSRC law database (neris.csrc.gov.cn).

    The article payload is JSON; metadata and body sections live under
    ``lawlist``. The fulltext is rebuilt by concatenating the preamble with
    every entry's title/content and each entry's nested item list.

    :param callmodel: callback model; ``para_dicts['data']['1_1']['html']``
        holds the raw JSON response.
    :raises Exception: on an unknown legal-status code or an empty fulltext.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title'].strip()
    provider_url = article_json['url']

    json_info = json.loads(html)
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    pub_no = article_json['fileno'] or ''
    organ = article_json['lawPubOrgName']
    if len(pub_date) != 8:
        # List stage had no usable YYYYMMDD date; fall back to the payload's
        # millisecond epoch timestamp.
        pub_date_info = json_info['lawlist']['law']['pubDate']/1000
        pub_date = time.strftime("%Y%m%d", time.localtime(pub_date_info))
        pub_year = pub_date[:4]
    # lawAthrtyStsCde: '1' = in force, '2' = amended, '3' = repealed.
    legal_status_num = cleaned(json_info['lawlist']['law']['lawAthrtyStsCde'])
    if legal_status_num == '1':
        legal_status = '现行有效'
    elif legal_status_num == '2':
        legal_status = '已被修改'
    elif legal_status_num == '3':
        legal_status = '已被废止'
    else:
        raise Exception(f"policy_neriscsrcarticle_etl_callback: unknown legal status code {legal_status_num!r}")
    # Rebuild the fulltext: preamble first, then each entry and its items.
    fulltext_list = list()
    fulltext_list.append(cleaned(json_info['lawlist']['law']['bodyAgoCntnt']))
    for item in json_info['lawlist']['lawEntryVOs']:
        title_text = cleaned(item['title']) + ' ' + cleaned(item['cntnt'])
        fulltext_list.append(title_text)
        body_text_list = list()
        if item['itemList']:
            for body_item in item['itemList']:
                body_t = body_item['title'] + ' ' + body_item['cntnt']
                body_text_list.append(body_t)
        body_text = '\n'.join(body_text_list)
        fulltext_list.append(body_text)
    fulltext = '\n'.join(fulltext_list)
    if not fulltext:
        raise Exception("policy_neriscsrcarticle_etl_callback: empty fulltext")

    # This site fills the record dict by hand rather than via init_data();
    # the field set is kept as-is.
    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99027'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    print(lngid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'CSRC'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'csrccngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['organ'] = organ
    data['pub_no'] = pub_no
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'

    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Scan the rebuilt fulltext for embedded attachment links.
    res = Selector(text=fulltext)
    file_info = get_file_info(data, res, '')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_neriscsrclist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for csrc.gov.cn channels.

    Two listing formats: the 'csrc/c106256' channel serves an HTML table,
    every other channel serves a JSON search API. On page 1 one pagination
    row is queued per remaining page; every call queues one next-stage row
    per article link.

    :param callmodel: callback model; ``para_dicts['data']['1_1']['html']``
        carries the fetched page body.
    :return: DealModel with pagination rows in ``befor_dicts`` and article
        rows in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Common fields cloned into every queued row.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # HTML listing embeds the page count in a createPageHTML(...) JS call;
        # the JSON API reports a row "total" instead (10 rows per page).
        max_count = re.findall("createPageHTML\('page_div',(\d+)", para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall('"total":(\d+)', para_dicts["data"]["1_1"]['html'])
            max_count = int(max_count[0]) if max_count else 1
            total_page = math.ceil(max_count/10)
        else:
            max_count = int(max_count[0]) if max_count else 1
            total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out the remaining pagination tasks.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Pagination shape differs per channel: static sub-pages for
                # the HTML listing, a query parameter for the JSON API.
                if 'csrc/c106256' == callmodel.sql_model.list_rawid:
                    dic = {"page_info":  f'/fg_{page}.shtml'}
                else:
                    dic = {"page_info": f'&page={page}'}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        if 'csrc/c106256' != callmodel.sql_model.list_rawid:
            # JSON search API: one result per article.
            html_json = json.loads(para_dicts["data"]["1_1"]['html'])
            li_list = html_json['data']['results']
            for li in li_list:
                temp = info_dicts.copy()
                # Promote the queued row to the next pipeline stage.
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li['url']
                base_url = f'http://www.csrc.gov.cn/csrc/c101953/zfxxgk_zdgk.shtml'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url or 'csrc' not in url:
                    # Skip off-site or non-page links.
                    continue
                # Second-to-last path segment serves as the raw document id.
                rawid = url.split('/')[-2]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99027'
                article_json["url"] = url
                article_json["title"] = li['title']
                article_json["pub_date"] = li['publishedTimeStr']
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # HTML table listing: one anchor per <tr>; no publish date here.
            res = Selector(text=para_dicts["data"]["1_1"]['html'])
            li_list = res.xpath('//tbody[@id="zc-list-content"]/tr')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[2]/a/@href').extract_first()
                if not href:
                    continue
                base_url = f'http://www.csrc.gov.cn/csrc/c106256/fg.shtml'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = url.split('/')[-2]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99027'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[2]/a/span[@class="til"]/text()').extract_first().strip()
                article_json["pub_date"] = ''
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_neriscsrcarticle1_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-download stage for csrc.gov.cn pages; parsing happens in the ETL callback."""
    return DealModel()


def policy_neriscsrcarticle1_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for csrc.gov.cn HTML article pages.

    Extracts the title, document/index numbers, subject, issuing organ and
    the fulltext container, queues ``policy_latest`` /
    ``policy_fulltext_latest`` rows and writes attachment info back onto the
    source row via ``other_dicts``.

    :param callmodel: callback model; ``para_dicts['data']['1_1']['html']``
        holds the fetched page and ``sql_model.article_json`` the list-stage
        metadata.
    :raises Exception: when no fulltext container matches the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Two page templates; fall back to the list-stage title if neither matches.
    title = ''.join(res.xpath('//div[@class="content-body"]/h3//text()|//div[@class="content"]/h2//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata lives in the "xxgk" disclosure table; header cells carry the
    # field labels, the following cell the value.
    pub_no = ''.join(res.xpath('//div[@class="xxgk-table"]//th[contains(text(),"文") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="xxgk-table"]//th[contains(text(),"索") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="xxgk-table"]//th[contains(text(),"分") and contains(text(),"类")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="xxgk-table"]//th[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()

    fulltext_xpath = '//div[@class="content-body"]|//div[@class="detail-news"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("policy_neriscsrcarticle1_etl_callback: fulltext container not found")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99027'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CSRC"
    zt_provider = "csrccngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   行政法规库 (Administrative Regulations Database)
def policy_xzfgmojlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the administrative-regulation database
    (xzfg.moj.gov.cn): reads the fetched list HTML, fans out the remaining
    list pages on the first pass, and queues one article task per result row.

    Returns:
        DealModel: carries the page fan-out inserts (befor_dicts) and the
        per-article next-stage inserts (next_dicts).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count from the pager widget; fall back to the site's
        # known page count when the pager is missing.
        max_count = re.findall(r'pagecount">(\d+)<', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 61
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        if (turn_page == 7 and page_index < 1) or (turn_page == 8 and page_index < 2):
            # First pass only: schedule one list task per page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Drop bookkeeping columns before re-inserting the row template.
            for key in ("id", "update_time", "create_time", "null_dicts",
                        "err_msg", "other_dicts", "state", "failcount"):
                sql_dict.pop(key)

            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//td[@class="listLef"]')
        for li in li_list:
            result.befor_dicts.update.update({'page': total_page})
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('h3/a/@href').extract_first()
            base_url = f'http://xzfg.moj.gov.cn/search2.html?PageIndex=1'
            url = parse.urljoin(base_url, href)

            # The LawID query parameter is the stable raw id for the article.
            rawid = re.findall(r'LawID=(\d+)', url)[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99028'
            article_json["url"] = url
            article_json["title"] = li.xpath('h3/a/text()').extract_first()
            article_json["tizhu"] = li.xpath('div[@class="searContent"]/text()').extract_first()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_xzfgmojarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-download callback for xzfg.moj.gov.cn; parsing is deferred to
    the ETL step, so an empty deal model is returned as-is."""
    return DealModel()


def policy_xzfgmojarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for xzfg.moj.gov.cn article pages.

    The publish date is parsed from the list-page snippet ("tizhu"), which
    may spell the date with Chinese numerals (e.g. 二〇二一年十二月三十一日)
    or with Arabic digits; both forms are normalised to YYYYMMDD.  The full
    text is taken from the detail page's detailCon node.  Emits one row each
    for the policy_latest and policy_fulltext_latest tables.

    Raises:
        Exception: when no publish date or no full-text node can be found
            (marks the task as failed upstream).
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']

    res = Selector(text=html)
    fulltext_str = article_json['tizhu']
    # Chinese-numeral -> Arabic-digit mapping used to normalise dates.
    # Bug fix: 二十八/廿八 previously mapped to "18" instead of "28",
    # shifting any 28th-of-the-month date to the 18th.
    date_dict = {"O": "0", "〇": "0", "○": "0", "Ｏ": "0", "0": "0", "零": "0", "一": "1", "二": "2", "三": "3", "四": "4",
                 "五": "5", "六": "6", "七": "7", "八": "8", "九": "9", "十": "10", "十一": "11", "十二": "12", "十三": "13",
                 "十四": "14", "十五": "15", "十六": "16", "十七": "17", "十八": "18", "十九": "19", "一十": "10", "一十一": "11",
                 "一十二": "12", "一十三": "13", "一十四": "14", "一十五": "15", "一十六": "16", "一十七": "17", "一十八": "18", "一十九": "19",
                 "二十": "20", "二十一": "21", "二十二": "22", "二十三": "23", "二十四": "24", "二十五": "25", "二十六": "26", "二十七": "27",
                 "二十八": "28", "二十九": "29", "廿": "20", "廿一": "21", "廿二": "22", "廿三": "23", "廿四": "24", "廿五": "25",
                 "廿六": "26", "廿七": "27", "廿八": "28", "廿九": "29", "三十": "30", "三十一": "31", }
    # Prefer a date written with Chinese numerals; fall back to digits.
    pub_date_info = re.findall(r'[O〇○Ｏ0零一二三四五六七八九十廿]{4}年[O〇○Ｏ0零一二三四五六七八九十廿]{1,3}月[O〇○Ｏ0零一二三四五六七八九十廿]{1,3}[日,号]',
                               fulltext_str)
    if not pub_date_info:
        pub_date_info = re.findall(r'\d{4}年\d{1,2}月\d{1,2}[日,号]', fulltext_str)
    if not pub_date_info:
        raise Exception
    pub_date = pub_date_info[0]
    year_info = re.findall(r'(\d{4})年', pub_date)
    if year_info:
        year = year_info[0].rjust(4, '0')
    else:
        # Chinese-numeral years are written digit by digit (e.g. 二〇二一).
        year_info = re.findall(r'(.*)年', pub_date)
        year = ''.join([date_dict[x] for x in year_info[0]])
    month_info = re.findall(r'(\d+)月', pub_date)
    if month_info:
        month = month_info[0].rjust(2, '0')
    else:
        month_info = re.findall(r'年(.*)月', pub_date)
        month = date_dict[month_info[0]].rjust(2, '0')
    day_info = re.findall(r'(\d+)[日,号]', pub_date)
    if day_info:
        day = day_info[0].rjust(2, '0')
    else:
        day_info = re.findall(r'月(.*)[日,号]', pub_date)
        day = date_dict[day_info[0]].rjust(2, '0')
    pub_date = year + month + day
    pub_date = str(int(pub_date))
    pub_year = pub_date[:4]
    fulltext = res.xpath('//div[@class="detailCon"]').extract_first()
    if not fulltext:
        raise Exception

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99028'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'XZFGMOJ'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'xzfgmojcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record any attachment info on the source row's other_dicts column.
    file_info = get_file_info(data, res, '(//div[@class="detailCon"])')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result

# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++


#   北京市 (Beijing Municipality)
def policy_beijinglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Beijing policy search
    (www.beijing.gov.cn/zhengce/zcdh): parses the JS-assembled list markup,
    fans out the remaining list pages on the first pass, and queues one
    article task per list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count from the result header; fall back to a known total.
        max_count = re.findall(r'共检索到(\d+)页', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 673
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        if (turn_page == 7 and page_index < 1) or (turn_page == 8 and page_index < 2):
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Drop bookkeeping columns before re-inserting the row template.
            for key in ("id", "update_time", "create_time", "null_dicts",
                        "err_msg", "other_dicts", "state", "failcount"):
                sql_dict.pop(key)

            # NOTE(review): range(1, total_page) skips the last page; sibling
            # callbacks use total_page + 1 — confirm this is intentional.
            for page in range(1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        # The list markup is built in JavaScript as html='...' fragments;
        # join the string pieces back into parseable HTML first.
        text = re.findall(r'var html="";.*?\$\("#listBox"\).html\(html\);', para_dicts["data"]["1_1"]['html'], re.S)[0]
        li_list = re.findall(r"='(.*?)';", text)
        li_info = ''.join(li_list)
        res = Selector(text=li_info)
        li_list = res.xpath('//ul[@class="list"]/li')
        for li in li_list:
            result.befor_dicts.update.update({'page': total_page})
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.beijing.gov.cn/zhengce/zcdh'
            url = parse.urljoin(base_url, href)

            # rawid is the trailing "_<id>" part of the .htm filename.
            rawid = re.findall(r'_(.*?)\.htm', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99029'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_beijinglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for the Beijing policy-search JSON API
    (www.beijing.gov.cn/so/zcdh): on the first page, fans out one task per
    page; for every page, queues one article task per doc entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        # tcount = html_json['results']['data']['total']
        total_page = html_json['totlePage']  # sic: the API misspells the key
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # list_json kept for the commented per-page page_info variant below.
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # dic = {"page_info": list_json['page_info'].replace(':0', f':{page}')}
                # sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = html_json['docList']
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li['url']
            base_url = f'http://www.beijing.gov.cn/so/zcdh/zcjd'
            url = parse.urljoin(base_url, href)
            # Skip anything that is not a .htm article page.
            if 'htm' not in url:
                continue
            rawid = re.findall(r'_(.*?)\.htm', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99029'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li['docDate']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_beijinglist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for plain Beijing HTML list pages (www.beijing.gov.cn):
    on page 0, fans out one task per remaining page (page_info gets a
    "_<n>" suffix); queues one article task per list row.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count from the JS pager initialiser; default to a single page.
        max_count = re.findall(r'Pager\(\{size:(\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # NOTE(review): range upper bound is total_page (exclusive) — the
            # last page appears to be skipped; confirm this is intentional.
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if 'xxgkzcjd' in callmodel.sql_model.list_rawid:
        li_list = res.xpath('//ul[@class="list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                continue
            base_url = f'http://www.beijing.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            # Only "_<id>.htm" article URLs carry a usable rawid.
            if 'htm' not in url:
                continue
            if '_' not in url.split('/')[-1]:
                continue
            rawid = re.findall(r'_(.*?)\.htm', url.split('/')[-1])[0]

            temp["rawid"] = rawid
            temp["sub_db_id"] = '99029'
            article_json["url"] = url
            article_json["title"] = ''.join(li.xpath('a/text()').extract()).strip()
            article_json["pub_date"] = ''.join(li.xpath('span/text()').extract()).strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_beijingarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-download callback for Beijing pages; all parsing happens in
    the ETL step, so an empty deal model is returned unchanged."""
    return DealModel()


def policy_beijingarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for www.beijing.gov.cn article pages.

    Takes the title from the ArticleTitle meta tag (falling back to the list
    title), tries several sources for the publish date in order (发布日期,
    成文日期, a generic 日期 span, then the date captured on the list page),
    extracts the remaining doc-info fields, and emits one policy_latest row
    plus one policy_fulltext_latest row.

    Raises:
        Exception: when no usable publish date or full-text node is found.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    # pub_date = clean_pubdate(article_json.get('pub_date', ''))
    # pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the page's own meta title; fall back to the list-page title.
    title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_date_info = ''.join(res.xpath('//ol[@class="doc-info clearfix"]//li[contains(text(),"[发布日期]")]/span//text()').extract()).strip()
    pub_date = clean_pubdate(pub_date_info)
    pub_year = pub_date[:4]
    if not pub_date:
        pub_date_info = ''.join(res.xpath('//ol[@class="doc-info clearfix"]//li[contains(text(),"[成文日期]")]/span//text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    if not pub_date:
        pub_date_info = ''.join(res.xpath('//span[contains(text(), "日期：")]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    if not pub_date:
        # Last resort: the date captured on the list page.
        pub_date = clean_pubdate(article_json.get('pub_date', ''))
        pub_year = pub_date[:4]
        if pub_date == '00000000':
            raise Exception
    pub_no = ''.join(res.xpath('//ol[@class="doc-info clearfix"]//li[contains(text(),"[发文字号]")]/span//text()').extract()).strip()
    pub_no = pub_no.replace('-', '')
    # "〔〕号" is the site's empty-placeholder document number.
    if pub_no == '〔〕号':
        pub_no = ''
    # NOTE(review): this selector targets class "info_table", unlike the
    # doc-info selectors used for the other fields — confirm it matches
    # this page template.
    index_no = ''.join(res.xpath('//div[@class="info_table"]//div[contains(text(),"索") and contains(text(),"号")]/following::div[1]/text()').extract()).strip()
    index_no = index_no.replace('-', '')
    subject = ''.join(res.xpath('//ol[@class="doc-info clearfix"]//li[contains(text(),"[主题分类]")]/span//text()').extract()).strip()
    subject = subject.replace('-', '')
    written_date = ''.join(res.xpath('//ol[@class="doc-info clearfix"]//li[contains(text(),"[成文日期]")]/span//text()').extract()).strip()
    written_date = written_date.replace('-', '')
    impl_date = ''.join(res.xpath('//ol[@class="doc-info clearfix"]//li[contains(text(),"[实施日期]")]/span//text()').extract()).strip()
    impl_date = impl_date.replace('-', '')
    invalid_date = ''.join(res.xpath('//ol[@class="doc-info clearfix"]//li[contains(text(),"[废止日期]")]/span//text()').extract()).strip()
    invalid_date = invalid_date.replace('-', '')
    legal_status = ''.join(res.xpath('//ol[@class="doc-info clearfix"]//li[contains(text(),"[有效性]")]/span//text()').extract()).strip()
    legal_status = legal_status.replace('-', '')
    # Anything other than "是" — including a missing field — is stored as 无效.
    if legal_status == "是":
        legal_status = '有效'
    else:
        legal_status = '无效'
    organ = ''.join(res.xpath('//ol[@class="doc-info clearfix"]//li[contains(text(),"[发文机构]")]/span//text()').extract()).strip()
    organ = organ.replace('-', '')

    fulltext_xpath = '//div[@id="mainText"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99029'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "BEIJING"
    zt_provider = "beijingcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['impl_date'] = clean_pubdate(impl_date)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    # data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record any attachment info on the source row's other_dicts column.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   天津市 (Tianjin Municipality)
def policy_tianjinlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for the Tianjin policy JSON API (www.tj.gov.cn): reads
    the paged JSON result, fans out the remaining list pages on the first
    pass (capped at 1000 pages), and queues one article task per item.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        data = para_dicts["data"]["1_1"]
        max_count = data['page']['totalPages']
        # The API page count is capped at 1000.
        total_page = max_count if max_count <= 1000 else 1000
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        if (turn_page == 7 and page_index < 1) or (turn_page == 8 and page_index < 2):
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Drop bookkeeping columns before re-inserting the row template.
            for key in ("id", "update_time", "create_time", "null_dicts",
                        "err_msg", "other_dicts", "state", "failcount"):
                sql_dict.pop(key)

            for page in range(page_index, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        items = data['page']['content']
        for item in items:
            result.befor_dicts.update.update({'page': total_page})
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = item['DOCPUBURL']
            # rawid is the trailing "_<id>" part of the .htm filename.
            rawid = re.findall(r'_(.*?)\.htm', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99030'
            article_json["url"] = url
            article_json["title"] = item.get('BT', '')
            article_json["FBRQ"] = item.get('FBRQ', '')
            article_json["FWJG_name"] = item.get('FWJG_name', '')
            article_json["FWZH"] = item.get('FWZH', '')
            article_json["IDXID"] = item.get('IDXID', '')
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_tianjinlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for Tianjin HTML list pages (www.tj.gov.cn): on page 0,
    fans out one task per remaining page; queues one article task per row.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count from the JS pager initialiser; default to a single page.
        max_count = re.findall(r"countPage: parseInt\('(\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if 'xxgkzcjd' in callmodel.sql_model.list_rawid:
        li_list = res.xpath('//div[@class="list-circle-red"]/div|//ul[@class="xl-r2-list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            if 'sy/tzgg' in callmodel.sql_model.list_rawid:
                # Notice-board rows open via an onclick('...') handler
                # instead of a plain href.
                href_info = li.xpath('span[@class="list-item-con"]/a/@onclick').extract_first()
                href = re.findall(r"\('(.*?)'\)", href_info)[0]
            else:
                href = li.xpath('span[@class="list-item-con"]/a/@href|a/@href').extract_first()
            if not href:
                continue
            base_url = f'https://www.tj.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            rawid = re.findall(r'_(.*?)\.htm', url.split('/')[-1])[0]

            temp["rawid"] = rawid
            temp["sub_db_id"] = '99030'
            article_json["url"] = url
            # NOTE(review): extract_first() can return None and raise
            # AttributeError on .strip() — confirm rows always carry these.
            article_json["title"] = li.xpath('span[@class="list-item-con"]/a/@title|a/@title').extract_first().strip()
            article_json["pub_date"] = li.xpath('span[@class="list-item-date"]/text()|.//span[@class="xl-r2li-s3"]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_tianjinarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-download callback for Tianjin pages; parsing is deferred to
    the ETL step, so an empty deal model is returned as-is."""
    return DealModel()


def policy_tianjinarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for www.tj.gov.cn article pages.

    The publish date comes from the data captured on the list page
    (pub_date, then FBRQ); the remaining metadata is read from the
    xl-zw-top info table by matching characters in the label cells.
    Emits one policy_latest row and one policy_fulltext_latest row.

    Raises:
        Exception: when the full-text node cannot be found.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json.get('pub_date', ''))
    pub_year = pub_date[:4]
    if not pub_date:
        # FBRQ is the publish-date field captured from the JSON list API.
        pub_date = clean_pubdate(article_json.get('FBRQ', ''))
        pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the info-table title, then the plain page title, then the
    # title captured on the list page.
    title = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"名") and contains(text(),"称")]/following::div[1]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="qt-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"号")]/following::div[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"索") and contains(text(),"号")]/following::div[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"主") and contains(text(),"题")]/following::div[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"成") and contains(text(),"期")]/following::div[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"有") and contains(text(),"性")]/following::div[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"构")]/following::div[1]/text()').extract()).strip()

    fulltext_xpath = '//div[@id="xlrllt"]|//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99030'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "TJ"
    zt_provider = "tjcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    # data['impl_date'] = clean_pubdate(impl_date)
    # data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    # data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record any attachment info on the source row's other_dicts column.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   上海市 (Shanghai Municipality)
def policy_shanghailist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for Shanghai gov HTML list pages.

    Determines the total page count from the first page's HTML, fans out
    tasks for the remaining list pages when this is page 1, and queues one
    article-stage task per list entry.

    Returns a DealModel whose befor_dicts carries the extra page tasks and
    whose next_dicts carries the article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count: either an explicit totalPage value, or a record count at
        # 10 records per page. Raw strings fix the invalid "\d" escape
        # sequences that warn on Python 3.12+.
        max_count = re.findall(r'totalPage: (\d+)', para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall(r'count="(\d+)', para_dicts["data"]["1_1"]['html'])
            max_count = int(max_count[0]) if max_count else 1
            total_page = math.ceil(max_count/10)
        else:
            max_count = int(max_count[0]) if max_count else 1
            total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: schedule tasks for pages 2..total_page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Two pagination schemes: query-string paging for the
                # XingZhengWenDangKuJyh ASPX site, index_N.html otherwise.
                if 'XingZhengWenDangKuJyh' in callmodel.sql_model.list_rawid:
                    dic = {"page_info": f"currentPage={page}","url_part": list_json['url_part']}
                else:
                    dic = {"page_info": f"index_{page}.html","url_part": list_json['url_part']}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[contains(@class,"list-date")]/li|//table[@class="table table-list"]/tbody/tr')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('td[1]/a/@href|a/@href').extract_first()
            if not href:
                continue
            if 'XingZhengWenDangKuJyh' in callmodel.sql_model.list_rawid:
                # ASPX site: rawid is the docid query parameter.
                base_url = f'http://service.shanghai.gov.cn/XingZhengWenDangKuJyh/XZGFList.aspx'
                url = parse.urljoin(base_url, href)
                rawid = url.split('docid=')[-1]
            else:
                # Static site: rawid is the filename stem of the .htm(l) page.
                base_url = f'https://www.shanghai.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.htm', url.split('/')[-1])[0]

            temp["rawid"] = rawid
            temp["sub_db_id"] = '99031'
            article_json["url"] = url
            # NOTE(review): extract_first() returns None when @title is absent,
            # which would raise AttributeError here — confirm every list layout
            # carries the attribute.
            article_json["title"] = li.xpath('td[1]/a/@title|a/@title').extract_first().strip()
            article_json["pub_date"] = ''.join(li.xpath('a/span/text()|span/text()|td[3]/text()').extract()).strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_shanghailist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for the Shanghai gov JSON search API (gwk/search).

    Parses the JSON payload in para_dicts["data"]["1_1"]["html"], schedules
    the remaining list pages on the first pass, and queues one article task
    per entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        tcount = html_json['page']['count']
        # The API serves 20 entries per page.
        total_page = math.ceil(tcount/20)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # Range starts at 1, so the current page is re-inserted;
            # insert_ig_it presumably deduplicates (INSERT IGNORE) — verify.
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = html_json['page']['list']
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li['id']
            url = f'https://www.shanghai.gov.cn/gwk/search/content/{href}'
            # BUG FIX: the previous `if 'htm' not in url: continue` guard
            # (copied from the HTML-list variant) skipped EVERY entry, because
            # these API detail URLs never contain "htm".
            rawid = href
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99031'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li['display_date']
            # NOTE(review): 'docDate' fills both the year and the number slot of
            # the 机关〔年〕号 pattern — the second slot looks like it should be a
            # document-number field; confirm against the API schema.
            article_json["pub_no"] = f"{li['document_agency']}〔{li['docDate']}〕{li['docDate']}号"
            article_json["organ"] = li.get('draft_unit', '')
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_shanghaiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Shanghai; intentionally a no-op.

    The detail page is parsed later by policy_shanghaiarticle_etl_callback.
    """
    return DealModel()


def policy_shanghaiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Shanghai article pages.

    Parses the fetched detail HTML, merges in the metadata captured at list
    time (callmodel.sql_model.article_json), and emits rows for the
    policy_latest / policy_fulltext_latest tables plus an attachment-info
    update for the source record.

    Raises a bare Exception when no full-text container is found, so the
    task can be retried/flagged upstream.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    # NOTE(review): this initial title is dead — it is unconditionally
    # recomputed from the page below (with article_json as last fallback).
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: prefer the on-page heading; use the meta tag when the heading
    # contains a full-width colon (label-like), then fall back further.
    title = ''.join(res.xpath('//div[@id="ivs_title"]//text()|//div[@class="insMainConTitle_b"]//text()').extract()).strip()
    if '：' in title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@id="ivs_title"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Document number / organ: list-time values win; otherwise scrape the
    # metadata box variants used by the different page layouts.
    pub_no = article_json.get('pub_no',  '')
    organ = article_json.get('organ',  '')
    if not pub_no:
        pub_no = ''.join(res.xpath('//div[@class="border-red"]//dt[contains(text(),"文件编号")]/following::dd[1]/text()').extract()).strip()
    if not pub_no:
        pub_no = ''.join(res.xpath('//small[@class="PBtime"]/p/label[3]/text()').extract()).strip()
    impl_date = ''.join(res.xpath('//div[@class="border-red"]//dt[contains(text(),"施行日期")]/following::dd[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="border-red"]//dt[contains(text(),"文件状态")]/following::dd[1]/text()').extract()).strip()

    # Article body container; its absence aborts the task.
    fulltext_xpath = '//div[@id="ivs_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99031'  # Shanghai sub-database id (matches the list callbacks)
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "SHANGHAI"
    zt_provider = "shanghaicngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    # data['index_no'] = index_no
    # data['written_date'] = clean_pubdate(written_date)
    data['impl_date'] = clean_pubdate(impl_date)
    # data['invalid_date'] = clean_pubdate(invalid_date)
    # data['subject'] = subject
    # data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Collect attachment links from the body plus two auxiliary containers,
    # and store them on the source row as JSON ("{}" when none).
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, f'(//div[contains(@class, "gaoj-list")])')
    file_info3 = get_file_info(data, res, f'(//div[@class="pdf-box"])')
    file_info = file_info1 + file_info2 + file_info3
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Chongqing Municipality
def policy_chongqinglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for the Chongqing gov policy JSON API.

    Reads the pager info from para_dicts["data"]["1_1"], schedules the
    remaining list pages (subject to the task's turn_page rules), and queues
    one article task per entry. Entries whose URL contains 'policyId=' are
    routed to the 'chongqingarticle1' (JSON detail) pipeline.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        data = para_dicts["data"]["1_1"]
        max_count = data['data']['pager']['pageCount']
        total_page = max_count
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        if (turn_page == 7 and page_index < 1) or (turn_page == 8 and page_index < 2):
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # NOTE(review): sibling callbacks strip these keys via
            # deal_sql_dict(); reuse it here if it drops the same set.
            sql_dict.pop("id")
            sql_dict.pop("update_time")
            sql_dict.pop("create_time")
            sql_dict.pop("null_dicts")
            sql_dict.pop("err_msg")
            sql_dict.pop("other_dicts")
            sql_dict.pop("state")
            sql_dict.pop("failcount")

            for page in range(page_index, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        items = data['data']['list']
        if items:
            # Hoisted out of the loop: the value is loop-invariant, and the
            # original re-applied the same dict update once per item. The
            # guard preserves the old behavior of not touching it when the
            # list is empty.
            result.befor_dicts.update.update({'page': total_page})
        for item in items:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = item.get('doc_pub_url', '')
            if not url:
                continue
            if 'policyId=' in url:
                rawid = re.findall('policyId=(.*)', url)[0]
                temp["task_tag"] = 'chongqingarticle1'
            else:
                # Raw string fixes the invalid "\." escape warning on 3.12+.
                rawid = re.findall(r'_(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99032'
            article_json["url"] = url
            article_json["title"] = item.get('f_202121500898', '')
            article_json["pub_date"] = item.get('f_202121607647', '')
            article_json["wh"] = item.get('f_202121837479', '')
            article_json["org"] = item.get('f_202121437464', '')
            article_json["suoyin"] = item.get('f_202121273539', '')
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_chongqinglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for Chongqing gov HTML list pages (cq.gov.cn).

    Extracts the page count from the createPage(...) pager script, fans out
    the remaining list pages on the first pass (0-based: pages _1.._{n-1}),
    and queues one article task per list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw strings fix the invalid "\(" / "\d" escape warnings on 3.12+.
        max_count = re.findall(r"createPage\((\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # Page 0 is the index itself, so only pages 1..total_page-1 exist.
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="common-list"]/ul/li|//div[@class="content"]/ul/li|//div[@class="item-con"]/ul/a|//tr[contains(@class,"zcwjk-list-c")]')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('td[2]/a/@href|a/@href|@href').extract_first()
            base_url = f'https://www.cq.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            rawid = re.findall(r'_(.*?)\.htm', url.split('/')[-1])[0]

            temp["rawid"] = rawid
            temp["sub_db_id"] = '99032'
            article_json["url"] = url
            # NOTE(review): extract_first() returns None when no title node
            # matches, which would raise AttributeError on .strip() — confirm
            # every list layout is covered by the union XPath.
            article_json["title"] = li.xpath('ul/li[@class="file-title"]/text()|td[2]/a/p/text()|a/text()').extract_first().strip()
            if 'xzgfxwj' in callmodel.sql_model.list_rawid:
                # The xzgfxwj layout carries "label：value" info spans.
                pub_date_info = ''.join(li.xpath('.//p[@class="info"]/span[2]/text()').extract()).strip()
                article_json["pub_date"] = pub_date_info.split('：')[-1].strip()
                wh_info = ''.join(li.xpath('.//p[@class="info"]/span[1]/text()').extract()).strip()
                article_json["wh"] = wh_info.split('：')[-1].strip()
            else:
                article_json["pub_date"] = li.xpath('ul/li[@class="pub-time"]/text()|span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_chongqingarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Chongqing; intentionally a no-op.

    The detail page is parsed later by policy_chongqingarticle_etl_callback.
    """
    return DealModel()


def policy_chongqingarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Chongqing article HTML pages.

    Parses the fetched detail HTML, merges the list-time metadata
    (callmodel.sql_model.article_json), and emits policy_latest /
    policy_fulltext_latest rows plus an attachment-info update.

    Raises a bare Exception when the publication date or the full-text
    container cannot be found, so the task can be retried/flagged upstream.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    # NOTE(review): this initial title is dead — unconditionally recomputed below.
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json.get('pub_date', ''))
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: metadata-box layout first, then the plain-article layout,
    # finally the list-time title.
    title = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"名") and contains(text(),"称")]/following::div[1]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="qt-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Publication date is mandatory: fall back to the on-page metadata table,
    # then abort if still missing.
    if not pub_date:
        pub_date_info = ''.join(res.xpath('//table[@class="table"]//td[contains(text(),"发布日期")]/following::td[1]//text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    if not pub_date:
        raise Exception
    pub_no = article_json.get('wh', '')
    if not pub_no:
        pub_no = ''.join(res.xpath('//table[@class="table"]//span[contains(text(),"发文字号")]/following::td[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="table"]//span[contains(text(),"索引号")]/following::td[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="table"]//span[contains(text(),"主题分类")]/following::td[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="table"]//span[contains(text(),"成文日期")]/following::td[1]//text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="table"]//span[contains(text(),"有 效 性")]/following::td[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="table"]//span[contains(text(),"发布机构")]/following::td[1]//text()').extract()).strip()
    # Organ names like "市…" get the city name prefixed.
    if organ.startswith('市'):
        organ = '重庆' + organ

    # Known body containers across the site's page layouts.
    fulltext_xpath = '//div[@class="document mt-1 mt-12"]|//div[@class="zcwjk-xlcon"]|//div[@class="c-txt left"]|//div[@class="Section1"]|//div[@class="lf-box-content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99032'  # Chongqing sub-database id (matches the list callbacks)
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CQ"
    zt_provider = "cqcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    # data['impl_date'] = clean_pubdate(impl_date)
    # data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    # data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment links within the body; stored as JSON on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_chongqingarticle1_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Chongqing JSON-detail pages; intentionally a no-op.

    The payload is parsed later by policy_chongqingarticle1_etl_callback.
    """
    return DealModel()


def policy_chongqingarticle1_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Chongqing articles served as JSON (policyId= detail API).

    Entries are routed here by policy_chongqinglist_callback when the URL
    contains 'policyId='. The payload's data.DETAIL object carries both the
    HTML body (ZCYW) and the metadata fields; rows are emitted for the
    policy_latest / policy_fulltext_latest tables.

    Raises a bare Exception when the publication date is missing, so the
    task can be retried/flagged upstream.
    """
    result = EtlDealModel()
    save_data = list()

    html_json = json.loads(callmodel.para_dicts['data']['1_1']['html'])
    # ZCYW holds the article body HTML.
    html = html_json['data']['DETAIL']['ZCYW']
    article_json = json.loads(callmodel.sql_model.article_json)
    # NOTE(review): dead assignment — title is recomputed from WJMC below.
    title = article_json['title']
    provider_url = article_json['url']
    # pub_date = article_json['pub_date']
    # pub_year = pub_date[:4]
    res = Selector(text=html)
    title = html_json['data']['DETAIL']['WJMC']
    if not title:
        title = article_json['title'].strip()
    pub_date_info = html_json['data']['DETAIL']['FWSJZD']
    pub_date = clean_pubdate(pub_date_info)
    pub_year = pub_date[:4]
    if not pub_date:
        raise Exception
    pub_no = html_json['data']['DETAIL']['FWZH']
    subject = html_json['data']['DETAIL']['ZCFL']
    # NOTE(review): FWZH is the same key used for pub_no above; a date field
    # was probably intended here. Latent only — written_date is never stored
    # (the assignment into data below is commented out). Confirm the correct
    # key before enabling it.
    written_date = html_json['data']['DETAIL']['FWZH']
    organ = html_json['data']['DETAIL']['FWDW']
    # Organ names like "市…" get the city name prefixed.
    if organ.startswith('市'):
        organ = '重庆' + organ

    fulltext = html

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99032'  # Chongqing sub-database id (matches the list callbacks)
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CQ"
    zt_provider = "cqcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    # pub_date was already cleaned above; the second clean_pubdate is harmless.
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    # data['index_no'] = index_no
    # data['written_date'] = clean_pubdate(written_date)
    # data['impl_date'] = clean_pubdate(impl_date)
    # data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    # data['subject_word'] = subject_word
    # data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment links anywhere in the body fragment; stored as JSON.
    file_info = get_file_info(data, res, f'(//body)')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Hebei Province
def policy_hebeilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for Hebei gov list pages (info.hebei.gov.cn).

    Reads the total page count from the totalpage attribute, fans out the
    remaining list pages when this is page 1, and queues one article task per
    entry. Two list layouts exist: the tabular layout for list ids 6998412 /
    6940138, and the generic layouts handled by the union XPaths.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string fixes the invalid "\d" escape warning on Python 3.12+.
        max_count = re.findall(r'totalpage="(\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: schedule tasks for pages 2..total_page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Query-string paging for moduleId lists, indexN.html otherwise.
                if 'moduleId' in callmodel.sql_model.list_rawid:
                    dic = {"page_info": f"currentPage={page}"}
                else:
                    dic = {"page_info": f"index{page}.html"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        if '6998412' in callmodel.sql_model.list_rawid or '6940138' in callmodel.sql_model.list_rawid:
            li_list = res.xpath('//table[@class="xxgkzclbtab3"]/tbody/tr')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[2]/a/@href').extract_first()
                base_url = f'http://info.hebei.gov.cn/{callmodel.sql_model.list_rawid}'
                url = parse.urljoin(base_url, href)
                # Detail URLs end in .../<rawid>/<file>; take the directory name.
                rawid = url.split('/')[-2]

                temp["rawid"] = rawid
                temp["sub_db_id"] = '99033'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[2]/a/text()|td[2]/a/font/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('td[4]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            li_list = res.xpath('//ul[@class="xxgk_gfxwjk-list-mode"]/li|//table[@class="zfxxgkzclbtab3"]/tbody/tr|//ul[@class="xxgk-xxbox"]/li|//ul[@class="zt_wjjd_ler"]/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('p[2]/a/@href|td[1]/a/@href|span[@class="xxgk-line-comtit fl"]/a/@href|span[@class="zt_wjjd_title"]/a/@href').extract_first()
                base_url = f'http://info.hebei.gov.cn/{callmodel.sql_model.list_rawid}'
                url = parse.urljoin(base_url, href)
                if 'pageId=' in url:
                    rawid = re.findall(r'pageId=(.*?)&', url)[0]
                else:
                    rawid = url.split('/')[-2]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99033'
                article_json["url"] = url
                # NOTE(review): extract_first() returns None when no variant
                # matches, which would raise AttributeError on .strip().
                article_json["title"] = li.xpath('p[2]/a/text()|td[1]/a/text()|td[1]/a/font/text()|span[@class="xxgk-line-comtit fl"]/a/text()|span[@class="zt_wjjd_title"]/a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('p[4]/text()|td[3]/text()|span[@class="xxgk-line-comdate fr"]/text()|span[@class="zt_wjjd_date"]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_hebeiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Hebei; intentionally a no-op.

    The detail page is parsed later by policy_hebeiarticle_etl_callback.
    """
    return DealModel()


def policy_hebeiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Hebei article pages.

    Parses the fetched detail HTML, merges the list-time metadata
    (callmodel.sql_model.article_json), and emits policy_latest /
    policy_fulltext_latest rows plus an attachment-info update. Two metadata
    layouts are supported, distinguished by the presence of the
    'xxgk_gfxwjk-xqy-touxx' marker in the raw HTML.

    Raises a bare Exception when no full-text container is found, so the
    task can be retried/flagged upstream.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    # NOTE(review): this initial title is dead — unconditionally recomputed below.
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json.get('pub_date', ''))
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title from either page heading variant; fall back to the list-time title.
    title = ''.join(res.xpath('//h2[@class="xxgk_gfxwjk_xqy-title"]//text()|//h2[@class="cont_title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    if 'xxgk_gfxwjk-xqy-touxx' in html:
        # Newer layout: labelled <span> fields inside the touxx metadata box.
        pub_no = ''.join(res.xpath('//div[@class="xxgk_gfxwjk-xqy-touxx"]//span[contains(text(),"发文字号")]/parent::p[1]/text()').extract()).strip()
        # index_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"索") and contains(text(),"号")]/following::div[1]/text()').extract()).strip()
        subject = ''.join(res.xpath('//div[@class="xxgk_gfxwjk-xqy-touxx"]//span[contains(text(),"主题分类")]/parent::p[1]/text()').extract()).strip()
        written_date = ''.join(res.xpath('//div[@class="xxgk_gfxwjk-xqy-touxx"]//span[contains(text(),"成文日期")]/parent::p[1]/text()').extract()).strip()
        legal_status = ''.join(res.xpath('//div[@class="xxgk_gfxwjk-xqy-touxx"]//span[contains(text(),"效力状态")]/parent::p[1]/text()').extract()).strip()
        organ = ''.join(res.xpath('//div[@class="xxgk_gfxwjk-xqy-touxx"]//span[contains(text(),"发布机构")]/parent::p[1]/text()').extract()).strip()
    else:
        # Older layout: <strong> labels inside the xxgk_bmxl table; it has no
        # written-date field.
        pub_no = ''.join(res.xpath('//div[@class="xxgk_bmxl"]//strong[contains(text(),"发文字号：")]/following::td[1]/text()').extract()).strip()
        # index_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"索") and contains(text(),"号")]/following::div[1]/text()').extract()).strip()
        subject = ''.join(res.xpath('//div[@class="xxgk_bmxl"]//strong[contains(text(),"主题分类：")]/following::td[1]/text()').extract()).strip()
        written_date = ''
        legal_status = ''.join(res.xpath('//div[@class="xxgk_bmxl"]//strong[contains(text(),"有 效 性：")]/following::td[1]/text()').extract()).strip()
        organ = ''.join(res.xpath('//div[@class="xxgk_bmxl"]//strong[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    # Organ names like "省…" get the province name prefixed.
    if organ.startswith('省'):
        organ = '河北' + organ

    # Known body containers across the site's page layouts.
    fulltext_xpath = '//div[@id="zoom"]|//div[@class="xxgk_gfxwjk-xqy-neir"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99033'  # Hebei sub-database id (matches the list callback)
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "HEBEI"
    zt_provider = "hebeicngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    # data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    # data['impl_date'] = clean_pubdate(impl_date)
    # data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    # data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment links within the body; stored as JSON on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Liaoning Province
def policy_liaoninglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Liaoning (www.ln.gov.cn) listing pages.

    On page 0 the pager total is read from ``createPageHTML(n`` and one list
    task is scheduled per remaining page (pages on this site are 0-based, so
    the current response already covers page 0).  Every row/li in the listing
    is turned into an article-stage task whose ``article_json`` carries the
    url, title, publish date and, where the section exposes one, the document
    number ``wh``.

    Raises:
        Exception: when no listing rows match any known layout (site change).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'createPageHTML\((\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: fan out list tasks for pages 1..total_page-1
            # (duplicates are ignored by the insert statement).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="dlist_rul"]/li|//table[@class="dataList"]/tr|//table[@class="dataList1"]/tr')
        if not li_list:
            raise Exception("policy_liaoninglist_callback: no list rows matched, page layout may have changed")
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            # Column layout differs between sections: provincial documents
            # (swwj) link from the first cell, the others from the second.
            if 'zwgkx/zfwj/swwj' in callmodel.sql_model.list_rawid:
                href = li.xpath('td[1]/a/@href').extract_first()
            else:
                href = li.xpath('td[2]/a/@href|a/@href').extract_first()
            if not href or href == '#':
                continue
            base_url = f'http://www.ln.gov.cn/{callmodel.sql_model.list_rawid}/index.shtml'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            rawid = re.findall(r'_(.*?)\.', url.split('/')[-1])[0]

            temp["rawid"] = rawid
            temp["sub_db_id"] = '99035'
            article_json["url"] = url
            if 'zwgkx/zfwj/swwj' in callmodel.sql_model.list_rawid:
                article_json["title"] = li.xpath('td[1]/a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('td[2]/text()').extract_first().strip()
            elif 'zwgkx/zfwj/bmwj' in callmodel.sql_model.list_rawid:
                article_json["title"] = li.xpath('td[2]/a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('td[4]/text()').extract_first().strip()
                article_json["wh"] = ''.join(li.xpath('td[3]/text()').extract()).strip()
            else:
                article_json["title"] = li.xpath('td[2]/a/text()|a/text()').extract_first().strip()
                article_json["pub_date"] = ''.join(li.xpath('td[5]/text()|span/text()').extract()).strip()
                article_json["wh"] = ''.join(li.xpath('td[3]/text()').extract()).strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_liaoninglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for the newer Liaoning (www.ln.gov.cn) listing layout.

    Reads the page count from the ``totalpage="n"`` attribute; on page 1 one
    list task is scheduled per remaining page (list_json is reused verbatim),
    then every listing entry becomes an article-stage task carrying the url,
    title, publish date and, where the section exposes one, the document
    number ``wh``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'totalpage="(\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: fan out list tasks for pages 2..total_page
            # (duplicates are ignored by the insert statement).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[contains(@class,"tabconl")]/div/ul|//div[@class="modcontent"]/ul/li|//ul[@id="kr-body-table"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('li[1]/a/@href|span[2]/a/@href|a/@href').extract_first()
            if not href or href == '#':
                continue
            base_url = f'http://www.ln.gov.cn/{callmodel.sql_model.list_rawid}/index.shtml'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # Article pages live in per-document directories; the directory
            # name is the stable raw id.
            rawid = url.split('/')[-2]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99035'
            article_json["url"] = url
            # Column layout differs per section.
            if 'zfwj/swwj' == callmodel.sql_model.list_rawid:
                article_json["title"] = li.xpath('li[1]/a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('li[2]/a/text()').extract_first().strip()
            elif 'zfwj/bmwj' in callmodel.sql_model.list_rawid:
                article_json["title"] = li.xpath('li[1]/a/text()').extract_first().strip()
                article_json["pub_date"] = ''.join(li.xpath('li[3]//text()').extract()).strip()
                article_json["wh"] = ''.join(li.xpath('li[2]//text()').extract()).strip()
            elif 'zfxxgk1' in callmodel.sql_model.list_rawid:
                article_json["title"] = li.xpath('span[2]/a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('span[4]/text()').extract_first().strip()
            else:
                article_json["title"] = li.xpath('li[1]/a/text()|a/text()').extract_first().strip()
                article_json["pub_date"] = ''.join(li.xpath('li[4]/text()|span/text()').extract()).strip()
                article_json["wh"] = ''.join(li.xpath('li[2]//text()').extract()).strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_liaoningarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Liaoning: the fetched page needs no extra
    processing at this stage, so an empty DealModel is handed back."""
    return DealModel()


def policy_liaoningarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Liaoning policy article pages.

    Extracts metadata (title, document number, organ, publish date, legal
    status) and the full text from the article HTML, builds the
    ``policy_latest`` and ``policy_fulltext_latest`` rows, and records
    attachment info back onto the task row via ``other_dicts``.

    Raises:
        Exception: when none of the known full-text containers are present.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title'].strip()
    provider_url = article_json['url']
    res = Selector(text=html)

    pub_no = ''.join(res.xpath('//span[contains(text(),"发文字号：")]/parent::p/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//span[contains(text(),"效力状态：")]/parent::p/text()').extract()).strip()
    organ = ''.join(res.xpath('//span[contains(text(),"发布机构：")]/parent::p/text()').extract()).strip()
    pub_date = ''.join(res.xpath('//span[contains(text(),"发文日期：")]/parent::p/text()').extract()).strip()
    pub_year = pub_date[:4]
    # Fall back to the values captured at list stage when the article page
    # does not expose its own metadata block.
    if not pub_no:
        pub_no = article_json.get('wh', '')
    if not pub_date:
        pub_date = clean_pubdate(article_json.get('pub_date', ''))
        pub_year = pub_date[:4]
    # List-page titles are truncated with "..."; recover the full title from
    # the page's meta tag.
    if "..." in title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()

    fulltext_xpath = '//div[@id="Zoom"]|//div[@class="TRS_Editor"]|//div[@class="zfwj_detail"]/div[@class="content"]|//div[@class="zfwj_detail"]/div[@class="content1"]|//div[@class="dlist_con"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("policy_liaoningarticle_etl_callback: full text container not found")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99035'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "LN"
    zt_provider = "lncngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # debug trace of the generated record id

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment info (if any) is stored on the task row so the downloader
    # can fetch the files later.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   吉林省
def policy_jilinlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Jilin government policy listings.

    Handles three source flavours, selected by ``list_rawid``:

    * ``govsearch``    -- JSONP search endpoint (16 records per page),
    * ``response_szf`` -- JSON endpoint (15 records per page),
    * anything else    -- plain HTML listing pages.

    On page 0 the remaining list pages are scheduled with the flavour's own
    paging parameter encoded into ``list_json``; every record/row becomes an
    article-stage task carrying url, title and publish date.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Determine the total page count; each flavour reports it differently.
        if 'govsearch' in callmodel.sql_model.list_rawid:
            max_count = re.findall('"num":(\d+)', para_dicts["data"]["1_1"]['html'])
            max_count = int(max_count[0]) if max_count else 1
            total_page = math.ceil(max_count / 16)  # 16 records per page
        elif 'response_szf' in callmodel.sql_model.list_rawid:
            max_count = re.findall('"totalCount":(\d+)', para_dicts["data"]["1_1"]['html'])
            max_count = int(max_count[0]) if max_count else 1
            total_page = math.ceil(max_count / 15)  # 15 records per page
        else:
            # HTML pages: prefer the explicit countPage var, fall back to the
            # createPageHTML(...) pager call.
            max_count = re.findall('countPage = (\d+)', para_dicts["data"]["1_1"]['html'])
            if not max_count:
                max_count = re.findall('createPageHTML\((\d+)', para_dicts["data"]["1_1"]['html'])
            max_count = int(max_count[0]) if max_count else 1
            total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: schedule the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Paging parameter format is flavour-specific; API flavours
                # are 1-based, the HTML flavour uses index_<page>.html.
                if 'govsearch' in callmodel.sql_model.list_rawid:
                    dic = {"url_part": f"{list_json['url_part']}", "page_info": f"&page={page + 1}"}
                elif 'response_szf' in callmodel.sql_model.list_rawid:
                    dic = {"url_part": f"{list_json['url_part']}", "page_info": f"&pageIndex={page + 1}"}
                else:
                    dic = {"url_part": f"{list_json['url_part']}", "page_info": f"index_{page}.html"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        if 'govsearch' in callmodel.sql_model.list_rawid:
            # JSONP: flatten whitespace, unwrap result(...) and parse the JSON.
            html_str = re.findall('result\((.*)\);', para_dicts["data"]["1_1"]['html'].replace('\n', '').replace('\r', '').replace('\t', ''))[0]
            html_json = json.loads(html_str)
            li_list = html_json['data']
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li['puburl']
                base_url = f'http://infogate.jl.gov.cn/govsearch/jsonp/zf_jd_list.jsp'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url or 'jl' not in url:
                    continue
                rawid = re.findall('_(.*?)\.htm', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99036'
                article_json["url"] = url
                article_json["title"] = li['title']
                article_json["pub_date"] = li['tip']['dates']
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        elif 'response_szf' in callmodel.sql_model.list_rawid:
            html_json = json.loads(para_dicts["data"]["1_1"]['html'])
            li_list = html_json['list']
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li['extraUrl']
                base_url = f'http://was.jl.gov.cn/was5/web/gov/response_szf_zcztc_687.jsp'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url or 'jl' not in url:
                    continue
                rawid = re.findall('_(.*?)\.htm', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99036'
                article_json["url"] = url
                article_json["title"] = li['title']
                article_json["pub_date"] = li['fbsj']
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            res = Selector(text=para_dicts["data"]["1_1"]['html'])
            li_list = res.xpath('//ul[@class="in_list_page"]/li|//table[@class="dataList"]/tr|//div[@class="zlyjq"]/ul/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[1]/a/@href|a/@href').extract_first()
                if not href:
                    continue
                if 'szf/' in callmodel.sql_model.list_rawid:
                    base_url = f'http://xxgk.jl.gov.cn/{callmodel.sql_model.list_rawid}index_1.html'
                else:
                    # NOTE(review): 'www.ln.gov.cnn' looks like a typo — the
                    # Liaoning host (with a doubled 'n') inside the Jilin
                    # crawler.  Relative hrefs joined against it will then
                    # fail the "'jl' not in url" filter below and be skipped
                    # silently.  Confirm the intended base host.
                    base_url = f'http://www.ln.gov.cnn/{callmodel.sql_model.list_rawid}index.htm'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url or 'jl' not in url:
                    continue
                rawid = re.findall('_(.*?)\.htm', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99036'
                article_json["url"] = url
                if 'szf/xxgk/gknr/ghxx/fzgh' in callmodel.sql_model.list_rawid:
                    article_json["title"] = li.xpath('a[2]/text()').extract_first().strip()
                    article_json["pub_date"] = li.xpath('a[1]/span/text()').extract_first().strip()
                else:
                    article_json["title"] = li.xpath('td[2]/a/text()|a/text()').extract_first().strip()
                    article_json["pub_date"] = li.xpath('td[5]/text()|a/span/text()|span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jilinlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Jilin's intellsearch JSON API.

    Parses the paged JSON response: on page 1 it schedules one list task per
    reported page (duplicates are ignored by the insert statement), then every
    entry in the result list is queued as an article-stage task carrying its
    url, title and publish date.
    """
    result = DealModel()
    sql_model = callmodel.sql_model
    response = callmodel.para_dicts["data"]
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    if "1_1" in response:
        payload = json.loads(response["1_1"]['html'])
        total_page = payload['data']['data']['totalPage']
        page_index = int(sql_model.page_index)
        if page_index == 1:
            # Seed one list task per page of this listing.
            page_insert = DealInsertModel()
            page_insert.insert_pre = CoreSqlValue.insert_ig_it
            row_template = deal_sql_dict(sql_model.dict())
            for page_no in range(1, total_page + 1):
                row_template["page"] = total_page
                row_template["page_index"] = page_no
                row_template["list_json"] = sql_model.list_json
                page_insert.lists.append(row_template.copy())
            result.befor_dicts.insert.append(page_insert)

        article_insert = DealInsertModel()
        article_insert.insert_pre = CoreSqlValue.insert_ig_it
        for item in payload['data']['data']['list']:
            url = parse.urljoin('https://intellsearch.jl.gov.cn/search/index.html', item['url'])
            if 'htm' not in url:
                continue
            row = {"task_name": sql_model.task_name,
                   "task_tag": task_info.task_tag_next,
                   "rawid": re.findall(r'_(.*?)\.', url.split('/')[-1])[0],
                   "sub_db_id": '99036'}
            meta = {"url": url,
                    "title": item['title'],
                    "pub_date": item['pubtime']}
            row["article_json"] = json.dumps(meta, ensure_ascii=False)
            article_insert.lists.append(row)
        result.next_dicts.insert.append(article_insert)

    return result


def policy_jilinarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Jilin: no extra handling is needed at this
    stage, so an empty DealModel is handed back."""
    return DealModel()


def policy_jilinarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Jilin policy article pages.

    Pulls metadata from the ``effect2`` info table (falling back to values
    captured at list stage) plus the full text, emits rows for
    ``policy_latest`` and ``policy_fulltext_latest``, and records attachment
    info back onto the task row via ``other_dicts``.

    Raises:
        Exception: when none of the known full-text containers are present.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json.get('pub_date', ''))
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the title from the info table, then the page heading, then the
    # list-stage title.
    title = ''.join(res.xpath('//div[@id="effect2"]//td[contains(text(),"标") and contains(text(),"题")]/following::td[1]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="qt-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@id="effect2"]//td[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@id="effect2"]//td[contains(text(),"索") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@id="effect2"]//td[contains(text(),"分") and contains(text(),"类")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@id="effect2"]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@id="effect2"]//td[contains(text(),"发文机关")]/following::td[1]/text()').extract()).strip()

    fulltext_xpath = '//div[@id="xx_conter1023"]|//div[@class="contents_div"]|//div[@id="content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("policy_jilinarticle_etl_callback: full text container not found")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99036'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JL"
    zt_provider = "jlcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # debug trace of the generated record id

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment info (if any) is stored on the task row so the downloader
    # can fetch the files later.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   黑龙江省
def policy_heilongjianglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for the Heilongjiang zwgk JSON API.

    On page 1 it schedules one list task per page reported by ``data.pages``
    (list_json is reused verbatim; duplicates are ignored by the insert
    statement).  Each record becomes an article-stage task: records carrying
    an ``id`` point at the publicInfo detail endpoint, records without one
    carry a direct ``artUrl``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        total_page = html_json['data']['pages']
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['data']['records']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            rec_id = li.get('id', '')
            if not rec_id:
                # No detail id: the record links straight to a static page.
                url = li['artUrl']
                if not url:
                    continue
                if 'id=' in url:
                    rawid = url.split('id=')[-1]
                else:
                    if 'htm' not in url:
                        continue
                    rawid = re.findall(r'(.*?)\.htm', url.split('/')[-1])[0]
                title = li['infoTitle']
                # NOTE(review): publishTime is fed to fromtimestamp() as
                # seconds — confirm the API does not return milliseconds.
                date_stamp = datetime.datetime.fromtimestamp(li['publishTime'])
                pub_date = datetime.datetime.strftime(date_stamp, "%Y-%m-%d %H:%M:%S")
            else:
                url = f'https://zwgk.hlj.gov.cn/zwgk/publicInfo/detail?id={rec_id}'
                rawid = rec_id
                title = li['title']
                date_stamp = datetime.datetime.fromtimestamp(li['publishTime'])
                pub_date = datetime.datetime.strftime(date_stamp, "%Y-%m-%d %H:%M:%S")

            temp["rawid"] = rawid
            temp["sub_db_id"] = '99037'
            article_json["url"] = url
            article_json["title"] = title
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_heilongjianglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for www.hlj.gov.cn HTML listing pages.

    The last ``href='indexN'`` pager link gives the page count; on page 1
    the remaining pages are scheduled (list_json reused verbatim), and every
    ``list_16`` entry becomes an article-stage task carrying url, title and
    publish date.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r"href='index(\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[-1]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[contains(@class,"list_16")]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'https://www.hlj.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            rawid = re.findall(r'(.*?)\.htm', url.split('/')[-1])[0]

            temp["rawid"] = rawid
            temp["sub_db_id"] = '99037'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('em/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_heilongjiangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Heilongjiang: no extra handling is needed
    at this stage, so an empty DealModel is handed back."""
    return DealModel()


def policy_heilongjiangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Heilongjiang policy article pages.

    Extracts metadata from the ``dbtsf`` info list and the full text, emits
    rows for ``policy_latest`` and ``policy_fulltext_latest``, and records
    attachment info back onto the task row via ``other_dicts``.

    Raises:
        Exception: when none of the known full-text containers are present.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json.get('pub_date', ''))
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title from the page heading, falling back to the list-stage value.
    title = ''.join(res.xpath('//h1[@role="contentTitle"]/text()|//div[@class="dbt"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//ul[@class="dbtsf"]//span[contains(text(),"发文字号")]/following::span[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//ul[@class="dbtsf"]//span[contains(text(),"索引号")]/following::span[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//ul[@class="dbtsf"]//span[contains(text(),"主题分类")]/following::span[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//ul[@class="dbtsf"]//span[contains(text(),"成文日期")]/following::span[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//ul[@class="dbtsf"]//span[contains(text(),"时效")]/following::span[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//ul[@class="dbtsf"]//span[contains(text(),"发布机构")]/following::span[1]/text()').extract()).strip()

    fulltext_xpath = '//div[@class="zwnr"]|//div[@id="zoom"]|//div[@id="rwb_zw"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("policy_heilongjiangarticle_etl_callback: full text container not found")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99037'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "HLJ"
    zt_provider = "hljcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # debug trace of the generated record id

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment info (if any) is stored on the task row so the downloader
    # can fetch the files later.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   江苏省
def policy_jiangsulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse one Jiangsu gov policy list page (XML ``<record>`` payload).

    On page 1 the ``<totalrecord>`` counter is read, the page count is
    derived (20 records per page, 3 pages fetched per task row) and the
    remaining list tasks are inserted.  Every ``<record>`` entry is then
    converted into an article-level task under the next task tag.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # raw string so "\d" reaches the regex engine instead of being a
        # (deprecated) string escape
        max_count = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 20)

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # first page only: fan out the remaining list pages
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # one task row covers 3 pages => a 60-record [start, end] window
                start = (page - 1) * 20 + 1
                end = min((page + 2) * 20, max_count)
                dic = {"start": start, "end": end, "page_info": list_json["page_info"]}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            # base URL only anchors relative hrefs for urljoin
            base_url = 'http://www.jiangsu.gov.cn/col/col84241/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url or 'art_' not in url:
                continue
            rawid = re.findall(r'art_.*?_(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99038'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()|b/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jiangsuarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Jiangsu; parsing is deferred to the ETL callback."""
    return DealModel()


def policy_jiangsuarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL one Jiangsu policy article page into the save tables.

    Metadata is read from the "xxgk_table" info table (falling back to
    the page heading, then the list-page title) and rows are emitted for
    ``policy_latest`` and ``policy_fulltext_latest``.  Attachment info
    found in the full text is written back onto the originating task row
    as ``other_dicts``.

    Raises:
        Exception: when no full-text node matches ``fulltext_xpath``.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json.get('pub_date', ''))
    pub_year = pub_date[:4]  # '' when pub_date is empty
    res = Selector(text=html)

    # title: info table -> page heading -> list-page title
    title = ''.join(res.xpath(
        '//table[@class="xxgk_table"]//td[contains(text(),"标") and contains(text(),"题")]/following::td[1]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="sp_title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="xxgk_table"]//td[contains(text(),"文") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="xxgk_table"]//td[contains(text(),"索") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="xxgk_table"]//td[contains(text(),"分") and contains(text(),"类")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="xxgk_table"]//td[contains(text(),"时") and contains(text(),"效")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="xxgk_table"]//td[contains(text(),"发") and contains(text(),"构")]/following::td[1]/text()').extract()).strip()

    fulltext_xpath = '//div[@class="article_content"]|//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # keep the exception type callers already handle, but say why
        raise Exception(f"no fulltext node matched for {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99038'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JIANGSU"
    zt_provider = "jiangsucngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # write attachment info (or "{}") back onto the originating task row
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   浙江省
def policy_zhejianglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Zhejiang gov policy HTML list page (15 records/page).

    Reads the "共N条记录" counter to compute the page count, fans out the
    remaining pages from the seed row (gated on ``turn_page``), and emits
    one article task per list row.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # raw string so "\d" is a regex escape; 6997 is the fallback total
        # used when the counter is missing from the page
        max_count = re.findall(r'共(\d+)条记录', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 6997
        total_page = math.ceil(max_count / 15)
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        if (turn_page == 7 and page_index < 1) or (turn_page == 8 and page_index < 2):
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # strip DB bookkeeping columns before re-inserting the row
            sql_dict.pop("id")
            sql_dict.pop("update_time")
            sql_dict.pop("create_time")
            sql_dict.pop("null_dicts")
            sql_dict.pop("err_msg")
            sql_dict.pop("other_dicts")
            sql_dict.pop("state")
            sql_dict.pop("failcount")

            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="zcwj-con-right_list"]/div')
        if li_list:
            # hoisted: the original repeated this identical update per row
            result.befor_dicts.update.update({'page': total_page})
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('span[1]/a/@href').extract_first()
            base_url = 'http://www.zj.gov.cn/col/col1544911/index.html'
            url = parse.urljoin(base_url, href)
            if '.pdf' in url or 'zj' not in url:
                continue
            # rawid comes from the article slug, else the column id
            if 'art_' in url:
                rawid = re.findall(r'art_(.*?)\.htm', url.split('/')[-1])[0]
            else:
                rawid = re.findall(r'col(\d+)', url)[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99039'
            article_json["url"] = url
            article_json["title"] = li.xpath('span[1]/a/@title').extract_first()
            article_json["org"] = li.xpath('span[4]/text()').extract_first()
            article_json["pub_date"] = li.xpath('span[5]/text()').extract_first()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_zhejianglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Zhejiang gov XML list page (``<record>`` payload).

    On page 1 the ``<totalrecord>`` counter is read, the page count is
    derived (25 records per page, 3 pages per task row) and the remaining
    list tasks are inserted.  Each ``<record>`` becomes an article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # raw string so "\d" is a real regex escape
        max_count = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 25)

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # one task row covers 3 pages => a 75-record [start, end] window
                start = (page - 1) * 25 + 1
                end = min((page + 2) * 25, max_count)
                dic = {"start": start, "end": end, "page_info": list_json["page_info"]}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            # base URL only anchors relative hrefs for urljoin
            base_url = 'https://www.zj.gov.cn/col/col1545696/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url or 'art_' not in url:
                continue
            rawid = re.findall(r'art_(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99039'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('b/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_zhejianglist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse one page of the Zhejiang policy JSON API (100 records/page).

    The page count comes from ``params.policyList.totalNum``; page 1 fans
    out the remaining pages.  Each record becomes an article task whose
    URL is built from the record's ``iid``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        total_page = math.ceil(html_json['params']['policyList']['totalNum'] / 100)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # list_json is reused verbatim; the page number travels in page_index
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['params']['policyList']['data']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.get('iid', '')
            url = f'https://zhengce.zj.gov.cn/policyweb/httpservice/showinfo.do?infoid={href}'
            rawid = href
            title = li['title']
            if li['pubtime']:
                # pubtime is an epoch value in milliseconds
                date_stamp = datetime.datetime.fromtimestamp(li['pubtime'] / 1000)
                pub_date = datetime.datetime.strftime(date_stamp, "%Y-%m-%d %H:%M:%S")
            else:
                pub_date = ''

            temp["rawid"] = rawid
            temp["sub_db_id"] = '99039'
            article_json["url"] = url
            article_json["title"] = title
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_zhejiangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Zhejiang; parsing is deferred to the ETL callback."""
    return DealModel()


def policy_zhejiangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL one Zhejiang policy article page into the save tables.

    Metadata comes from either the "xxgk cf" header block or the "xxgk"
    info table, depending on which template the page uses.  If no publish
    date is available from the list data or the page header, the full
    text is scanned for a date (possibly written in Chinese numerals) and
    ``sign_state`` is set to "1" for dates recovered that way.

    Raises:
        Exception: no full-text node, or no usable publish date found.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json.get('pub_date', ''))
    pub_year = pub_date[:4]  # '' when pub_date is empty
    res = Selector(text=html)

    title = ''.join(res.xpath('//td[@class="wzbt"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    if 'xxgk cf' in html:
        # newer template: label/value <span> pairs inside div "xxgk cf"
        pub_no = ''.join(res.xpath('//div[@class="xxgk cf"]//span[contains(text(),"发文字号")]/following::span[1]/text()').extract()).strip()
        index_no = ''
        subject = ''.join(res.xpath('//div[@class="xxgk cf"]//span[contains(text(),"主题分类")]/following::span[1]/text()').extract()).strip()
        written_date = ''.join(res.xpath('//div[@class="xxgk cf"]//span[contains(text(),"成文日期")]/following::span[1]/text()').extract()).strip()
        legal_status = ''
        organ = ''.join(res.xpath('//div[@class="xxgk cf"]//span[contains(text(),"发文机关")]/following::span[1]/text()').extract()).strip()
    else:
        # older template: th/td info table
        pub_no = ''.join(res.xpath('//table[@class="xxgk"]//th[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath('//table[@class="xxgk"]//th[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
        subject = ''.join(res.xpath('//table[@class="xxgk"]//th[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
        written_date = ''.join(res.xpath('//table[@class="xxgk"]//th[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
        legal_status = ''.join(res.xpath('//table[@class="xxgk"]//th[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
        organ = ''.join(res.xpath('//table[@class="xxgk"]//th[contains(text(),"发文机关")]/following::td[1]/text()').extract()).strip()

    fulltext_xpath = '//div[@class="zc_article_con"]|//div[@id="zoom"]|//div[@class="chat_cont"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"no fulltext node matched for {provider_url}")
    if not pub_date:
        # second chance: date shown in the page's info list
        pub_date_info = ''.join(res.xpath('//ul[@class="list"]/li[contains(text(),"日期：")]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        if pub_date == '00000000':
            pub_date = ''
        pub_year = pub_date[:4]
    sign_state = "0"
    if not pub_date:
        # last resort: mine a signature date out of the article body
        sign_state = "1"
        # Chinese-numeral -> digit mapping for dates written out in words.
        date_dict = {
            "O": "0", "〇": "0", "○": "0", "Ｏ": "0", "0": "0", "零": "0",
            "一": "1", "二": "2", "三": "3", "四": "4", "五": "5",
            "六": "6", "七": "7", "八": "8", "九": "9", "十": "10",
            "十一": "11", "十二": "12", "十三": "13", "十四": "14", "十五": "15",
            "十六": "16", "十七": "17", "十八": "18", "十九": "19",
            "一十": "10", "一十一": "11", "一十二": "12", "一十三": "13",
            "一十四": "14", "一十五": "15", "一十六": "16", "一十七": "17",
            "一十八": "18", "一十九": "19",
            "二十": "20", "二十一": "21", "二十二": "22", "二十三": "23",
            "二十四": "24", "二十五": "25", "二十六": "26", "二十七": "27",
            "二十八": "28",  # fixed: was mistyped "18"
            "二十九": "29",
            "廿": "20", "廿一": "21", "廿二": "22", "廿三": "23",
            "廿四": "24", "廿五": "25", "廿六": "26", "廿七": "27",
            "廿八": "28",  # fixed: was mistyped "18"
            "廿九": "29", "三十": "30", "三十一": "31",
        }
        fulltext_str_info = ''.join(res.xpath(f'({fulltext_xpath})//text()').extract())
        fulltext_str = re.sub(r'\s', '', fulltext_str_info)
        pub_date_info = re.findall(r'[O〇○Ｏ0零一二三四五六七八九十廿]{4}年[O〇○Ｏ0零一二三四五六七八九十廿]{1,3}月[O〇○Ｏ0零一二三四五六七八九十廿]{1,3}[日,号]|\d{4}年\d{1,2}月\d{1,2}[日,号]|\d{4}[\-,/]\d{1,2}[\-,/]\d{1,2}', fulltext_str)
        if not pub_date_info:
            raise Exception(f"no date found in fulltext for {provider_url}")
        date_list = list()
        for pub_dat in pub_date_info:
            pub_dat_spl = re.split(r'[年,月, 日,号,\-,/]', pub_dat)
            # year digits are mapped one character at a time
            year = ''.join([date_dict.get(x, x) for x in pub_dat_spl[0]])
            month = date_dict.get(pub_dat_spl[1], pub_dat_spl[1]).rjust(2, '0')
            day = date_dict.get(pub_dat_spl[2], pub_dat_spl[2]).rjust(2, '0')
            date_list.append(year + month + day)
        now_date = time.strftime('%Y%m%d', time.localtime())
        for pub_date in date_list:
            if pub_date > now_date:
                # future date is not a plausible signature date; keep looking
                pub_date = ''
                continue
            pub_date = str(int(pub_date))
            pub_year = pub_date[:4]
            break
    if not pub_date:
        raise Exception(f"no usable pub_date for {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99039'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "ZJ"
    zt_provider = "zjcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status
    data['sign_state'] = sign_state

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # write attachment info (or "{}") back onto the originating task row
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   安徽省
def policy_anhuilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse an Anhui gov policy list page (several list templates).

    The page count is taken from the last "pageCount:N" occurrence in the
    HTML; page 1 fans out the remaining pages.  Three alternative list
    layouts are matched by one combined XPath and each row becomes an
    article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # raw string so "\d" is a real regex escape
        max_count = re.findall(r"pageCount:(\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[-1]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # list_json is reused verbatim; the page travels in page_index
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # three templates covered by one alternation
        li_list = res.xpath('//div[@class="xxgk_nav_con"]/div|//ul[@class="doc_list list-6782131"]/li|//tr[@class="xxgk_nav_con"]')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('.//a[@class="title"]/@href|td[@class="info"]/a/@href|a/@href').extract_first()
            # base URL only anchors relative hrefs for urljoin
            base_url = 'https://www.ah.gov.cn/public/column/1681'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            rawid = re.findall(r'(.*?)\.htm', url.split('/')[-1])[0]

            temp["rawid"] = rawid
            temp["sub_db_id"] = '99040'
            article_json["url"] = url
            article_json["title"] = li.xpath('.//a[@class="title"]/text()|td[@class="info"]/a/text()|a/span/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('.//li[@class="rq hidden-sm hidden-xs"]/text()|td[@class="fbrq"]/text()|span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_anhuiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Anhui; parsing is deferred to the ETL callback."""
    return DealModel()


def policy_anhuiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL one Anhui policy article page into the save tables.

    Reads the article HTML fetched in step "1_1", extracts metadata from
    the "zcjdDiv" info table plus the page heading, and emits rows for
    the ``policy_latest`` and ``policy_fulltext_latest`` tables.  The
    attachment info found in the full text is written back onto the
    originating task row as ``other_dicts``.

    Raises:
        Exception: when no full-text node matches ``fulltext_xpath``.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    # values captured on the list page, used as fallbacks/defaults
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json.get('pub_date', ''))
    pub_year = pub_date[:4]  # '' when pub_date is empty
    res = Selector(text=html)


    # prefer the on-page heading; fall back to the list-page title
    title = ''.join(res.xpath('//h1[contains(@class,"wztit")]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@id="zcjdDiv"]//th[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@id="zcjdDiv"]//th[text()="索"]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@id="zcjdDiv"]//th[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@id="zcjdDiv"]//th[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
    # NOTE(review): this selector targets div.xl-zw-top while every other
    # field here uses div#zcjdDiv — looks copied from another province's
    # template; confirm against a live Anhui page.
    legal_status = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"有") and contains(text(),"性")]/following::div[1]/text()').extract()).strip()
    # NOTE(review): label '有' looks truncated for an organ field (the
    # sibling callbacks match 发文机关); as written this appears to read
    # the validity cell — verify.
    organ = ''.join(res.xpath('//div[@id="zcjdDiv"]//th[text()="有"]/following::td[1]/text()').extract()).strip()

    fulltext_xpath = '//div[@class="wzcon j-fontContent"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99040'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "AH"
    zt_provider = "ahcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    # data['impl_date'] = clean_pubdate(impl_date)
    # data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    # data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # write attachment info (or "{}") back onto the originating task row
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   福建省
def policy_fujianlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Fujian gov policy list response (two JSON APIs).

    'ssp/search/api' responses carry records under ``datas`` keyed by
    ``ssp_id``; the other API carries them under ``docs`` with full URLs.
    Page 1 fans out the remaining list pages.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # strip raw control characters that would break json.loads
        html_json = json.loads(para_dicts["data"]["1_1"]['html'].replace('\n', '').replace('\t', '').replace('\r', ''))
        if 'ssp/search/api' in callmodel.sql_model.list_rawid:
            total_page = int(html_json['page']['pagecount'])
        else:
            total_page = int(html_json['pagenum'])
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # list_json is reused verbatim; the page travels in page_index
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        if 'ssp/search/api' in callmodel.sql_model.list_rawid:
            for li in html_json['datas']:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                rawid = li['ssp_id']
                # detail JSON endpoint plus the human-facing article URL
                url = f'https://www.fujian.gov.cn/hqzc/datapub/policydetail/policy_detail_{rawid}.json'
                art_url = f'https://www.fujian.gov.cn/zwgk/ztzl/yqfk/skjn/detail.htm?id={rawid}&SearchWord='
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99041'
                article_json["url"] = url
                article_json["art_url"] = art_url
                article_json["title"] = li['doctitle']
                article_json["pub_date"] = li['pubdate']
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            for li in html_json['docs']:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                url = li['url']
                if 'fujian.' not in url or 'htm' not in url:
                    continue
                # rawid = last path segment, minus an optional "prefix_" part
                if '_' in url.split('/')[-1]:
                    rawid = re.findall(r'_(.*?)\.', url.split('/')[-1])[0]
                else:
                    rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99041'
                article_json["url"] = url
                article_json["title"] = li['title']
                article_json["pub_date"] = li['pubtime']
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_fujianarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Fujian; parsing is deferred to the ETL callback."""
    return DealModel()


def policy_fujianarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Fujian province policy articles (sub_db_id 99041).

    Two page layouts are handled:

    * JSON API responses (detected by the ``finalIssuingUnit`` key), whose
      metadata is read directly from the decoded payload;
    * plain HTML detail pages, whose metadata is scraped via XPath.

    Returns an EtlDealModel carrying rows for ``policy_latest`` and
    ``policy_fulltext_latest``, plus a ``befor_dicts`` update that writes any
    extracted attachment info into ``other_dicts``.
    """
    result = EtlDealModel()
    save_data = list()

    if 'finalIssuingUnit' in callmodel.para_dicts['data']['1_1']['html']:
        # JSON API layout: metadata comes from the decoded payload.
        html_json = json.loads(callmodel.para_dicts['data']['1_1']['html'])
        html = html_json['datas']['original']
        article_json = json.loads(callmodel.sql_model.article_json)
        provider_url = article_json['art_url']
        pub_date = clean_pubdate(article_json['pub_date'])
        pub_year = pub_date[:4]
        res = Selector(text=html)
        title = html_json['datas']['documentName']
        if not title:
            # Fall back to the title captured at list-parsing time.
            title = article_json['title'].strip()
        pub_no = html_json['datas']['documentNum']
        index_no = ''
        organ = html_json['finalIssuingUnit']
        # The payload's 'original' field already carries the rendered body HTML.
        fulltext = html

        down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
        sub_db_id = '99041'
        rawid = callmodel.sql_model.rawid
        lngid = BaseLngid().GetLngid(sub_db_id, rawid)
        product = "FUJIAN"
        zt_provider = "fujiancngovpolicy"
        data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
        print(lngid)

        data['title'] = title
        data['provider_url'] = provider_url
        data['pub_date'] = clean_pubdate(pub_date)
        data['pub_year'] = pub_year
        data['pub_no'] = pub_no
        data['organ'] = organ
        data['index_no'] = index_no

        save_data.append({'table': 'policy_latest', 'data': data})
        full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
        save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
        result.save_data = save_data

        # Collect attachment links from the whole rendered body.
        file_info = get_file_info(data, res, '(//body)')
        di_model_bef = DealUpdateModel()
        if file_info:
            di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
        else:
            di_model_bef.update.update({"other_dicts": "{}"})
        di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                                   "task_name": callmodel.sql_model.task_name})
        result.befor_dicts.update_list.append(di_model_bef)
    else:
        # Plain HTML layout: scrape metadata with XPath.
        html = callmodel.para_dicts['data']['1_1']['html']
        article_json = json.loads(callmodel.sql_model.article_json)
        provider_url = article_json['url']
        pub_date = clean_pubdate(article_json['pub_date'])
        pub_year = pub_date[:4]
        res = Selector(text=html)
        title = ''.join(res.xpath('//h1[@class="xl_tit4"]//text()|//div[@class="xl-content"]//h2//text()').extract()).strip()
        if not title:
            # Fall back to the title captured at list-parsing time.
            title = article_json['title'].strip()

        if not pub_date:
            # Fall back to the <meta name="PubDate"> tag.
            pub_date_info = ''.join(res.xpath('//meta[@name="PubDate"]/@content').extract()).strip()
            pub_date = clean_pubdate(pub_date_info)
            pub_year = pub_date[:4]
        if not pub_date:
            raise Exception("pub_date not found for Fujian article")
        pub_no = ''.join(res.xpath(
            '//div[@class="xl_con1"]//span[contains(text(),"发文字号：")]/following::text[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath(
            '//div[@class="xl_con1"]//span[contains(text(),"索 引 号：")]/following::text[1]/text()').extract()).strip()
        written_date = ''.join(res.xpath(
            '//div[@class="xl_con1"]//span[contains(text(),"生成日期：")]/following::text[1]/text()').extract()).strip()
        legal_status = ''.join(res.xpath(
            '//div[@class="xl_con1"]//span[contains(text(),"有效性：")]/following::text[1]/text()').extract()).strip()
        organ = ''.join(res.xpath(
            '//div[@class="xl_con1"]//span[contains(text(),"发布机构：")]/following::text[1]/text()').extract()).strip()

        fulltext_xpath = '//div[@id="detailCont"]|//div[@id="detailCont2"]|//div[@class="TRS_Editor"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if not fulltext:
            raise Exception("fulltext not found for Fujian article")

        down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
        sub_db_id = '99041'
        rawid = callmodel.sql_model.rawid
        lngid = BaseLngid().GetLngid(sub_db_id, rawid)
        product = "FUJIAN"
        zt_provider = "fujiancngovpolicy"
        data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
        print(lngid)

        data['title'] = title
        data['provider_url'] = provider_url
        data['pub_date'] = clean_pubdate(pub_date)
        data['pub_year'] = pub_year
        data['pub_no'] = pub_no
        data['organ'] = organ
        data['index_no'] = index_no
        data['written_date'] = clean_pubdate(written_date)
        data['legal_status'] = legal_status

        save_data.append({'table': 'policy_latest', 'data': data})
        full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
        save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
        result.save_data = save_data

        # Attachments may sit inside the fulltext or in the "qz-tab" block.
        file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
        file_info2 = get_file_info(data, res, f'(//div[@class="qz-tab"])')
        file_info = file_info1 + file_info2
        di_model_bef = DealUpdateModel()
        if file_info:
            di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
        else:
            di_model_bef.update.update({"other_dicts": "{}"})
        di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                                   "task_name": callmodel.sql_model.task_name})
        result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Jiangxi Province
def policy_jiangxilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Jiangxi province policies (sub_db_id 99042).

    Parses an XML-like listing (``<record>`` elements). On page 1 it fans out
    the remaining pages (3 pages / 75 records per task) into ``befor_dicts``;
    every page emits one next-stage row per qualifying article link.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 25)

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # Step by 3 pages: each scheduled task covers records start..end.
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 25 + 1
                end = (page + 2) * 25
                if end >= max_count:
                    end = max_count
                dic = {"start": start, "end": end, "page_info": list_json["page_info"]}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('div[1]/h1/a/@href|a/@href').extract_first()
            base_url = f'http://www.jiangxi.gov.cn/col/col55602/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url or '_' not in url:
                continue
            if 'art_' in url:
                # e.g. .../art_396_123456.html -> rawid 123456
                rawid = re.findall(r'art_.*?_(.*?)\.', url.split('/')[-1])[0]
            else:
                rawid = re.findall(r'_(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99042'
            article_json["url"] = url
            article_json["title"] = li.xpath('div[1]/h1/a/text()|a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('div[3]/text()|span/text()|b/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jiangxiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    # Article-stage callback for Jiangxi: nothing to schedule here,
    # so an empty DealModel is returned directly.
    return DealModel()


def policy_jiangxiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Jiangxi province policy articles (sub_db_id 99042).

    Scrapes metadata from the ``xxgkTitle`` info table and the article body
    from ``div#zoom``, then emits rows for ``policy_latest`` and
    ``policy_fulltext_latest`` plus an ``other_dicts`` update with any
    attachment info found in the fulltext.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json.get('pub_date', ''))
    pub_year = pub_date[:4]
    res = Selector(text=html)

    title = ''.join(res.xpath('//div[@class="zw_title"]/p/text()|//p[@class="sp_title con-title"]//text()').extract()).strip()
    if not title:
        # Fall back to the title captured at list-parsing time.
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="screen xxgkTitle"]//b[contains(text(),"文") and contains(text(),"号")]/ancestor::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="screen xxgkTitle"]//span[contains(text(),"索") and contains(text(),"号")]/ancestor::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="screen xxgkTitle"]//b[contains(text(),"主") and contains(text(),"类")]/ancestor::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="screen xxgkTitle"]//b[contains(text(),"成") and contains(text(),"期")]/ancestor::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="screen xxgkTitle"]//b[contains(text(),"有") and contains(text(),"性")]/ancestor::td[1]/span/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="screen xxgkTitle"]//b[contains(text(),"发") and contains(text(),"关")]/ancestor::td[1]/text()').extract()).strip()

    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("fulltext not found for Jiangxi article")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99042'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JIANGXI"
    zt_provider = "jiangxicngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Shandong Province
def policy_shandonglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Shandong province policies (sub_db_id 99043).

    Parses an XML-like listing (``<record>`` elements). On page 1 it fans out
    the remaining pages (3 pages / 75 records per task) into ``befor_dicts``;
    every page emits one next-stage row per ``art_``-style article link.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 25)

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # Step by 3 pages: each scheduled task covers records start..end.
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 25 + 1
                end = (page + 2) * 25
                if end >= max_count:
                    end = max_count
                dic = {"start": start, "end": end, "page_info": list_json["page_info"]}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.shandong.gov.cn/col/col94238/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url or 'art_' not in url:
                continue
            # e.g. .../art_94238_123456.html -> rawid 123456
            rawid = re.findall(r'art_.*?_(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99043'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()|b/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_shandonglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Shandong's jpolicy JSON API (sub_db_id 99043).

    On page 1, schedules one task row per page of the paginated API; every
    page then emits one next-stage row per list entry pointing at the
    ``info/detail`` endpoint.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        total_page = html_json['data']['totalPage']
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # list_json is reused verbatim; only the page index varies.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = html_json['data']['list']
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            iid = li.get('iid', '')
            url = f'http://www.shandong.gov.cn/jpaas-jpolicy-web-server/front/info/detail?iid={iid}'
            temp["rawid"] = iid
            temp["sub_db_id"] = '99043'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li.get('publishDate', '')
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_shandonglist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Shandong's jpolicy "explain" JSON API
    (sub_db_id 99043).

    Identical to ``policy_shandonglist1_callback`` except that next-stage
    rows point at the ``info/explain`` detail endpoint.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        total_page = html_json['data']['totalPage']
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # list_json is reused verbatim; only the page index varies.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = html_json['data']['list']
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            iid = li.get('iid', '')
            url = f'http://www.shandong.gov.cn/jpaas-jpolicy-web-server/front/info/explain?iid={iid}'
            temp["rawid"] = iid
            temp["sub_db_id"] = '99043'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li.get('publishDate', '')
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_shandongarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    # Article-stage callback for Shandong: no follow-up work, return an
    # empty DealModel.
    return DealModel()


def policy_shandongarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Shandong province policy articles (sub_db_id 99043).

    Handles two page layouts: pages with a ``div.xxgk`` metadata table and
    pages using the ``people-desc`` block. For ``info/explain`` URLs the
    stored ``pub_date`` is treated as a millisecond epoch timestamp;
    otherwise it is cleaned as a date string.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    if 'info/explain' in provider_url:
        if article_json.get('pub_date', ''):
            # Millisecond epoch -> YYYYMMDD.
            date_stamp = datetime.datetime.fromtimestamp(article_json.get('pub_date', '') / 1000)
            pub_date = datetime.datetime.strftime(date_stamp, "%Y%m%d")
            pub_year = pub_date[:4]
        else:
            # pub_year is assigned below when pub_date is re-derived from HTML.
            pub_date = ''
    else:
        pub_date = clean_pubdate(article_json.get('pub_date', ''))
        pub_year = pub_date[:4]
    res = Selector(text=html)

    title = ''.join(res.xpath('//div[@class="people-desc"]//strong[contains(text(),"标　　题：")]/parent::td[1]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="wip_art_h"]//text()').extract()).strip()
    if not title:
        # Fall back to the title captured at list-parsing time.
        title = article_json['title'].strip()
    if not pub_date:
        pub_date_info = ''.join(res.xpath('//div[@class="people-desc"]//strong[contains(text(),"发布日期：")]/parent::td[1]/text()|//span[@class="sign"]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    if 'class="xxgk"' in html:
        pub_no = ''.join(res.xpath('//div[@class="xxgk"]//th[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
        written_date = ''.join(res.xpath('//div[@class="xxgk"]//th[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
        legal_status = ''.join(res.xpath('//div[@class="xxgk"]//th[contains(text(),"效")]/following::td[1]/text()').extract()).strip()
        organ = ''.join(res.xpath('//div[@class="xxgk"]//th[contains(text(),"发布机关")]/following::td[1]/text()').extract()).strip()
    else:
        pub_no = ''.join(res.xpath('//div[@class="people-desc"]//strong[contains(text(),"发文字号：")]/parent::td[1]/text()').extract()).strip()
        written_date = ''.join(res.xpath('//div[@class="people-desc"]//strong[contains(text(),"成文日期：")]/parent::td[1]/text()').extract()).strip()
        legal_status = ''.join(res.xpath('//div[@class="people-desc"]//strong[contains(text(),"效")]/parent::td[1]/text()').extract()).strip()
        organ = ''.join(res.xpath('//div[@class="people-desc"]//strong[contains(text(),"发文机关：")]/parent::td[1]/text()').extract()).strip()

    fulltext_xpath = '//div[@class="wip_art_con"]|//div[@class="main_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("fulltext not found for Shandong article")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99043'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "SHANDONG"
    zt_provider = "shandongcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Henan Province
def policy_henanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Henan province search results (sub_db_id 99044).

    On page 1 it fans out one task row per result page; every page emits one
    next-stage row per search hit. The total page count is also reported via
    ``code_dicts`` and re-asserted into ``befor_dicts.update``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'data-pagecount="(\d+)"', para_dicts["data"]["1_1"]['html'])
        # 515 is the hard-coded fallback page count when the attribute is
        # absent from the page.
        max_count = int(max_count[0]) if max_count else 515
        total_page = max_count
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Strip DB bookkeeping columns before re-inserting task rows.
            sql_dict.pop("id")
            sql_dict.pop("update_time")
            sql_dict.pop("create_time")
            sql_dict.pop("null_dicts")
            sql_dict.pop("err_msg")
            sql_dict.pop("other_dicts")
            sql_dict.pop("state")
            sql_dict.pop("failcount")

            for page in range(1, total_page+1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@id="jsearch-result-items"]/div')
        for li in li_list:
            # Repeated per record, but the same key/value every time, so the
            # net effect is a single {'page': total_page} update.
            result.befor_dicts.update.update({'page': total_page})
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            # NOTE(review): the URL is taken from the anchor's *text*, not its
            # @href — looks deliberate for this search layout; confirm.
            href = li.xpath('div[3]/div[2]/div/a[1]/text()').extract_first()
            url = href
            if not url or 'htm' not in url:
                continue
            rawid = re.findall(r'(.*)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99044'
            article_json["url"] = url
            article_json["title"] = li.xpath('div[2]/a/text()').extract_first()
            article_json["pub_date"] = li.xpath('.//span[@class="jsearch-result-date"]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_henanlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Henan column-style listings (sub_db_id 99044).

    Determines the page count either from a ``pagecount`` attribute or from
    the "last page" pager link, fans out the remaining pages on the initial
    request, and emits one next-stage row per article link.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        max_count = re.findall(r'pagecount="(\d+)', para_dicts["data"]["1_1"]['html'])
        if max_count:
            # 24 records per page on this layout.
            total_page = math.ceil(int(max_count[0])/24)
        else:
            # Fall back to the pager's "last page" (尾页) link.
            max_count = res.xpath('//a[@title="尾页"]/@data-page').extract_first()
            total_page = int(max_count) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        # NOTE(review): fan-out triggers at page_index == 0 and the range stops
        # before total_page — differs from the sibling callbacks that use
        # page_index == 1 and range(1, total_page + 1); confirm intended.
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = res.xpath('//div[@class="mt15 list-box"]/ul/li|//div[@class="con-box"]/ul/li|//div[@class="zfxxgk_zdgkc"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.henan.gov.cn/{callmodel.sql_model.list_rawid}/index_1.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]

            temp["rawid"] = rawid
            temp["sub_db_id"] = '99044'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/font/text()|a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('b/text()|span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_henanlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Henan's paginated JSON API (sub_db_id 99044).

    On page 1, schedules one task row per API page; every page emits one
    next-stage row per entry, using the entry's own ``selfUrl`` and ``id``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        total_page = html_json['data']['totalPage']
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # list_json is reused verbatim; only the page index varies.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = html_json['data']['datas']
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            rawid = li.get('id', '')
            url = li.get('selfUrl', '')
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99044'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li.get('pubDate', '')
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_henanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Henan: no extra scheduling work is needed,
    so an empty DealModel is returned as-is."""
    return DealModel()


def policy_henanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Henan (河南) government policy article pages.

    Parses the downloaded article HTML, extracts metadata (title, document
    number, index number, written/invalid dates, issuing organ) plus the
    full text, and queues rows for the ``policy_latest`` and
    ``policy_fulltext_latest`` tables.  Attachment info found in the
    full-text region is written back to the task row via ``other_dicts``.

    Raises:
        Exception: when no full-text node can be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json.get('pub_date', ''))
    pub_year = pub_date[:4]  # empty string when pub_date is missing
    res = Selector(text=html)

    # Title fallback chain: metadata table -> page headline -> list-page title.
    title = ''.join(res.xpath('//div[@class="file-box"]//b[contains(text(),"标　　题")]/following::td[1]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h2[@id="subtitle"]//text()|//h1[@id="title"]//text()|//div[@class="title_wj"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="file-box"]//b[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="file-box"]//b[contains(text(),"索 引 号")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="file-box"]//b[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
    invalid_date = ''.join(res.xpath('//div[@class="file-box"]//b[contains(text(),"失效时间")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="file-box"]//b[contains(text(),"发文机关")]/following::td[1]/text()').extract()).strip()

    fulltext_xpath = '//div[@id="content"]|//div[@class="content"]|//div[@class="rule_main"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail with context instead of a bare Exception so the scheduler
        # records a meaningful error message.
        raise Exception(f"henan etl: fulltext not found, rawid={callmodel.sql_model.rawid}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99044'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "HENAN"
    zt_provider = "henancngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['invalid_date'] = clean_pubdate(invalid_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment/file info (if any) goes back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   湖南省
def policy_hunanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Hunan (湖南) provincial government site.

    On page 1 it schedules the remaining list pages (``page_info`` gets an
    ``_<page>`` suffix); on every page it extracts article links and queues
    them for the article stage under sub_db_id 99046.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count is read from the site's createPageHTML(...) paging JS call.
        max_count = re.findall("createPageHTML\('paging',(\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Page 1 is responsible for inserting task rows for pages 2..N.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page+1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if 'xxgkzcjd' in callmodel.sql_model.list_rawid:
        # NOTE(review): 'tobdy' below looks like a typo for 'tbody', so that
        # alternative likely never matches -- confirm against the live page
        # HTML before changing the pattern.
        li_list = res.xpath('//div[@class="ty-list clearfix"]/ul/li|//table[@class="table"]/tobdy/tr|//div[@class="yl-listbox"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            # Promote the row to the next stage's task tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('td[2]/a/@href|a/@href').extract_first()
            base_url = f'http://www.hunan.gov.cn/{callmodel.sql_model.list_rawid}/index_1.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid = part of the file name between '_' and the first '.'.
            rawid = re.findall('_(.*?)\.', url.split('/')[-1])[0]

            temp["rawid"] = rawid
            temp["sub_db_id"] = '99046'
            article_json["url"] = url
            # NOTE(review): extract_first() may return None and then raise
            # AttributeError on .strip() -- confirm whether that failure mode
            # is relied on to mark the task as failed.
            article_json["title"] = li.xpath('td[2]/a/text()|a/@title').extract_first().strip()
            article_json["pub_date"] = li.xpath('td[4]/text()|span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_hunanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Hunan: no extra scheduling work is needed,
    so an empty DealModel is returned as-is."""
    return DealModel()


def policy_hunanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Hunan (湖南) government policy article pages.

    Extracts metadata from the page's ``p.a1`` info line (each field is the
    text after the '：' separator) plus the full text, and queues rows for
    ``policy_latest`` / ``policy_fulltext_latest``.  Attachment info found
    in the full-text region is written back to the task row (``other_dicts``).

    Raises:
        Exception: when no full-text node can be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json.get('pub_date', ''))
    pub_year = pub_date[:4]  # empty string when pub_date is missing
    res = Selector(text=html)

    # Title fallback: page headline -> list-page title.
    title = ''.join(res.xpath('//h3[@class="sp_title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata fields follow the pattern "label：value"; keep the value part.
    pub_no = ''.join(res.xpath('//p[@class="a1"]//font[contains(text(),"文号：")]/text()').extract()).strip()
    pub_no = pub_no.split('：')[-1].strip()
    index_no = ''.join(res.xpath('//p[@class="a1"]//font[contains(text(),"索引号：")]/text()').extract()).strip()
    index_no = index_no.split('：')[-1].strip()
    subject = ''.join(res.xpath('//p[@class="a1"]//font[contains(text(),"所属主题：")]/text()').extract()).strip()
    subject = subject.split('：')[-1].strip()
    written_date = ''.join(res.xpath('//p[@class="a1"]//font[contains(text(),"签署日期：")]/text()').extract()).strip()
    written_date = written_date.split('：')[-1].strip()
    invalid_date = ''.join(res.xpath('//p[@class="a1"]//font[contains(text(),"信息时效期：")]/text()').extract()).strip()
    invalid_date = invalid_date.split('：')[-1].strip()
    organ = ''.join(res.xpath('//p[@class="a1"]//font[contains(text(),"所属机构：")]/text()').extract()).strip()
    organ = organ.split('：')[-1].strip()

    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail with context instead of a bare Exception so the scheduler
        # records a meaningful error message.
        raise Exception(f"hunan etl: fulltext not found, rawid={callmodel.sql_model.rawid}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99046'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "HUNAN"
    zt_provider = "hunancngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment/file info (if any) goes back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   广东省
def policy_guangdonglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Guangdong (广东) provincial government site.

    Handles two list formats: the 'gkmlpt' JSON API (paged via ``page=<n>``,
    articles under ``articles``) and static HTML lists (paged via
    ``index_<n>.html``).  Page 1 schedules the remaining pages; every page
    queues article links for the article stage under sub_db_id 99047.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Static lists expose page numbers as index_<n>; the JSON API exposes
        # a "total" record count instead (20 records per page, capped at 100
        # pages).
        max_count = re.findall('index_(\d+)', para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall('"total":(\d+)', para_dicts["data"]["1_1"]['html'])
            max_count = int(max_count[0]) if max_count else 1
            total_page = math.ceil(max_count / 20)
            total_page = 100 if total_page >= 100 else total_page
        else:
            max_count = int(max_count[-1]) if max_count else 1
            total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Page 1 is responsible for inserting task rows for pages 2..N.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                if 'gkmlpt' in callmodel.sql_model.list_rawid:
                    dic = {"page_info": f"page={page}"}
                else:
                    dic = {"page_info": f"index_{page}.html"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        if 'gkmlpt' in callmodel.sql_model.list_rawid:
            # JSON API branch: the "html" payload is actually a JSON document.
            html_json = json.loads(para_dicts["data"]["1_1"]['html'])
            li_list = html_json['articles']
            for li in li_list:
                temp = info_dicts.copy()
                # Promote the row to the next stage's task tag.
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li['url']
                base_url = f'http://www.gd.gov.cn/gkmlpt/index'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url or '_' not in url.split('/')[-1]:
                    continue
                # rawid = part of the file name between '_' and '.htm'.
                rawid = re.findall('_(.*?)\.htm', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99047'
                article_json["url"] = url
                article_json["title"] = li['title']
                # display_publish_time is a unix timestamp; format for storage.
                date_stamp = datetime.datetime.fromtimestamp(li['display_publish_time'])
                pub_date = datetime.datetime.strftime(date_stamp, "%Y-%m-%d %H:%M:%S")
                article_json["pub_date"] = pub_date
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # Static HTML branch.
            res = Selector(text=para_dicts["data"]["1_1"]['html'])
            li_list = res.xpath('//div[@class="viewList"]/ul/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('span[@class="name"]/a/@href|span[@class="til"]/a/@href').extract_first()
                if not href:
                    continue
                base_url = f'http://www.gd.gov.cn/{callmodel.sql_model.list_rawid}index.html'
                url = parse.urljoin(base_url, href)
                if 'content' not in url:
                    continue
                rawid = re.findall('_(.*?)\.htm', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99047'
                article_json["url"] = url
                # NOTE(review): extract_first() may return None and raise
                # AttributeError on .strip() -- confirm that is intended.
                article_json["title"] = li.xpath('span[@class="name"]/a/text()|span[@class="til"]/a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('span[@class="time"]/text()|span[@class="date"]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_guangdongarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Guangdong: no extra scheduling work is
    needed, so an empty DealModel is returned as-is."""
    return DealModel()


def policy_guangdongarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Guangdong (广东) government policy article pages.

    Two page layouts are handled: the "introduce" metadata panel and the
    older "classify" table.  Extracted metadata plus the full text are
    queued for ``policy_latest`` / ``policy_fulltext_latest``, and
    attachment info is written back to the task row via ``other_dicts``.

    Raises:
        Exception: when no full-text node can be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json.get('pub_date', ''))
    pub_year = pub_date[:4]  # empty string when pub_date is missing
    res = Selector(text=html)

    # Title fallback chain: metadata panel -> page headline -> list-page title.
    title = ''.join(res.xpath('//div[@class="introduce"]//label[contains(text(),"标题：")]/following::span[1]/text()').extract()).strip()
    if not title:
        # NOTE(review): "h13" is an unusual tag name -- confirm against the
        # live page markup.
        title = ''.join(res.xpath('//h1[@class="title document-number"]//text()|//h13[@class="zw-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    if 'introduce' in html:
        pub_no = ''.join(res.xpath('//div[@class="introduce"]//label[contains(text(),"文号：")]/following::span[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath('//div[@class="introduce"]//label[contains(text(),"索引号：")]/following::span[1]/text()').extract()).strip()
        subject = ''.join(res.xpath('//div[@class="introduce"]//label[contains(text(),"分类：")]/following::span[1]/text()').extract()).strip()
        written_date = ''.join(res.xpath('//div[@class="introduce"]//label[contains(text(),"成文日期：")]/following::span[1]/text()').extract()).strip()
        organ = ''.join(res.xpath('//div[@class="introduce"]//label[contains(text(),"发布机构：")]/following::span[1]/text()').extract()).strip()
    else:
        pub_no = ''.join(res.xpath('//div[@class="classify"]//td[contains(text(),"文号：")]/following::span[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath('//div[@class="classify"]//td[contains(text(),"索引号：")]/following::span[1]/text()').extract()).strip()
        subject = ''.join(res.xpath('//div[@class="classify"]//td[contains(text(),"分类：")]/following::span[1]/text()').extract()).strip()
        written_date = ''.join(res.xpath('//div[@class="classify"]//td[contains(text(),"成文日期：")]/following::span[1]/text()').extract()).strip()
        organ = ''.join(res.xpath('//div[@class="classify"]//td[contains(text(),"发布机构：")]/following::span[1]/text()').extract()).strip()

    fulltext_xpath = '//div[@class="zw"]|//div[@class="article-content"]|//div[@class="content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail with context instead of a bare Exception so the scheduler
        # records a meaningful error message.
        raise Exception(f"guangdong etl: fulltext not found, rawid={callmodel.sql_model.rawid}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99047'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "GD"
    zt_provider = "gdcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment/file info (if any) goes back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   海南省
def policy_hainanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Hainan (海南) provincial government site.

    On page 1 it schedules the remaining list pages (``page_info`` gets an
    ``_<page>`` suffix); on every page it extracts article links and queues
    them for the article stage under sub_db_id 99048.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count is read from the site's createPageHTML(...) paging JS call.
        max_count = re.findall("createPageHTML\('page_div',(\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Page 1 is responsible for inserting task rows for pages 2..N.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if 'xxgkzcjd' in callmodel.sql_model.list_rawid:
        li_list = res.xpath('//div[contains(@class,"list_div")]|//div[@class="lm4"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            # Promote the row to the next stage's task tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('div[1]/a/@href').extract_first()
            base_url = f'https://www.hainan.gov.cn/{callmodel.sql_model.list_rawid}/index_1.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid = file name up to the first '.'.
            rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]

            temp["rawid"] = rawid
            temp["sub_db_id"] = '99048'
            article_json["url"] = url
            # NOTE(review): extract_first() may return None and raise
            # AttributeError on .strip() -- confirm that is intended.
            article_json["title"] = li.xpath('div[1]/a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('.//td[contains(text(),"发布时间：")]/text()|.//i[contains(text(),"公布日期：")]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_hainanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Hainan: no extra scheduling work is
    needed, so an empty DealModel is returned as-is."""
    return DealModel()


def policy_hainanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Hainan (海南) government policy article pages.

    Extracts metadata from the ``zwgk_comr1`` info panel plus the full
    text, and queues rows for ``policy_latest`` /
    ``policy_fulltext_latest``.  Attachment info found in the full-text
    region is written back to the task row via ``other_dicts``.

    Raises:
        Exception: when the publish date from the list stage is missing,
            or when no full-text node can be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json.get('pub_date', ''))
    pub_year = pub_date[:4]  # empty string when pub_date is missing
    res = Selector(text=html)

    # Title fallback: <ucaptitle> element -> list-page title.
    title = ''.join(res.xpath('//ucaptitle//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    if not pub_date:
        # The publish date comes from the list stage; without it the record
        # cannot be dated, so fail with context.
        raise Exception(f"hainan etl: pub_date missing, rawid={callmodel.sql_model.rawid}")
    pub_no = ''.join(res.xpath('//div[@class="zwgk_comr1"]//strong[contains(text(),"文") and contains(text(),"号")]/parent::span[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="zwgk_comr1"]//strong[contains(text(),"索") and contains(text(),"号")]/parent::span[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="zwgk_comr1"]//strong[contains(text(),"主题分类")]/parent::span[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="zwgk_comr1"]//strong[contains(text(),"成文日期")]/parent::span[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="zwgk_comr1"]//strong[contains(text(),"时") and contains(text(),"性")]/parent::span[1]/div/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="zwgk_comr1"]//strong[contains(text(),"发文机关")]/parent::span[1]/text()').extract()).strip()

    fulltext_xpath = '//ucapcontent|//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail with context instead of a bare Exception so the scheduler
        # records a meaningful error message.
        raise Exception(f"hainan etl: fulltext not found, rawid={callmodel.sql_model.rawid}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99048'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "HAINAN"
    zt_provider = "hainancngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment/file info (if any) goes back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   四川省
def policy_sichuanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Sichuan (四川) JSON search API.

    Reads ``totalPage``/``results`` from the parsed response, schedules the
    remaining pages (gated by ``turn_page``), and queues article links for
    the article stage under sub_db_id 99049.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # NOTE(review): unlike the sibling callbacks, this indexes the "1_1"
        # payload directly (no json.loads / no ['html']) -- presumably the
        # fetch stage already parsed the API response; confirm upstream.
        data = para_dicts["data"]["1_1"]
        max_count = data['totalPage']
        total_page = max_count
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        # Only specific turn_page modes on early pages fan out the paging rows.
        if (turn_page == 7 and page_index < 1) or (turn_page == 8 and page_index < 2):
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Strip DB-managed / per-run columns before re-inserting the row.
            sql_dict.pop("id")
            sql_dict.pop("update_time")
            sql_dict.pop("create_time")
            sql_dict.pop("null_dicts")
            sql_dict.pop("err_msg")
            sql_dict.pop("other_dicts")
            sql_dict.pop("state")
            sql_dict.pop("failcount")

            for page in range(page_index, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        items = data.get('results', [])
        for item in items:
            print(item)  # NOTE(review): debug print left in -- consider removing
            # NOTE(review): this sets the same {'page': total_page} on every
            # iteration; looks like it belongs before the loop -- confirm.
            result.befor_dicts.update.update({'page': total_page})
            temp = info_dicts.copy()
            # Promote the row to the next stage's task tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = parse.urljoin('http://www.sc.gov.cn', item['url'])
            rawid = item['manuscriptId']
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99049'
            article_json["url"] = url
            article_json["title"] = item.get('title', '')
            article_json["wh"] = item.get('wh', '')
            article_json["publishedTime"] = item.get('publishedTime', '')
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_sichuanlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Sichuan (四川) static HTML list pages.

    On page 1 it schedules the remaining list pages (``page_info`` gets an
    ``_<page>`` suffix); on every page it extracts article links and queues
    them for the article stage under sub_db_id 99049.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count is read from the site's createPageHTML(...) paging JS call.
        max_count = re.findall("createPageHTML\('page_div',(\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Page 1 is responsible for inserting task rows for pages 2..N.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if 'xxgkzcjd' in callmodel.sql_model.list_rawid:
        li_list = res.xpath('//table[@id="dash-table"]/tr|//div[@id="dash-table"]/ul/li|//div[@class="biaobody"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            # Promote the row to the next stage's task tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('td[2]/span/font/a/@href|div[2]/a/@href|a/@href').extract_first()
            base_url = f'https://www.sc.gov.cn/{callmodel.sql_model.list_rawid}/index_1.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid = file name up to the first '.'.
            rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]

            temp["rawid"] = rawid
            temp["sub_db_id"] = '99049'
            article_json["url"] = url
            # NOTE(review): extract_first() may return None and raise
            # AttributeError on .strip() -- confirm that is intended.
            article_json["title"] = li.xpath('td[2]/span/font/a/text()|div[2]/a/text()|a/text()').extract_first().strip()
            if '13298/14097' in callmodel.sql_model.list_rawid:
                # This column has no date on the page; filled later at the
                # ETL stage from the article page itself.
                pub_date = ''
            else:
                pub_date = li.xpath('div[4]/text()|span/text()').extract_first().strip()
            article_json["publishedTime"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_sichuanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Sichuan: no extra scheduling work is
    needed, so an empty DealModel is returned as-is."""
    return DealModel()


def policy_sichuanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Sichuan gov policy articles (sub_db_id 99049).

    Parses the crawled detail-page HTML, extracts policy metadata and the
    full text, stages rows for the ``policy_latest`` and
    ``policy_fulltext_latest`` tables, and writes attachment info back to
    the source crawl row via ``other_dicts``.

    Raises:
        Exception: when no publish date or no full-text container is found.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    # Date captured on the list page; may be empty for some channels.
    pub_date = clean_pubdate(article_json.get('publishedTime', ''))
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the list-page title.
    title = ''.join(res.xpath('//ucaptitle//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Fall back to the article-attribute bar when the list page had no date.
    if not pub_date:
        pub_date_info = ''.join(res.xpath('//ul[@id="articleattribute"]/li/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    if not pub_date:
        raise Exception
    # Metadata fields are label/value pairs in the "topbox" header block.
    pub_no = ''.join(res.xpath('//div[@class="topbox"]//strong[contains(text(),"文") and contains(text(),"号")]/following::span[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="topbox"]//strong[contains(text(),"索") and contains(text(),"号")]/following::span[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="topbox"]//strong[contains(text(),"成文日期")]/following::span[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="topbox"]//strong[contains(text(),"发布机构")]/following::span[1]/text()').extract()).strip()

    fulltext_xpath = '//div[@id="cmsArticleContent"]|//td[@class="contText"]|//div[@class="contText"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99049'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "SC"
    zt_provider = "sccngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    # data['impl_date'] = clean_pubdate(impl_date)
    # data['invalid_date'] = clean_pubdate(invalid_date)
    # data['subject'] = subject
    # data['subject_word'] = subject_word
    # data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (if any) back onto the source crawl row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Guizhou Province (贵州省)
def policy_guizhoulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Guizhou gov policy channels (sub_db_id 99050).

    Derives the total page count from the ``createPageHTML(...)`` pager
    script, seeds follow-up list tasks for the remaining pages (only when
    processing page 0), and queues one article task per list item.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count from the pager JS call; default to a single page.
        max_count = re.findall("createPageHTML\((\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page only: enqueue list tasks for all remaining pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page ):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if 'xxgkzcjd' in callmodel.sql_model.list_rawid:
        li_list = res.xpath('//ul[@class="NewsList"]/li|//div[@class="zcjd_list"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            # Article tasks run under the next stage's tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('h2/a/@href|a[last()]/@href').extract_first()
            if not href:
                continue
            base_url = f'https://www.guizhou.gov.cn/{callmodel.sql_model.list_rawid}/index_1.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid = the part between the last '_' and the file extension.
            rawid = re.findall('_(.*?)\.', url.split('/')[-1])[0]

            temp["rawid"] = rawid
            temp["sub_db_id"] = '99050'
            article_json["url"] = url
            article_json["title"] = li.xpath('h2/a/text()|a[last()]/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('h2/span/text()|span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_guizhouarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article callback for Guizhou; all processing happens in the ETL step."""
    return DealModel()


def policy_guizhouarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Guizhou gov policy articles (sub_db_id 99050).

    Parses the crawled detail-page HTML, extracts policy metadata and the
    full text, stages rows for the ``policy_latest`` and
    ``policy_fulltext_latest`` tables, and writes attachment info back to
    the source crawl row via ``other_dicts``.

    Fixes vs. the previous version:
      * the ``pub_no`` / ``index_no`` xpaths were swapped relative to every
        sibling province callback (文号 is the document number -> pub_no,
        索引号 is the index number -> index_no);
      * the ``Publisher`` fallback no longer raises IndexError when the
        single regex match is an empty string;
      * removed a dead first assignment to ``subject`` that was always
        overwritten immediately below.

    Raises:
        Exception: when no full-text container is found on the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json.get('pub_date', ''))
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="DocTitle"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # 文号 -> pub_no (document number); 索引号 -> index_no (index number).
    pub_no = ''.join(res.xpath('//div[contains(@class,"Xxgk_Info")]//li[contains(text(),"文") and contains(text(),"号")]/following::li[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[contains(@class,"Xxgk_Info")]//li[contains(text(),"索") and contains(text(),"号")]/following::li[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[contains(@class,"Xxgk_Info")]//li[contains(text(),"生成日期")]/following::li[1]/text()').extract()).strip()
    # Issuing organ is embedded in a JS variable; take the first non-empty
    # match (guards the empty-first-match case that used to index organ[1]).
    organ_matches = re.findall("Publisher='(.*?)'", html)
    organ = next((o for o in organ_matches if o), '').replace(' ', ';')
    subject = cleaned(res.xpath('//li[contains(text(),"信息分类")]/following::li[1]/text()').extract_first()).replace('\n', '')
    legal_status = re.findall("var isok='(.*?)'", html)
    if legal_status:
        # '0' / '否' mean the document is no longer in force.
        legal_status = '失效' if legal_status[0] == '0' or legal_status[0] == '否' else '有效'
    else:
        legal_status = ''

    fulltext_xpath = '//div[@id="Zoom"]|//div[@class="Zoom Box"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99050'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "GUIZHOU"
    zt_provider = "guizhoucngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (if any) back onto the source crawl row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Yunnan Province (云南省)
def policy_yunnanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Yunnan gov policy channels (sub_db_id 99051).

    Derives the total page count from the ``createPageHTML(...)`` pager
    script, seeds follow-up list tasks for the remaining pages (only when
    processing page 0), and queues one article task per list item.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count from the pager JS call; default to a single page.
        max_count = re.findall("createPageHTML\((\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page only: enqueue list tasks for all remaining pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if 'xxgkzcjd' in callmodel.sql_model.list_rawid:
        # Three alternative list layouts are handled by one union xpath.
        li_list = res.xpath('//dl[@class="thlist"]|//table[@class="wjlb"]/tbody/tr|//ul[@class="wjer_list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            # Article tasks run under the next stage's tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('span[1]/a/@href|td[2]/a/@href|dt/a/@href').extract_first()
            if not href:
                continue
            base_url = f'http://www.yn.gov.cn/{callmodel.sql_model.list_rawid}/index_1.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid = the part between the last '_' and the file extension.
            rawid = re.findall('_(.*?)\.', url.split('/')[-1])[0]

            temp["rawid"] = rawid
            temp["sub_db_id"] = '99051'
            article_json["url"] = url
            article_json["title"] = li.xpath('span[1]/a/text()|td[2]/a/text()|dt/a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span[2]/text()|td[3]/text()|dd/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_yunnanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article callback for Yunnan; all processing happens in the ETL step."""
    return DealModel()


def policy_yunnanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Yunnan gov policy articles (sub_db_id 99051).

    Parses the crawled detail-page HTML, extracts policy metadata and the
    full text, stages rows for the ``policy_latest`` and
    ``policy_fulltext_latest`` tables, and writes attachment info back to
    the source crawl row via ``other_dicts``.

    Raises:
        Exception: when no full-text container is found on the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json.get('pub_date', ''))
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the list-page title.
    title = ''.join(res.xpath('//h3[contains(@class,"h3class")]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="referencebox"]//dt[contains(text(),"文号")]/following::dd[1]/text()').extract()).strip()
    # Some pages put the document-type label ("其他文件") in the 文号 slot.
    if pub_no == '其他文件':
        pub_no = ''
    index_no = ''.join(res.xpath('//div[@class="referencebox"]//dt[contains(text(),"索引号")]/following::dd[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="referencebox"]//dt[contains(text(),"公开日期")]/following::dd[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="referencebox"]//dt[contains(text(),"来源")]/following::dd[1]/text()').extract()).strip()

    fulltext_xpath = '//div[@class="arti"]|//div[@class="mart"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99051'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "YN"
    zt_provider = "yncngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    # data['impl_date'] = clean_pubdate(impl_date)
    # data['invalid_date'] = clean_pubdate(invalid_date)
    # data['subject'] = subject
    # data['subject_word'] = subject_word
    # data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (if any) back onto the source crawl row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Shaanxi Province (陕西省)
def policy_shaanxilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Shaanxi gov policy channels (sub_db_id 99052).

    Derives the total page count from the ``createPage(...)`` pager script
    and seeds follow-up list tasks for the remaining pages (only when
    processing page 0). Article links are extracted either from an embedded
    JS ``tableData`` array (the gfxwj channel) or from plain HTML lists.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count from the pager JS call; default to a single page.
        max_count = re.findall("createPage\((\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page only: enqueue list tasks for all remaining pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        if 'zfxxgk/fdzdgknr/zcwj/gfxwj' in callmodel.sql_model.list_rawid:
            # gfxwj channel: items live in a JS tableData literal, not HTML.
            text = re.findall('tableData:(.*?)Channels:', para_dicts["data"]["1_1"]['html'], re.S)[0].strip()[:-1].replace('\n', '').replace('\t', '')
            li_list = re.findall('\{.*?\}', text, re.S)
            for li in li_list:
                temp = info_dicts.copy()
                # Article tasks run under the next stage's tag.
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = re.findall("recurl *: *'(.*?)'", li, re.S)
                if not href:
                    continue
                base_url = f'http://www.shaanxi.gov.cn/{callmodel.sql_model.list_rawid}/index_1.html'
                url = parse.urljoin(base_url, href[0])
                if 'htm' not in url or '_' not in url.split('/')[-1]:
                    continue
                # rawid = the part between the last '_' and the extension.
                rawid = re.findall('_(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99052'
                article_json["url"] = url
                article_json["title"] = re.findall("name *: *'(.*?)'", li, re.S)[0]
                article_json["pub_date"] = re.findall("date *: *'(.*?)'", li, re.S)[0]
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # Plain HTML list layouts (three variants handled by one xpath).
            li_list = res.xpath('//ul[contains(@class,"cm-news-list")]/li|//ul[contains(@class,"szf-cwh-l2")]/li|//ul[contains(@class,"gov-item")]/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('div/a/@href|a/@href').extract_first()
                if not href:
                    continue
                base_url = f'http://www.shaanxi.gov.cn/{callmodel.sql_model.list_rawid}/index_1.html'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url or '_' not in url.split('/')[-1]:
                    continue
                rawid = re.findall('_(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99052'
                article_json["url"] = url
                if 'zfxxgk/zcjd/tjszfwj' in callmodel.sql_model.list_rawid:
                    # tjszfwj channel nests title/date inside the anchor.
                    article_json["title"] = li.xpath('a/p[@class="tit"]/text()').extract_first().strip()
                    article_json["pub_date"] = li.xpath('a/p[@class="date"]/text()').extract_first().strip()
                else:
                    article_json["title"] = li.xpath('div/a/text()|a/text()').extract_first().strip()
                    article_json["pub_date"] = li.xpath('span[@class="date rt"]/text()|span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_shaanxiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article callback for Shaanxi; all processing happens in the ETL step."""
    return DealModel()


def policy_shaanxiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Shaanxi gov policy articles (sub_db_id 99052).

    Parses the crawled detail-page HTML, extracts policy metadata and the
    full text, stages rows for the ``policy_latest`` and
    ``policy_fulltext_latest`` tables, and writes attachment info back to
    the source crawl row via ``other_dicts``. Two page layouts exist:
    ``xl-head`` div pairs and a ``szf_zw-table`` metadata table.

    Raises:
        Exception: when no full-text container is found on the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json.get('pub_date', ''))
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the list-page title.
    title = ''.join(res.xpath('//h1[@class="xx-tit f-tac"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    if 'xl-head' in html:
        # Layout A: metadata as label/value div pairs in the xl-head block.
        pub_no = ''.join(res.xpath('//div[@class="xl-head"]//div[contains(text(),"文") and contains(text(),"号")]/following::div[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath('//div[@class="xl-head"]//div[contains(text(),"索") and contains(text(),"号")]/following::div[1]/text()').extract()).strip()
        subject = ''.join(res.xpath('//div[@class="xl-head"]//div[contains(text(),"主") and contains(text(),"类")]/following::div[1]/text()').extract()).strip()
        # NOTE(review): the duplicated contains(text(),"成") condition looks
        # like a typo (siblings test for "成文日期") — confirm against markup.
        written_date = ''.join(res.xpath('//div[@class="xl-head"]//div[contains(text(),"成") and contains(text(),"成")]/following::div[1]/text()').extract()).strip()
        legal_status = ''.join(res.xpath('//div[@class="xl-head"]//div[contains(text(),"效") and contains(text(),"态")]/following::div[1]/text()').extract()).strip()
        organ = ''.join(res.xpath('//div[@class="xl-head"]//div[contains(text(),"发") and contains(text(),"构")]/following::div[1]/text()').extract()).strip()
    else:
        # Layout B: metadata rows in the szf_zw-table header table.
        pub_no = ''.join(res.xpath('//table[@class="cm-table-fixed szf_zw-table"]//th[contains(text(),"文 号")]/following::td[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath('//table[@class="cm-table-fixed szf_zw-table"]//th[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
        subject = ''.join(res.xpath('//table[@class="cm-table-fixed szf_zw-table"]//th[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
        written_date = ''.join(res.xpath('//table[@class="cm-table-fixed szf_zw-table"]//th[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
        legal_status = ''.join(res.xpath('//table[@class="cm-table-fixed szf_zw-table"]//th[contains(text(),"效力状态")]/following::td[1]/text()').extract()).strip()
        organ = ''.join(res.xpath('//table[@class="cm-table-fixed szf_zw-table"]//th[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    # Organ names like "省人民政府" are missing the province prefix.
    if organ.startswith('省'):
        organ = '陕西' + organ
    fulltext_xpath = '//div[@class="pages_content"]|//div[contains(@class,"szf_lfNewsDetail")]|//div[contains(@class,"html")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99052'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "SHAANXI"
    zt_provider = "shaanxicngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    # data['impl_date'] = clean_pubdate(impl_date)
    # data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    # data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (if any) back onto the source crawl row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Qinghai Province (青海省)
def policy_qinghailist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Qinghai gov policy channels (sub_db_id 99054).

    Derives the total page count either from ``_000000NN.shtml`` pager links
    (zwgk channel) or from the ``createPageHTML(...)`` pager script, seeds
    follow-up list tasks for the remaining pages (only when processing
    page 0), and queues one article task per list item.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # zwgk pages expose paging via _000000NN.shtml links; other
        # channels use the createPageHTML JS pager.
        max_count = re.findall("_000000(\d+).shtml", para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall("createPageHTML\((\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        if 'zwgk' == callmodel.sql_model.list_rawid:
            total_page = max_count + 1
        else:
            total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page only: enqueue list tasks for all remaining pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                if 'zwgk' == callmodel.sql_model.list_rawid:
                    # zwgk numbers its pages in reverse, zero-padded to 2.
                    dic = {"page_info": f"system/more/202030000000000/0000/202030000000000_000000{str(total_page-page).rjust(2, '0')}.shtml"}
                else:
                    dic = {"page_info": f"index_{page}.html"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Three list layouts are handled by one union xpath.
        li_list = res.xpath('//h1[text()="通知公告"]/following::div[1]/ul/p|//table[@class="zctb"]/tbody/tr|//ul[@class="qh-sectabs-list zw-gl-list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            # Article tasks run under the next stage's tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('td[1]/a/@href|a/@href').extract_first()
            if not href:
                continue
            base_url = f'http://www.qinghai.gov.cn/{callmodel.sql_model.list_rawid}/index_1.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # zwgk filenames have no '_' separator before the id.
            if 'zwgk' == callmodel.sql_model.list_rawid:
                rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
            else:
                rawid = re.findall('_(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99054'
            article_json["url"] = url
            article_json["title"] = ''.join(li.xpath('td[2]/a//text()|a/text()').extract()).strip()
            article_json["pub_date"] = li.xpath('td[4]/text()|span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_qinghaiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article callback for Qinghai; all processing happens in the ETL step."""
    return DealModel()


def policy_qinghaiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Qinghai gov policy articles (sub_db_id 99054).

    Parses the crawled detail-page HTML, extracts policy metadata and the
    full text, stages rows for the ``policy_latest`` and
    ``policy_fulltext_latest`` tables, and writes attachment info back to
    the source crawl row via ``other_dicts``.

    Raises:
        Exception: when no full-text container is found on the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json.get('pub_date', ''))
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the list-page title.
    title = ''.join(res.xpath('//h3[@class="title"]//text()|//h3[@class="tm"]//text()|//h1[@class="blue tc"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata fields are label/value span pairs in the "forlin" block.
    pub_no = ''.join(res.xpath('//div[@class="forlin"]//span[contains(text(),"发") and contains(text(),"号")]/following::span[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="forlin"]//span[contains(text(),"索") and contains(text(),"号")]/following::span[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="forlin"]//span[contains(text(),"主题分类") ]/following::span[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="forlin"]//span[contains(text(),"公文时效") ]/following::span[1]/text()').extract()).strip()
    # The page marks validity with "是" (yes); normalise to "有效".
    if legal_status == '是':
        legal_status = '有效'
    organ = ''.join(res.xpath('//div[@class="forlin"]//span[contains(text(),"发布机构") ]/following::span[1]/text()').extract()).strip()

    fulltext_xpath = '//div[@id="contentlf"]|//div[@class="con-article"]|//div[@class="details_content"]|//div[contains(@class,"zw-art-content")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99054'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "QINGHAI"
    zt_provider = "qinghaicngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    # data['written_date'] = clean_pubdate(written_date)
    # data['impl_date'] = clean_pubdate(impl_date)
    # data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    # data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (if any) back onto the source crawl row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Inner Mongolia Autonomous Region (内蒙古自治区)
def policy_neimenglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        data = para_dicts["data"]["1_1"]
        max_count = data['data']['total']
        total_page = math.ceil(max_count/15)
        if total_page >= 666:
            total_page = 666
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        if (turn_page == 7 and page_index < 1) or (turn_page == 8 and page_index < 2):
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict.pop("id")
            sql_dict.pop("update_time")
            sql_dict.pop("create_time")
            sql_dict.pop("null_dicts")
            sql_dict.pop("err_msg")
            sql_dict.pop("other_dicts")
            sql_dict.pop("state")
            sql_dict.pop("failcount")

            for page in range(page_index, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        items = data['data']['data']
        for item in items:
            result.befor_dicts.update.update({'page': total_page})
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = item['docpuburl']
            rawid = item['docid']
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99055'
            article_json["url"] = url
            article_json["title"] = item.get('title', '')
            article_json["docpubtime"] = item.get('docpubtime', '')
            article_json["docno"] = item.get('docno', '')
            article_json["publisher"] = item.get('publisher', '')
            article_json["sitedesc"] = item.get('sitedesc', '')
            article_json["validity"] = item.get('validity', '')
            article_json["cdesc"] = item.get('cdesc', '')
            article_json["idxId"] = item.get('idxId', '')
            article_json["scrq"] = item.get('scrq', '')
            article_json["wenzhongtype"] = item.get('wenzhongtype', '')
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_neimenglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """HTML list-page callback for the Inner Mongolia gov policy site.

    Reads the page count from the inline ``countPage`` script variable,
    seeds the remaining list pages when processing page 0, and queues one
    article-stage row per list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # Raw string: \d in a plain literal is a deprecated escape.
        max_count = re.findall(r"countPage = (\d+)", html)
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            list_json = json.loads(callmodel.sql_model.list_json)
            # Seed pages 1..total_page-1 (page 0 is the row in hand).
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        # Three list layouts are in use across the site's channels.
        li_list = res.xpath('//ul[@class="c_newslistfz"]/li|//table[@id="table1"]/tbody/tr|//div[contains(@class,"zcjdlie")]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('div/a/@href|td[2]/a/@href|a/@href').extract_first()
            if not href:
                continue
            base_url = f'https://www.nmg.gov.cn/{callmodel.sql_model.list_rawid}/index_1.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url or '_' not in url:
                continue
            # rawid is the token between '_' and '.' in the filename.
            rawid = re.findall(r'_(.*?)\.', url.split('/')[-1])[0]

            temp["rawid"] = rawid
            temp["sub_db_id"] = '99055'
            article_json["url"] = url
            article_json["title"] = li.xpath('div/a/text()|td[2]/a/text()|a/text()').extract_first().strip()
            article_json["docpubtime"] = li.xpath('span/text()|td[6]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_neimengarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Inner Mongolia: the page itself is
    processed by the ETL callback, so simply return an empty deal model."""
    return DealModel()


def policy_neimengarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Inner Mongolia gov policy article pages.

    Extracts policy metadata from the page's ``ql_detailbro_table`` when
    present, otherwise from the JSON captured at the list stage, then emits
    rows for ``policy_latest`` / ``policy_fulltext_latest`` and records any
    attachment info on the source task row.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json.get('docpubtime', ''))
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the list-stage title.
    title = ''.join(res.xpath('//h1[@class="ql_detailbro_title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    if 'ql_detailbro_table' in html:
        # Metadata table present: read every field from the page itself.
        pub_no = ''.join(res.xpath('//table[@class="ql_detailbro_table"]//td[contains(text(),"文") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath('//table[@class="ql_detailbro_table"]//td[contains(text(),"索") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
        subject = ''.join(res.xpath('//table[@class="ql_detailbro_table"]//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
        written_date = ''.join(res.xpath('//table[@class="ql_detailbro_table"]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
        legal_status = ''
        organ = ''.join(res.xpath('//table[@class="ql_detailbro_table"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    else:
        # No metadata table: use the fields captured on the list page.
        written_date = clean_pubdate(article_json.get('scrq', ''))
        pub_no = cleaned(article_json.get('docno', ''))
        index_no = cleaned(article_json.get('idxId', ''))
        organ = cleaned(article_json.get('publisher', ''))
        legal_status = cleaned(article_json.get('validity', ''))
        subject = cleaned(article_json.get('cdesc', ''))
    # Expand a leading "自治区…" organ name to the full region name.
    if organ.startswith('自治'):
        organ = '内蒙古' + organ
    # Union of all known body containers across the site's page templates.
    fulltext_xpath = '//div[@class="text"]|//div[contains(@class,"view")]|//div[@id="djh_xilanneirong"]|//div[@class="pages_content"]|//div[@class="tpl-attach"]|//div[@id="pagecontent"]|//div[@class="TRS_Editor"]|//div[@class="xl_zw BSHARE_POP"]|//div[@id="zoomfont"]|//div[@id="d_show"]|//div[@id="Zoom"]|//div[@id="pare"]|//div[@class="qgl_openinf_container_box niceScrollBox"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Unrecognized template: fail so the task framework records the error.
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99055'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "NMG"
    zt_provider = "nmgcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    # data['impl_date'] = clean_pubdate(impl_date)
    # data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    # data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment links found inside the fulltext container so the
    # download stage can pick them up (stored on the task row's other_dicts).
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   广西壮族自治区
def policy_guangxilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Guangxi gov policy site (JSON API).

    Reads the paging info from the response, seeds the remaining list
    pages on the first pass, and queues one article-stage row per non-PDF
    list item.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        data = para_dicts["data"]["1_1"]
        total_page = data['pager']['pageCount']
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        # Only the first page(s) of a crawl seed the remaining list pages.
        if (turn_page == 7 and page_index < 1) or (turn_page == 8 and page_index < 2):
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Drop bookkeeping columns so the re-inserted rows start fresh.
            for key in ("id", "update_time", "create_time", "null_dicts",
                        "err_msg", "other_dicts", "state", "failcount"):
                sql_dict.pop(key)

            for page in range(page_index, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        items = data['datas']
        if items:
            # Hoisted out of the loop: the same value was re-written per item.
            result.befor_dicts.update.update({'page': total_page})
        for item in items:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = item['DOCPUBURL']
            # Bug fix: test emptiness FIRST — the original evaluated
            # "'.pdf' in url" before "not url", raising TypeError on None.
            if not url or '.pdf' in url:
                continue
            rawid = item['DOCID1']
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99056'
            article_json["url"] = url
            article_json["title"] = item.get('DOCTITLE', '')
            # Carry list metadata forward for the ETL stage's fallbacks.
            for field in ('PubDate', 'CWRQ', 'IdxID', 'fileNum2',
                          'publisher', 'SITENAME', 'Effectivestate'):
                article_json[field] = item.get(field, '')
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_guangxilist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """HTML list-page callback for the Guangxi gov policy site.

    Reads the page count from the inline ``createPageHTML(...)`` call,
    seeds the remaining list pages when processing page 0, and queues one
    article-stage row per list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # Raw string: \( and \d in plain literals are deprecated escapes.
        max_count = re.findall(r"createPageHTML\((\d+)", html)
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            list_json = json.loads(callmodel.sql_model.list_json)
            # Seed pages 1..total_page-1 (page 0 is the row in hand).
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        li_list = res.xpath('//ul[@class="more-list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                continue
            base_url = f'http://www.gxzf.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # Filenames look like "t20230101_123456.html"; take the stem up
            # to the first '.' and drop its leading 't' to form the rawid.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0].replace('t', '', 1)

            temp["rawid"] = rawid
            temp["sub_db_id"] = '99056'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["PubDate"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_guangxiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Guangxi: the page itself is processed by
    the ETL callback, so simply return an empty deal model."""
    return DealModel()


def policy_guangxiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Guangxi gov policy article pages.

    Extracts policy metadata (title, dates, document numbers, organ, legal
    status) from the article HTML, falling back to values carried over from
    the list stage, and emits rows for ``policy_latest`` and
    ``policy_fulltext_latest`` plus an attachment-info update.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    res = Selector(text=html)

    title = ''.join(res.xpath('//div[@class="article"]/h1//text()|//h1[@class="contentTitle"]//text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()
    pub_date_info = ''.join(res.xpath('//div[@class="people-desc"]//strong[contains(text(),"发布日期：")]/parent::td[1]/text()').extract()).strip()
    pub_date = clean_pubdate(pub_date_info)
    pub_year = pub_date[:4]
    if not pub_date:
        # The list stage sometimes provides the date as "YYYY年M月D日".
        pub_date_info = article_json.get('PubDate', '')
        if '年' in pub_date_info:
            year = re.findall(r'(\d+)年', pub_date_info)
            year = year[0].rjust(4, '0') if year else '0000'
            month = re.findall(r'(\d+)月', pub_date_info)
            month = month[0].rjust(2, '0') if month else '00'
            day = re.findall(r'(\d+)日', pub_date_info)
            day = day[0].rjust(2, '0') if day else '00'
            pub_date = year + month + day
        else:
            pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    pub_no = ''.join(res.xpath('//div[@class="people-desc"]//strong[contains(text(),"发文字号：")]/text()').extract()).strip()
    pub_no = pub_no.split('：')[-1].strip()
    index_no = ''.join(res.xpath('//div[@class="people-desc"]//strong[contains(text(),"索")]/parent::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="people-desc"]//strong[contains(text(),"成文日期")]/parent::td[1]/text()').extract()).strip()
    legal_status = article_json.get('Effectivestate', '')
    # The site encodes "valid" either as the flag '1' or the literal text.
    legal_status = '有效' if legal_status == '1' or legal_status == '有效' else ''
    organ = ''.join(res.xpath('//div[@class="people-desc"]//strong[contains(text(),"发文单位")]/parent::td[1]/text()').extract()).strip()

    fulltext_xpath = '//div[@id="articleFile"]|//div[@class="article-con"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Unrecognized template: fail so the task framework records the error.
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99056'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "GXZF"
    zt_provider = "gxzfcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment links found inside the fulltext container so the
    # download stage can pick them up.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   西藏自治区
def policy_xizanglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Tibet gov policy site.

    Reads the page count from the inline ``createPageHTML(...)`` call
    (default 19 when absent), seeds the remaining list pages on the first
    pass, and queues one article-stage row per list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # Raw string: \( and \d in plain literals are deprecated escapes.
        max_count = re.findall(r"createPageHTML\((\d+),", html)
        total_page = int(max_count[0]) if max_count else 19
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        # Only the first page(s) of a crawl seed the remaining list pages.
        if (turn_page == 7 and page_index < 1) or (turn_page == 8 and page_index < 2):
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Drop bookkeeping columns so the re-inserted rows start fresh.
            for key in ("id", "update_time", "create_time", "null_dicts",
                        "err_msg", "other_dicts", "state", "failcount"):
                sql_dict.pop(key)

            for page in range(1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"index_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=html)
        li_list = res.xpath('//ul[@class="zwyw_list clearfix"]/li')
        if li_list:
            # Hoisted out of the loop: the same value was re-written per item.
            result.befor_dicts.update.update({'page': total_page})
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                continue
            base_url = f'http://www.xizang.gov.cn/zwgk/xxfb/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            # rawid is the token between '_' and '.' in the filename.
            rawid = re.findall(r'_(.*)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99057'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first()
            article_json["pub_date"] = li.xpath('span/text()').extract_first()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_xizangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Tibet: the page itself is processed by
    the ETL callback, so simply return an empty deal model."""
    return DealModel()


def policy_xizangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Tibet gov policy article pages.

    Extracts policy metadata from the page's metadata table (with several
    fallbacks), assembles the ``policy_latest`` and
    ``policy_fulltext_latest`` rows inline, and records attachment info —
    including attachments injected via an inline ``.html("...")`` script —
    on the source task row.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = cleaned(article_json['title'])
    provider_url = article_json['url']

    res = Selector(text=html)
    # Prefer the metadata-table title, then the in-page heading, then the
    # title captured on the list page.
    title = "".join(res.xpath('//td[contains(text(),"标") and contains(text(),"题")]/following::td[1]/text()').extract()).strip()
    if not title:
        title = "".join(res.xpath('//p[@class="inptit"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_date = cleaned(article_json['pub_date'])
    pub_year = pub_date[:4]
    pub_no = cleaned(res.xpath('//td[contains(text(),"文") and contains(text(),"号")]/following::td[1]/text()').extract_first())
    index_no = cleaned(res.xpath('//td[contains(text(),"索") and contains(text(),"号")]/following::td[1]/text()').extract_first())
    organ = cleaned(res.xpath('//td[contains(text(),"发布机构")]/following::td[1]/text()').extract_first())
    if not organ:
        # Fall back to the "来源：" credit line.
        # NOTE(review): extract_first() can return None here, which would
        # raise AttributeError on .split() — confirm this <p> is always
        # present when the metadata table is absent.
        organ_info = res.xpath('//p[@class="vvx-time-author lf"]').extract_first()
        organ = cleaned(organ_info.split('来源：')[-1])
        organ = organ.split(' ')[0]
    if "..." in title:
        # List-page titles are truncated with an ellipsis; re-read in full.
        title = cleaned(res.xpath('//td[contains(text(),"标")]/following::td[1]/text()|//p[@class="inptit"]/text()').extract_first())
    fulltext = res.xpath('//div[@class="vw-art-list"]').extract_first()
    if not fulltext:
        # Unrecognized template: fail so the task framework records the error.
        raise Exception

    # Fixed/bookkeeping fields of the policy_latest record.
    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99057'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    print(lngid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'XIZANG'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'xizangcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    # Extracted fields.
    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    # data['subject'] = subject
    # data['legal_status'] = legal_status
    # data['written_date'] = written_date
    save_data.append({'table': 'policy_latest', 'data': data})

    # Companion full-text record.
    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'

    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Attachments come from the article body AND from HTML injected via an
    # inline .html("...") script call.
    # NOTE(review): the pattern should be a raw string (r'...'); '\(' in a
    # plain literal is a deprecated escape.
    file_info1 = get_file_info(data, res, '(//div[@class="vw-art-list"])')
    fj_text = re.findall('.html\("(.*?)"\)', html)
    res = Selector(text=fj_text[0] if fj_text else '')
    file_info2 = get_file_info(data, res, '')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   宁夏回族自治区
def policy_ningxialist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """HTML list-page callback for the Ningxia gov policy site.

    The page count appears either in a ``createPageHTML(...)`` call or in
    the ``countPage`` script variable depending on the channel.  Page 0
    seeds the remaining list pages; each list entry is queued for the
    article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # Raw strings: \( and \d in plain literals are deprecated escapes.
        max_count = re.findall(r"createPageHTML\((\d+)", html)
        if not max_count:
            max_count = re.findall(r"countPage = (\d+)", html)
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            list_json = json.loads(callmodel.sql_model.list_json)
            # Seed pages 1..total_page-1 (page 0 is the row in hand).
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        # Four list layouts are in use across the site's channels.
        li_list = res.xpath('//ul[@class="commonList_dot"]/li|//div[@class="list-con"]/ul/li|//div[@class="zfxxgk_zdgkc"]/ul/li|//div[@class="list-box"]/div')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('div[@class="title"]/a/@href|a/@href').extract_first()
            if not href:
                continue
            base_url = f'https://www.nx.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url or 'nx' not in url:
                continue
            # rawid is the token between '_' and '.' in the filename.
            rawid = re.findall(r'_(.*?)\.', url.split('/')[-1])[0]

            temp["rawid"] = rawid
            temp["sub_db_id"] = '99058'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/div/text()|div[@class="title"]/a/text()|a/text()').extract_first().strip()
            if 'zwgk/zc/gzk' in callmodel.sql_model.list_rawid:
                # This channel's list layout carries no date column.
                article_json["pub_date"] = ''
            else:
                article_json["pub_date"] = li.xpath('span/text()|b/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_ningxiaarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Ningxia: the page itself is processed by
    the ETL callback, so simply return an empty deal model."""
    return DealModel()


def policy_ningxiaarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Ningxia gov (nx.gov.cn) policy article pages, sub_db_id 99058.

    Extracts metadata (title, dates, document number, issuing organ, ...)
    and the fulltext node from the downloaded article HTML, builds rows for
    the `policy_latest` / `policy_fulltext_latest` tables, and records any
    attachment info back into the source row's `other_dicts`.

    Raises:
        Exception: if no publish date or no fulltext node can be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    res = Selector(text=html)

    # Title: prefer the metadata block, then the page headline, and finally
    # the title captured on the list page.
    title = ''.join(res.xpath('//div[@id="svobjcat"]//span[contains(text(),"标题：")]/following::span[1]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@id="info_title"]//text()|//div[@class="con-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_date_info = ''.join(res.xpath('//div[@id="svobjcat"]//span[contains(text(),"发布时间：")]/following::span[1]/text()').extract()).strip()
    pub_date = clean_pubdate(pub_date_info)
    pub_year = pub_date[:4]
    if not pub_date:
        # Fall back to the generic article layout.
        pub_date_info = ''.join(res.xpath('//span[@id="info_released_dtime"]//text()|//div[@class="Article_ly"]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    if not pub_date:
        raise Exception("policy_ningxiaarticle_etl_callback: pub_date not found")
    pub_no = ''.join(res.xpath('//div[@id="svobjcat"]//span[contains(text(),"发文字号：")]/following::span[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@id="svobjcat"]//span[contains(text(),"索引号：")]/following::span[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@id="svobjcat"]//span[contains(text(),"主题分类：")]/following::span[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@id="svobjcat"]//span[contains(text(),"成文时间：")]/following::span[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@id="svobjcat"]//span[contains(text(),"有效性：")]/following::span[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@id="svobjcat"]//span[contains(text(),"责任部门：")]/following::span[1]/text()').extract()).strip()
    if organ.startswith('自治'):
        # Expand a bare "autonomous region ..." prefix to the full region name.
        organ = '宁夏回族' + organ

    fulltext_xpath = '//div[@id="info_content"]|//div[@id="ofdneed"]|//div[@class="con-article"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("policy_ningxiaarticle_etl_callback: fulltext not found")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99058'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "NX"
    zt_provider = "nxcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    # data['impl_date'] = clean_pubdate(impl_date)
    # data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    # data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Scan the fulltext node for attachments and persist them on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   山西
def policy_shanxilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for shanxi.gov.cn (Shanxi province, sub_db_id 99034).

    On the first page (page_index == 0) it schedules follow-up rows for the
    remaining pages; on every page it extracts the article links and queues
    one article-stage row per entry.

    Raises:
        Exception: if the page yields no list items (layout change / bad page).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in a createPageHTML(...) JS call.
        max_count = re.findall(r'createPageHTML\((\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: fan out rows for the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Several list layouts are covered by the alternatives below.
        li_list = res.xpath('//dl[@class="sxinfo-pubfiles-item"]|//dl[@class="sxinfo-pubfiles-item"]/ul/li|//dl[contains(@class,"sxszf-mlists-items")]//ul/li')
        if not li_list:
            raise Exception("policy_shanxilist_callback: no list items matched")
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('dt/a/@href|a/@href').extract_first()
            base_url = f'http://www.shanxi.gov.cn/{callmodel.sql_model.list_rawid}/index.shtml'
            url = parse.urljoin(base_url, href)
            # Keep only on-site html article links.
            if 'htm' not in url or 'shanxi' not in url:
                continue
            # rawid is the id part of the filename, e.g. "..._<rawid>.shtml".
            if '_' in url.split('/')[-1]:
                rawid = re.findall(r'_(.*?)\.', url.split('/')[-1])[0]
            else:
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]

            temp["rawid"] = rawid
            temp["sub_db_id"] = '99034'
            article_json["url"] = url
            article_json["title"] = li.xpath('dt/a/text()|a/text()').extract_first().strip()
            if 'xzgfxwj/yxwj' in callmodel.sql_model.list_rawid:
                # This channel has no date on the list page; resolved at ETL time.
                article_json["pub_date"] = ''
            else:
                article_json["pub_date"] = li.xpath('dd/i[2]/text()|span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_shanxiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article-stage callback for Shanxi: downloading is enough,
    all parsing happens in the ETL callback."""
    return DealModel()


def policy_shanxiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Shanxi gov (shanxi.gov.cn) policy article pages, sub_db_id 99034.

    Handles two page layouts: the "affairs-detail-head" metadata table and
    the "sxgzk-detail-filed" field list. Builds `policy_latest` /
    `policy_fulltext_latest` rows and records attachment info back into the
    source row's `other_dicts`.

    Raises:
        Exception: if no fulltext node can be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json.get('pub_date', ''))
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: metadata table, then field list, then headline, then list-page title.
    title = ''.join(res.xpath('//table[@class="affairs-detail-head mhide"]//td[contains(text(),"标") and contains(text(),"题")]/following::td[1]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//ul[@class="sxgzk-detail-filed"]//i[contains(text(),"文件名称")]/following::em[1]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="detail-article-title clearfix"]/h2//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    if not pub_date:
        pub_date_info = ''.join(res.xpath('//ul[@class="sxgzk-detail-filed"]//i[contains(text(),"发布日期")]/following::em[1]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    if 'affairs-detail-head' in html:
        # Table-style metadata layout.
        pub_no = ''.join(res.xpath('//table[@class="affairs-detail-head mhide"]//td[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath('//table[@class="affairs-detail-head mhide"]//td[contains(text(),"索") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
        subject = ''.join(res.xpath('//table[@class="affairs-detail-head mhide"]//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
        written_date = ''.join(res.xpath('//table[@class="affairs-detail-head mhide"]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
        legal_status = ''
        organ = ''.join(res.xpath('//table[@class="affairs-detail-head mhide"]//td[contains(text(),"发文机关")]/following::td[1]/text()').extract()).strip()
    else:
        # Field-list layout; it carries fewer metadata fields.
        pub_no = ''.join(res.xpath('//ul[@class="sxgzk-detail-filed"]//i[contains(text(),"文件文号")]/following::em[1]/text()').extract()).strip()
        index_no = ''
        subject = ''
        written_date = ''.join(res.xpath('//ul[@class="sxgzk-detail-filed"]//i[contains(text(),"成文日期")]/following::em[1]/text()').extract()).strip()
        legal_status = ''.join(res.xpath('//ul[@class="sxgzk-detail-filed"]//i[contains(text(),"状态")]/following::em[1]/text()').extract()).strip()
        organ = ''

    fulltext_xpath = '//div[contains(@class,"article-body")]|//div[@class="affairs-detail-inner-cnt oflow-hd"]|//dt[@class="fl_pc"]|//div[@class="sxgzk-detail-con"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("policy_shanxiarticle_etl_callback: fulltext not found")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99034'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "SHANXI"
    zt_provider = "shanxicngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    # data['impl_date'] = clean_pubdate(impl_date)
    # data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    # data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Scan the fulltext node for attachments and persist them on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   新疆维吾尔自治区
def policy_xinjianglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for xinjiang.gov.cn (JSON API, sub_db_id 99059).

    Reads a paged JSON response, schedules follow-up rows for the remaining
    pages on an early run, and queues one article-stage row per result item.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Base fields copied into every queued row; task_tag_next is the tag
    # the article-stage rows will run under.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        data = para_dicts["data"]["1_1"]
        # The API reports the total page count directly.
        max_count = data['totalPage']
        total_page = max_count
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        # NOTE(review): turn_page 7 / 8 appear to select pagination variants
        # that start at page index 0 vs 1 respectively — confirm against the
        # task configuration; the fan-out below only runs on that first page.
        if (turn_page == 7 and page_index < 1) or (turn_page == 8 and page_index < 2):
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Strip DB-managed / per-run columns before re-inserting the row
            # for the other pages (raises KeyError if the model changes shape).
            sql_dict.pop("id")
            sql_dict.pop("update_time")
            sql_dict.pop("create_time")
            sql_dict.pop("null_dicts")
            sql_dict.pop("err_msg")
            sql_dict.pop("other_dicts")
            sql_dict.pop("state")
            sql_dict.pop("failcount")

            for page in range(page_index, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        items = data['results']
        for item in items:
            # NOTE(review): loop-invariant update repeated once per item;
            # side effect is skipped entirely when `items` is empty.
            result.befor_dicts.update.update({'page': total_page})
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = item['url']
            # PDF-only entries are skipped; the article stage expects HTML.
            if '.pdf' in url:
                continue
            rawid = item['manuscriptId']
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99059'
            article_json["url"] = 'http://www.xinjiang.gov.cn' + url
            article_json["title"] = item.get('title', '')
            article_json["publishedTime"] = item.get('publishedTime', '')
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_xinjianglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for xinjiang.gov.cn HTML list pages (sub_db_id 99059).

    On the first page (page_index == 1) it schedules follow-up rows for the
    remaining pages; on every page it extracts article links and queues one
    article-stage row per entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in a createPageHTML('page-div', N ...) call.
        max_count = re.findall(r"createPageHTML\('page-div',(\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: fan out rows for the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if 'xxgkzcjd' in callmodel.sql_model.list_rawid:
        li_list = res.xpath('//ul[@class="list"]/li|//div[@class="gknr_list"]/dl/dd')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('.//div[@class="contitle"]/a/@href|a/@href').extract_first()
            if not href:
                continue
            base_url = f'http://www.xinjiang.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            # Only html article pages are queued.
            if 'htm' not in url:
                continue
            # rawid is the filename without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]

            temp["rawid"] = rawid
            temp["sub_db_id"] = '99059'
            article_json["url"] = url
            if 'xinjiang/tzgg' in callmodel.sql_model.list_rawid:
                article_json["title"] = li.xpath('.//div[@class="contitle"]/a/text()|a/@title').extract_first().strip()
            else:
                article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["publishedTime"] = li.xpath('.//span[@class="time"]/text()|span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_xinjiangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article-stage callback for Xinjiang: downloading is enough,
    all parsing happens in the ETL callback."""
    return DealModel()


def policy_xinjiangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Xinjiang gov (xinjiang.gov.cn) policy article pages, sub_db_id 99059.

    Tries the li-based metadata layout first, then the "syhbox" layout.
    Builds `policy_latest` / `policy_fulltext_latest` rows and records
    attachment info back into the source row's `other_dicts`.

    Raises:
        Exception: if no fulltext node can be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json.get('publishedTime', ''))
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: "syhbox" layout, then headline variants, then the list-page title.
    title = ''.join(res.xpath('//div[@class="syhbox"]//font[text()="标"]/parent::li[1]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1[@class="data-title"]//text()|//div[@class="detail"]/h1//text()|//div[@class="gknbxq_top"]/h2//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata fields of the form "<label>：<value>" on plain <li> items.
    pub_no_info = cleaned(res.xpath('//li[contains(text(),"发文字号")]/text()').extract_first())
    pub_no = cleaned(pub_no_info.split('：')[-1])
    index_no_info = cleaned(res.xpath('//li[contains(text(),"索引号")]/text()').extract_first())
    index_no = cleaned(index_no_info.split('：')[-1])
    organ_info = cleaned(res.xpath('//li[contains(text(),"发文机关")]/text()').extract_first())
    organ = cleaned(organ_info.split('：')[-1])
    written_date_info = cleaned(res.xpath('//li[contains(text(),"成文日期")]/text()').extract_first())
    written_date = cleaned(written_date_info.split('：')[-1])
    subject_info = cleaned(res.xpath('//li[contains(text(),"主题分类")]/text()').extract_first())
    subject = cleaned(subject_info.split('：')[-1])
    legal_status_info = cleaned(res.xpath('//li[contains(text(),"有效性")]/text()').extract_first())
    legal_status = cleaned(legal_status_info.split('：')[-1])
    if not pub_no:
        # Fall back to the "syhbox" metadata layout.
        pub_no = ''.join(res.xpath('//div[@class="syhbox"]//font[contains(text(),"发文字号")]/parent::li[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath('//div[@class="syhbox"]//font[contains(text(),"索")]/parent::li[1]/text()').extract()).strip()
        subject = ''.join(res.xpath('//div[@class="syhbox"]//font[text()="主题分类："]/parent::li[1]/text()').extract()).strip()
        written_date = ''.join(res.xpath('//div[@class="syhbox"]//font[text()="成文日期："]/parent::li[1]/span/text()').extract()).strip()
        legal_status = ''.join(res.xpath('//div[@class="syhbox"]//font[text()="有"]/parent::li[1]/text()').extract()).strip()
        organ = ''.join(res.xpath('//div[@class="syhbox"]//font[text()="发文机关："]/parent::li[1]/text()').extract()).strip()

    fulltext_xpath = '//div[@class="gknbxq_detail"]|//div[@id="NewsContent"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("policy_xinjiangarticle_etl_callback: fulltext not found")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99059'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "XINJIANG"
    zt_provider = "xinjiangcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    # data['impl_date'] = clean_pubdate(impl_date)
    # data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    # data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Scan the fulltext node for attachments and persist them on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_pbcarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for People's Bank of China (PBC) policy article pages, sub_db_id 99009.

    Extracts metadata and fulltext from the downloaded article HTML, builds
    `policy_latest` / `policy_fulltext_latest` rows, and records attachment
    info back into the source row's `other_dicts`.

    Raises:
        Exception: if no fulltext node can be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = cleaned(article_json['title'])
    provider_url = article_json['url']

    res = Selector(text=html)
    # Publish date: on-page span first, then the list-page value.
    pub_date = clean_pubdate(res.xpath('//span[@id="shijian"]/text()').extract_first())
    pub_year = pub_date[:4]
    if not pub_date:
        pub_date = clean_pubdate(article_json['pub_date'])
        pub_year = pub_date[:4]
    title = ''.join(res.xpath('//div[@class="content_box"]/h3//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//ul[@id="con_fg"]//span[contains(text(),"文号：")]/parent::li[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//ul[@id="con_fg"]//span[contains(text(),"索引号")]/parent::li[1]/text()').extract()).strip()
    impl_date = ''.join(res.xpath('//ul[@id="con_fg"]//span[contains(text(),"生效日期：")]/parent::li[1]/text()').extract()).strip()
    subject_word = ''.join(res.xpath('//ul[@id="con_fg"]//span[contains(text(),"主题词：")]/parent::li[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//ul[@id="con_fg"]//span[contains(text(),"发文机关：")]/parent::li[1]/text()').extract()).strip()

    fulltext_xpath = '//div[@id="zoom"]|//font[@id="zoom"]|//div[@class="txt_con"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("policy_pbcarticle_etl_callback: fulltext not found")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99009'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "PBC"
    zt_provider = "pbccngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    # data['written_date'] = clean_pubdate(written_date)
    data['impl_date'] = clean_pubdate(impl_date)
    # data['invalid_date'] = clean_pubdate(invalid_date)
    # data['subject'] = subject
    data['subject_word'] = subject_word
    # data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Scan the fulltext node for attachments and persist them on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_customsarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for China Customs (GACC) policy article pages, sub_db_id 99021.

    Metadata rows in the "hgfg_con" block look like "【label】value", so each
    field is split on the closing '】'. Builds `policy_latest` /
    `policy_fulltext_latest` rows and records attachment info back into the
    source row's `other_dicts`.

    Raises:
        Exception: if no fulltext node can be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = cleaned(article_json['title'])
    provider_url = article_json['url']

    res = Selector(text=html)
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]

    title = "".join(res.xpath('//div[@class=" easysite-news-title"]/h2//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    if not pub_date:
        # Fall back to the meta tag when the list page carried no date.
        pub_date_info = "".join(res.xpath('//meta[@name="PubDate"]/@content').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]

    index_no = ''.join(res.xpath('//div[@id="hgfg_con"]//span[contains(text(),"索")]/parent::div[1]//text()').extract()).strip()
    index_no = index_no.split('】', 1)[-1].strip()
    organ = ''.join(res.xpath('//div[@id="hgfg_con"]//span[contains(text(),"发文机关")]/parent::div[1]//text()').extract()).strip()
    organ = organ.split('】', 1)[-1].strip()
    # Multiple issuing organs are space-separated on the page; normalize to ';'.
    organ = re.sub(' +', ';', organ)
    raw_type = ''.join(res.xpath('//div[@id="hgfg_con"]//span[contains(text(),"法规类型")]/parent::div[1]//text()').extract()).strip()
    raw_type = raw_type.split('】', 1)[-1].strip()
    subject = ''.join(res.xpath('//div[@id="hgfg_con"]//span[contains(text(),"内容类别")]/parent::div[1]//text()').extract()).strip()
    subject = subject.split('】', 1)[-1].strip()
    legal_status = ''.join(res.xpath('//div[@id="hgfg_con"]//span[contains(text(),"效力")]/parent::div[1]//text()').extract()).strip()
    legal_status = legal_status.split('】', 1)[-1].strip()
    if '请选择' in legal_status:
        # The page sometimes leaves the dropdown placeholder in this field.
        legal_status = ''
    written_date = ''.join(res.xpath('//div[@id="hgfg_con"]//span[contains(text(),"成文日期")]/parent::div[1]//text()').extract()).strip()
    written_date = written_date.split('】', 1)[-1].strip()
    impl_date = ''.join(res.xpath('//div[@id="hgfg_con"]//span[contains(text(),"生效日期")]/parent::div[1]//text()').extract()).strip()
    impl_date = impl_date.split('】', 1)[-1].strip()
    pub_no = ''.join(res.xpath('//div[@id="hgfg_con"]//span[contains(text(),"文") and contains(text(),"号")]/parent::div[1]//text()').extract()).strip()
    pub_no = pub_no.split('】', 1)[-1].strip()

    fulltext_xpath = '//div[@id="zoom"]|//div[@id="easysiteText"]|//div[@class="zcjd_con"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("policy_customsarticle_etl_callback: fulltext not found")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99021'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CUSTOMS"
    zt_provider = "customscngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['legal_status'] = legal_status
    data['impl_date'] = clean_pubdate(impl_date)
    data['written_date'] = clean_pubdate(written_date)
    data['raw_type'] = raw_type
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Scan the fulltext node for attachments and persist them on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_hubeiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Hubei gov policy article pages, sub_db_id 99045.

    Unlike sibling callbacks this one builds the `policy_latest` row by hand
    instead of calling `init_data`. Builds `policy_latest` /
    `policy_fulltext_latest` rows and records attachment info back into the
    source row's `other_dicts`.

    Raises:
        Exception: if no fulltext node can be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = cleaned(article_json['title'])
    provider_url = article_json['url']

    res = Selector(text=html)
    # NOTE(review): cleaned() is handed the full extract() list here, while
    # sibling callbacks pass a single string — confirm cleaned() accepts lists.
    pub_date = cleaned(res.xpath('//strong[contains(text(),"发布日期")]/parent::div[1]/text()').extract()).strip()
    pub_year = pub_date[:4]

    index_no = cleaned(res.xpath('//strong[contains(text(),"索")]/parent::div[1]/text()').extract()).strip()
    organ = cleaned(res.xpath('//strong[contains(text(),"发布机构")]/parent::div[1]/text()').extract()).strip()
    subject = cleaned(res.xpath('//strong[contains(text(),"分")]/parent::div[1]/text()').extract()).strip()
    legal_status = cleaned(res.xpath('//strong[contains(text(),"效力状态")]/parent::div[1]/text()').extract()).strip()
    written_date = cleaned(res.xpath('//strong[contains(text(),"发文日期")]/parent::div[1]/text()').extract()).strip()
    pub_no = cleaned(res.xpath('//strong[contains(text(),"文    号")]/parent::div[1]/text()').extract()).strip()

    # Keep a single xpath for both fulltext extraction and attachment scanning,
    # consistent with the sibling *_etl_callback functions.
    fulltext_xpath = '//div[@class="hbgov-article-content"]|//div[@class="row content_block"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("policy_hubeiarticle_etl_callback: fulltext not found")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99045'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    print(lngid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'HUBEI'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'hubeicngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['legal_status'] = legal_status
    data['written_date'] = written_date
    # data['raw_type'] = raw_type
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'

    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data
    # Scan the same fulltext node(s) for attachments (previously only the
    # first xpath alternative was scanned, missing "row content_block" pages).
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_gansuarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Gansu provincial government policy articles (sub_db_id 99053).

    Parses the downloaded article HTML, extracts metadata plus the full text,
    and fills ``result.save_data`` with rows for the ``policy_latest`` and
    ``policy_fulltext_latest`` tables; attachment info from ``get_file_info``
    is written back into the task row's ``other_dicts`` column.

    Raises:
        Exception: when pub_date, title or the full-text node cannot be found.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']

    res = Selector(text=html)

    # Title from the page <h1>; fall back to the ArticleTitle meta tag.
    title = ''.join(res.xpath('//div[@class="main mt8"]/h1/text()|//div[@class="main"]/h1/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"]/@content').extract()).strip()
    # Metadata fields from the "contenttitle" header block (label <font> -> value <span>).
    index_no = ''.join(res.xpath('//div[@class="contenttitle"]//font[contains(text(),"索")]/span/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="contenttitle"]//font[contains(text(),"发文机关")]/span/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="contenttitle"]//font[contains(text(),"主题分类")]/span/text()').extract()).strip()
    subject_word = ''.join(res.xpath('//div[@class="contenttitle"]//font[contains(text(),"主") and contains(text(),"词")]/span/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="contenttitle"]//font[contains(text(),"成文日期")]/span/text()').extract()).strip()
    pub_no = ''.join(res.xpath('//div[@class="contenttitle"]//font[contains(text(),"发文字号")]/span/text()').extract()).strip()

    pub_date_info = ''.join(res.xpath('//div[@class="contenttitle"]//font[contains(text(),"发布日期")]/span/text()').extract()).strip()
    pub_date = clean_pubdate(pub_date_info)
    pub_year = pub_date[:4]
    if not pub_date:
        # Alternate page layout: the date (and source organ) live in an "info" paragraph.
        # BUGFIX: .extract() returns a list -- join it before passing to cleaned(),
        # which every other call site feeds a plain string.
        pub_date_info = cleaned(''.join(res.xpath('//div[contains(@class,"info")]/p[contains(text(),"日期")]/text()').extract())).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
        organ = pub_date_info.split('来源：')[-1] if '来源：' in pub_date_info else ''
    if organ.startswith('省'):
        # Province-level organs are published without the province name prefix.
        organ = '甘肃' + organ
    if not pub_date:
        raise Exception('pub_date not found')
    if not title:
        raise Exception('title not found')
    fulltext = res.xpath('//div[@id="detailContent"]').extract_first()
    if not fulltext:
        raise Exception('fulltext not found')

    # Row for the policy_latest metadata table.
    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99053'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    print(lngid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'GANSU'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'gansucngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['subject_word'] = subject_word
    data['written_date'] = written_date
    save_data.append({'table': 'policy_latest', 'data': data})

    # Row for the policy_fulltext_latest table (carries the HTML body).
    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'

    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Persist attachment info (or "{}" when none) into the task row's other_dicts.
    file_info = get_file_info(data, res, '(//div[@id="detailContent"])')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   浙江省发改委
def policy_fzggwzjlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Zhejiang Development & Reform Commission
    (fzggw.zj.gov.cn, sub_db_id 99060).

    On early pages it fans out one task row per 3-page window (15 records per
    page) with a start/end record range in ``list_json``; for every <record>
    on the current page it queues an article task carrying url/title/pub_date
    as JSON.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string so \d is a regex digit class, not a (deprecated) string escape.
        max_count = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 395  # fallback total when the tag is absent
        total_page = math.ceil(max_count / 15)
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        if (turn_page == 7 and page_index < 1) or (turn_page == 8 and page_index < 2):
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Drop bookkeeping columns so the copies insert as fresh task rows.
            sql_dict.pop("id")
            sql_dict.pop("update_time")
            sql_dict.pop("create_time")
            sql_dict.pop("null_dicts")
            sql_dict.pop("err_msg")
            sql_dict.pop("other_dicts")
            sql_dict.pop("state")
            sql_dict.pop("failcount")

            # One task per window of 3 pages x 15 records.
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 15 + 1
                end = (page + 2) * 15
                if end >= max_count:
                    end = max_count
                dic = {"start": f"{start}", "end": f"{end}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        for li in li_list:
            result.befor_dicts.update.update({'page': total_page})
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = 'https://fzggw.zj.gov.cn/col/col1229565788/index.html'
            url = parse.urljoin(base_url, href)

            # rawid is the article id embedded in ".../art_<id>.htm".
            rawid = re.findall(r'art_(.*?)\.htm', url)[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99060'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first()
            article_json["pub_date"] = li.xpath('span/text()').extract_first()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_fzggwzjarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article download callback for the Zhejiang DRC; no post-processing needed."""
    return DealModel()


def policy_fzggwzjarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Zhejiang Development & Reform Commission articles
    (sub_db_id 99060).

    Extracts metadata and full text from the downloaded page and queues rows
    for ``policy_latest`` / ``policy_fulltext_latest``; attachment info is
    written back into the task row's ``other_dicts``.

    Raises:
        Exception: when pub_date or the full-text node cannot be found.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']

    res = Selector(text=html)
    # Prefer the on-page title; fall back to the one captured on the list page.
    title = cleaned(res.xpath('//p[contains(@class,"con-title")]/text()').extract_first())
    if not title:
        title = article_json['title'].strip()
    pub_date = cleaned(article_json['pub_date'].replace('-', ''))
    pub_year = pub_date[:4]

    # Metadata cells: each label <td> is immediately followed by its value <td>.
    index_no = cleaned(res.xpath('//td[contains(text(),"索引号")]/following::td[1]/text()').extract_first())
    organ = cleaned(res.xpath('//td[contains(text(),"发布机构")]/following::td[1]/text()').extract_first())
    raw_type = cleaned(res.xpath('//td[contains(text(),"组配分类")]/following::td[1]/text()').extract_first())
    rawid_alt = cleaned(res.xpath('//td[contains(text(),"统一编号")]/following::td[1]/text()').extract_first())
    pub_no = cleaned(res.xpath('//td[contains(text(),"文件编号")]/following::td[1]/text()').extract_first())
    legal_status = cleaned(res.xpath('//td[contains(text(),"有效性")]/following::td[1]/text()').extract_first())
    # BUGFIX: only prefix when an organ was actually extracted, matching the
    # sibling Zhejiang callbacks; previously an empty organ became just "浙江".
    if organ and '浙江' not in organ:
        organ = '浙江' + organ
    if not pub_date:
        raise Exception('pub_date not found')
    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception('fulltext not found')

    # Row for the policy_latest metadata table.
    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99060'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    print(lngid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'FZGGWZJ'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'fzggwzjcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['legal_status'] = legal_status
    data['raw_type'] = raw_type
    data['rawid_alt'] = rawid_alt
    save_data.append({'table': 'policy_latest', 'data': data})

    # Row for the policy_fulltext_latest table (carries the HTML body).
    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'

    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Persist attachment info (or "{}" when none) into the task row's other_dicts.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   浙江省经信厅
def policy_jxtzjlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Zhejiang Economy & Information Technology
    Department (jxt.zj.gov.cn, sub_db_id 99061).

    On page 1 it fans out one task row per 3-page window (15 records per
    page), propagating ``page_info`` from the current ``list_json``; for every
    <record> on the current page it queues an article task carrying
    url/title/pub_date as JSON.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string so \d is a regex digit class, not a (deprecated) string escape.
        max_count = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 395  # fallback total when the tag is absent
        total_page = math.ceil(max_count / 15)
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Drop bookkeeping columns so the copies insert as fresh task rows.
            sql_dict.pop("id")
            sql_dict.pop("update_time")
            sql_dict.pop("create_time")
            sql_dict.pop("null_dicts")
            sql_dict.pop("err_msg")
            sql_dict.pop("other_dicts")
            sql_dict.pop("state")
            sql_dict.pop("failcount")
            list_json = json.loads(callmodel.sql_model.list_json)
            # One task per window of 3 pages x 15 records.
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 15 + 1
                end = (page + 2) * 15
                if end >= max_count:
                    end = max_count
                dic = {"start": f"{start}", "end": f"{end}", "page_info": f"{list_json['page_info']}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        for li in li_list:
            result.befor_dicts.update.update({'page': total_page})
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = 'http://jxt.zj.gov.cn/col/col1582899/index.html?'
            url = parse.urljoin(base_url, href)

            # rawid is the article id embedded in ".../art_<id>.htm".
            rawid = re.findall(r'art_(.*?)\.htm', url)[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99061'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first()
            article_json["pub_date"] = li.xpath('span/text()').extract_first()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jxtzjarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article download callback for the Zhejiang EIT Department; no post-processing needed."""
    return DealModel()


def policy_jxtzjarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Zhejiang Economy & Information Technology Department
    articles (sub_db_id 99061).

    Extracts metadata and full text from the downloaded page and queues rows
    for ``policy_latest`` / ``policy_fulltext_latest``; attachment info is
    written back into the task row's ``other_dicts``.

    Raises:
        Exception: when pub_date or the full-text node cannot be found.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']

    res = Selector(text=html)
    # Prefer the on-page title; fall back to the one captured on the list page.
    title = cleaned(res.xpath('//div[contains(@class,"art_title")]/h2/text()|//p[@class="con-title"]/text()').extract_first())
    if not title:
        title = article_json['title'].strip()
    pub_date = cleaned(article_json['pub_date'].replace('-', ''))
    pub_year = pub_date[:4]

    # NOTE(review): these labels use a half-width colon plus one space; if the
    # site omits the space the replace is a no-op -- verify against live pages.
    index_no = cleaned(res.xpath('//a[contains(text(),"索引号:")]/text()').extract_first())
    index_no = index_no.replace('索引号: ', '')
    organ = cleaned(res.xpath('//a[contains(text(),"发布机构:")]/text()').extract_first())
    organ = organ.replace('发布机构: ', '')
    pub_no = cleaned(res.xpath('//a[contains(text(),"发文字号:")]/text()').extract_first())
    # BUGFIX: the XPath anchors on the half-width colon form ("发文字号:") but
    # only full-width variants were stripped, leaving the label in pub_no.
    # Also strip the longer label before the shorter one so "发文字号" is not
    # mangled into "发文字" by the "文号" replacement.
    pub_no = pub_no.replace('发文字号：', '').replace('发文字号:', '')
    pub_no = pub_no.replace('文号：', '').replace('文号:', '')
    if organ and '浙江' not in organ:
        organ = '浙江' + organ
    if not pub_date:
        raise Exception('pub_date not found')
    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception('fulltext not found')

    # Row for the policy_latest metadata table.
    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99061'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    print(lngid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'JXTZJ'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'jxtzjcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    save_data.append({'table': 'policy_latest', 'data': data})

    # Row for the policy_fulltext_latest table (carries the HTML body).
    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'

    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Persist attachment info (or "{}" when none) into the task row's other_dicts.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   浙江省科技厅
def policy_kjtzjlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Zhejiang Science & Technology Department
    (kjt.zj.gov.cn, sub_db_id 99062).

    On early pages it fans out one task row per 3-page window (14 records per
    page) with a start/end record range in ``list_json``; for every <record>
    on the current page it queues an article task carrying url/title/pub_date
    as JSON.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string so \d is a regex digit class, not a (deprecated) string escape.
        max_count = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 395  # fallback total when the tag is absent
        total_page = math.ceil(max_count / 14)
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        if (turn_page == 7 and page_index < 1) or (turn_page == 8 and page_index < 2):
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Drop bookkeeping columns so the copies insert as fresh task rows.
            sql_dict.pop("id")
            sql_dict.pop("update_time")
            sql_dict.pop("create_time")
            sql_dict.pop("null_dicts")
            sql_dict.pop("err_msg")
            sql_dict.pop("other_dicts")
            sql_dict.pop("state")
            sql_dict.pop("failcount")

            # One task per window of 3 pages x 14 records.
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 14 + 1
                end = (page + 2) * 14
                if end >= max_count:
                    end = max_count
                dic = {"start": f"{start}", "end": f"{end}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        for li in li_list:
            result.befor_dicts.update.update({'page': total_page})
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = 'http://kjt.zj.gov.cn/col/col1229080140/index.html'
            url = parse.urljoin(base_url, href)

            # rawid is the article id embedded in ".../art_<id>.htm".
            rawid = re.findall(r'art_(.*?)\.htm', url)[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99062'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first()
            article_json["pub_date"] = li.xpath('span/text()').extract_first()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_kjtzjarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article download callback for the Zhejiang S&T Department; no post-processing needed."""
    return DealModel()


def policy_kjtzjarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Zhejiang Science & Technology Department articles
    (sub_db_id 99062).

    Extracts metadata and full text from the downloaded page and queues rows
    for ``policy_latest`` / ``policy_fulltext_latest``; attachment info is
    written back into the task row's ``other_dicts``.

    Raises:
        Exception: when pub_date or the full-text node cannot be found.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']

    res = Selector(text=html)
    # Prefer the on-page title; fall back to the one captured on the list page.
    title = cleaned(res.xpath('//div[contains(@class,"art_title")]/h2/text()|//div[@class="article-top"]/p/text()').extract_first())
    if not title:
        title = article_json['title'].strip()
    pub_date = cleaned(article_json['pub_date'].replace('-', ''))
    pub_year = pub_date[:4]

    # Metadata cells: each label <td> is immediately followed by its value <td>.
    index_no = cleaned(res.xpath('//td[contains(text(),"索引号")]/following::td[1]/text()').extract_first())
    organ = cleaned(res.xpath('//td[contains(text(),"发布机构")]/following::td[1]/text()').extract_first())
    pub_no = cleaned(res.xpath('//td[contains(text(),"文号：")]/following::td[1]/text()').extract_first())
    legal_status = cleaned(res.xpath('//td[contains(text(),"有效性：")]/following::td[1]/text()').extract_first())
    rawid_alt = cleaned(res.xpath('//td[contains(text(),"规范性文件登记号")]/following::td[1]/text()').extract_first())
    # BUGFIX: only prefix when an organ was actually extracted, matching the
    # sibling Zhejiang callbacks; previously an empty organ became just "浙江".
    if organ and '浙江' not in organ:
        organ = '浙江' + organ
    if not pub_date:
        raise Exception('pub_date not found')
    fulltext_xpath = '//div[@class="article-conter"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception('fulltext not found')

    # Row for the policy_latest metadata table.
    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99062'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    print(lngid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'KJTZJ'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'kjtzjcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['legal_status'] = legal_status
    data['rawid_alt'] = rawid_alt
    save_data.append({'table': 'policy_latest', 'data': data})

    # Row for the policy_fulltext_latest table (carries the HTML body).
    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'

    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Persist attachment info (or "{}" when none) into the task row's other_dicts.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   浙江省教育厅
def policy_jytzjlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Zhejiang Education Department
    (jyt.zj.gov.cn, sub_db_id 99063).

    Reads the total page count from the #totalpages span, fans out one task
    row per page on early pages, and queues an article task for every row of
    the listing table whose link targets an ".../art_<id>.htm" page.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])

        total_page = int(res.xpath('//span[@id="totalpages"]/text()').extract_first())
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        if (turn_page == 7 and page_index < 1) or (turn_page == 8 and page_index < 2):
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Drop bookkeeping columns so the copies insert as fresh task rows.
            sql_dict.pop("id")
            sql_dict.pop("update_time")
            sql_dict.pop("create_time")
            sql_dict.pop("null_dicts")
            sql_dict.pop("err_msg")
            sql_dict.pop("other_dicts")
            sql_dict.pop("state")
            sql_dict.pop("failcount")

            # One task row per listing page.
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        # Skip the header row of the nested listing table.
        li_list = res.xpath('//table//table/tr')[1:]
        for li in li_list:
            result.befor_dicts.update.update({'page': total_page})
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('td[1]/a/@href').extract_first()
            base_url = 'http://jyt.zj.gov.cn/col/col1532802/index.html'
            url = parse.urljoin(base_url, href)
            if 'art_' not in url:
                continue  # non-article rows (e.g. external links) are ignored
            # Raw string so \. is a regex escape, not a (deprecated) string escape.
            rawid = re.findall(r'art_(.*?)\.htm', url)[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99063'
            article_json["url"] = url
            article_json["title"] = li.xpath('td[1]/a/text()').extract_first()
            article_json["pub_date"] = li.xpath('td[3]/text()').extract_first()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jytzjarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article download callback for the Zhejiang Education Department; no post-processing needed."""
    return DealModel()


def policy_jytzjarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for jyt.zj.gov.cn (浙江省教育厅) article pages.

    Parses the downloaded article HTML, builds the metadata row for
    ``policy_latest`` and the fulltext row for ``policy_fulltext_latest``,
    and schedules an ``other_dicts`` update carrying attachment info.

    Raises:
        Exception: when the publish date or the fulltext node is missing,
            so the task is marked failed and can be retried.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']

    res = Selector(text=html)
    # Prefer the on-page title; fall back to the one captured at list stage.
    title = cleaned(res.xpath('//td[@class="title"]/text()').extract_first())
    if not title:
        title = article_json['title'].strip()
    pub_date = cleaned(article_json['pub_date'].replace('-', ''))
    pub_year = pub_date[:4]

    # Metadata cells embed their labels in the text, so strip them off.
    index_no = cleaned(res.xpath('//td[contains(text(),"索引号:")]/text()').extract_first())
    index_no = index_no.replace('索引号: ', '')
    organ = cleaned(res.xpath('//td[contains(text(),"发布机构:")]/text()').extract_first())
    organ = organ.replace('发布机构: ', '')
    subject = cleaned(res.xpath('//td[contains(text(),"主题分类: ")]/text()').extract_first())
    subject = subject.replace('主题分类: ', '')
    pub_no = cleaned(res.xpath('//td[contains(text(),"文件编号:")]/text()').extract_first())
    pub_no = pub_no.replace('文件编号:', '')
    # Prefix the issuing organ with the province name when absent; the
    # "and organ" guard matches sibling callbacks and keeps an empty organ empty.
    if '浙江' not in organ and organ:
        organ = '浙江' + organ
    if not pub_date:
        raise Exception("pub_date is empty")
    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("fulltext node not found")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99063'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)

    data = dict()
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'JYTZJ'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'jytzjcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (if any) back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   浙江省民政厅
def policy_mztzjlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for mzt.zj.gov.cn (浙江省民政厅).

    On the first page it expands pagination into additional list tasks
    (each task covers three 15-record pages); for every page it turns the
    <record> links into article-stage tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        # Fall back to a historical total when the marker is missing.
        max_count = int(max_count[0]) if max_count else 395
        total_page = math.ceil(max_count / 15)
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Drop bookkeeping columns before re-inserting the task rows.
            for key in ("id", "update_time", "create_time", "null_dicts",
                        "err_msg", "other_dicts", "state", "failcount"):
                sql_dict.pop(key)
            list_json = json.loads(callmodel.sql_model.list_json)
            # Step 3: each generated task fetches three pages (45 records).
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 15 + 1
                end = min((page + 2) * 15, max_count)
                dic = {"start": f"{start}", "end": f"{end}", "page_info": f"{list_json['page_info']}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        if li_list:
            # Loop-invariant; only set when at least one record was found,
            # matching the original in-loop behavior.
            result.befor_dicts.update.update({'page': total_page})
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = 'http://mzt.zj.gov.cn/col/col1633560/index.html'
            url = parse.urljoin(base_url, href)
            if 'art_' not in url:
                continue
            rawid = re.findall(r'art_(.*?)\.htm', url)[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99064'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first()
            article_json["pub_date"] = li.xpath('span/text()|b/text()').extract_first()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_mztzjarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for mzt.zj.gov.cn; the download stage needs no
    extra post-processing, so an empty DealModel is returned."""
    return DealModel()


def policy_mztzjarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for mzt.zj.gov.cn (浙江省民政厅) article pages.

    Extracts metadata and fulltext from the downloaded article HTML and
    emits rows for ``policy_latest`` / ``policy_fulltext_latest``, plus an
    ``other_dicts`` update recording attachment info.

    Raises:
        Exception: when the publish date or the fulltext node is missing.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']

    res = Selector(text=html)
    # Prefer the on-page title; fall back to the list-stage one.
    title = cleaned(res.xpath('//div[@class="art_title"]/h2[1]/text()').extract_first())
    if not title:
        title = article_json['title'].strip()
    pub_date = cleaned(article_json['pub_date'].replace('-', ''))
    pub_year = pub_date[:4]

    index_no = ''.join(res.xpath('//td[contains(text(),"索 引 号")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//td[contains(text(),"发布单位")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
    pub_no = ''.join(res.xpath('//td[contains(text(),"文　　号")]/following::td[1]/text()').extract()).strip()
    # Prefix the issuing organ with the province name when absent.
    if '浙江' not in organ and organ:
        organ = '浙江' + organ
    if not pub_date:
        raise Exception("pub_date is empty")
    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("fulltext node not found")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99064'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)

    data = dict()
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'MZTZJ'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'mztzjcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['written_date'] = written_date
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (if any) back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   浙江省财政厅
def policy_cztzjlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for czt.zj.gov.cn (浙江省财政厅).

    On the first page it expands pagination into additional list tasks
    (each task covers three 15-record pages); for every page it turns the
    <record> links into article-stage tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        # Fall back to a historical total when the marker is missing.
        max_count = int(max_count[0]) if max_count else 395
        total_page = math.ceil(max_count / 15)
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Drop bookkeeping columns before re-inserting the task rows.
            for key in ("id", "update_time", "create_time", "null_dicts",
                        "err_msg", "other_dicts", "state", "failcount"):
                sql_dict.pop(key)
            list_json = json.loads(callmodel.sql_model.list_json)
            # Step 3: each generated task fetches three pages (45 records).
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 15 + 1
                end = min((page + 2) * 15, max_count)
                dic = {"start": f"{start}", "end": f"{end}", "page_info": f"{list_json['page_info']}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        if li_list:
            # Loop-invariant; only set when at least one record was found,
            # matching the original in-loop behavior.
            result.befor_dicts.update.update({'page': total_page})
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://czt.zj.gov.cn/col/col{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'art_' not in url:
                continue
            rawid = re.findall(r'art_(.*?)\.htm', url)[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99065'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first()
            article_json["pub_date"] = li.xpath('span/text()').extract_first()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cztzjarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for czt.zj.gov.cn; the download stage needs no
    extra post-processing, so an empty DealModel is returned."""
    return DealModel()


def policy_cztzjarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for czt.zj.gov.cn (浙江省财政厅) article pages.

    Extracts metadata and fulltext from the downloaded article HTML and
    emits rows for ``policy_latest`` / ``policy_fulltext_latest``, plus an
    ``other_dicts`` update recording attachment info.

    Raises:
        Exception: when the publish date or the fulltext node is missing.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']

    res = Selector(text=html)
    # Prefer the on-page title; fall back to the list-stage one.
    title = cleaned(res.xpath('//div[@class="art_title"]/h2[1]/text()').extract_first())
    if not title:
        title = article_json['title'].strip()
    pub_date = cleaned(article_json['pub_date'].replace('-', ''))
    pub_year = pub_date[:4]

    # Only the information source is available on this page layout.
    organ = ''.join(res.xpath('//span[contains(text(),"信息来源：")]//text()').extract()).strip()
    organ = organ.replace('信息来源：', '')
    # Prefix the issuing organ with the province name when absent.
    if '浙江' not in organ and organ:
        organ = '浙江' + organ
    if not pub_date:
        raise Exception("pub_date is empty")
    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("fulltext node not found")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99065'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)

    data = dict()
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'CZTZJ'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'cztzjcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['organ'] = organ
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (if any) back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   浙江省人力社保厅
def policy_rlsbtzjlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for rlsbtzj.zj.gov.cn (浙江省人力社保厅).

    Expands pagination into additional list tasks (each task covers three
    10-record pages) while the task is still on an early pass, then turns
    the <record> links into article-stage tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        # Fall back to a historical total when the marker is missing.
        max_count = int(max_count[0]) if max_count else 395
        total_page = math.ceil(max_count / 10)
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        if (turn_page == 7 and page_index < 1) or (turn_page == 8 and page_index < 2):
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Drop bookkeeping columns before re-inserting the task rows.
            for key in ("id", "update_time", "create_time", "null_dicts",
                        "err_msg", "other_dicts", "state", "failcount"):
                sql_dict.pop(key)

            # Step 3: each generated task fetches three pages (30 records).
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 10 + 1
                end = min((page + 2) * 10, max_count)
                dic = {"start": f"{start}", "end": f"{end}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        if li_list:
            # Loop-invariant; only set when at least one record was found,
            # matching the original in-loop behavior.
            result.befor_dicts.update.update({'page': total_page})
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://rlsbtzj.zj.gov.cn/col/col{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            # Skip non-article links instead of raising IndexError on the
            # regex below; sibling list callbacks already guard this way.
            if 'art_' not in url:
                continue
            rawid = re.findall(r'art_(.*?)\.htm', url)[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99066'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first()
            article_json["pub_date"] = li.xpath('span[2]/text()').extract_first()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_rlsbtzjarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for rlsbtzj.zj.gov.cn; the download stage needs
    no extra post-processing, so an empty DealModel is returned."""
    return DealModel()


def policy_rlsbtzjarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for rlsbtzj.zj.gov.cn (浙江省人力社保厅) article pages.

    Extracts metadata and fulltext from the downloaded article HTML and
    emits rows for ``policy_latest`` / ``policy_fulltext_latest``, plus an
    ``other_dicts`` update recording attachment info.

    Raises:
        Exception: when the publish date or the fulltext node is missing.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']

    res = Selector(text=html)
    # Prefer the on-page title; fall back to the list-stage one.
    title = cleaned(res.xpath('//div[contains(text(),"法规名称")]/following::td[1]/div/text()').extract_first())
    if not title:
        title = article_json['title'].strip()
    pub_date = cleaned(article_json['pub_date'].replace('-', ''))
    pub_year = pub_date[:4]

    index_no = cleaned(res.xpath('//td[contains(text(),"索引号")]/following::td[1]/text()').extract_first())
    organ = cleaned(res.xpath('//td[contains(text(),"发布机构")]/following::td[1]/text()').extract_first())
    subject = cleaned(res.xpath('//td[contains(text(),"主题分类")]/following::td[1]/text()').extract_first())
    raw_type = cleaned(res.xpath('//td[contains(text(),"体裁分类")]/following::td[1]/text()').extract_first())
    pub_no = cleaned(res.xpath('//td[contains(text(),"法规文号")]/following::td[1]/text()').extract_first())
    legal_status = cleaned(res.xpath('//td[contains(text(),"有效性")]/following::td[1]/text()').extract_first())
    rawid_alt = cleaned(res.xpath('//td[contains(text(),"统一编号")]/following::td[1]/text()').extract_first())
    # Prefix the issuing organ with the province name when absent; the
    # "and organ" guard matches sibling callbacks and keeps an empty organ empty.
    if '浙江' not in organ and organ:
        organ = '浙江' + organ
    if not pub_date:
        raise Exception("pub_date is empty")
    fulltext_xpath = '//td[contains(text(),"法规正文")]/following::td[1]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("fulltext node not found")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99066'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)

    data = dict()
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'RLSBTZJ'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'rlsbtzjcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['legal_status'] = legal_status
    data['raw_type'] = raw_type
    data['rawid_alt'] = rawid_alt
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (if any) back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   浙江省农业农村厅
def policy_nynctzjlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for nynct.zj.gov.cn (浙江省农业农村厅).

    Reads the total page count from the page, expands pagination into
    one-task-per-page list tasks on early passes, and turns list items into
    article-stage tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])

        total_page = int(res.xpath('//span[@id="totalpages"]/text()').extract_first())
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        if (turn_page == 7 and page_index < 1) or (turn_page == 8 and page_index < 2):
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Drop bookkeeping columns before re-inserting the task rows.
            for key in ("id", "update_time", "create_time", "null_dicts",
                        "err_msg", "other_dicts", "state", "failcount"):
                sql_dict.pop(key)

            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = res.xpath('//div[@class="zfxxgk_zdgkc"]/ul/li')
        if li_list:
            # Loop-invariant; only set when at least one item was found,
            # matching the original in-loop behavior.
            result.befor_dicts.update.update({'page': total_page})
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = 'http://nynct.zj.gov.cn/col/col1229142011/index.html'
            url = parse.urljoin(base_url, href)
            if 'art_' not in url:
                continue
            rawid = re.findall(r'art_(.*?)\.htm', url)[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99067'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first()
            article_json["pub_date"] = li.xpath('b/text()').extract_first()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_nynctzjarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for nynct.zj.gov.cn; the download stage needs
    no extra post-processing, so an empty DealModel is returned."""
    return DealModel()


def policy_nynctzjarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for 浙江省农业农村厅 (Zhejiang Dept. of Agriculture) policy articles.

    Parses the downloaded article HTML, assembles the metadata row for
    ``policy_latest`` and the fulltext row for ``policy_fulltext_latest``,
    and writes attachment info (from ``get_file_info``) back into the crawl
    row's ``other_dicts`` column.

    :param callmodel: callback model carrying the fetched html under
        ``para_dicts['data']['1_1']['html']`` and the originating sql row.
    :raises Exception: when the publish date or the fulltext node is missing.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']

    res = Selector(text=html)
    # Prefer the title rendered on the page; fall back to the list-page title
    # captured at crawl time.
    title = cleaned(res.xpath('//h1[@class="ncdt-list-title"]/text()').extract_first())
    if not title:
        title = article_json['title'].strip()
    pub_date = cleaned(article_json['pub_date'].replace('-', ''))
    pub_year = pub_date[:4]

    index_no = cleaned(res.xpath('//td[contains(text(),"索引号")]/following::td[1]/text()').extract_first())
    organ = cleaned(res.xpath('//td[contains(text(),"发布机构")]/following::td[1]/text()').extract_first())
    pub_no = cleaned(res.xpath('//td[contains(text(),"文件编号")]/following::td[1]/text()').extract_first())
    legal_status = cleaned(res.xpath('//td[contains(text(),"有效性")]/following::td[1]/text()').extract_first())
    rawid_alt = cleaned(res.xpath('//td[contains(text(),"统一编号")]/following::td[1]/text()').extract_first())
    # Prefix the province only when an organ was actually found — matches the
    # sibling ETL callbacks and avoids a TypeError on a missing value.
    if organ and '浙江' not in organ:
        organ = '浙江' + organ
    if not pub_date:
        raise Exception("pub_date is empty")
    fulltext_xpath = '//div[@class="ncdt-list-box"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("fulltext node not found: " + fulltext_xpath)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99067'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    print(lngid)  # progress trace

    data = dict()
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'NYNCTZJ'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'nynctzjcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['legal_status'] = legal_status
    data['rawid_alt'] = rawid_alt
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'

    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment links found inside the fulltext back onto the crawl row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   浙江省住房和城乡建设厅 (Zhejiang Provincial Dept. of Housing and Urban-Rural Development)
def policy_jstzjlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for 浙江省住房和城乡建设厅 policy documents.

    On page 1 it reads the total page count and clones the current sql row
    once per list page so the crawler fetches them all; for every article
    link on the current page it queues an article-stage row (next task tag)
    carrying url/title/pub_date as ``article_json``.

    :param callmodel: callback model with the fetched list html under
        ``para_dicts['data']['1_1']['html']``.
    :return: DealModel with pagination inserts and article-stage inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])

        total_page = int(res.xpath('//span[@id="totalpages"]/text()').extract_first())
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Seed one crawl row per list page; strip db-managed columns first.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for key in ("id", "update_time", "create_time", "null_dicts",
                        "err_msg", "other_dicts", "state", "failcount"):
                sql_dict.pop(key)

            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = res.xpath('//div[@class="zfxxgk_zdgkc"]/ul/li')
        for li in li_list:
            result.befor_dicts.update.update({'page': total_page})
            temp = info_dicts.copy()
            # Promote the queued row to the article stage.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li.xpath('a/@href').extract_first()
            base_url = 'http://jst.zj.gov.cn/col/col1229125874/index.html'
            url = parse.urljoin(base_url, href)
            if 'art_' not in url:
                continue
            # Raw string: avoids the invalid '\.' escape-sequence warning.
            rawid = re.findall(r'art_(.*?)\.htm', url)[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99068'
            article_json = {
                "url": url,
                "title": li.xpath('a/text()').extract_first(),
                "pub_date": li.xpath('b/text()').extract_first(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jstzjarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article download stage for 浙江省住房和城乡建设厅: no post-processing needed."""
    return DealModel()


def policy_jstzjarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for 浙江省住房和城乡建设厅 policy articles.

    Handles two page layouts (new ``xxgk`` table with <th> labels vs. the
    older ``xxgk_article`` table with <td> labels), assembles the
    ``policy_latest`` and ``policy_fulltext_latest`` rows, and writes
    attachment info back to the crawl row's ``other_dicts``.

    :param callmodel: callback model carrying the fetched html under
        ``para_dicts['data']['1_1']['html']`` and the originating sql row.
    :raises Exception: when the publish date or the fulltext node is missing.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']

    res = Selector(text=html)

    # Title: page <td class="title">, then the ArticleTitle meta tag,
    # finally the list-page title captured at crawl time.
    title = ''.join(res.xpath('//td[@class="title"]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_date = cleaned(article_json['pub_date'].replace('-', ''))
    pub_year = pub_date[:4]

    if 'class="xxgk"' in html:
        index_no = cleaned(res.xpath('//table[@class="xxgk"]//th[contains(text(),"索引号")]/following::td[1]/text()').extract_first())
        organ = cleaned(res.xpath('//table[@class="xxgk"]//th[contains(text(),"发布机构")]/following::td[1]/text()').extract_first())
        pub_no = cleaned(res.xpath('//table[@class="xxgk"]//th[contains(text(),"文号")]/following::td[1]/text()').extract_first())
        legal_status = cleaned(res.xpath('//table[@class="xxgk"]//th[contains(text(),"有效性")]/following::td[1]/text()').extract_first())
        rawid_alt = cleaned(res.xpath('//table[@class="xxgk"]//th[contains(text(),"规范性文件登记号")]/following::td[1]/text()').extract_first())
    else:
        index_no = cleaned(res.xpath('//table[@class="xxgk_article"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract_first())
        organ = cleaned(res.xpath('//table[@class="xxgk_article"]//td[contains(text(),"发布单位")]/following::td[1]/text()').extract_first())
        pub_no = cleaned(res.xpath('//table[@class="xxgk_article"]//td[contains(text(),"文　　号")]/following::td[1]/text()').extract_first())
        legal_status = cleaned(res.xpath('//table[@class="xxgk_article"]//td[contains(text(),"有 效 性")]/following::td[1]/text()').extract_first())
        rawid_alt = cleaned(res.xpath('//table[@class="xxgk_article"]//td[contains(text(),"规范性文件登记号")]/following::td[1]/text()').extract_first())
    # Prefix the province only when an organ was actually found.
    if organ and '浙江' not in organ:
        organ = '浙江' + organ
    if not pub_date:
        raise Exception("pub_date is empty")
    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("fulltext node not found: " + fulltext_xpath)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99068'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    print(lngid)  # progress trace

    data = dict()
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'JSTZJ'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'jstzjcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['legal_status'] = legal_status
    data['rawid_alt'] = rawid_alt
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'

    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment links found inside the fulltext back onto the crawl row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   浙江省卫健委 (Zhejiang Provincial Health Commission)
def policy_wsjkwzjlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for 浙江省卫健委 policy documents.

    The list endpoint returns XML-style ``<record>`` entries, 14 records per
    page; pages are requested three at a time via a start/end record window.
    On the seed pass it clones the current sql row once per 3-page window,
    and for every record it queues an article-stage row carrying
    url/title/pub_date as ``article_json``.

    :param callmodel: callback model with the fetched list payload under
        ``para_dicts['data']['1_1']['html']``.
    :return: DealModel with pagination inserts and article-stage inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        # Fallback total when the payload omits <totalrecord>.
        max_count = int(max_count[0]) if max_count else 395
        total_page = math.ceil(max_count / 14)
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        if (turn_page == 7 and page_index < 1) or (turn_page == 8 and page_index < 2):
            # Seed one crawl row per 3-page window; strip db-managed columns.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for key in ("id", "update_time", "create_time", "null_dicts",
                        "err_msg", "other_dicts", "state", "failcount"):
                sql_dict.pop(key)

            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Record window covering this page and the next two.
                start = (page - 1) * 14 + 1
                end = min((page + 2) * 14, max_count)
                dic = {"start": f"{start}", "end": f"{end}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        for li in li_list:
            result.befor_dicts.update.update({'page': total_page})
            temp = info_dicts.copy()
            # Promote the queued row to the article stage.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li.xpath('a/@href').extract_first()
            base_url = 'https://wsjkw.zj.gov.cn/col/col1229055217/index.html'
            url = parse.urljoin(base_url, href)

            # Skip links without the art_<id>.htm pattern instead of raising
            # IndexError (matches the sibling list callbacks).
            matches = re.findall(r'art_(.*?)\.htm', url)
            if not matches:
                continue
            rawid = matches[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99069'
            article_json = {
                "url": url,
                "title": li.xpath('a/@title').extract_first(),
                "pub_date": li.xpath('span/text()').extract_first(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_wsjkwzjarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article download stage for 浙江省卫健委: no post-processing needed."""
    return DealModel()


def policy_wsjkwzjarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for 浙江省卫健委 policy articles.

    Extracts metadata from the article page (labels use full-width colons),
    assembles the ``policy_latest`` and ``policy_fulltext_latest`` rows, and
    writes attachment info back to the crawl row's ``other_dicts``.

    :param callmodel: callback model carrying the fetched html under
        ``para_dicts['data']['1_1']['html']`` and the originating sql row.
    :raises Exception: when the publish date or the fulltext node is missing.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']

    res = Selector(text=html)
    # Prefer the ArticleTitle meta tag; fall back to the list-page title.
    title = cleaned(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract_first())
    if not title:
        title = article_json['title'].strip()
    # List-page dates on this site are dotted (e.g. 2021.01.02).
    pub_date = cleaned(article_json['pub_date'].replace('.', ''))
    pub_year = pub_date[:4]

    index_no = cleaned(res.xpath('//td[contains(text(),"索 引 号：")]/following::td[1]/text()').extract_first())
    organ = cleaned(res.xpath('//td[contains(text(),"发布机构：")]/following::td[1]/text()').extract_first())
    subject = cleaned(res.xpath('//td[contains(text(),"主题分类")]/following::td[1]/text()').extract_first())
    written_date = cleaned(res.xpath('//td[contains(text(),"成文日期：")]/following::td[1]/text()').extract_first())
    written_date = written_date.replace('-', '')
    pub_no = cleaned(res.xpath('//td[contains(text(),"文 号：")]/following::td[1]/text()').extract_first())
    legal_status = cleaned(res.xpath('//td[contains(text(),"有效性：")]/following::td[1]/text()').extract_first())
    rawid_alt = cleaned(res.xpath('//td[contains(text(),"文件登记号")]/following::td[1]/text()').extract_first())
    # Prefix the province only when an organ was actually found (matches the
    # sibling ETL callbacks; avoids a TypeError on a missing value).
    if organ and '浙江' not in organ:
        organ = '浙江' + organ
    if not pub_date:
        raise Exception("pub_date is empty")
    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("fulltext node not found: " + fulltext_xpath)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99069'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    print(lngid)  # progress trace

    data = dict()
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'WSJKWZJ'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'wsjkwzjcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['legal_status'] = legal_status
    data['written_date'] = written_date
    data['rawid_alt'] = rawid_alt
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'

    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment links found inside the fulltext back onto the crawl row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   浙江省杭州市 (Hangzhou, Zhejiang Province)
def policy_hangzhoulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for 杭州市 (Hangzhou) policy documents.

    The list endpoint returns XML-style ``<record>`` entries, 14 records per
    page, requested three pages at a time via a start/end record window; the
    column id comes from ``sql_model.list_rawid``. On page 1 it seeds one
    crawl row per 3-page window and, for every record, queues an
    article-stage row carrying url/title/pub_date as ``article_json``.

    :param callmodel: callback model with the fetched list payload under
        ``para_dicts['data']['1_1']['html']``.
    :return: DealModel with pagination inserts and article-stage inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        # Fallback total when the payload omits <totalrecord>.
        max_count = int(max_count[0]) if max_count else 395
        total_page = math.ceil(max_count / 14)
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Seed one crawl row per 3-page window; strip db-managed columns.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for key in ("id", "update_time", "create_time", "null_dicts",
                        "err_msg", "other_dicts", "state", "failcount"):
                sql_dict.pop(key)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Record window covering this page and the next two.
                start = (page - 1) * 14 + 1
                end = min((page + 2) * 14, max_count)
                dic = {"start": f"{start}", "end": f"{end}", "page_info": f"{list_json['page_info']}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        for li in li_list:
            result.befor_dicts.update.update({'page': total_page})
            temp = info_dicts.copy()
            # Promote the queued row to the article stage.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.hangzhou.gov.cn/col/col{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)

            # Skip links without the art_<id>.htm pattern instead of raising
            # IndexError (matches the sibling list callbacks).
            matches = re.findall(r'art_(.*?)\.htm', url)
            if not matches:
                continue
            rawid = matches[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99070'
            article_json = {
                "url": url,
                "title": li.xpath('a/@title').extract_first(),
                "pub_date": li.xpath('b/text()|span/text()').extract_first(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_hangzhouarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article download stage for 杭州市: no post-processing needed."""
    return DealModel()


def policy_hangzhouarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for 杭州市 (Hangzhou) policy articles.

    Extracts metadata from the article page, assembles the ``policy_latest``
    and ``policy_fulltext_latest`` rows, and writes attachment info back to
    the crawl row's ``other_dicts``.

    :param callmodel: callback model carrying the fetched html under
        ``para_dicts['data']['1_1']['html']`` and the originating sql row.
    :raises Exception: when the publish date or the fulltext node is missing.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']

    res = Selector(text=html)

    # Title: <td class="title">, then <div class="title">, finally the
    # list-page title captured at crawl time.
    title = cleaned(res.xpath('//td[@class="title"]/text()').extract_first())
    if not title:
        title = cleaned(res.xpath('//div[@class="title"]/text()').extract_first())
    if not title:
        title = article_json['title'].strip()
    # Resolve the date fallback first, then derive the year once — the
    # original derived it before the fallback and could subscript None.
    pub_date = clean_pubdate(res.xpath('//span[@class="day"]/text()').extract_first())
    if not pub_date:
        pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]

    index_no = cleaned(res.xpath('//td[contains(text(),"索引号:")]/following::td[1]/text()').extract_first())
    organ = cleaned(res.xpath('//td[contains(text(),"发布单位:")]/following::td[1]/text()').extract_first())
    subject = cleaned(res.xpath('//td[contains(text(),"主题分类:")]/following::td[1]/text()').extract_first())
    written_date = cleaned(res.xpath('//td[contains(text(),"成文日期:")]/following::td[1]/text()').extract_first())
    written_date = written_date.replace('-', '')
    pub_no = cleaned(res.xpath('//td[contains(text(),"文号:")]/following::td[1]/text()').extract_first())
    legal_status = cleaned(res.xpath('//td[contains(text(),"有效性:")]/following::td[1]/text()').extract_first())
    rawid_alt = cleaned(res.xpath('//td[contains(text(),"统一编号")]/following::td[1]/text()').extract_first())
    # Prefix the city only when an organ was actually found.
    if organ and '杭州' not in organ:
        organ = '杭州' + organ
    if not pub_date:
        raise Exception("pub_date is empty")
    fulltext_xpath = '//div[@id="zoom"]|//div[@class="article"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("fulltext node not found: " + fulltext_xpath)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99070'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    print(lngid)  # progress trace

    data = dict()
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'HANGZHOU'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'hangzhoucngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['legal_status'] = legal_status
    data['written_date'] = written_date
    data['rawid_alt'] = rawid_alt
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'

    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment links found inside the fulltext back onto the crawl row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   浙江省宁波市 (Ningbo, Zhejiang Province)
def policy_ningbolist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for 宁波市 (Ningbo) policy documents.

    Reads the total page count from the list page, seeds one crawl row per
    page on the first pass (gated by ``turn_page``/``page_index``), and for
    every table row queues an article-stage row carrying url/title/pub_date
    as ``article_json``.

    :param callmodel: callback model with the fetched list html under
        ``para_dicts['data']['1_1']['html']``.
    :return: DealModel with pagination inserts and article-stage inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])

        total_page = int(res.xpath('//span[@id="totalpages"]/text()').extract_first())
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        if (turn_page == 7 and page_index < 1) or (turn_page == 8 and page_index < 2):
            # Seed one crawl row per list page; strip db-managed columns first.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for key in ("id", "update_time", "create_time", "null_dicts",
                        "err_msg", "other_dicts", "state", "failcount"):
                sql_dict.pop(key)

            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        # Skip the table header row.
        li_list = res.xpath('//table[@class="ltable"]/tr')[1:]
        for li in li_list:
            result.befor_dicts.update.update({'page': total_page})
            temp = info_dicts.copy()
            # Promote the queued row to the article stage.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li.xpath('td[1]/a/@href').extract_first()
            base_url = 'http://www.ningbo.gov.cn/col/col1229106589/index.html'
            url = parse.urljoin(base_url, href)
            if 'art_' not in url:
                continue
            # Raw string: avoids the invalid '\.' escape-sequence warning.
            rawid = re.findall(r'art_(.*?)\.htm', url)[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99071'
            article_json = {
                "url": url,
                "title": li.xpath('td[1]/a/text()').extract_first(),
                "pub_date": li.xpath('td[2]/text()').extract_first(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_ningboarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article download stage for 宁波市: no post-processing needed."""
    return DealModel()


def policy_ningboarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Ningbo policy article pages.

    Extracts metadata (title, publish date, index number, subject, issuing
    organ) and the HTML fulltext from the downloaded page, builds rows for
    the ``policy_latest`` and ``policy_fulltext_latest`` tables, and stores
    attachment info back onto the task row's ``other_dicts`` column.

    Raises:
        Exception: when the publish date or the fulltext node is missing.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']

    res = Selector(text=html)

    # Prefer the on-page <h1> title; fall back to the list-page title.
    title = ''.join(res.xpath('//h1[@class="title"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_date = cleaned(article_json['pub_date'].replace('-', ''))
    if not pub_date:
        # Validate before slicing pub_year; an empty date makes the record unusable.
        raise Exception(f"missing pub_date for {provider_url}")
    pub_year = pub_date[:4]

    # Two page layouts exist: a div-based "xxgktype" header and an older
    # table-based one; choose the matching metadata xpaths.
    if 'class="xxgktype"' in html:
        index_no = cleaned(res.xpath('//div[@class="xxgktype"]//span[contains(text(),"索引号")]/following::span[1]/text()').extract_first())
        subject = cleaned(res.xpath('//div[@class="xxgktype"]//span[contains(text(),"主题分类")]/following::span[1]/text()').extract_first())
        organ = cleaned(res.xpath('//div[@class="xxgktype"]//span[contains(text(),"发布机构")]/following::span[1]/text()').extract_first())
    else:
        index_no = cleaned(res.xpath('//table[@class="table1"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract_first())
        organ = cleaned(res.xpath('//table[@class="table1"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract_first())
        subject = cleaned(res.xpath('//table[@class="table1"]//td[contains(text(),"主题分类")]/following::td[1]/text()').extract_first())
    # Qualify the issuing organ with the city name when it is absent.
    if ('宁波' not in organ) and organ:
        organ = '宁波' + organ
    if not organ:
        # Last resort: the "released by" note at the bottom of the page.
        organ = ''.join(res.xpath('//div[@class="rul_note"]/p/text()').extract()).replace('发布', '').strip()
    fulltext_xpath = '//div[@class="zoom"]|//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"missing fulltext for {provider_url}")

    # ---- policy_latest row -------------------------------------------------
    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99071'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'NINGBO'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'ningbocngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    save_data.append({'table': 'policy_latest', 'data': data})

    # ---- policy_fulltext_latest row ---------------------------------------
    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'

    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Persist any attachment links found inside the fulltext node.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   浙江省温州市 (Wenzhou, Zhejiang Province)
def policy_wenzhoulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Wenzhou municipal government site.

    Parses the XML-like list response (one ``<record>`` per article),
    expands the seed row into per-page list tasks on the first pass, and
    queues one article task per record for the next stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total record count from the <totalrecord> tag; 395 is a fallback
        # when the tag is absent (presumably the known list size at the
        # time of writing -- TODO confirm).
        max_count = re.findall('<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 395
        total_page = math.ceil(max_count / 20)  # 20 records per page
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        # Only the seed row (depending on the turn_page mode) expands into
        # the full set of paged list tasks, so pagination is generated once.
        if (turn_page == 7 and page_index < 1) or (turn_page == 8 and page_index < 2):
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Drop bookkeeping columns so the copies insert as fresh rows.
            sql_dict.pop("id")
            sql_dict.pop("update_time")
            sql_dict.pop("create_time")
            sql_dict.pop("null_dicts")
            sql_dict.pop("err_msg")
            sql_dict.pop("other_dicts")
            sql_dict.pop("state")
            sql_dict.pop("failcount")

            # NOTE(review): the loop steps by 1 but `end` covers three
            # pages (page..page+2), so requested ranges overlap; the sister
            # sx/jiaxing callbacks step by 3 -- confirm this is intended.
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 20 + 1
                end = (page + 2) * 20
                if end >= max_count:
                    end = max_count
                # Default endpoint; multi-column list ids (comma-separated)
                # go through a different proxy with their own parameters.
                url_part = "dataproxy"
                sourceContentType = "1"
                unitid = "5935759"
                if ',' in callmodel.sql_model.list_rawid:
                    url_part = "morecolumndataproxy"
                    sourceContentType = "3"
                    unitid = "6510888"
                dic = {"start": f"{start}", "end": f"{end}", "url_part": url_part, "sourceContentType":sourceContentType, "unitid":unitid}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        for li in li_list:
            result.befor_dicts.update.update({'page': total_page})
            temp = info_dicts.copy()
            # The article task carries the next stage's tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('div[1]/a/@href').extract_first()
            base_url = f'http://www.wenzhou.gov.cn/col/col{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)

            # rawid is the numeric id embedded in art_<id>.htm urls.
            rawid = re.findall('art_(.*?)\.htm', url)[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99072'
            article_json["url"] = url
            article_json["title"] = li.xpath('div[1]/a/@title').extract_first()
            article_json["pub_date"] = li.xpath('div[2]/text()').extract_first()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_wenzhouarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Wenzhou; no extra processing is required."""
    return DealModel()


def policy_wenzhouarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Wenzhou policy article pages.

    Extracts metadata and the HTML fulltext from the downloaded page,
    builds rows for the ``policy_latest`` and ``policy_fulltext_latest``
    tables, and stores attachment info back onto the task row's
    ``other_dicts`` column.

    Raises:
        Exception: when the publish date or the fulltext node is missing.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = cleaned(article_json['title'])
    provider_url = article_json['url']

    res = Selector(text=html)
    # Prefer the on-page title; fall back to the list-page title.
    title = "".join(res.xpath('//div[@class="title_old"]/p//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_date = cleaned(article_json['pub_date'].replace('.', ''))
    pub_year = pub_date[:4]

    index_no = cleaned(res.xpath('//td[contains(text(),"索引号：")]/following::td[1]/text()').extract_first())
    organ = cleaned(res.xpath('//td[contains(text(),"发布机构：")]/following::td[1]/div/text()').extract_first())
    subject = cleaned(res.xpath('//td[contains(text(),"主题分类：")]/following::td[1]/text()').extract_first())
    # subject_word = cleaned(
    #     res.xpath('//font[contains(text(),"主") and contains(text(),"词")]/span/text()').extract_first())
    written_date = cleaned(res.xpath('//td[contains(text(),"成文日期：")]/following::td[1]/text()').extract_first())
    written_date = written_date.replace('-', '')
    pub_no = cleaned(res.xpath('//td[contains(text(),"文 号：")]/following::td[1]/text()').extract_first())
    # legal_status = cleaned(res.xpath('//td[contains(text(),"有效性:")]/following::td[1]/text()').extract_first())
    # Qualify the issuing organ with the city name when it is absent.
    # NOTE(review): unlike the Ningbo/Jiaxing variants there is no
    # ``and organ`` guard here -- an empty organ becomes just "温州".
    if '温州' not in organ:
        organ = '温州' + organ
    if not pub_date:
        raise Exception
    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception

    # ---- policy_latest row -------------------------------------------------
    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99072'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    print(lngid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'WENZHOU'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'wenzhoucngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    # data['legal_status'] = legal_status
    # data['subject_word'] = subject_word
    data['written_date'] = written_date
    # data['raw_type'] = raw_type
    save_data.append({'table': 'policy_latest', 'data': data})

    # ---- policy_fulltext_latest row ---------------------------------------
    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'

    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Persist any attachment links found inside the fulltext node.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   浙江省湖州市 (Huzhou, Zhejiang Province)
def policy_huzhoulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Huzhou municipal government site.

    The list endpoint returns JSONP; this unwraps and parses the payload,
    expands the seed row into per-page list tasks on the first pass (the
    site's paging parameter is a base64-encoded page number), and queues
    one article task per entry of ``infolist``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # NOTE(review): strips literal "/n" and "/t" sequences; presumably
        # matches the site's escaping -- confirm against a raw response.
        html = html.replace('/n', '').replace('/t', '')
        # Unwrap the JSONP callback: take everything inside the parentheses.
        html_info = re.findall(r'\((.*)\)', html)[0]
        html_dict = json.loads(html_info)
        total_page = html_dict['pages']
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        # Only the seed row (depending on the turn_page mode) expands into
        # the full set of paged list tasks, so pagination is generated once.
        if (turn_page == 7 and page_index < 1) or (turn_page == 8 and page_index < 2):
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Drop bookkeeping columns so the copies insert as fresh rows.
            for key in ("id", "update_time", "create_time", "null_dicts",
                        "err_msg", "other_dicts", "state", "failcount"):
                sql_dict.pop(key)

            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # The site expects the page number base64-encoded.
                pageno = base64.b64encode(str(page).encode()).decode()
                dic = {"pageno": pageno}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        # The payload is pure JSON, so iterate infolist directly; no HTML
        # Selector is needed here (the previous unused one was removed).
        for item in html_dict['infolist']:
            result.befor_dicts.update.update({'page': total_page})
            temp = info_dicts.copy()
            # The article task carries the next stage's tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = item['url']
            # rawid: the numeric id of art_<id>.htm urls, otherwise the
            # last path component without its extension.
            if 'art_' in url:
                rawid = re.findall(r'art_(.*?)\.htm', url)[0]
            else:
                rawid = url.split('/')[-1].split('.')[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99073'
            article_json["url"] = url
            article_json["title"] = item['title']
            article_json["pub_date"] = item['opendate']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_huzhouarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Huzhou; no extra processing is required."""
    return DealModel()


def policy_huzhouarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Huzhou policy article pages.

    Extracts metadata and the HTML fulltext from the downloaded page,
    builds rows for the ``policy_latest`` and ``policy_fulltext_latest``
    tables, and stores attachment info (including an embedded PDF link, if
    present) back onto the task row's ``other_dicts`` column.

    Raises:
        Exception: when the publish date or the fulltext node is missing.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = cleaned(article_json['title'])
    provider_url = article_json['url']

    res = Selector(text=html)
    # Prefer the on-page title; fall back to the list-page title.
    title = "".join(res.xpath('//div[@class="title1"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Publish date: list-page value, then page metadata, then table cell.
    pub_date = cleaned(article_json['pub_date'])
    if not pub_date:
        pub_date = cleaned(res.xpath('//meta[@name="PubDate"]/@content').extract_first())
    if not pub_date:
        pub_date = cleaned(res.xpath('//td[contains(text(),"发文时间：")]/following::td[1]/text()').extract_first())
    pub_year = pub_date[:4]

    index_no = cleaned(res.xpath('//td[contains(text(),"索引号：")]/following::td[1]/text()').extract_first())
    # NOTE(review): .extract() passes a list (not a string) into cleaned()
    # here and for rawid_alt below -- presumably cleaned() accepts lists;
    # verify against its implementation.
    organ = cleaned(res.xpath('//td[contains(text(),"发布机构：")]/following::td[1]/text()').extract())
    # subject = cleaned(res.xpath('//td[contains(text(),"主题分类：")]/following::td[1]/text()').extract_first())
    # subject_word = cleaned(
    #     res.xpath('//font[contains(text(),"主") and contains(text(),"词")]/span/text()').extract_first())
    # written_date = cleaned(res.xpath('//td[contains(text(),"成文日期：")]/following::td[1]/text()').extract_first())
    # written_date = written_date.replace('-', '')
    pub_no = cleaned(res.xpath('//td[contains(text(),"文件编号：")]/following::td[1]/text()').extract_first())
    legal_status = cleaned(res.xpath('//td[contains(text(),"有 效 性：")]/following::td[1]/text()').extract_first())
    rawid_alt = cleaned(res.xpath('//td[contains(text(),"统一编号")]/following::td[1]/text()').extract())
    # Qualify the issuing organ with the city name when it is absent.
    if '湖州' not in organ:
        organ = '湖州' + organ
    if not pub_date:
        raise Exception
    fulltext_xpath = '//div[@class="zhengw"]|//div[@id="zhengw"]|//div[@class="pdf"]'
    fulltext = ' '.join(res.xpath(fulltext_xpath).extract())
    if not fulltext:
        raise Exception

    # ---- policy_latest row -------------------------------------------------
    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99073'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    print(lngid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'HUZHOU'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'huzhoucngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    # data['subject'] = subject
    data['legal_status'] = legal_status
    # data['subject_word'] = subject_word
    # data['written_date'] = written_date
    # data['raw_type'] = raw_type
    data['rawid_alt'] = rawid_alt
    save_data.append({'table': 'policy_latest', 'data': data})

    # ---- policy_fulltext_latest row ---------------------------------------
    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'

    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Persist attachment links; pdf pages embed the file url in a script.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if 'class="pdf"' in html:
        pub_year = data['pub_year']
        keyid = data['keyid']
        # NOTE(review): assumes get_file_info returned a list (appended to
        # below even when empty) -- confirm its return type.
        url_str = re.findall('&pathfile=(.*?)";', html)
        if url_str:
            file_url = parse.unquote(url_str[0])
            file_info.append({'url': file_url, 'name': file_url, 'pub_year': pub_year, 'keyid': keyid})
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   浙江省嘉兴市 (Jiaxing, Zhejiang Province)
def policy_jiaxinglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Jiaxing municipal government site.

    Parses the XML-like list response (one ``<record>`` per article),
    expands the seed row (page_index == 1) into batched list tasks of three
    pages each, and queues one article task per record for the next stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total record count from the <totalrecord> tag; 395 is a fallback
        # when the tag is absent -- TODO confirm its origin.
        max_count = re.findall('<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 395
        total_page = math.ceil(max_count / 15)  # 15 records per page
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page  # NOTE(review): read but unused here
        # Only the first page's row expands into the paged list tasks.
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Drop bookkeeping columns so the copies insert as fresh rows.
            sql_dict.pop("id")
            sql_dict.pop("update_time")
            sql_dict.pop("create_time")
            sql_dict.pop("null_dicts")
            sql_dict.pop("err_msg")
            sql_dict.pop("other_dicts")
            sql_dict.pop("state")
            sql_dict.pop("failcount")
            list_json = json.loads(callmodel.sql_model.list_json)
            # Step 3: each task requests a three-page window start..end.
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 15 + 1
                end = (page + 2) * 15
                if end >= max_count:
                    end = max_count
                dic = {"start": f"{start}", "end": f"{end}", "page_info": f"{list_json['page_info']}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        for li in li_list:
            result.befor_dicts.update.update({'page': total_page})
            temp = info_dicts.copy()
            # The article task carries the next stage's tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.jiaxing.gov.cn/col/col{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            # Skip non-article links (only art_<id>.htm urls carry an id).
            if 'art_' not in url:
                continue
            rawid = re.findall('art_(.*?)\.htm', url)[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99074'
            article_json["url"] = url
            # NOTE(review): extract_first() may return None here, which
            # would raise on .strip() -- confirm every record has a/@title
            # and a span/b date node.
            article_json["title"] = li.xpath('a/@title').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()|b/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jiaxingarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Jiaxing; no extra processing is required."""
    return DealModel()


def policy_jiaxingarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Jiaxing policy article pages.

    Extracts metadata and the HTML fulltext from the downloaded page,
    builds rows for the ``policy_latest`` and ``policy_fulltext_latest``
    tables, and stores attachment info back onto the task row's
    ``other_dicts`` column.

    Raises:
        Exception: when the publish date or the fulltext node is missing.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = cleaned(article_json['title'])
    provider_url = article_json['url']

    res = Selector(text=html)
    # Prefer the on-page title; fall back to the list-page title.
    title = cleaned(res.xpath('//td[@class="title"]/text()').extract())
    if not title:
        title = article_json['title'].strip()
    pub_date = cleaned(article_json['pub_date'].replace('-', ''))
    pub_year = pub_date[:4]

    index_no = cleaned(res.xpath('//td[contains(text(),"索引号")]/following::td[1]/text()').extract_first())
    organ = cleaned(res.xpath('//td[contains(text(),"发布机构")]/following::td[1]/text()').extract_first())
    # subject = cleaned(res.xpath('//td[contains(text(),"组配分类")]/following::td[1]/text()').extract_first())
    raw_type = cleaned(res.xpath('//td[contains(text(),"组配分类")]/following::td[1]/text()').extract_first())
    # subject_word = cleaned(
    #     res.xpath('//font[contains(text(),"主") and contains(text(),"词")]/span/text()').extract_first())
    # written_date = cleaned(article_json['create_date'])
    pub_no = cleaned(res.xpath('//td[contains(text(),"文号")]/following::td[1]/text()').extract_first())
    legal_status = cleaned(res.xpath('//td[contains(text(),"有效性")]/following::td[1]/text()').extract_first())
    # Qualify the issuing organ with the city name when it is absent.
    if '嘉兴' not in organ and organ:
        organ = '嘉兴' + organ
    if not pub_date:
        raise Exception
    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception

    # ---- policy_latest row -------------------------------------------------
    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99074'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    print(lngid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'JIAXING'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'jiaxingcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    # data['subject'] = subject
    data['legal_status'] = legal_status
    # data['subject_word'] = subject_word
    # data['written_date'] = written_date
    data['raw_type'] = raw_type
    save_data.append({'table': 'policy_latest', 'data': data})

    # ---- policy_fulltext_latest row ---------------------------------------
    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'

    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Persist any attachment links found inside the fulltext node.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   浙江省绍兴市 (Shaoxing, Zhejiang Province)
def policy_sxlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Shaoxing municipal government site.

    Parses the XML-like list response (one ``<record>`` per article),
    expands the seed row into batched list tasks of three pages each, and
    queues one article task per record for the next stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total record count from the <totalrecord> tag; 395 is a fallback
        # when the tag is absent -- TODO confirm its origin.
        max_count = re.findall('<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 395
        total_page = math.ceil(max_count / 15)  # 15 records per page
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        # Only the seed row (depending on the turn_page mode) expands into
        # the full set of paged list tasks, so pagination is generated once.
        if (turn_page == 7 and page_index < 1) or (turn_page == 8 and page_index < 2):
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Drop bookkeeping columns so the copies insert as fresh rows.
            sql_dict.pop("id")
            sql_dict.pop("update_time")
            sql_dict.pop("create_time")
            sql_dict.pop("null_dicts")
            sql_dict.pop("err_msg")
            sql_dict.pop("other_dicts")
            sql_dict.pop("state")
            sql_dict.pop("failcount")

            # Step 3: each task requests a three-page window start..end.
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 15 + 1
                end = (page + 2) * 15
                if end >= max_count:
                    end = max_count
                dic = {"start": f"{start}", "end": f"{end}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        for li in li_list:
            result.befor_dicts.update.update({'page': total_page})
            temp = info_dicts.copy()
            # The article task carries the next stage's tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('td[1]/a/@href').extract_first()
            base_url = f'http://www.sx.gov.cn/col/col{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            # Skip non-article links (only art_<id>.htm urls carry an id).
            if 'art_' not in url:
                continue
            rawid = re.findall('art_(.*?)\.htm', url)[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99075'
            article_json["url"] = url
            article_json["title"] = li.xpath('td[1]/a/@title').extract_first()
            article_json["pub_date"] = li.xpath('td[2]/text()').extract_first()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_sxarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Shaoxing; no extra processing is required."""
    return DealModel()


def policy_sxarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Shaoxing (绍兴) policy articles.

    Parses the downloaded article HTML, extracts policy metadata and the
    fulltext node, and queues rows for ``policy_latest`` and
    ``policy_fulltext_latest``.  Attachment info found inside the fulltext
    is written back to the task row's ``other_dicts`` column.

    :param callmodel: ETL call model carrying the fetched HTML at
        ``para_dicts['data']['1_1']['html']`` and the originating task row.
    :return: populated ``EtlDealModel``.
    :raises Exception: when the publish date or the fulltext node is missing.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']

    res = Selector(text=html)
    # Prefer the on-page title; fall back to the title captured at list stage.
    title = cleaned(res.xpath('//div[@class="art_title"]/h2/text()').extract())
    if not title:
        title = article_json['title'].strip()
    pub_date = cleaned(article_json['pub_date'].replace('-', ''))
    pub_year = pub_date[:4]

    # Metadata table; both <td>-label and <th>-label layouts occur on the site.
    index_no = cleaned(res.xpath('//td[contains(text(),"索引号")]/following::td[1]/text()|//th[contains(text(),"索引号")]/following::td[1]/text()').extract_first())
    organ = cleaned(res.xpath('//td[contains(text(),"发布机构")]/following::td[1]/text()|//th[contains(text(),"发文机关")]/following::td[1]/text()').extract_first())
    subject = cleaned(res.xpath('//td[contains(text(),"主题分类")]/following::td[1]/text()|//th[contains(text(),"主题分类")]/following::td[1]/text()').extract_first())
    pub_no = cleaned(res.xpath('//*[contains(text(),"发文字号")]/following::td[1]//text()|//th[contains(text(),"发文字号")]/following::td[1]//text()').extract())
    legal_status = cleaned(res.xpath('//td[contains(text(),"有效性")]/following::td[1]/text()|//th[contains(text(),"有效性")]/following::td[1]/text()').extract_first())
    rawid_alt = cleaned(res.xpath('//td[contains(text(),"文件登记号")]/following::td[1]/text()|//th[contains(text(),"统一编号")]/following::td[1]/text()').extract_first())
    # Prefix the city name only when an organ was actually extracted,
    # matching the sibling Jinhua/Quzhou handlers.
    if organ and '绍兴' not in organ:
        organ = '绍兴' + organ
    if not pub_date:
        raise Exception(f"missing pub_date for rawid={callmodel.sql_model.rawid}")
    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"missing fulltext for rawid={callmodel.sql_model.rawid}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99075'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    print(lngid)  # trace output, kept for operator visibility
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'SX'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'sxcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['legal_status'] = legal_status
    data['rawid_alt'] = rawid_alt
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'

    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (if any) back onto the source task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   浙江省金华市
def policy_jinhualist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Jinhua (金华) policy documents.

    On the first page it fans out paging tasks (each task covers three
    list pages of 15 records) for the whole list; every ``<record>``
    entry is then queued as an article task for the next stage.

    :param callmodel: call model carrying the fetched list payload and
        the originating task row.
    :return: ``DealModel`` with paging inserts (befor) and article
        inserts (next).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        # 395 is the fallback when the totalrecord tag is absent.
        max_count = int(max_count[0]) if max_count else 395
        total_page = math.ceil(max_count / 15)
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Drop DB-managed / transient columns before re-inserting paging rows.
            for key in ("id", "update_time", "create_time", "null_dicts",
                        "err_msg", "other_dicts", "state", "failcount"):
                sql_dict.pop(key)
            list_json = json.loads(callmodel.sql_model.list_json)
            # One inserted task spans three list pages (records start..end).
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 15 + 1
                end = min((page + 2) * 15, max_count)
                dic = {"start": f"{start}", "end": f"{end}", "page_info": f"{list_json['page_info']}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        for li in res.xpath('//record'):
            result.befor_dicts.update.update({'page': total_page})
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.jinhua.gov.cn/col/col{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'art_' not in url:
                continue
            rawid = re.findall(r'art_(.*?)\.htm', url)[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99076'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first()
            article_json["pub_date"] = li.xpath('span/text()|b/text()').extract_first()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jinhua1list_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Jinhua (金华) HTML-paged policy column.

    Reads the total page count from the ``totalpages`` span, fans out one
    task per page when still on an early page (gated by ``turn_page``),
    and queues each list item as an article task for the next stage.

    :param callmodel: call model carrying the fetched list HTML and the
        originating task row.
    :return: ``DealModel`` with paging inserts (befor) and article
        inserts (next).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])

        total_page = int(res.xpath('//span[@id="totalpages"]/text()').extract_first())
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        # Only the first page of a given turn mode fans out the full page set.
        if (turn_page == 7 and page_index < 1) or (turn_page == 8 and page_index < 2):
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Drop DB-managed / transient columns before re-inserting paging rows.
            for key in ("id", "update_time", "create_time", "null_dicts",
                        "err_msg", "other_dicts", "state", "failcount"):
                sql_dict.pop(key)

            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in res.xpath('//div[@class="zfxxgk_zdgkc"]/ul/li'):
            result.befor_dicts.update.update({'page': total_page})
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = 'http://www.jinhua.gov.cn/col/col1229106589/index.html'
            url = parse.urljoin(base_url, href)
            if 'art_' not in url:
                continue
            rawid = re.findall(r'art_(.*?)\.htm', url)[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99076'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first()
            article_json["pub_date"] = li.xpath('b/text()').extract_first()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jinhuaarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Jinhua policy pages; all parsing is deferred to the ETL step."""
    return DealModel()


def policy_jinhuaarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Jinhua (金华) policy articles.

    Parses the downloaded article HTML, extracts policy metadata and the
    fulltext node, and queues rows for ``policy_latest`` and
    ``policy_fulltext_latest``.  Attachment info found inside the fulltext
    is written back to the task row's ``other_dicts`` column.

    :param callmodel: ETL call model carrying the fetched HTML at
        ``para_dicts['data']['1_1']['html']`` and the originating task row.
    :return: populated ``EtlDealModel``.
    :raises Exception: when the publish date or the fulltext node is missing.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']

    res = Selector(text=html)
    # Title: page header, then the ArticleTitle meta tag, then the list-stage title.
    title = cleaned(res.xpath('//h1[@class="text-tag"]/text()').extract())
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_date = clean_pubdate(article_json['pub_date'].replace('-', ''))
    pub_year = pub_date[:4]

    # Metadata table: labels live in <strong> inside <th>; value is the next <td>.
    index_no = cleaned(res.xpath('//strong[contains(text(),"索 引 号：")]/parent::th/following::td[1]/text()').extract_first())
    organ = cleaned(res.xpath('//strong[contains(text(),"发布机构")]/parent::th/following::td[1]/text()').extract_first())
    if not organ:
        organ = cleaned(res.xpath('//strong[contains(text(),"发文机关")]/parent::th/following::td[1]/text()').extract_first())
    subject = cleaned(res.xpath('//strong[contains(text(),"主题分类")]/parent::th/following::td[1]/text()').extract_first())
    written_date = clean_pubdate(res.xpath('//strong[contains(text(),"成文日期：")]/parent::th/following::td[1]/text()').extract_first())
    pub_no = cleaned(res.xpath('//strong[contains(text(),"文　号：")]/parent::th/following::td[1]/text()').extract_first())
    legal_status = cleaned(res.xpath('//strong[contains(text(),"有 效 性：")]/parent::th/following::td[1]/text()').extract_first())
    # Prefix the city name only when an organ was actually extracted.
    if '金华' not in organ and organ:
        organ = '金华' + organ
    if not pub_date:
        raise Exception(f"missing pub_date for rawid={callmodel.sql_model.rawid}")
    fulltext_xpath = '//div[@class="jh_xl_m2"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"missing fulltext for rawid={callmodel.sql_model.rawid}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99076'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    print(lngid)  # trace output, kept for operator visibility
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'JINHUA'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'jinhuacngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['legal_status'] = legal_status
    data['written_date'] = written_date
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'

    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (if any) back onto the source task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   浙江省衢州市
def policy_qzlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Quzhou (衢州) policy documents.

    On the first page it fans out paging tasks (each task covers three
    list pages of 15 records) for the whole list; every ``<record>``
    entry is then queued as an article task for the next stage.

    :param callmodel: call model carrying the fetched list payload and
        the originating task row.
    :return: ``DealModel`` with paging inserts (befor) and article
        inserts (next).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        # 395 is the fallback when the totalrecord tag is absent.
        max_count = int(max_count[0]) if max_count else 395
        total_page = math.ceil(max_count / 15)
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Drop DB-managed / transient columns before re-inserting paging rows.
            for key in ("id", "update_time", "create_time", "null_dicts",
                        "err_msg", "other_dicts", "state", "failcount"):
                sql_dict.pop(key)
            list_json = json.loads(callmodel.sql_model.list_json)
            # One inserted task spans three list pages (records start..end).
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 15 + 1
                end = min((page + 2) * 15, max_count)
                dic = {"start": f"{start}", "end": f"{end}", "page_info": f"{list_json['page_info']}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        for li in res.xpath('//record'):
            result.befor_dicts.update.update({'page': total_page})
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'https://www.qz.gov.cn/col/col{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'art_' not in url:
                continue
            rawid = re.findall(r'art_(.*?)\.htm', url)[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99077'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first()
            article_json["pub_date"] = li.xpath('span/text()|b/text()').extract_first()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_qzlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Quzhou (衢州) HTML-paged policy column.

    Reads the total page count from the ``totalpages`` span, fans out one
    task per page when on the first page, and queues each list item as an
    article task for the next stage.

    :param callmodel: call model carrying the fetched list HTML and the
        originating task row.
    :return: ``DealModel`` with paging inserts (befor) and article
        inserts (next).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])

        total_page = int(res.xpath('//span[@id="totalpages"]/text()').extract_first())
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Drop DB-managed / transient columns before re-inserting paging rows.
            for key in ("id", "update_time", "create_time", "null_dicts",
                        "err_msg", "other_dicts", "state", "failcount"):
                sql_dict.pop(key)

            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in res.xpath('//div[@class="zfxxgk_zdgkc"]/ul/li'):
            result.befor_dicts.update.update({'page': total_page})
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = 'https://www.qz.gov.cn/col/col1229106589/index.html'
            url = parse.urljoin(base_url, href)
            if 'art_' not in url:
                continue
            rawid = re.findall(r'art_(.*?)\.htm', url)[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99077'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first()
            article_json["pub_date"] = li.xpath('b/text()').extract_first()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result

def policy_qzarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Quzhou policy pages; all parsing is deferred to the ETL step."""
    return DealModel()


def policy_qzarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Quzhou (衢州) policy articles.

    Parses the downloaded article HTML, extracts policy metadata and the
    fulltext node, and queues rows for ``policy_latest`` and
    ``policy_fulltext_latest``.  Attachment info found inside the fulltext
    is written back to the task row's ``other_dicts`` column.

    :param callmodel: ETL call model carrying the fetched HTML at
        ``para_dicts['data']['1_1']['html']`` and the originating task row.
    :return: populated ``EtlDealModel``.
    :raises Exception: when the publish date or the fulltext node is missing.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']

    res = Selector(text=html)
    # Title: the metadata-table header, then the ArticleTitle meta tag,
    # then the title captured at list stage.
    title = ''.join(res.xpath('//div[@class="main"]/div/div/table[2]/tbody/tr/td/table[1]//td/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_date = clean_pubdate(article_json['pub_date'].replace('-', ''))
    pub_year = pub_date[:4]

    index_no = cleaned(res.xpath('//td[contains(text(),"索引号")]/following::td[1]/text()').extract_first())
    organ = cleaned(res.xpath('//td[contains(text(),"发布机构")]/following::td[1]/text()').extract_first())
    raw_type = cleaned(res.xpath('//td[contains(text(),"组配分类")]/following::td[1]/text()').extract_first())
    written_date = clean_pubdate(res.xpath('//td[contains(text(),"成文日期")]/following::td[1]/text()').extract_first())
    pub_no = cleaned(res.xpath('//td[contains(text(),"文件编号")]/following::td[1]/text()').extract_first())
    legal_status = cleaned(res.xpath('//td[contains(text(),"有效性")]/following::td[1]/text()').extract_first())
    rawid_alt = cleaned(res.xpath('//td[contains(text(),"统一编号")]/following::td[1]/text()').extract_first())
    # Organ fallbacks: the "信息来源" line, then the article footer.
    if not organ:
        organ = cleaned(res.xpath('//nobr[contains(text(),"信息来源：")]/text()').extract()).replace('信息来源：', '')
        organ = organ.strip()
    if not organ:
        organ = cleaned(res.xpath('//div[contains(@class,"article-bottom")]/text()').extract()).replace('发布', '')
        organ = organ.strip()
    # Prefix the city name only when an organ was actually extracted.
    if ('衢州' not in organ) and organ:
        organ = '衢州' + organ
    if not pub_date:
        raise Exception(f"missing pub_date for rawid={callmodel.sql_model.rawid}")
    fulltext_xpath = '//div[@class="main"]/div/div/table[2]/tbody/tr/td/table[3]|//div[@id="zoom"]|//div[@class="article"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"missing fulltext for rawid={callmodel.sql_model.rawid}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99077'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    print(lngid)  # trace output, kept for operator visibility
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'QZ'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'qzcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['legal_status'] = legal_status
    data['written_date'] = written_date
    data['raw_type'] = raw_type
    data['rawid_alt'] = rawid_alt
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'

    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (if any) back onto the source task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   浙江省舟山市
def policy_zhoushanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Zhoushan (舟山) policy documents.

    On an early page (gated by ``turn_page``) it fans out paging tasks
    (each task covers three list pages of 15 records) for the whole list;
    every ``<record>`` entry is then queued as an article task for the
    next stage.

    :param callmodel: call model carrying the fetched list payload and
        the originating task row.
    :return: ``DealModel`` with paging inserts (befor) and article
        inserts (next).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        # 395 is the fallback when the totalrecord tag is absent.
        max_count = int(max_count[0]) if max_count else 395
        total_page = math.ceil(max_count / 15)
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        # Only the first page of a given turn mode fans out the full page set.
        if (turn_page == 7 and page_index < 1) or (turn_page == 8 and page_index < 2):
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Drop DB-managed / transient columns before re-inserting paging rows.
            for key in ("id", "update_time", "create_time", "null_dicts",
                        "err_msg", "other_dicts", "state", "failcount"):
                sql_dict.pop(key)

            # One inserted task spans three list pages (records start..end).
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 15 + 1
                end = min((page + 2) * 15, max_count)
                dic = {"start": f"{start}", "end": f"{end}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        for li in res.xpath('//record'):
            result.befor_dicts.update.update({'page': total_page})
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://xxgk.zhoushan.gov.cn/col/col{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'art_' not in url:
                continue
            rawid = re.findall(r'art_(.*?)\.htm', url)[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99078'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first()
            article_json["pub_date"] = li.xpath('b/text()').extract_first()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_zhoushanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Zhoushan policy pages; all parsing is deferred to the ETL step."""
    return DealModel()


def policy_zhoushanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Zhoushan (舟山) policy articles.

    Parses the downloaded article HTML, extracts policy metadata and the
    fulltext node, and queues rows for ``policy_latest`` and
    ``policy_fulltext_latest``.  Attachment info found inside the fulltext
    is written back to the task row's ``other_dicts`` column.

    :param callmodel: ETL call model carrying the fetched HTML at
        ``para_dicts['data']['1_1']['html']`` and the originating task row.
    :return: populated ``EtlDealModel``.
    :raises Exception: when the publish date or the fulltext node is missing.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']

    res = Selector(text=html)
    # Prefer the on-page title; fall back to the title captured at list stage.
    title = cleaned(res.xpath('//span[@class="sp_title"]/text()|//div[@class="sp_title"]/text()').extract())
    if not title:
        title = article_json['title'].strip()
    pub_date = cleaned(article_json['pub_date'].replace('-', ''))
    pub_year = pub_date[:4]

    index_no = cleaned(res.xpath('//td[contains(text(),"索引号")]/following::td[1]/text()').extract_first())
    organ = cleaned(res.xpath('//td[contains(text(),"发布机构")]/following::td[1]/text()').extract_first())
    raw_type = cleaned(res.xpath('//td[contains(text(),"组配分类")]/following::td[1]/text()').extract_first())
    pub_no = cleaned(res.xpath('//td[contains(text(),"文号")]/following::td[1]/text()').extract_first())
    # Prefix the city name only when an organ was actually extracted,
    # matching the sibling Jinhua/Quzhou handlers.
    if organ and '舟山' not in organ:
        organ = '舟山' + organ
    if not pub_date:
        raise Exception(f"missing pub_date for rawid={callmodel.sql_model.rawid}")
    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"missing fulltext for rawid={callmodel.sql_model.rawid}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99078'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    print(lngid)  # trace output, kept for operator visibility
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'ZHOUSHAN'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'zhoushancngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'

    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (if any) back onto the source task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   浙江省台州市
def policy_zjtzlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Taizhou (Zhejiang) gov policy site.

    Parses the list HTML under ``para_dicts["data"]["1_1"]``. When handling
    page 1 it fans out one task row per list page; on every call it queues
    one next-stage (article) task per ``art_…`` link found on the page.

    :param callmodel: platform callback model carrying the fetched HTML,
        the current task row (``sql_model``) and redis task config.
    :return: ``DealModel`` with page fan-out inserts and next-stage inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])

        # NOTE(review): extract_first() returns None when the span is absent,
        # which would make int() raise TypeError — confirm the page always
        # carries //span[@id="totalpages"].
        total_page = int(res.xpath('//span[@id="totalpages"]/text()').extract_first())
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out task rows for the remaining pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Drop bookkeeping columns that must not be copied to new rows.
            for key in ("id", "update_time", "create_time", "null_dicts",
                        "err_msg", "other_dicts", "state", "failcount"):
                sql_dict.pop(key)

            # These values are identical for every generated page task.
            sql_dict["page"] = total_page
            sql_dict["list_json"] = callmodel.sql_model.list_json
            for page in range(1, total_page + 1):
                sql_dict["page_index"] = page
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = res.xpath('//tr[contains(@class,"tr_main_value")]')
        for li in li_list:
            result.befor_dicts.update.update({'page': total_page})
            temp = info_dicts.copy()
            # The next-stage task inherits task_tag_next as its task_tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('td[1]/a/@href').extract_first()
            base_url = 'http://www.zjtz.gov.cn/col/col1229056495/index.html'
            url = parse.urljoin(base_url, href)
            # Only article pages (…/art_<rawid>.htm…) are crawlable here.
            if 'art_' not in url:
                continue
            rawid = re.findall(r'art_(.*?)\.htm', url)[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99079'
            article_json["url"] = url
            article_json["title"] = li.xpath('td[1]/a/@title').extract_first()
            article_json["pub_date"] = li.xpath('td[3]/text()').extract_first()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_zjtzarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for Taizhou: no post-fetch handling is needed,
    so return an empty deal model (parsing happens in the ETL callback)."""
    return DealModel()


def policy_zjtzarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Taizhou (Zhejiang) policy articles.

    Extracts article metadata from the fetched HTML, builds one row for the
    ``policy_latest`` table and one for ``policy_fulltext_latest``, and writes
    attachment info (from ``get_file_info``) back into the task row's
    ``other_dicts`` column.

    :param callmodel: callback model with the article HTML under
        ``para_dicts['data']['1_1']['html']`` and list-stage metadata in
        ``sql_model.article_json``.
    :raises Exception: when no publish date or no fulltext can be extracted.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']

    res = Selector(text=html)
    # Prefer the on-page title; fall back to the one captured on the list page.
    title = ''.join(res.xpath('//div[@class="tit text-tag"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_date = clean_pubdate(article_json['pub_date'].replace('-', ''))
    pub_year = pub_date[:4]

    index_no = cleaned(res.xpath('//td[contains(text(),"索引号")]/following::td[1]/text()').extract_first())
    organ = cleaned(res.xpath('//td[contains(text(),"发布机构")]/following::td[1]/text()').extract_first())
    written_date = clean_pubdate(res.xpath('//td[contains(text(),"成文日期")]/following::td[1]/text()').extract_first())
    pub_no = cleaned(res.xpath('//td[contains(text(),"发文字号")]/following::td[1]/text()').extract_first())
    legal_status = cleaned(res.xpath('//td[contains(text(),"有效性")]/following::td[1]/text()').extract_first())
    rawid_alt = cleaned(res.xpath('//td[contains(text(),"规范性文件登记号")]/following::td[1]/text()').extract_first())
    if not organ:
        # Fallback: some pages only carry the issuing organ in the footer
        # ("XXX发布"); strip the trailing "发布" marker.
        organ = cleaned(res.xpath('//div[contains(@class,"zc_article_bottom")]/text()').extract()).replace('发布', '')
        organ = organ.strip()
    # Prefix the city name so the organ is unambiguous across sub-databases.
    if ('台州' not in organ) and organ:
        organ = '台州' + organ
    if not pub_date:
        raise Exception("policy_zjtzarticle_etl_callback: no pub_date extracted")
    fulltext_xpath = '//div[contains(@class,"zoomnr")]|//div[@id="zoom"]|//div[@class="zc_article_con"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("policy_zjtzarticle_etl_callback: no fulltext extracted")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99079'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'ZJTZ'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'zjtzcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['legal_status'] = legal_status
    data['written_date'] = written_date
    data['rawid_alt'] = rawid_alt
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'

    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Persist attachment metadata (if any) back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   浙江省丽水市
def policy_lishuilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Lishui (Zhejiang) gov policy site.

    The list endpoint returns XML-like markup with ``<totalrecord>`` and
    ``<record>`` elements. Pages are fetched three at a time (range step 3),
    so each generated task row covers records ``start``..``end``. Page 1 fans
    out the remaining page tasks; every call queues one next-stage (article)
    task per ``art_…`` link.

    :param callmodel: platform callback model carrying the fetched markup,
        the current task row (``sql_model``) and redis task config.
    :return: ``DealModel`` with page fan-out inserts and next-stage inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        # 395 is a hard-coded fallback record count used when <totalrecord>
        # is missing from the response — presumably the historical total.
        max_count = int(max_count[0]) if max_count else 395
        total_page = math.ceil(max_count / 10)
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out task rows for the remaining pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Drop bookkeeping columns that must not be copied to new rows.
            for key in ("id", "update_time", "create_time", "null_dicts",
                        "err_msg", "other_dicts", "state", "failcount"):
                sql_dict.pop(key)
            list_json = json.loads(callmodel.sql_model.list_json)
            sql_dict["page"] = total_page
            # Step 3: each task pulls three 10-record pages in one request.
            for page in range(1, total_page + 1, 3):
                sql_dict["page_index"] = page
                start = (page - 1) * 10 + 1
                end = min((page + 2) * 10, max_count)
                dic = {"start": f"{start}", "end": f"{end}", "page_info": f"{list_json['page_info']}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        for li in li_list:
            result.befor_dicts.update.update({'page': total_page})
            temp = info_dicts.copy()
            # The next-stage task inherits task_tag_next as its task_tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('td[1]/a/@href|a/@href').extract_first()
            base_url = f'https://www.lishui.gov.cn/col/col{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            # Only article pages (…/art_<rawid>.htm…) are crawlable here.
            if 'art_' not in url:
                continue
            rawid = re.findall(r'art_(.*?)\.htm', url)[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99080'
            article_json["url"] = url
            article_json["title"] = li.xpath('td[1]/a/@title').extract_first()
            # Column 1229283444 carries the date in the 5th cell; other
            # columns use the 3rd cell (or a bare <span>).
            if '1229283444' == callmodel.sql_model.list_rawid:
                article_json["pub_date"] = li.xpath('td[5]/text()').extract_first()
            else:
                article_json["pub_date"] = li.xpath('td[3]/text()|span/text()').extract_first()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_lishuiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for Lishui: no post-fetch handling is needed,
    so return an empty deal model (parsing happens in the ETL callback)."""
    return DealModel()


def policy_lishuiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Lishui (Zhejiang) policy articles.

    Extracts article metadata from the fetched HTML, builds one row for the
    ``policy_latest`` table and one for ``policy_fulltext_latest``, and writes
    attachment info (from ``get_file_info``) back into the task row's
    ``other_dicts`` column.

    :param callmodel: callback model with the article HTML under
        ``para_dicts['data']['1_1']['html']`` and list-stage metadata in
        ``sql_model.article_json``.
    :raises Exception: when no publish date or no fulltext can be extracted.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']

    res = Selector(text=html)
    # Title fallbacks: on-page heading -> <meta name="ArticleTitle"> ->
    # title captured on the list page.
    title = ''.join(res.xpath('//div[contains(@class,"xl_title")]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    index_no = cleaned(res.xpath('//td[contains(text(),"索引号")]/following::td[1]/text()').extract_first())
    organ = cleaned(res.xpath('//td[contains(text(),"发布机构")]/following::td[1]/text()').extract_first())
    raw_type = cleaned(res.xpath('//td[contains(text(),"组配分类")]/following::td[1]/text()').extract_first())
    written_date = cleaned(res.xpath('//td[contains(text(),"成文日期")]/following::td[1]/text()').extract_first())
    written_date = clean_pubdate(written_date)
    pub_no = cleaned(res.xpath('//td[contains(text(),"文号")]/following::td[1]/text()').extract_first())
    legal_status = cleaned(res.xpath('//td[contains(text(),"有效性")]/following::td[1]/text()').extract_first())
    # "发布时间：YYYY-MM-DD hh:mm" -> keep just the date portion.
    pub_date = cleaned(res.xpath('//span[contains(text(),"时间：")]/text()').extract_first())
    pub_date = pub_date.replace('发布时间：', '').replace('时间：', '').split(' ')[0].strip()
    pub_year = pub_date[:4]
    if not pub_date:
        # Fall back to the date captured on the list page.
        pub_date = clean_pubdate(article_json['pub_date'].replace('-', ''))
        pub_year = pub_date[:4]
    # Prefix a region name so the organ is unambiguous across sub-databases:
    # provincial organs get "浙江", municipal ones get "丽水".
    if '省' in organ and '浙江' not in organ:
        organ = '浙江' + organ
    elif '丽水' not in organ and organ:
        organ = '丽水' + organ
    if not pub_date:
        raise Exception("policy_lishuiarticle_etl_callback: no pub_date extracted")
    fulltext_xpath = '//div[@class="zoom"]|//div[@id="zoom"]|//div[@class="info-cont"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("policy_lishuiarticle_etl_callback: no fulltext extracted")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99080'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'LISHUI'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'lishuicngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['legal_status'] = legal_status
    data['written_date'] = written_date
    data['raw_type'] = raw_type
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'

    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Persist attachment metadata (if any) back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result
