import json
import time
import math
import re
import traceback
import urllib
from urllib import parse
import base64

import requests
from parsel import Selector
from re_common.baselibrary.database.mysql import json_update
from re_common.baselibrary.utils.basedict import BaseDicts
from re_common.baselibrary.utils.basetime import BaseTime
from re_common.baselibrary.utils.baseurl import BaseUrl
from re_common.vip.baseencodeid import BaseLngid

from apps.crawler_platform.core_platform.core_sql import CoreSqlValue
from apps.crawler_platform.core_platform.g_model import DealModel, CallBackModel, DealInsertModel, DealUpdateModel, \
    OperatorSqlModel, DealItemModel, \
    EtlDealModel, PolicyListModel, PolicyArticleModel

__all__ = [
    "policy_hbdrchebeilist_callback",
    "policy_hbdrchebeiarticle_callback",
    "policy_hbdrchebeiarticle_etl_callback",
    "policy_gxthebeilist_callback",
    "policy_gxthebeiarticle_callback",
    "policy_gxthebeiarticle_etl_callback",
    "policy_kjthebeilist_callback",
    "policy_kjthebeiarticle_callback",
    "policy_kjthebeiarticle_etl_callback",
    "policy_jythebeilist_callback",
    "policy_jythebeiarticle_callback",
    "policy_jythebeiarticle_etl_callback",
    "policy_minzhenghebeilist_callback",
    "policy_minzhenghebeiarticle_callback",
    "policy_minzhenghebeiarticle_etl_callback",
    "policy_czthebeilist_callback",
    "policy_czthebeiarticle_callback",
    "policy_czthebeiarticle_etl_callback",
    "policy_rsthebeilist_callback",
    "policy_rsthebeiarticle_callback",
    "policy_rsthebeiarticle_etl_callback",
    "policy_nynchebeilist_callback",
    "policy_nynchebeiarticle_callback",
    "policy_nynchebeiarticle_etl_callback",
    "policy_zfcxjsthebeilist_callback",
    "policy_zfcxjsthebeiarticle_callback",
    "policy_zfcxjsthebeiarticle_etl_callback",
    "policy_wsjkwhebeilist_callback",
    "policy_wsjkwhebeiarticle_callback",
    "policy_wsjkwhebeiarticle_etl_callback",
    "policy_sjzlist_callback",
    "policy_sjzarticle_callback",
    "policy_sjzarticle_etl_callback",
    "policy_tangshanlist_callback",
    "policy_tangshanarticle_callback",
    "policy_tangshanarticle_etl_callback",
    "policy_qhdlist_callback",
    "policy_qhdarticle_callback",
    "policy_qhdarticle_etl_callback",
    "policy_hdlist_callback",
    "policy_hdarticle_callback",
    "policy_hdarticle_etl_callback",
    "policy_xingtailist_callback",
    "policy_xingtaiarticle_callback",
    "policy_xingtaiarticle_etl_callback",
    "policy_xingtailist1_callback",
    "policy_xingtaiarticle1_callback",
    "policy_xingtaiarticle1_etl_callback",
    "policy_baodinglist_callback",
    "policy_baodingarticle_callback",
    "policy_baodingarticle_etl_callback",
    "policy_zjklist_callback",
    "policy_zjkarticle_callback",
    "policy_zjkarticle_etl_callback",
    "policy_chengdelist_callback",
    "policy_chengdearticle_callback",
    "policy_chengdearticle_etl_callback",
    "policy_cangzhoulist_callback",
    "policy_cangzhouarticle_callback",
    "policy_cangzhouarticle_etl_callback",
    "policy_lflist_callback",
    "policy_lflist1_callback",
    "policy_lfarticle_callback",
    "policy_lfarticle_etl_callback",
    "policy_hengshuilist_callback",
    "policy_hengshuiarticle_callback",
    "policy_hengshuiarticle_etl_callback",
    "policy_fgwshanxilist_callback",
    "policy_fgwshanxiarticle_callback",
    "policy_fgwshanxiarticle_etl_callback",
    "policy_gxtshanxilist_callback",
    "policy_gxtshanxiarticle_callback",
    "policy_gxtshanxiarticle_etl_callback",
    "policy_kjtshanxilist_callback",
    "policy_kjtshanxiarticle_callback",
    "policy_kjtshanxiarticle_etl_callback",
    "policy_jytshanxilist_callback",
    "policy_jytshanxiarticle_callback",
    "policy_jytshanxiarticle_etl_callback",
    "policy_mztshanxilist_callback",
    "policy_mztshanxiarticle_callback",
    "policy_mztshanxiarticle_etl_callback",
    "policy_cztshanxilist_callback",
    "policy_cztshanxiarticle_callback",
    "policy_cztshanxiarticle_etl_callback",
    "policy_rstshanxilist_callback",
    "policy_rstshanxiarticle_callback",
    "policy_rstshanxiarticle_etl_callback",
    "policy_nynctshanxilist_callback",
    "policy_nynctshanxiarticle_callback",
    "policy_nynctshanxiarticle_etl_callback",
    "policy_zjtshanxilist_callback",
    "policy_zjtshanxiarticle_callback",
    "policy_zjtshanxiarticle_etl_callback",
    "policy_wjwshanxilist_callback",
    "policy_wjwshanxiarticle_callback",
    "policy_wjwshanxiarticle_etl_callback",
    "policy_taiyuanlist_callback",
    "policy_taiyuanlist1_callback",
    "policy_taiyuanarticle_callback",
    "policy_taiyuanarticle_etl_callback",
    "policy_dtlist_callback",
    "policy_dtarticle_callback",
    "policy_dtarticle_etl_callback",
    "policy_shuozhoulist_callback",
    "policy_shuozhouarticle_callback",
    "policy_shuozhouarticle_etl_callback",
    "policy_sxxzlist_callback",
    "policy_sxxzarticle_callback",
    "policy_sxxzarticle_etl_callback",
    "policy_yqlist_callback",
    "policy_yqarticle_callback",
    "policy_yqarticle_etl_callback",
    "policy_lvlianglist_callback",
    "policy_lvliangarticle_callback",
    "policy_lvliangarticle_etl_callback",
    "policy_sxjzlist_callback",
    "policy_sxjzarticle_callback",
    "policy_sxjzarticle_etl_callback",
    "policy_changzhilist_callback",
    "policy_changzhiarticle_callback",
    "policy_changzhiarticle_etl_callback",
    "policy_jcgovlist_callback",
    "policy_jcgovarticle_callback",
    "policy_jcgovarticle_etl_callback",
    "policy_linfenlist_callback",
    "policy_linfenarticle_callback",
    "policy_linfenarticle_etl_callback",
    "policy_yunchenglist_callback",
    "policy_yunchenglist1_callback",
    "policy_yunchengarticle_callback",
    "policy_yunchengarticle_etl_callback",
    "policy_fgwnmglist_callback",
    "policy_fgwnmgarticle_callback",
    "policy_fgwnmgarticle_etl_callback",
    "policy_gxtnmglist_callback",
    "policy_gxtnmgarticle_callback",
    "policy_gxtnmgarticle_etl_callback",
    "policy_kjtnmglist_callback",
    "policy_kjtnmgarticle_callback",
    "policy_kjtnmgarticle_etl_callback",
    "policy_nmgovedulist_callback",
    "policy_nmgovedulist1_callback",
    "policy_nmgoveduarticle_callback",
    "policy_nmgoveduarticle_etl_callback",
    "policy_mztnmglist_callback",
    "policy_mztnmgarticle_callback",
    "policy_mztnmgarticle_etl_callback",
    "policy_cztnmglist_callback",
    "policy_cztnmgarticle_callback",
    "policy_cztnmgarticle_etl_callback",
    "policy_rstnmglist_callback",
    "policy_rstnmgarticle_callback",
    "policy_rstnmgarticle_etl_callback",
    "policy_nmtnmglist_callback",
    "policy_nmtnmgarticle_callback",
    "policy_nmtnmgarticle_etl_callback",
    "policy_zjtnmglist_callback",
    "policy_zjtnmgarticle_callback",
    "policy_zjtnmgarticle_etl_callback",
    "policy_wjwnmglist_callback",
    "policy_wjwnmgarticle_callback",
    "policy_wjwnmgarticle_etl_callback",
    "policy_baotoulist_callback",
    "policy_baotouarticle_callback",
    "policy_baotouarticle_etl_callback",
    "policy_wuhailist_callback",
    "policy_wuhaiarticle_callback",
    "policy_wuhaiarticle_etl_callback",
    "policy_chifenglist_callback",
    "policy_chifengarticle_callback",
    "policy_chifengarticle_etl_callback",
    "policy_tongliaolist_callback",
    "policy_tongliaoarticle_callback",
    "policy_tongliaoarticle_etl_callback",
    "policy_ordoslist_callback",
    "policy_ordosarticle_callback",
    "policy_ordosarticle_etl_callback",
    "policy_hlbelist_callback",
    "policy_hlbearticle_callback",
    "policy_hlbearticle_etl_callback",
    "policy_bynrlist_callback",
    "policy_bynrarticle_callback",
    "policy_bynrarticle_etl_callback",
    "policy_wulanchabulist_callback",
    "policy_wulanchabulist1_callback",
    "policy_wulanchabuarticle_callback",
    "policy_wulanchabuarticle_etl_callback",
    "policy_xamlist_callback",
    "policy_xamarticle_callback",
    "policy_xamarticle_etl_callback",
    "policy_xlgllist_callback",
    "policy_xlglarticle_callback",
    "policy_xlglarticle_etl_callback",
    "policy_alslist_callback",
    "policy_alsarticle_callback",
    "policy_alsarticle_etl_callback",
    "policy_fgwbeijinglist_callback",
    "policy_fgwbeijingarticle_callback",
    "policy_fgwbeijingarticle_etl_callback",
    "policy_jxjbeijinglist_callback",
    "policy_jxjbeijingarticle_callback",
    "policy_jxjbeijingarticle_etl_callback",
    "policy_kwbeijinglist_callback",
    "policy_kwbeijingarticle_callback",
    "policy_kwbeijingarticle_etl_callback",
    "policy_jwbeijinglist_callback",
    "policy_jwbeijingarticle_callback",
    "policy_jwbeijingarticle_etl_callback",
    "policy_mzjbeijinglist_callback",
    "policy_mzjbeijingarticle_callback",
    "policy_mzjbeijingarticle_etl_callback",
    "policy_czjbeijinglist_callback",
    "policy_czjbeijinglist1_callback",
    "policy_czjbeijingarticle_callback",
    "policy_czjbeijingarticle_etl_callback",
    "policy_rsjbeijinglist_callback",
    "policy_rsjbeijingarticle_callback",
    "policy_rsjbeijingarticle_etl_callback",
    "policy_nyncjbeijinglist_callback",
    "policy_nyncjbeijingarticle_callback",
    "policy_nyncjbeijingarticle_etl_callback",
    "policy_zjwbeijinglist_callback",
    "policy_zjwbeijingarticle_callback",
    "policy_zjwbeijingarticle_etl_callback",
    "policy_wjwbeijinglist_callback",
    "policy_wjwbeijingarticle_callback",
    "policy_wjwbeijingarticle_etl_callback",
    "policy_bjdchlist_callback",
    "policy_bjdcharticle_callback",
    "policy_bjdcharticle_etl_callback",
    "policy_bjchylist_callback",
    "policy_bjchyarticle_callback",
    "policy_bjchyarticle_etl_callback",
    "policy_bjftlist_callback",
    "policy_bjftarticle_callback",
    "policy_bjftarticle_etl_callback",
    "policy_bjsjslist_callback",
    "policy_bjsjsarticle_callback",
    "policy_bjsjsarticle_etl_callback",
    "policy_bjhdlist_callback",
    "policy_bjhdarticle_callback",
    "policy_bjhdarticle_etl_callback",
    "policy_bjshylist_callback",
    "policy_bjshyarticle_callback",
    "policy_bjshyarticle_etl_callback",
    "policy_bjtzhlist_callback",
    "policy_bjtzharticle_callback",
    "policy_bjtzharticle_etl_callback",
    "policy_bjdxlist_callback",
    "policy_bjdxarticle_callback",
    "policy_bjdxarticle_etl_callback",
    "policy_bjfshlist_callback",
    "policy_bjfsharticle_callback",
    "policy_bjfsharticle_etl_callback",
    "policy_bjmtglist_callback",
    "policy_bjmtgarticle_callback",
    "policy_bjmtgarticle_etl_callback",
    "policy_bjchplist_callback",
    "policy_bjchparticle_callback",
    "policy_bjchparticle_etl_callback",
    "policy_bjpglist_callback",
    "policy_bjpgarticle_callback",
    "policy_bjpgarticle_etl_callback",
    "policy_bjmylist_callback",
    "policy_bjmyarticle_callback",
    "policy_bjmyarticle_etl_callback",
    "policy_bjhrlist_callback",
    "policy_bjhrarticle_callback",
    "policy_bjhrarticle_etl_callback",
    "policy_bjyqlist_callback",
    "policy_bjyqarticle_callback",
    "policy_bjyqarticle_etl_callback",
    "policy_fzggtjlist_callback",
    "policy_fzggtjarticle_callback",
    "policy_fzggtjarticle_etl_callback",
    "policy_gyxxhtjlist_callback",
    "policy_gyxxhtjarticle_callback",
    "policy_gyxxhtjarticle_etl_callback",
    "policy_kxjstjlist_callback",
    "policy_kxjstjarticle_callback",
    "policy_kxjstjarticle_etl_callback",
    "policy_jytjlist_callback",
    "policy_jytjarticle_callback",
    "policy_jytjarticle_etl_callback",
    "policy_mztjlist_callback",
    "policy_mztjarticle_callback",
    "policy_mztjarticle_etl_callback",
    "policy_cztjlist_callback",
    "policy_cztjarticle_callback",
    "policy_cztjarticle_etl_callback",
    "policy_hrsstjlist_callback",
    "policy_hrsstjarticle_callback",
    "policy_hrsstjarticle_etl_callback",
    "policy_nynctjlist_callback",
    "policy_nynctjarticle_callback",
    "policy_nynctjarticle_etl_callback",
    "policy_zfcxjstjlist_callback",
    "policy_zfcxjstjarticle_callback",
    "policy_zfcxjstjarticle_etl_callback",
    "policy_wsjktjlist_callback",
    "policy_wsjktjarticle_callback",
    "policy_wsjktjarticle_etl_callback",

    "policy_huhhotarticle_etl_callback",
    "policy_bjxcharticle_etl_callback",
]

def init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider):
    """Build the shared metadata record for a ``policy_latest`` row.

    ``down_date_str`` is a ``%Y%m%d_%H%M%S`` timestamp: its first eight
    characters become ``latest_date`` and the full string becomes ``batch``.
    All constant columns (provider, language, flags, ...) are filled in here.
    """
    return {
        'rawid': rawid,
        'rawid_mysql': rawid,
        'lngid': lngid,
        'keyid': lngid,
        'product': product,
        'sub_db': 'POLICY',
        'sub_db_id': sub_db_id,
        'provider': 'CNGOV',
        'zt_provider': zt_provider,
        'source_type': '16',
        'latest_date': down_date_str[:8],
        'batch': down_date_str,
        'vision': '1',
        'is_deprecated': '0',
        'country': 'CN',
        'language': 'ZH',
    }


def init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year):
    """Build the shared record for a ``policy_fulltext_latest`` row.

    Stores the raw HTML fulltext inline (``fulltext_txt``) with a synthetic
    filename of ``<lngid>.html``; address/size columns stay empty because no
    file is written to disk at this stage.
    """
    return {
        'lngid': lngid,
        'keyid': lngid,
        'sub_db_id': sub_db_id,
        'source_type': '16',
        'latest_date': down_date_str[:8],
        'batch': down_date_str,
        'is_deprecated': '0',
        'filename': f"{lngid}.html",
        'fulltext_type': "html",
        'fulltext_addr': '',
        'fulltext_size': '',
        'fulltext_txt': fulltext,
        'page_cnt': "1",
        'pub_year': pub_year,
    }


def clean_pubdate(value):
    """Normalise a free-form date string to an 8-digit ``YYYYMMDD`` string.

    Strips every non-digit character, truncates/zero-pads to 8 digits, then
    zeroes out an impossible month (> 12, which also clears the day) or an
    impossible day (> 31). Returns '' for falsy input.

    Fix: the regex is now a raw string — ``'\\D'`` as a plain literal is an
    invalid escape sequence (SyntaxWarning on Python 3.12+).
    """
    if not value:
        return ''
    digits = re.sub(r'\D', '', value)[:8].ljust(8, '0')
    if int(digits[4:6]) > 12:
        # Month is garbage, so the day that follows it cannot be trusted.
        digits = digits[:4] + '0000'
    if int(digits[6:]) > 31:
        digits = digits[:6] + '00'
    return digits


def cleaned(value):
    """Strip whitespace from a scraped value.

    Lists (e.g. the result of ``xpath(...).extract()``) are joined with
    single spaces after stripping each element; falsy input yields ''.

    Fix: ``isinstance`` replaces the ``type(...) is list`` check (idiomatic,
    and accepts list subclasses).
    """
    if not value:
        return ""
    if isinstance(value, list):
        return ' '.join(item.strip() for item in value).strip()
    return value.strip()


def judge_url(url):
    """Return True when *url* should be rejected as an attachment link.

    Filters out over-long URLs, URLs with no path component, mail/script/
    social/search-engine links, page-style extensions and other malformed
    endings commonly scraped out of article bodies.
    """
    if len(url) > 500:
        return True
    # A URL with no '/' beyond its scheme's '//' has no path to download.
    if '/' not in url.replace('//', ''):
        return True
    blocked_fragments = (
        'mailt', 'data:image/', 'javascript:', '#', 'weixin.qq',
        '.baidu', '。', '@163', '.cn/）', '8080）', 'cn）',
        'cn，', 'com，', 'cn,', 'haosou.', 'www.so.', 'file://',
        'C:', 'baike.soso', 'weibo.com', 'baike.sogou', 'html）',
        'shtml）', 'phtml）', 'wx.qq.', 'bing.com',
    )
    if any(fragment in url for fragment in blocked_fragments):
        return True
    blocked_suffixes = (
        '/', '.net', '.asp', '.shtml',
        '/share', '.exe', '.xml',
        'pdf}', 'jpg}',
    )
    if url.endswith(blocked_suffixes):
        return True
    last_segment = url.split('/')[-1].lower()
    if not last_segment:
        return True
    # Extensions that indicate an HTML page rather than a downloadable file.
    page_suffixes = (
        '.htm', '.shtml', '.jhtml', '.org',
        'xhtml', '.phtml', '.cn', '.com',
        '.html', '.mht', '.html%20',
    )
    if last_segment.endswith(page_suffixes):
        return True
    # Short .jsp names (tiny query/extension tail) are navigation pages.
    if '.jsp' in last_segment and len(last_segment.split('.', 1)[1]) < 7:
        return True

    return False


def get_file_info(data, res, xpath):
    """Collect de-duplicated attachment and image links from the fulltext node.

    :param data: record that carries 'provider_url' (base for relative hrefs),
        'pub_year' and 'keyid'.
    :param res: parsel Selector over the article HTML.
    :param xpath: xpath of the fulltext container node.
    :returns: list of ``{'url', 'name', 'pub_year', 'keyid'}`` dicts; links
        rejected by judge_url() and duplicates are skipped.

    Fix: the ``@src`` branch now guards ``urljoin`` the same way the
    ``<a href>`` branch always did — a malformed src no longer crashes the
    whole ETL callback.
    """
    url = data['provider_url']
    pub_year = data['pub_year']
    keyid = data['keyid']
    file_info = list()
    url_list = list()  # urls already taken, for de-duplication
    for tag in res.xpath(f'{xpath}//a'):
        file_href = tag.xpath('@href').extract_first()
        if not (file_href and file_href.strip()):
            continue
        file_href = file_href.strip()
        try:
            file_url = parse.urljoin(url, file_href)
        except Exception:  # malformed href — skip the link
            continue
        if judge_url(file_url) or file_url in url_list:
            continue
        url_list.append(file_url)
        # Link text becomes the attachment's display name.
        name = ''.join(tag.xpath('.//text()').extract()).strip()
        file_info.append({'url': file_url, 'name': name, 'pub_year': pub_year, 'keyid': keyid})
    for img_href in res.xpath(f'{xpath}//*/@src').extract():
        if not img_href.strip():
            continue
        img_href = img_href.strip()
        try:
            img_url = parse.urljoin(url, img_href)
        except Exception:  # malformed src — skip the image
            continue
        if judge_url(img_url) or img_url in url_list:
            continue
        url_list.append(img_url)
        file_info.append({'url': img_url, 'name': img_href, 'pub_year': pub_year, 'keyid': keyid})
    return file_info


def deal_sql_dict(sql_dict):
    """Strip bookkeeping columns from a sql_model dict, in place.

    Removes the auto-generated/state columns that must not be copied when
    fanning a task row out into new tasks. Raises KeyError if any expected
    column is missing. Returns the same (mutated) dict for convenience.
    """
    for column in ("id", "update_time", "create_time", "null_dicts",
                   "err_msg", "other_dicts", "state", "failcount"):
        sql_dict.pop(column)
    return sql_dict


#   河北省发展和改革委员会
def policy_hbdrchebeilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Hebei Development and Reform Commission.

    On the first page (page_index == 0) it fans out insert tasks for the
    remaining list pages, then extracts every article link on the current
    page and queues it for the article stage under ``task_tag_next``.

    Fix: regex patterns are raw strings — ``'\\d'`` / ``'\\.'`` as plain
    literals are invalid escape sequences (SyntaxWarning on Python 3.12+).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total number of list pages is embedded in the page's inline JS.
        max_count = re.findall(r'countPage = (\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: schedule one list task per remaining page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)

            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"index_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Some list sections render as <ul>/<li>, others as table rows.
        li_list = res.xpath('//div[@id="search"]/ul/li|//div[@id="search"]/tr')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href|td[1]/a/@href').extract_first()
            if not href:
                continue
            base_url = f'http://hbdrc.hebei.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if '.htm' not in url:
                continue
            # rawid is the article filename without its .htm(l) extension.
            rawid = re.findall(r'(.*?)\.htm', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99081'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()|td[1]/a/text()').extract_first().strip()
            # The publish-date column differs between the site's list layouts.
            if 'xxgk_2232/zc/wgfxwj' == callmodel.sql_model.list_rawid:
                article_json["pub_date"] = li.xpath('td[5]/text()').extract_first().strip()
            elif 'xxgk_2232/zc/wzcwj' == callmodel.sql_model.list_rawid:
                article_json["pub_date"] = li.xpath('td[4]/text()').extract_first().strip()
            elif 'xxgk_2232/zc/zcjd_1143' == callmodel.sql_model.list_rawid:
                article_json["pub_date"] = li.xpath('td[3]/text()').extract_first().strip()
            else:
                article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()

            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_hbdrchebeiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the Hebei DRC: nothing to do here, the
    fetched HTML is processed later by the ETL callback."""
    return DealModel()


def policy_hbdrchebeiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Hebei DRC articles.

    Parses the fetched article HTML into a ``policy_latest`` row and a
    ``policy_fulltext_latest`` row, and stores any attachment/image links in
    the task's ``other_dicts`` column. Raises if no fulltext node is found.

    Fixes: reuses the module's init_data()/init_full_text_data() helpers
    instead of duplicating every field; drops the stray debug print; the
    bare ``raise Exception`` now carries a message.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date'].replace('.', '')
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the title scraped from the article page; fall back through the
    # alternate layouts and finally to the title captured at list time.
    title = cleaned(res.xpath('//div[@class="con_main"]/h2/text()').extract_first()).strip()
    if not title:
        title = cleaned(res.xpath('//div[@class="maintitle"]/text()|//div[@id="search"]/h1/text()').extract_first()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="tab_syh"]//th[contains(text(),"文") and contains(text(),"号")]/following::td[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="tab_syh"]//th[contains(text(),"索") and contains(text(),"号")]/following::td[1]//text()').extract()).strip()
    fulltext_xpath = '//div[@class="con_nr"]|//div[@class="xxgk_rightwrap"]|//div[@class="contentmain"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Let the platform record the failure; the page layout is unknown.
        raise Exception(f"fulltext node not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99081'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, 'HBDRCHEBEI', 'hbdrchebeicngovpolicy')
    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Persist attachment/image links on the task row for later download.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   河北省工业和信息化厅
def policy_gxthebeilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Hebei Department of Industry and IT.

    On the first page (page_index == 1 for this site) it fans out insert
    tasks for the remaining list pages, then queues every article link on
    the current page for the article stage under ``task_tag_next``.

    Fixes: the regex is a raw string (``'\\d'`` as a plain literal is an
    invalid escape sequence, SyntaxWarning on Python 3.12+); items with a
    missing href are now skipped — ``urljoin(base, None)`` silently returns
    the base URL and used to enqueue a garbage row.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total number of list pages is embedded in a totalpage="N" attribute.
        max_count = re.findall(r'totalpage="(\d+)"', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 5
        total_page = max_count

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: schedule one list task per remaining page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)

            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # The disclosure section uses a different list layout.
        if 'fdzdgknr' in callmodel.sql_model.list_rawid:
            li_list = res.xpath('//ul[@class="xxgkList"]/li')
        else:
            li_list = res.xpath('//div[@class="ej-new-list"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                continue
            base_url = f'http://gxt.hebei.gov.cn/hbgyhxxht/zcfg30/snzc/8743fba2-2.html'
            url = parse.urljoin(base_url, href)
            # rawid is the article's directory segment in the URL.
            rawid = url.split('/')[-2]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99082'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/@title').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first()

            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_gxthebeiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the Hebei Department of Industry and IT:
    nothing to do here, the fetched HTML is processed by the ETL callback."""
    return DealModel()


def policy_gxthebeiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Hebei Department of Industry and IT articles.

    Parses the fetched article HTML into a ``policy_latest`` row and a
    ``policy_fulltext_latest`` row, and stores any attachment/image links in
    the task's ``other_dicts`` column. Raises if no fulltext node is found.

    Fixes: reuses the module's init_data()/init_full_text_data() helpers
    instead of duplicating every field; drops the stray debug print and the
    commented-out organ extraction; the bare ``raise Exception`` now carries
    a message.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date'].replace('.', '')
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the title scraped from the article page; fall back through the
    # alternate layout and finally to the title captured at list time.
    title = cleaned(res.xpath('//div[@class="gxt-xilan"]//h1/text()').extract_first()).strip()
    if not title:
        title = cleaned(res.xpath('//h3[@class="detailTitle"]/text()').extract_first()).strip()
    if not title:
        title = article_json['title'].strip()
    fulltext_xpath = '//div[@class="gxt-xilan-content"]|//div[@class="artContent viewport"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Let the platform record the failure; the page layout is unknown.
        raise Exception(f"fulltext node not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99082'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, 'GXTHEBEI', 'gxthebeicngovpolicy')
    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Persist attachment/image links on the task row for later download.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   河北省科学技术厅
def policy_kjthebeilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Hebei Science & Technology Department (kjt.hebei.gov.cn).

    On the first page, reads the total page count from the embedded
    ``totalpage="N"`` attribute and schedules the remaining list pages.
    For every ``<li>`` of the result list it queues one article task whose
    ``article_json`` carries url / title / pub_date.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid-escape warning for \d (Py3.12+);
        # default to a single page when the marker is missing.
        max_count = re.findall(r'totalpage="(\d+)"', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: enqueue list tasks for pages 2..total_page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="xxgk-xxbox"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('span[1]/a/@href').extract_first()
            base_url = f'https://kjt.hebei.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            # The article id is the second-to-last path segment of the url.
            rawid = url.split('/')[-2]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99083'
            article_json["url"] = url
            article_json["title"] = li.xpath('span[1]/a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span[2]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_kjthebeiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the Science & Tech dept; nothing to do here,
    so hand back an empty DealModel."""
    return DealModel()


def policy_kjthebeiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Hebei Science & Technology Department articles.

    Builds one ``policy_latest`` row and one ``policy_fulltext_latest`` row
    from the fetched article HTML, then writes attachment info back onto the
    originating task row (``other_dicts``).

    Raises:
        Exception: when the fulltext region cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the on-page title; fall back to the one captured at list time.
    title = cleaned(res.xpath('//h2[@class="cont_title"]/text()').extract_first()).strip()
    if not title:
        title = article_json['title'].strip()
    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Abort the ETL so the task can be retried / inspected.
        raise Exception(f"fulltext not found via {fulltext_xpath}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99083'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'KJTHEBEI'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'kjthebeicngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    # pub_no / organ / index_no / written_date / subject_word are not
    # exposed on this site's article page.
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Attachments found inside the fulltext region, stored as JSON on the
    # source task row so a later stage can download them.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   河北省教育厅
def policy_jythebeilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Hebei Education Department (jyt.hebei.gov.cn).

    On the first page, extracts the total page count from the pager text
    ("…条N页<") and schedules the remaining list pages. Each table row after
    the header row becomes one article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw strings avoid invalid-escape warnings; default to 5 pages
        # when the pager text is missing.
        max_count = re.findall(r'条(\d+)页<', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 5

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: enqueue list tasks for pages 2..total_page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)

            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # All data rows follow the header row with id="th".
        li_list = res.xpath('//div[@class="nr"]//tr[@id="th"]/following::tr')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('td[2]/a/@href').extract_first()
            base_url = f'http://jyt.hebei.gov.cn/column.jsp?id=1410097726928&current=2'
            url = parse.urljoin(base_url, href)
            # rawid is the file name without the .htm(l) extension.
            rawid = re.findall(r'(.*?)\.htm', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99084'
            article_json["url"] = url
            article_json["title"] = li.xpath('td[2]/a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('td[3]/text()').extract_first()

            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jythebeiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the Education dept; no extra work is
    required, so return an empty DealModel."""
    return DealModel()


def policy_jythebeiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Hebei Education Department articles.

    Builds one ``policy_latest`` row and one ``policy_fulltext_latest`` row
    from the fetched article HTML, then writes attachment info back onto the
    originating task row (``other_dicts``).

    Raises:
        Exception: when the fulltext region cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # The page keeps its title in a hidden <input>; fall back to the
    # title captured at list time.
    title = ''.join(res.xpath('//input[@name="ArticleTitle"]/@value').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    fulltext_xpath = '//div[@class="conN"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Abort the ETL so the task can be retried / inspected.
        raise Exception(f"fulltext not found via {fulltext_xpath}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99084'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'JYTHEBEI'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'jythebeicngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    # pub_no / organ / index_no / written_date / subject_word are not
    # exposed on this site's article page.
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Attachments found inside the fulltext region, stored as JSON on the
    # source task row so a later stage can download them.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   河北省民政厅
def policy_minzhenghebeilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for the Hebei Civil Affairs Department JSON API.

    Page 1 schedules the remaining pages; every entry in the result list
    becomes one article task pointing at the detail JSON endpoint.
    """
    result = DealModel()
    parse_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_info = {
        "task_name": callmodel.sql_model.task_name,
        "task_tag": callmodel.sql_model.task_tag,
        "task_tag_next": parse_info.task_tag_next,
    }
    if "1_1" not in callmodel.para_dicts["data"]:
        return result
    payload = callmodel.para_dicts["data"]["1_1"]
    # The page count lives either at result.totalPages or result.page.totalPages.
    total_page = payload['result'].get('totalPages', '')
    if not total_page:
        total_page = payload['result']['page']['totalPages']

    current_page = int(callmodel.sql_model.page_index)
    if current_page == 1:
        # First page only: enqueue list tasks for the remaining pages.
        ins_model = DealInsertModel()
        ins_model.insert_pre = CoreSqlValue.insert_ig_it
        row = deal_sql_dict(callmodel.sql_model.dict())
        for page_no in range(current_page + 1, total_page + 1):
            row["page"] = total_page
            row["page_index"] = page_no
            row["list_json"] = json.dumps({}, ensure_ascii=False)
            ins_model.lists.append(row.copy())
        result.befor_dicts.insert.append(ins_model)

    next_model = DealInsertModel()
    next_model.insert_pre = CoreSqlValue.insert_ig_it
    # Entries live either at result.content or result.page.content.
    entries = payload['result'].get('content', '')
    if not entries:
        entries = payload['result']['page']['content']
    for entry in entries:
        task = base_info.copy()
        task["task_tag"] = task.pop("task_tag_next")
        rec_id = entry['id']
        # "moza" lists use the zcfg endpoint; everything else the generic one.
        if 'moza' in callmodel.sql_model.list_rawid:
            url = f'http://minzheng.hebei.gov.cn/cms/open/api/moza/getZcfgById?zcfgId={rec_id}'
        else:
            url = f'http://minzheng.hebei.gov.cn/cms/open/api/content/getContentById?contentId={rec_id}'
        task["rawid"] = rec_id
        task["sub_db_id"] = '99085'
        article_json = {
            "url": url,
            "title": entry['title'],
            "pub_date": entry['banbutime'] if entry.get('banbutime', '') else entry['publishTime'],
        }
        task["article_json"] = json.dumps(article_json, ensure_ascii=False)
        next_model.lists.append(task)

    result.next_dicts.insert.append(next_model)

    return result


def policy_minzhenghebeiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the Civil Affairs dept; nothing to do,
    so return an empty DealModel."""
    return DealModel()


def policy_minzhenghebeiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Hebei Civil Affairs Department articles.

    The detail endpoint returns JSON in one of two shapes:
    ``result.zcfg`` (policy records, single attachment path ``fujianlujing``)
    or ``result.content`` (generic CMS content with an ``attachments`` list).
    Builds the ``policy_latest`` / ``policy_fulltext_latest`` rows and writes
    attachment info back to the originating task row.

    Raises:
        Exception: when the record carries no fulltext body.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    html_json = json.loads(html)
    if '"zcfg":' in html:
        zcfg = html_json['result']['zcfg']
        title = zcfg['title']
        provider_url = article_json['url']
        pub_date = zcfg['banbutime']
        pub_year = pub_date[:4]
        cont = zcfg['neirong']
        organ = zcfg['zhidingjiguan']
        pub_no = zcfg['zhengcefaguihao']
    else:
        content = html_json['result']['content']
        title = content['title']
        provider_url = article_json['url']
        pub_date = content['publishTime']
        pub_year = pub_date[:4]
        cont = content['content']
        organ = content['resourceName']
        pub_no = ''
    fulltext = cont
    if not fulltext:
        # Abort the ETL so the task can be retried / inspected.
        raise Exception("fulltext is empty for minzheng article")
    # Normalize the issuing organ so it always carries the province prefix.
    if organ and '河北' not in organ:
        if '省民政厅' not in organ:
            organ = '河北省民政厅' + organ
        else:
            organ = '河北省' + organ
    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99085'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'MINZHENGHEBEI'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'minzhenghebeicngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    # index_no / written_date / subject_word are not provided by this API.
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Collect attachment descriptors depending on the payload shape.
    file_info = []
    if '"zcfg":' in html:
        file_info.append({
            "url": f"http://minzheng.hebei.gov.cn/{html_json['result']['zcfg']['fujianlujing']}",
            "name": data['title'],
            "pub_year": data['pub_year'],
            "keyid": data['keyid']
        })
    else:
        for item in html_json['result']['attachments']:
            file_info.append({
                "url": f"http://minzheng.hebei.gov.cn/bxs-cms/{item['path']}",
                "name": item['name'],
                "pub_year": data['pub_year'],
                "keyid": data['keyid']
            })
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   河北省财政厅
def policy_czthebeilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Hebei Finance Department (czt.hebei.gov.cn).

    The site exposes two list layouts: the "root17" archive (25 records per
    page, counted via ``m_nRecordCount``) and the regular column listing
    (``countPage``). Page 0 schedules the remaining pages; each row becomes
    one article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw strings avoid invalid-escape warnings; default to 5 pages
        # when the counters cannot be found.
        if 'root17' not in callmodel.sql_model.list_rawid:
            max_count = re.findall(r'countPage = (\d+)', para_dicts["data"]["1_1"]['html'])
            total_page = int(max_count[0]) if max_count else 5
        else:
            # root17 exposes a record count; 25 records per page.
            max_count = re.findall(r'm_nRecordCount = "(\d+)', para_dicts["data"]["1_1"]['html'])
            max_count = int(max_count[0]) if max_count else 5
            total_page = math.ceil(max_count / 25)

        page_index = int(callmodel.sql_model.page_index)
        # NOTE(review): unlike the other list callbacks this site starts at
        # page_index 0 and the range excludes total_page — presumably the
        # site's paging is 0-based; confirm before changing.
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)

            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                if 'root17' in callmodel.sql_model.list_rawid:
                    dic = {"page_info": f"list_292_{page}.htm"}
                else:
                    dic = {"page_info": f"index_{page}.html"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        if 'root17' not in callmodel.sql_model.list_rawid:
            li_list = res.xpath('//div[@class="content clearfix"]/ul/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                base_url = 'http://czt.hebei.gov.cn/zwgk/jsjf/index.html'
                title = li.xpath('span[1]/a/@title').extract_first().strip()
                pub_date = li.xpath('span[2]/text()').extract_first().strip()
                url = parse.urljoin(base_url, href)
                # Skip off-site links and direct PDF links.
                if 'hebei' not in url or '.pdf' in url:
                    continue
                rawid = re.findall(r'(.*?)\.htm', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99086'
                article_json["url"] = url
                article_json["title"] = title
                article_json["pub_date"] = pub_date.replace('(', '').replace(')', '')
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            li_list = res.xpath('//div[@id="documentContainer"]/table/tr')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[1]/a/@href').extract_first()
                base_url = 'http://czt.hebei.gov.cn/root17/3003/list_292.htm'
                url = parse.urljoin(base_url, href)
                # Skip off-site links and direct PDF links.
                if 'hebei' not in url or '.pdf' in url:
                    continue
                rawid = re.findall(r'(.*?)\.htm', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99086'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[1]/a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('td[4]/text()').extract_first()

                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_czthebeiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the Finance dept; no additional handling
    needed, so return an empty DealModel."""
    return DealModel()


def policy_czthebeiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Hebei Finance Department articles.

    The site serves two page templates (an "xxgk" metadata table and a
    legacy table layout); metadata fields are extracted with the xpath set
    matching the template. Builds the ``policy_latest`` /
    ``policy_fulltext_latest`` rows and records attachments.

    Raises:
        Exception: when the fulltext region cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'].strip())
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Try the metadata-table title first, then the content title,
    # then the alternate table layout, finally the list-time title.
    title = ''.join(res.xpath('//b[contains(text(),"名　　称:")]/parent::td[1]/span/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="content_title_zhx"]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//b[contains(text(),"名　　称:")]/following::td[1]//text()').extract()).strip()
    if not title:
        title = article_json['title']
    # NOTE(review): the trailing space in 'xxgk ' matches the original
    # template marker — presumably a class attribute; confirm before changing.
    if 'xxgk ' in html:
        index_no = cleaned(res.xpath('//td/b[contains(text(),"索 引 号:")]/following::td[1]//text()').extract_first())
        subject = ''.join(res.xpath('//td/b[contains(text(),"分类:")]/following::td[1]//text()').extract()).strip()
        subject_word = ''.join(res.xpath('//td/b[contains(text(),"主 题 词:")]/following::td[1]//text()').extract()).strip()
        organ = ''.join(res.xpath('//td/b[contains(text(),"发布机构:")]/following::td[1]//text()').extract()).strip()
        pub_no = ''.join(res.xpath('//td/b[contains(text(),"文　　号:")]/following::td[1]//text()').extract()).strip()
    else:
        index_no = cleaned(res.xpath('//b[contains(text(),"索 引 号:")]/parent::td[1]/text()').extract_first())
        subject = ''.join(res.xpath('//b[contains(text(),"分类:")]/parent::td[1]/span/text()').extract()).strip()
        subject_word = ''.join(res.xpath('//b[contains(text(),"主 题 词:")]/parent::td[1]/span/text()').extract()).strip()
        organ = ''.join(res.xpath('//b[contains(text(),"发布机构:")]/parent::td[1]/span/text()').extract()).strip()
        pub_no = ''.join(res.xpath('//b[contains(text(),"文　　号:")]/parent::td[1]/span/text()').extract()).strip()

    fulltext_xpath = '//div[@class="cont"]|//div[@class="content"]|//td[@class="tyxl_contentp"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Abort the ETL so the task can be retried / inspected.
        raise Exception(f"fulltext not found via {fulltext_xpath}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99086'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'CZTHEBEI'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'czthebeicngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    # written_date is not exposed on this site's article page.
    data['subject_word'] = subject_word
    data['subject'] = subject
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Attachments found inside the fulltext region, stored as JSON on the
    # source task row so a later stage can download them.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   河北省人力资源和社会保障厅
def policy_rsthebeilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Hebei Human Resources & Social Security
    Department (rst.hebei.gov.cn).

    Handles two list layouts: the policy database ("zhengceku", a table with
    title / pub_no / pub_date columns) and the regular column listing. Page 1
    schedules the remaining pages; each row becomes one article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw strings avoid invalid-escape warnings; default to 5 pages
        # when the counter is missing.
        max_count = re.findall(r'共 <strong>(\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 5

        page_index = int(callmodel.sql_model.page_index)
        list_json = json.loads(callmodel.sql_model.list_json)
        if page_index == 1:
            # First page only: enqueue list tasks for pages 2..total_page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)

            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": list_json['page_info'].replace('_1', f"_{page}")}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        if 'zhengceku' in callmodel.sql_model.list_rawid:
            li_list = res.xpath('//div[@class="zck"]/table/tr')[1:]
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[1]/a/@href').extract_first()
                base_url = 'https://rst.hebei.gov.cn'
                url = base_url + href
                # BUGFIX: the original tested ',htm' (comma), which never
                # matches and silently skipped every zhengceku row; the
                # rawid extraction below expects a '.htm' url.
                if '.htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.htm', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99087'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[1]/a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('td[3]/text()').extract_first()
                article_json["pub_no"] = li.xpath('td[2]/text()').extract_first()

                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            li_list = res.xpath('//div[@class="inner"]/ul/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                base_url = 'https://rst.hebei.gov.cn'
                url = base_url + href
                rawid = re.findall(r'(.*?)\.htm', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99087'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('span/text()').extract_first()
                article_json["pub_no"] = ''

                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_rsthebeiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for rst.hebei.gov.cn; no extra processing is needed."""
    return DealModel()


def policy_rsthebeiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for rst.hebei.gov.cn (Hebei human-resources dept) article pages.

    Parses the downloaded article HTML, builds the ``policy_latest`` and
    ``policy_fulltext_latest`` rows, and records any attachment info found
    in the fulltext node back onto the source row.

    Raises:
        Exception: when the fulltext node cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_no = article_json['pub_no']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the title on the detail page; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="title"]/h1/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    fulltext_xpath = '//div[@class="content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the record is retried/flagged instead of saved empty.
        raise Exception(f'fulltext node not found: {provider_url}')

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99087'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'RSTHEBEI'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'rsthebeicngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (if any) found in the fulltext node.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   河北省农业农村厅
def policy_nynchebeilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for nync.hebei.gov.cn (Hebei agriculture dept).

    On the first page, queues the remaining list pages of the column; on
    every page, extracts article links and queues them for the article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count comes from the pager text; default to 5 if absent.
        max_count = re.findall(r'总页数<span>1/(\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 5

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: enqueue the remaining list pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"list-{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # The notice column ("tzgg") uses different list markup.
        if 'tzgg' in callmodel.sql_model.list_rawid:
            li_list = res.xpath('//ul[@class="column-list"]/li')
        else:
            li_list = res.xpath('//ul[@class="list_ul"]/li')
        base_url = 'http://nync.hebei.gov.cn'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            href = li.xpath('a/@href').extract_first()
            url = base_url + href
            if '.htm' not in url:
                continue

            rawid = re.findall(r'(.*?)\.htm', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99088'
            article_json = {
                "url": url,
                "title": li.xpath('a/text()').extract_first().strip(),
                "pub_date": li.xpath('span/text()').extract_first(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_nynchebeiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for nync.hebei.gov.cn; no extra processing is needed."""
    return DealModel()


def policy_nynchebeiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for nync.hebei.gov.cn (Hebei agriculture dept) article pages.

    Builds the ``policy_latest`` and ``policy_fulltext_latest`` rows from the
    downloaded HTML and records attachment info back onto the source row.

    Raises:
        Exception: when the fulltext node cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the title on the detail page; fall back to the list-page title.
    title = ''.join(res.xpath('//h3[@class="mtitle"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    fulltext_xpath = '//div[@class="content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the record is retried/flagged instead of saved empty.
        raise Exception(f'fulltext node not found: {provider_url}')

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99088'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'NYNCHEBEI'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'nynchebeicngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (if any) found in the fulltext node.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   河北省住房和城乡建设厅
def policy_zfcxjsthebeilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for zfcxjst.hebei.gov.cn (Hebei housing & construction dept).

    On the first page, queues the remaining list pages of the column; on
    every page, extracts article links and queues them for the article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'共(\d+)条记录', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 5
        # Page size differs: the disclosure column ("zfxxgk") shows 21 items
        # per page, every other column shows 25.
        if 'zfxxgk' in callmodel.sql_model.list_rawid:
            total_page = math.ceil(max_count / 21)
        else:
            total_page = math.ceil(max_count / 25)

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: enqueue the remaining list pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"index_{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@id="lbUR"]/li|//div[@id="lineOl"]/ul/li')
        base_url = 'http://zfcxjst.hebei.gov.cn/xxgk/fdzdg/ghjh/index.html'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            href = li.xpath('div[1]/a/@href|a/@href').extract_first()
            url = parse.urljoin(base_url, href)
            if '.htm' not in url:
                continue
            rawid = re.findall(r'(.*?)\.htm', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99089'
            article_json = {
                "url": url,
                "title": li.xpath('div[1]/a/text()|a/text()').extract_first().strip(),
                "pub_date": li.xpath('div[4]/span/text()|span[@class="date"]/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_zfcxjsthebeiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for zfcxjst.hebei.gov.cn; no extra processing is needed."""
    return DealModel()


def policy_zfcxjsthebeiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for zfcxjst.hebei.gov.cn (Hebei housing & construction dept) articles.

    Handles the two known detail layouts (the "Bodyhead" metadata table and
    the <b>-label list), builds the ``policy_latest`` and
    ``policy_fulltext_latest`` rows, and records attachments found in the
    fulltext / attachment containers back onto the source row.

    Raises:
        Exception: when the fulltext node cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title: try the known detail-page layouts, then the meta tag, and
    # finally fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="title1"]/span/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="conbox"]//h1/text()').extract()).strip()
    if not title:
        # fixed: one union arm used a misspelled "@conten" attribute
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    if 'class="Bodyhead"' in html:
        # Layout A: metadata in a label/value table.
        organ = ''.join(res.xpath('//div[@class="Bodyhead"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
        pub_no = ''.join(res.xpath('//div[@class="Bodyhead"]//td[contains(text(),"文号")]/following::td[1]/text()').extract()).strip()
        subject = ''.join(res.xpath('//div[@class="Bodyhead"]//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
        subject_word = ''.join(res.xpath('//div[@class="Bodyhead"]//td[contains(text(),"主题词")]/following::td[1]/text()').extract()).strip()
        legal_status = ''
        index_no = ''.join(res.xpath('//div[@class="Bodyhead"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    else:
        # Layout B: metadata as "<b>label</b>：value" list items.
        organ = cleaned(res.xpath('//b[contains(text(),"发布机构")]/parent::li/text()').extract_first()).replace('：', '', 1)
        pub_no = cleaned(res.xpath('//b[contains(text(),"文　　号")]/parent::li/text()').extract_first()).replace('：', '', 1)
        subject = cleaned(res.xpath('//b[contains(text(),"主题分类")]/parent::li/text()').extract_first()).replace('：', '', 1)
        subject_word = cleaned(res.xpath('//b[contains(text(),"主 题 词")]/parent::li/text()').extract_first()).replace('：', '', 1)
        legal_status = cleaned(res.xpath('//b[contains(text(),"有效性")]/parent::li/text()').extract_first()).replace('：', '', 1)
        index_no = cleaned(res.xpath('//b[contains(text(),"索 引 号")]/parent::li/text()').extract_first()).replace('：', '', 1)

    fulltext_xpath = '//div[@id="BodyLabel"]|//div[@class="union"]|//div[@class="conbox"]|//div[@class="mainContnet"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the record is retried/flagged instead of saved empty.
        raise Exception(f'fulltext node not found: {provider_url}')
    if not pub_no:
        # Some pages carry the document number only in a plain span.
        pub_no_info = ''.join(res.xpath('//span[contains(text(),"文号：")]/text()').extract())
        if '文号' in pub_no_info:
            pub_no = pub_no_info.split('文号：')[-1].strip()
    # Prefix the province when the organ name omits it.
    if organ and ('河北' not in organ):
        organ = '河北' + organ

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99089'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'ZFCXJSTHEBEI'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'zfcxjsthebeicngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['subject_word'] = subject_word
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Attachments may live in the fulltext node or in dedicated containers.
    file_info = (get_file_info(data, res, f'({fulltext_xpath})')
                 + get_file_info(data, res, '(//div[@id="fujian"])')
                 + get_file_info(data, res, '(//div[@id="fujBox"])'))
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   河北省卫生健康委员会 (Hebei Provincial Health Commission)
def policy_wsjkwhebeilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for wsjkw.hebei.gov.cn (Hebei health commission).

    On the first page, queues the remaining list pages of the column; on
    every page, extracts article links and queues them for the article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # The last-but-one pager link holds the page count; default to 5.
        max_count = res.xpath('//div[@class="page-large"]//a[last()-1]/text()').extract()
        total_page = int(max_count[0]) if max_count else 5

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # NOTE(review): unlike sibling callbacks the range excludes
            # total_page — presumably because paging on this site is
            # 0-based; confirm against the site's pager.
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"index_{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        # The planning column ("ghxx") uses different list markup and puts
        # the date in <b> instead of <span>; everything else is identical.
        if 'ghxx' in callmodel.sql_model.list_rawid:
            li_list = res.xpath('//ul[@class="xxgk-list"]/li')
            date_xpath = 'b/text()'
        else:
            li_list = res.xpath('//ul[@class="er-list"]/li')
            date_xpath = 'span/text()'
        base_url = 'http://wsjkw.hebei.gov.cn/html/ghxx/index.jhtml'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            href = li.xpath('a/@href').extract_first()
            url = parse.urljoin(base_url, href)
            if 'wsjkw' not in url:
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99090'
            article_json = {
                "url": url,
                "title": li.xpath('a/text()').extract_first().strip(),
                "pub_date": li.xpath(date_xpath).extract_first(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_wsjkwhebeiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for wsjkw.hebei.gov.cn; no extra processing is needed."""
    return DealModel()


def policy_wsjkwhebeiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for wsjkw.hebei.gov.cn (Hebei health commission) article pages.

    Builds the ``policy_latest`` and ``policy_fulltext_latest`` rows from the
    downloaded HTML and records attachment info back onto the source row.

    Raises:
        Exception: when the fulltext node cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Two known detail-page layouts; fall back to the list-page title.
    title = ''.join(res.xpath('//h1[@class="title"]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="xxgk-contitle"]/h1/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    fulltext_xpath = '//div[@class="con-txt"]|//div[@class="xxgk-con"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the record is retried/flagged instead of saved empty.
        raise Exception(f'fulltext node not found: {provider_url}')

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99090'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'WSJKWHEBEI'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'wsjkwhebeicngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (if any) found in the fulltext node.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   河北省石家庄市
def policy_sjzlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for www.sjz.gov.cn (Shijiazhuang municipal government).

    On the first page, queues the remaining list pages of the column; on
    every page, extracts article links and queues them for the article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count comes from the "...条N页" pager text; default to 5.
        max_count = re.findall(r'条(\d+)页', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 5

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: enqueue the remaining list pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"index_{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        base_url = 'https://www.sjz.gov.cn'
        if '1607069302772' in callmodel.sql_model.list_rawid:
            # Card-style column: the date is free text inside the card, so it
            # goes to "pub_date_info" for the ETL stage to normalize.
            for li in res.xpath('//div[@class="nr-xh"]/following::div[1]/div/div'):
                temp = info_dicts.copy()
                temp["task_tag"] = temp.pop("task_tag_next")
                href = li.xpath('div[2]/a/@href').extract_first()
                url = base_url + href
                rawid = re.findall(r'(.*?)\.htm', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99091'
                article_json = {
                    "url": url,
                    "title": li.xpath('div[2]/a/div[1]/text()').extract_first().strip(),
                    "pub_date": '',
                    "pub_date_info": li.xpath('div[2]/a/div[2]/text()').extract_first().strip(),
                }
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            for li in res.xpath('//div[@class="nr"]//ul/li'):
                temp = info_dicts.copy()
                temp["task_tag"] = temp.pop("task_tag_next")
                href = li.xpath('div[2]/span/a/@href').extract_first()
                url = base_url + href
                rawid = re.findall(r'(.*?)\.htm', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99091'
                article_json = {
                    "url": url,
                    "title": li.xpath('div[2]/span/a/text()').extract_first().strip(),
                    "pub_date": li.xpath('div[2]/text()').extract_first(),
                    "pub_date_info": '',
                }
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_sjzarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for www.sjz.gov.cn; no extra processing is needed."""
    return DealModel()


def policy_sjzarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for www.sjz.gov.cn (Shijiazhuang) article pages.

    Builds the ``policy_latest`` and ``policy_fulltext_latest`` rows from the
    downloaded HTML; for the card-style column the publish date arrives as
    free text ("pub_date_info") and is normalized to YYYYMMDD here.

    Raises:
        Exception: when the fulltext node cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    if (not pub_date) and article_json.get('pub_date_info', ''):
        # Normalize free-text dates like "2021年1月5日" (sometimes 号 for 日).
        pub_date_info = article_json.get('pub_date_info', '')
        pub_date = re.findall(r'\d+年\d+月\d+日', pub_date_info.replace('号', '日'))[0]
        year = re.findall(r'(\d+)年', pub_date)
        year = year[0].rjust(4, '0') if year else '0000'
        month = re.findall(r'(\d+)月', pub_date)
        month = month[0].rjust(2, '0') if month else '00'
        day = re.findall(r'(\d+)日', pub_date)
        day = day[0].rjust(2, '0') if day else '00'
        pub_date = year + month + day
        pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the title from the hidden form field; fall back to the list page.
    title = ''.join(res.xpath('//input[@name="ArticleTitle"]/@value').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    fulltext_xpath = '//div[@id="conN"]|//font[@id="conN"]|//div[@id="gz"]|//div[@class="yjlmnr"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the record is retried/flagged instead of saved empty.
        raise Exception(f'fulltext node not found: {provider_url}')

    pub_no = ''.join(res.xpath('//div[@class="yjlmnr"]/div/div[1]/div[2]/text()').extract())

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99091'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'SJZ'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'sjzcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (if any) found in the fulltext node.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   河北省唐山市
def policy_tangshanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Tangshan city (Hebei) policy documents.

    On page 1 it derives the total page count from the pager text and
    enqueues the remaining list pages; for every list item it enqueues an
    article-level task whose article_json carries url/title/pub_date.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Pager text looks like ".../12页"; fall back to 5 pages when absent.
        # Raw string avoids the invalid-escape warning on \d.
        max_count = re.findall(r'/(\d+)页', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 5
        total_page = max_count

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: schedule list tasks for pages 2..total_page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)

            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"index_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="xxgk-xxbox"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('span[1]/a/@href').extract_first()
            # NOTE(review): plain concatenation (urljoin deliberately not
            # used) — assumes hrefs are site-absolute paths; confirm.
            base_url = 'http://new.tangshan.gov.cn'
            url = base_url + href
            # rawid is the document file name without its .htm suffix.
            rawid = re.findall(r'(.*?)\.htm', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99092'
            article_json["url"] = url
            article_json["title"] = li.xpath('span[1]/a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span[2]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_tangshanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Tangshan; all parsing happens in the ETL step."""
    return DealModel()


def policy_tangshanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Tangshan policy article pages.

    Builds one ``policy_latest`` metadata row and one
    ``policy_fulltext_latest`` row from the downloaded article HTML, then
    writes extracted file info back onto the source row via other_dicts.

    Raises:
        Exception: when the fulltext container (div#conN) is missing.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the on-page headline; fall back to the list-page title.
    title = ''.join(res.xpath('//h1[@id="biaoti"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    fulltext_xpath = '//div[@id="conN"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail with context instead of a bare ``raise Exception``.
        raise Exception(f"fulltext not found for {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99092'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    print(lngid)  # debug trace, kept consistent with sibling callbacks
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'TANGSHAN'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'tangshancngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Persist any extracted file info (presumably attachment links found
    # in the fulltext — see get_file_info) onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  河北省秦皇岛市 — Qinhuangdao City, Hebei Province
def policy_qhdlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Qinhuangdao (Hebei) policy documents.

    Handles three list layouts keyed off ``list_rawid``: the
    ``front_pcsec`` section pages, the ``gz-table`` pages (rawid
    contains '342') and the default ``zc-table`` pages. Page 1 also
    enqueues the remaining list pages.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count comes either from ">共N页" text or from the 尾页
        # (last-page) link's "pages=N" query parameter; default 5.
        max_count = re.findall(r'>共(\d+)页', para_dicts["data"]["1_1"]['html'])
        if not max_count:
            res = Selector(text=para_dicts["data"]["1_1"]['html'])
            page_info = res.xpath('//a[contains(text(),"尾页")]/@href').extract_first()
            max_count = re.findall(r'pages=(\d+)', page_info)
        max_count = int(max_count[0]) if max_count else 5
        total_page = max_count

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: schedule list tasks for pages 2..total_page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)

            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        if 'front_pcsec' in callmodel.sql_model.list_rawid:
            li_list = res.xpath('//div[contains(@class, "seclisttitle1 fl")]/parent::div[1]')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                # This one section carries an extra leading anchor, so the
                # document link is the second <a>.
                if '60CC571538AC7C6A009A548656C2C9E2' in callmodel.sql_model.list_rawid:
                    href = li.xpath('div[1]/a[2]/@href').extract_first()
                    title = li.xpath('div[1]/a[2]/text()').extract_first()
                else:
                    href = li.xpath('div[1]/a/@href').extract_first()
                    title = li.xpath('div[1]/a/text()').extract_first()
                base_url = 'http://www.qhd.gov.cn/'
                url = base_url + href
                # The detail URL ends with "...uuid=<id>"; use the id as rawid.
                rawid = url.split('uuid=')[-1]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99093'
                article_json["url"] = url
                article_json["title"] = title
                article_json["pub_date"] = li.xpath('div[2]/text()').extract_first()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            if '342' in callmodel.sql_model.list_rawid:
                li_list = res.xpath('//table[@class="gz-table"]/tr')[1:]
                for li in li_list:
                    temp = info_dicts.copy()
                    temp["task_tag"] = temp["task_tag_next"]
                    del temp["task_tag_next"]
                    article_json = dict()
                    href = li.xpath('td[2]/a/@href').extract_first()
                    base_url = 'http://www.qhd.gov.cn:81'
                    url = base_url + href
                    rawid = re.findall(r'(.*?)\.htm', url.split('/')[-1])[0]
                    temp["rawid"] = rawid
                    temp["sub_db_id"] = '99093'
                    article_json["url"] = url
                    article_json["title"] = li.xpath('td[2]/a/text()').extract_first().strip()
                    # The date only appears inside free text here; the ETL
                    # step parses pub_date_info into a real YYYYMMDD value.
                    article_json["pub_date"] = ''
                    article_json["pub_date_info"] = li.xpath('td[2]/p/text()').extract_first().strip()

                    temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                    di_model_next.lists.append(temp)
            else:
                li_list = res.xpath('//table[@class="zc-table"]/tr')[1:]
                for li in li_list:
                    temp = info_dicts.copy()
                    temp["task_tag"] = temp["task_tag_next"]
                    del temp["task_tag_next"]
                    article_json = dict()
                    href = li.xpath('td[2]/a/@href').extract_first()
                    base_url = 'http://www.qhd.gov.cn:81'
                    url = base_url + href
                    # Skip rows whose link is not a .htm document.
                    if 'htm' not in url:
                        continue
                    rawid = re.findall(r'(.*?)\.htm', url.split('/')[-1])[0]
                    temp["rawid"] = rawid
                    temp["sub_db_id"] = '99093'
                    article_json["url"] = url
                    article_json["title"] = li.xpath('td[2]/a/text()').extract_first().strip()
                    # NOTE(review): pub_date, pub_no and legal_status all read
                    # td[4] — looks like copy-pasted column indexes; verify
                    # against the live table layout before changing.
                    article_json["pub_date"] = li.xpath('td[4]/text()').extract_first().strip()
                    article_json["pub_no"] = li.xpath('td[4]/text()').extract_first().strip()
                    article_json["legal_status"] = li.xpath('td[4]/text()').extract_first().strip()

                    temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                    di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_qhdarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Qinhuangdao; all parsing happens in the ETL step."""
    return DealModel()


def policy_qhdarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Qinhuangdao policy article pages.

    Emits one ``policy_latest`` metadata row and one
    ``policy_fulltext_latest`` fulltext row, and stores extracted file
    info back onto the source row via other_dicts.

    Raises:
        Exception: when no fulltext container matches on the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    pub_no = article_json.get('pub_no', '')
    legal_status = article_json.get('legal_status', '')
    if (not pub_date) and article_json.get('pub_date_info', ''):
        # Recover the date from free text such as "2020年3月5号": normalise
        # "号" to "日", pick the first date, then zero-pad to YYYYMMDD.
        pub_date_info = article_json.get('pub_date_info', '')
        pub_date = re.findall(r'\d+年\d+月\d+日', pub_date_info.replace('号', '日'))[0]
        year = re.findall(r'(\d+)年', pub_date)
        year = year[0].rjust(4, '0') if year else '0000'
        month = re.findall(r'(\d+)月', pub_date)
        month = month[0].rjust(2, '0') if month else '00'
        day = re.findall(r'(\d+)日', pub_date)
        day = day[0].rjust(2, '0') if day else '00'
        pub_date = year + month + day
        pub_year = pub_date[:4]
    res = Selector(text=html)
    # Headline fallback chain: <h1> -> div.thirdtitle -> list-page title.
    title = ''.join(res.xpath('//h1/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="thirdtitle"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    fulltext_xpath = '//div[@class="thirdtext"]|//div[@id="content"]|//div[@class="gz-content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail with context instead of a bare ``raise Exception``.
        raise Exception(f"fulltext not found for {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99093'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    print(lngid)  # debug trace, kept consistent with sibling callbacks
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'QHD'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'qhdcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Persist any extracted file info (presumably attachment links found
    # in the fulltext — see get_file_info) onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  河北省邯郸市 — Handan City, Hebei Province
def policy_hdlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Handan (Hebei) government policy documents.

    Reads the total page count from the inline JS "countPage=N", enqueues
    the remaining list pages on the first run, and queues one article
    task per table row with url/title/pub_date in article_json.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid-escape warning on \d.
        max_count = re.findall(r'countPage=(\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 5
        total_page = max_count

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # NOTE(review): pages run 0..total_page-1 here (index_N file
            # naming), unlike the 1-based sibling callbacks — presumably
            # matches this site's pager; confirm before changing.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)

            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"index_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="szbmtclb fl clearfix"]/table/tr')[1:]
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('td[1]/a/@href').extract_first()
            # Relative hrefs are resolved against the section index page.
            base_url = 'https://www.hd.gov.cn/hdzfxxgk/zfwj/index_1.html'
            url = parse.urljoin(base_url, href)
            # rawid is the document file name without its .htm suffix.
            rawid = re.findall(r'(.*?)\.htm', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99094'
            article_json["url"] = url
            article_json["title"] = li.xpath('td[1]/a/text()').extract_first()
            article_json["pub_date"] = li.xpath('td[4]/text()').extract_first()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_hdarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Handan; all parsing happens in the ETL step."""
    return DealModel()


def policy_hdarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Handan policy article pages.

    Extracts title plus the labelled metadata fields (文号/索引号/主题分类/
    发布机构), emits ``policy_latest`` and ``policy_fulltext_latest`` rows,
    and stores extracted file info back on the source row.

    Raises:
        Exception: when the fulltext container (div.contents) is missing.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the on-page headline; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="title"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Each labelled field is the text next to its <strong> label; the
    # leading full-width colon of the label is stripped once.
    pub_no = ''.join(res.xpath('//strong[contains(text(),"文号")]/parent::p[1]/text()').extract()).strip().replace('：', '', 1)
    index_no = ''.join(res.xpath('//strong[contains(text(),"索引号")]/parent::p[1]/text()').extract()).strip().replace('：', '', 1)
    subject = ''.join(res.xpath('//strong[contains(text(),"主题分类")]/parent::p[1]/text()').extract()).strip().replace('：', '', 1)
    organ = ''.join(res.xpath('//strong[contains(text(),"发布机构")]/parent::p[1]/text()').extract()).strip().replace('：', '', 1)

    fulltext_xpath = '//div[@class="contents"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail with context instead of a bare ``raise Exception``.
        raise Exception(f"fulltext not found for {provider_url}")
    # Qualify bare "市..." organ names with the city name.
    if organ.startswith('市'):
        organ = '邯郸' + organ

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99094'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    print(lngid)  # debug trace, kept consistent with sibling callbacks
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'HD'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'hdcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Persist any extracted file info (presumably attachment links found
    # in the fulltext — see get_file_info) onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  河北省邢台市 — Xingtai City, Hebei Province
def policy_xingtailist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Xingtai (Hebei) policy documents.

    Handles two list layouts keyed off ``list_rawid``: the policy
    interpretation pages (zwgk/wjjd/zcjd, an <ul class="listrgterji">)
    and the default tabular layout (table.tabline). Page 0 also enqueues
    the remaining list pages.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total pages come from the inline JS "countPage = N"; default 5.
        max_count = re.findall(r'countPage = (\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 5
        total_page = max_count

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # NOTE(review): pages run 0..total_page-1 here (index_N file
            # naming), unlike the 1-based sibling callbacks — presumably
            # matches this site's pager; confirm before changing.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)

            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"index_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        if 'zwgk/wjjd/zcjd' in callmodel.sql_model.list_rawid:
            li_list = res.xpath('//ul[@class="listrgterji"]/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                base_url = f'http://www.xingtai.gov.cn/{callmodel.sql_model.list_rawid}/index_1.html'
                url = parse.urljoin(base_url, href)
                # Skip off-site links mixed into the interpretation lists.
                if 'xingtai' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.htm', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99095'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/text()').extract_first()
                article_json["pub_date"] = li.xpath('span/text()').extract_first()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            li_list = res.xpath('//table[@class="tabline"]/tr')[1:]
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[2]/a/@href').extract_first()
                base_url = f'http://www.xingtai.gov.cn/{callmodel.sql_model.list_rawid}/index_1.html'
                url = parse.urljoin(base_url, href)
                rawid = re.findall(r'(.*?)\.htm', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99095'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[2]/a/text()').extract_first()
                article_json["pub_date"] = li.xpath('td[4]/text()').extract_first()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_xingtaiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Xingtai; all parsing happens in the ETL step."""
    return DealModel()


def policy_xingtaiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Xingtai policy article pages.

    Extracts title plus the labelled metadata fields (文号/索引号/主题分类/
    主题词/发布机构), emits ``policy_latest`` and ``policy_fulltext_latest``
    rows, and stores extracted file info back on the source row.

    Raises:
        Exception: when the fulltext container (div#zoom) is missing.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    # NOTE(review): pub_date is cleaned here and again before storage —
    # assumes clean_pubdate is idempotent; confirm.
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Headline fallback chain: conbox <h2> -> conbox <h1> -> list title.
    title = ''.join(res.xpath('//div[@class="my_conbox"]//h2/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="my_conbox"]//h1/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Label spans contain spaced characters (e.g. "文  号"), so match on
    # the individual characters rather than the whole label.
    pub_no = ''.join(res.xpath('//span[contains(text(),"文") and contains(text(),"号")]/parent::li[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//span[contains(text(),"索") and contains(text(),"号")]/parent::li[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//span[contains(text(),"主") and contains(text(),"分")]/parent::li[1]/text()').extract()).strip()
    subject_word = ''.join(res.xpath('//span[contains(text(),"主") and contains(text(),"词")]/parent::li[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//span[contains(text(),"发") and contains(text(),"构")]/parent::li[1]/text()').extract()).strip()

    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail with context instead of a bare ``raise Exception``.
        raise Exception(f"fulltext not found for {provider_url}")
    # Qualify bare "市..." organ names with the city name.
    if organ.startswith('市'):
        organ = '邢台' + organ

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99095'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    print(lngid)  # debug trace, kept consistent with sibling callbacks
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'XINGTAI'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'xingtaicngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['subject_word'] = subject_word
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Persist any extracted file info (presumably attachment links found
    # in the fulltext — see get_file_info) onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_xingtailist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """JSON list-API callback for the Xingtai policy service (hqzc.xtsblx.cn).

    On page 1 it enqueues the remaining list pages; every row in the JSON
    payload becomes an article task with url/title/pub_date in article_json.
    """
    result = DealModel()
    payload = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_info = {"task_name": callmodel.sql_model.task_name,
                 "task_tag": callmodel.sql_model.task_tag,
                 "task_tag_next": task_info.task_tag_next}
    if "1_1" not in payload["data"]:
        return result

    data = payload["data"]["1_1"]
    total_page = data['data']['total']

    page_index = int(callmodel.sql_model.page_index)
    if page_index == 1:
        # First page: schedule the remaining list pages.
        di_model_bef = DealInsertModel()
        di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
        sql_dict = deal_sql_dict(callmodel.sql_model.dict())

        for page in range(page_index + 1, total_page):
            sql_dict["page"] = total_page
            sql_dict["page_index"] = page
            sql_dict["list_json"] = json.dumps({"page_info": f"index_{page}"}, ensure_ascii=False)
            di_model_bef.lists.append(sql_dict.copy())
        result.befor_dicts.insert.append(di_model_bef)

    di_model_next = DealInsertModel()
    di_model_next.insert_pre = CoreSqlValue.insert_ig_it

    for row in data['data']['rows']:
        task_row = base_info.copy()
        # Promote the next-stage tag to be this task's tag.
        task_row["task_tag"] = task_row.pop("task_tag_next")
        rawid = row['uuid']
        task_row["rawid"] = rawid
        task_row["sub_db_id"] = '99095'
        article_json = {
            "url": f'http://hqzc.xtsblx.cn/#/PolicyDetails?uuid={rawid}',
            "title": row['title'],
            "pub_date": row['pubDate'],
        }
        task_row["article_json"] = json.dumps(article_json, ensure_ascii=False)
        di_model_next.lists.append(task_row)

    result.next_dicts.insert.append(di_model_next)

    return result


def policy_xingtaiarticle1_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Xingtai; nothing to schedule, so return an empty DealModel."""
    return DealModel()


def policy_xingtaiarticle1_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Xingtai policy articles.

    The downloaded "html" is a JSON payload; the article body lives under
    data.cmsPolicy.content. Builds the policy_latest and
    policy_fulltext_latest rows and writes attachment info back onto the
    task row.

    Raises:
        Exception: when the article content is empty.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    html_json = json.loads(html)
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    text = html_json['data']['cmsPolicy']['content']
    res = Selector(text=text)

    organ = html_json['data']['cmsPolicy']['pubDept']

    # The JSON content itself is the fulltext; the xpath is only used for
    # attachment extraction via get_file_info below.
    fulltext_xpath = '//body'
    fulltext = text
    if not fulltext:
        raise Exception(f'empty fulltext: {provider_url}')
    if organ.startswith('市'):
        # Bare "市..." organ names lack the city; qualify with 邢台.
        organ = '邢台' + organ

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99095'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'XINGTAI'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'xingtaicngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    # pub_no / index_no / written_date / subject / legal_status are not
    # exposed by this site's JSON payload.
    data['organ'] = organ
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (or an empty dict) back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  河北省保定市
def policy_baodinglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Baoding (www.baoding.gov.cn) policy crawling.

    On the first page, computes the total page count from the "/<span>N"
    counter (page size 15 for rsrmlist, 20 otherwise) and schedules the
    remaining list pages; then queues one article-level task per table row.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'/<span>(\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 5
        # rsrmlist pages hold 15 entries, all other lists 20.
        if 'rsrmlist' in callmodel.sql_model.list_rawid:
            total_page = math.ceil(max_count / 15)
        else:
            total_page = math.ceil(max_count / 20)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)

            # Iterate to total_page + 1 so the last page is scheduled too
            # (matches the sibling zjk/chengde/cangzhou callbacks; the old
            # end of total_page skipped the final page).
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//table[@class="list"]/tr|//table[@class="doclist"]/tr')
        base_url = 'http://www.baoding.gov.cn/'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            # zwgkdoclist rows carry the link and date in different columns.
            if 'zwgkdoclist' in callmodel.sql_model.list_rawid:
                href = li.xpath('td[2]/a/@href').extract_first()
                title = li.xpath('td[2]/a/text()').extract_first()
                pub_date = li.xpath('td[4]/text()').extract_first()
            else:
                href = li.xpath('td[1]/a/@href').extract_first()
                title = li.xpath('td[1]/a/text()').extract_first()
                pub_date = li.xpath('td[1]/span/text()|td[2]/text()').extract_first()
            if not href:
                continue
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            if '.htm' not in url:
                continue
            # rawid is the file stem of the article page URL.
            rawid = re.findall(r'(.*?)\.htm', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99096'
            article_json["url"] = url
            article_json["title"] = title
            # Guard: the date cell may be absent for some rows.
            article_json["pub_date"] = (pub_date or '').replace('[', '').replace(']', '')
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_baodingarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Baoding; nothing to schedule, so return an empty DealModel."""
    return DealModel()


def policy_baodingarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Baoding policy articles.

    Extracts metadata (title, organ, pub_no, written_date, subject,
    legal_status) from the article HTML, emits policy_latest and
    policy_fulltext_latest rows, and writes attachment info back onto the
    task row.

    Raises:
        Exception: when the fulltext node cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the on-page title; fall back to the meta tag, then the list title.
    title = ''.join(res.xpath('//div[@class="content-title"]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    organ = ''.join(res.xpath('//td//span[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    pub_no = ''.join(res.xpath('//td//span[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
    written_date = clean_pubdate(''.join(res.xpath('//td//span[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip())
    subject = ''.join(res.xpath('//td//span[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//td//span[contains(text(),"效力状态")]/following::td[1]/text()').extract()).strip()

    fulltext_xpath = '//div[@id="scroll-area"]|//table[@class="sj_nr"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99096'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'BAODING'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'baodingcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    # pub_date was already cleaned above; clean_pubdate is applied again to
    # keep the original behavior (assumed idempotent — TODO confirm).
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['written_date'] = written_date
    data['subject'] = subject
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (or an empty dict) back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  河北省张家口市
def policy_zjklist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Zhangjiakou (www.zjk.gov.cn) policy crawling.

    On the first page, reads the total page count from the "i>/N" pager and
    schedules the remaining list pages; then queues one article-level task
    per entry (ul-based lists or the zc-table layout, depending on
    list_rawid).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'i>/(\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 5
        total_page = max_count

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)

            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Base used only to resolve relative hrefs.
        base_url = 'https://www.zjk.gov.cn/xxgk/list/list.thtml?classId=534&orgCode=&pn=2'
        if 'list' in callmodel.sql_model.list_rawid:
            li_list = res.xpath('//div[@class="content"]/ul/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                url = parse.urljoin(base_url, href)
                if 'zjk' not in url or 'html' not in url:
                    continue

                # rawid is the file stem of the article page URL.
                rawid = re.findall(r'(.*?)\.htm', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99097'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/text()').extract_first()
                article_json["pub_date"] = li.xpath('span/text()').extract_first()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # Table layout: skip the header row.
            li_list = res.xpath('//table[@class="zc-table"]/tr')[1:]
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[2]/a/@href').extract_first()
                url = parse.urljoin(base_url, href)
                if 'zjk' not in url or 'html' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.htm', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99097'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[2]/a/text()').extract_first()
                article_json["pub_date"] = li.xpath('td[4]/text()').extract_first()
                article_json["pub_no"] = li.xpath('td[3]/text()').extract_first()
                article_json["legal_status"] = li.xpath('td[5]/text()').extract_first()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_zjkarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Zhangjiakou; nothing to schedule, so return an empty DealModel."""
    return DealModel()


def policy_zjkarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Zhangjiakou policy articles.

    pub_no and legal_status come from the list stage (article_json); the
    title and fulltext are extracted from the article HTML. Emits
    policy_latest and policy_fulltext_latest rows and writes attachment
    info back onto the task row.

    Raises:
        Exception: when the fulltext node cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    pub_no = article_json.get('pub_no', '')
    legal_status = article_json.get('legal_status', '')
    res = Selector(text=html)
    # Prefer the on-page title; fall back to the list title.
    title = ''.join(res.xpath('//h1[@class="t-type2"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@id="content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99097'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'ZJK'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'zjkcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    # organ / index_no / written_date / subject are not reliably exposed
    # on this site's article pages.
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (or an empty dict) back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  河北省承德市
def policy_chengdelist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Chengde (www.chengde.gov.cn) policy crawling.

    On the first page, reads the total page count from the "共N页" pager and
    schedules the remaining list pages (propagating the original list_json);
    then queues one article-level task per xxgk_list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'共(\d+)页', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)

            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Follow-up pages reuse this task's list_json unchanged.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Base used only to resolve relative hrefs.
        base_url = 'http://www.chengde.gov.cn/col/col9934/index.html?number=CD0001A00003'
        li_list = res.xpath('//li[@class="xxgk_list"]')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a[@class="t-title"]/@href').extract_first()
            url = parse.urljoin(base_url, href)
            if '.htm' not in url:
                continue
            # rawid is the file stem of the article page URL.
            rawid = re.findall(r'(.*?)\.htm', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99098'
            article_json["url"] = url
            article_json["title"] = li.xpath('a[@class="t-title"]/text()').extract_first()
            article_json["pub_date"] = li.xpath('span[@class="t-time"]/text()').extract_first()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_chengdearticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Chengde; nothing to schedule, so return an empty DealModel."""
    return DealModel()


def policy_chengdearticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Chengde policy articles.

    Metadata cells are "label：value" strings in <td> elements; each value
    is taken from the text after the full-width colon. Emits policy_latest
    and policy_fulltext_latest rows and writes attachment info back onto
    the task row.

    Raises:
        Exception: when the fulltext node cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the on-page title; fall back to the list title.
    title = ''.join(res.xpath('//div[@class="title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no_info = ''.join(res.xpath('//td[contains(text(),"文") and contains(text(),"编")]/text()').extract()).strip()
    pub_no = pub_no_info.split('：')[-1].strip()
    index_no_info = ''.join(res.xpath('//td[contains(text(),"索") and contains(text(),"号")]/text()').extract()).strip()
    index_no = index_no_info.split('：')[-1].strip()
    subject_info = ''.join(res.xpath('//td[contains(text(),"主") and contains(text(),"分")]/text()').extract()).strip()
    subject = subject_info.split('：')[-1].strip()
    legal_status_info = ''.join(res.xpath('//td[contains(text(),"有") and contains(text(),"性")]/text()').extract()).strip()
    legal_status = legal_status_info.split('：')[-1].strip()
    organ_info = ''.join(res.xpath('//td[contains(text(),"发") and contains(text(),"构")]/text()').extract()).strip()
    organ = organ_info.split('：')[-1].strip()
    if organ.startswith('市'):
        # Bare "市..." organ names lack the city; qualify with 承德.
        organ = '承德' + organ

    fulltext_xpath = '//div[@class="cont"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99098'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'CHENGDE'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'chengdecngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    # pub_date was already cleaned above; clean_pubdate is applied again to
    # keep the original behavior (assumed idempotent — TODO confirm).
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (or an empty dict) back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  河北省沧州市
def policy_cangzhoulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Cangzhou (www.cangzhou.gov.cn) policy crawling.

    On the first page, reads the total page count from the
    "'page_tag',N," pager script and schedules the remaining list pages
    (extending the page_info from the current list_json); then queues one
    article-level task per entry (ul list for c1000, zb_tab table
    otherwise).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r"'page_tag',(\d+),", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Follow-up pages extend the current page_info with the page number.
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Base used only to resolve relative hrefs.
        base_url = 'https://www.cangzhou.gov.cn/cangzhou/gfxwj/list.shtml'
        if 'c1000' in callmodel.sql_model.list_rawid:
            li_list = res.xpath('//ul[@class="list"]/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                url = parse.urljoin(base_url, href)
                if '.shtm' not in url:
                    continue
                # rawid is the file stem of the article page URL.
                rawid = re.findall(r'(.*?)\.shtm', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99099'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/text()').extract_first()
                article_json["pub_date"] = li.xpath('span/text()').extract_first()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # Table layout: skip the header row.
            li_list = res.xpath('//table[@class="zb_tab"]/tr')[1:]
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[2]/a/@href').extract_first()
                url = parse.urljoin(base_url, href)
                if '.shtm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.shtm', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99099'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[2]/a/text()').extract_first()
                article_json["pub_date"] = li.xpath('td[5]/span/text()').extract_first()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cangzhouarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Cangzhou; nothing to schedule, so return an empty DealModel."""
    return DealModel()


def policy_cangzhouarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for one Cangzhou (cangzhou.gov.cn) policy article page.

    Parses title, document number, index number, subject, issuing organ,
    dates and legal status out of the detail page, then queues one
    metadata row for ``policy_latest`` and one HTML fulltext row for
    ``policy_fulltext_latest``.  Attachment info extracted from the
    fulltext region is written back into the source row's
    ``other_dicts`` column.

    Raises:
        Exception: when no fulltext region can be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title: detail-page headline first, then the alternate template,
    # finally the title captured at list stage.
    title = ''.join(res.xpath('//div[@class="file_detail"]//h1/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//p[@class="collectIdeaTitle"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata table: each label <span> is followed by a <p> with the value.
    pub_no = ''.join(res.xpath('//ul[@class="category"]//span[contains(text(),"文") and contains(text(),"号")]/following::p[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//ul[@class="category"]//span[contains(text(),"索引号")]/following::p[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//ul[@class="category"]//span[contains(text(),"主题分类")]/following::p[1]/text()').extract()).strip()
    subject_word = ''.join(res.xpath('//ul[@class="category"]//span[contains(text(),"主题词")]/following::p[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//ul[@class="category"]//span[contains(text(),"有效性")]/following::p[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//ul[@class="category"]//span[contains(text(),"成文日期")]/following::p[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//ul[@class="category"]//span[contains(text(),"发布机构")]/following::p[1]/text()').extract()).strip()
    # NOTE(review): commas are stripped from organ — presumably a
    # multi-value separator downstream; confirm against the consumer.
    organ = organ.replace(',', '')

    fulltext_xpath = '//div[@class="file_detail"]|//div[@class="collectIdeaMainText"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Was a bare ``raise Exception``; keep the type (callers may catch
        # Exception) but add context for debugging.
        raise Exception(f"cangzhou etl: fulltext not found at {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99099'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'CANGZHOU'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'cangzhoucngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['subject_word'] = subject_word
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Write attachment info (if any) back into the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  河北省廊坊市
def policy_lflist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Langfang (www.lf.gov.cn) policy lists.

    On page 1 the total page count is read from the page HTML ("共N页")
    and one list task is inserted per remaining page.  Every article
    link on the current page is then queued as an article task keyed by
    the ``.aspx`` file name.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # "共N页" gives the total page count; default to a single page.
        max_count = re.findall(r"共(\d+)页", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"Index_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="mainContent"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                continue
            base_url = 'http://www.lf.gov.cn'
            # hrefs here are site-absolute paths, so plain concatenation
            # (not urljoin) is what the site expects.
            url = base_url + href
            rawid = re.findall(r'(.*?)\.aspx', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99100'
            article_json["url"] = url
            article_json["title"] = ''.join(li.xpath('a//text()').extract())
            article_json["pub_date"] = li.xpath('span/text()').extract_first()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_lflist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Langfang open-government lists (zfxxgk.lf.gov.cn).

    On page 1 the total record count is fetched live from ``r_url`` in
    list_json and converted to a page count at 15 records per page; one
    list task is inserted per remaining page.  Each table row of the
    current page becomes an article task keyed by its ``infoId``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        page_index = int(callmodel.sql_model.page_index)
        list_json = json.loads(callmodel.sql_model.list_json)
        if page_index == 1:
            header = {
                "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
                "Accept-Encoding": "gzip, deflate",
                "Accept-Language": "zh-CN,zh;q=0.9",
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36",
            }
            # timeout added so a stalled request cannot hang the worker forever
            response = requests.get(list_json['r_url'], headers=header, timeout=60)
            max_count = re.findall(r"recordCount = '(\d+)", response.text)
            max_count = int(max_count[0]) if max_count else 1
            total_page = math.ceil(max_count / 15)  # 15 records per page
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # list_json is reused verbatim; the fetcher presumably
                # derives paging from page_index — TODO confirm.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//tr')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('td[2]/a/@href').extract_first()
            if not href:
                continue
            base_url = f'http://zfxxgk.lf.gov.cn/Zc/zcinfolist?deptId=39501&menuId=333'
            url = parse.urljoin(base_url, href)
            if 'infoId=' not in url:
                continue
            rawid = url.split('infoId=')[-1]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99100'
            article_json["url"] = url
            article_json["title"] = ''.join(li.xpath('td[2]/a//text()').extract()).strip()
            # NOTE(review): td[4] with no text node would make extract_first()
            # return None and this line raise — looks intentional (fail the
            # task rather than store an empty date); confirm.
            article_json["pub_date"] = li.xpath('td[4]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_lfarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Langfang; nothing to post-process, so an
    empty DealModel is returned."""
    return DealModel()


def policy_lfarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for one Langfang (lf.gov.cn) policy article page.

    Extracts title / organ / document number / index / subject / status
    metadata and the fulltext HTML, queueing one row for
    ``policy_latest`` and one for ``policy_fulltext_latest``.
    Attachment info found in the fulltext region is written back to the
    source row's ``other_dicts``.

    Raises:
        Exception: when no fulltext region can be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title: try the three page templates in turn, then fall back to the
    # title captured at list stage.
    title = ''.join(res.xpath('//h2[@class="articleTitle"]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="titleBar"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata table: each label cell is followed by its value cell.
    organ = ''.join(res.xpath('//td[contains(text(),"发布机构：")]/following::td[1]//text()').extract()).strip()
    pub_no = ''.join(res.xpath('//td[contains(text(),"发文字号：")]/following::td[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//td[contains(text(),"文件索引：")]/following::td[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//td[contains(text(),"主题分类：")]/following::td[1]//text()').extract()).strip()
    legal_status = ''.join(res.xpath('//td[contains(text(),"效力状态：")]/following::td[1]//text()').extract()).strip()
    # Organs listed as bare "市..." get the city name prefixed.
    if organ.startswith('市'):
        organ = '廊坊' + organ

    fulltext_xpath = '//div[@class="conTxt"]|//td[@class="con-con-cnt"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Was a bare ``raise Exception``; keep the type, add context.
        raise Exception(f"lf etl: fulltext not found at {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99100'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'LF'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'lfcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Write attachment info (if any) back into the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  河北省衡水市
def policy_hengshuilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Hengshui (xxgk.hengshui.gov.cn) policy lists.

    On page 1 the total page count is read from ``totalpage="N"`` in the
    HTML and one list task is inserted per remaining page.  Each table
    row of the current page becomes an article task keyed by its
    ``articleKey``.  Column '2469951' uses a different table class and
    date column than the other columns.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'totalpage="(\d+)"', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # list_json is reused verbatim; the fetcher presumably
                # derives paging from page_index — TODO confirm.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # The two layouts differ only in table class and date column.
        if '2469951' in callmodel.sql_model.list_rawid:
            li_list = res.xpath('//table[@class="xxgkzclbtab3"]//tr')
            date_xpath = 'td[4]/text()'
        else:
            li_list = res.xpath('//table[@class="tab3"]//tr')
            date_xpath = 'td[5]/text()'
        base_url = 'http://xxgk.hengshui.gov.cn/eportal/ui'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('td[2]/a/@href').extract_first()
            if not href:
                # rows without a link (e.g. header rows) previously raised
                # TypeError on ``base_url + None``
                continue
            # hrefs are query-string suffixes of the portal URL, so plain
            # concatenation (not urljoin) is what the site expects.
            url = base_url + href
            rawid = re.findall(r'articleKey=(.*?)&', url)[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99101'
            article_json["url"] = url
            article_json["title"] = li.xpath('td[2]/a/text()').extract_first()
            article_json["pub_date"] = li.xpath(date_xpath).extract_first()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_hengshuiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Hengshui; nothing to post-process, so an
    empty DealModel is returned."""
    return DealModel()


def policy_hengshuiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for one Hengshui (xxgk.hengshui.gov.cn) policy article page.

    Extracts title / document number / index number / subject / organ
    metadata and the fulltext HTML, queueing one row for
    ``policy_latest`` and one for ``policy_fulltext_latest``.
    Attachment info found in the fulltext region is written back to the
    source row's ``other_dicts``.

    Raises:
        Exception: when no fulltext region can be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    # assumes the list-page date string starts with YYYY — TODO confirm
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title from the detail page, falling back to the list-stage title.
    title = ''.join(res.xpath('//div[@class="xl_tit"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata table: each label <strong> sits in a cell followed by the
    # value cell; label text includes full-width padding spaces.
    pub_no = ''.join(res.xpath('//strong[contains(text(),"文　　号：")]/parent::td[1]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//strong[contains(text(),"索 引 号：")]/parent::td[1]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//strong[contains(text(),"主题分类")]/parent::td[1]/following::td[1]/text()').extract()).strip()
    subject_word = ''.join(res.xpath('//strong[contains(text(),"主 题 词：")]/parent::td[1]/following::td[1]/text()').extract()).strip()
    # The site exposes no validity field; every record is stored as "有效".
    legal_status = '有效'
    organ = ''.join(res.xpath('//strong[contains(text(),"发布机构：")]/parent::td[1]/following::td[1]/text()').extract()).strip()

    fulltext_xpath = '//div[@class="hsxxgk_cont"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Was a bare ``raise Exception``; keep the type, add context.
        raise Exception(f"hengshui etl: fulltext not found at {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99101'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'HENGSHUI'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'hengshuicngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['subject_word'] = subject_word
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Write attachment info (if any) back into the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  山西省发展和改革委员会
def policy_fgwshanxilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for the Shanxi DRC (fgw.shanxi.gov.cn) lists.

    On page 0 the total page count is read from ``countPage = N`` in the
    HTML and one list task is inserted per remaining page (this site's
    pages appear to be 0-indexed — hence ``page_index == 0`` and the
    exclusive upper bound).  Each ``.shtm`` link on the current page is
    queued as an article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'countPage = (\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"index_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        if 'zfxxgkxgwj' in callmodel.sql_model.list_rawid:
            li_list = res.xpath('//ul[@class="infodisc-con_cntitem--list"]/li')
        else:
            li_list = res.xpath('//ul[@class="submenu-dropbox_subtabs_content fixmt10"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                # without this guard urljoin(base_url, None) returns the
                # list URL itself (which ends in .shtml) and a bogus
                # article task with rawid "index" was enqueued
                continue
            base_url = f'http://fgw.shanxi.gov.cn/{callmodel.sql_model.list_rawid}/index.shtml'
            url = parse.urljoin(base_url, href)
            if '.shtm' not in url:
                continue
            rawid = re.findall(r'(.*?)\.shtm', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99102'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first()
            article_json["pub_date"] = li.xpath('em/text()').extract_first()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_fgwshanxiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the Shanxi DRC; nothing to post-process,
    so an empty DealModel is returned."""
    return DealModel()


def policy_fgwshanxiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for one Shanxi DRC (fgw.shanxi.gov.cn) policy article page.

    This site's detail pages expose no structured metadata (no document
    number, organ, subject, etc.), so only title / URL / dates plus the
    fulltext HTML are stored: one row for ``policy_latest`` and one for
    ``policy_fulltext_latest``.  Attachment info found in the fulltext
    region is written back to the source row's ``other_dicts``.

    Raises:
        Exception: when no fulltext region can be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    # assumes the list-page date string starts with YYYY — TODO confirm
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title: try both page templates, then fall back to the list-stage title.
    title = ''.join(res.xpath('//div[@class="detail-article-title oflow-hd"]/h3/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//dl[@class="infodisc-con_cntitle"]/dt/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@class="article-body oflow-hd"]|//div[@class="infodisc-con_cntbox"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Was a bare ``raise Exception``; keep the type, add context.
        raise Exception(f"fgwshanxi etl: fulltext not found at {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99102'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'FGWSHANXI'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'fgwshanxicngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Write attachment info (if any) back into the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  山西省工业和信息化厅
def policy_gxtshanxilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for the Shanxi MIIT dept (gxt.shanxi.gov.cn).

    On page 0 the total page count is read from ``countPage = N`` in the
    HTML and one list task is inserted per remaining page (pages appear
    to be 0-indexed on this site — hence ``page_index == 0`` and the
    exclusive upper bound).  Each ``.shtm`` link on the current page is
    queued as an article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'countPage = (\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="zwgk-ul"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            # Guard moved before urljoin: the original only worked because
            # urljoin(base, None) happens to return base.
            if not href:
                continue
            base_url = f'https://gxt.shanxi.gov.cn/{callmodel.sql_model.list_rawid}/index.shtml'
            url = parse.urljoin(base_url, href)
            if '.shtm' not in url:
                continue
            rawid = re.findall(r'(.*?)\.shtm', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99103'
            article_json["url"] = url
            # NOTE(review): anchors/`<i>` with no direct text would make
            # extract_first() return None and .strip() raise — looks
            # intentional (fail the task rather than store blanks); confirm.
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('i/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_gxtshanxiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for gxt.shanxi.gov.cn; nothing to do at this
    stage (all parsing happens in the ETL callback), so return an empty
    DealModel."""
    return DealModel()


def policy_gxtshanxiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for gxt.shanxi.gov.cn policy articles.

    Parses the downloaded article page, builds one row for ``policy_latest``
    and one for ``policy_fulltext_latest``, and writes attachment info
    (from get_file_info) back onto the source row's ``other_dicts`` column.

    Raises:
        Exception: if the fulltext container cannot be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the title on the article page; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="m-3"]/h1[1]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@class="content-article"]/div[contains(@class,"textbody")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # No fulltext container means the page layout changed or the download
        # failed; fail loudly with enough context to locate the page.
        raise Exception(f"fulltext not found: {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99103'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'GXTSHANXI'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'gxtshanxicngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment links found inside the fulltext container.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  山西省科学技术厅 (Shanxi Provincial Department of Science and Technology)
def policy_kjtshanxilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Shanxi Provincial Department of Science and
    Technology (kjt.shanxi.gov.cn).

    On the first list page (page_index == 0) it fans out the remaining list
    pages as "before" insert tasks; on every page it extracts article links
    (two page layouts, selected by ``list_rawid``) and queues them as "next"
    article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page's JS ("countPage = N").
        max_count = re.findall(r'countPage = (\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: schedule insert tasks for every remaining list page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Relative hrefs resolve against the channel index page.
        base_url = f'http://kjt.shanxi.gov.cn/{callmodel.sql_model.list_rawid}/index.shtml'
        if 'xxgk' in callmodel.sql_model.list_rawid:
            # Government-information-disclosure layout: a table of rows.
            li_list = res.xpath('//div[@class="main"]//tr[@class="bgtitle"]/following::tr')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[2]/a/@href').extract_first()
                if not href:
                    continue
                url = parse.urljoin(base_url, href)
                if '.shtm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.shtm', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99104'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[2]/a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('td[3]//text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # Regular news-list layout: <ul>/<li> entries without a date column.
            li_list = res.xpath('//div[@class="rightcontent"]/ul/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                if not href:
                    continue
                url = parse.urljoin(base_url, href)
                if 'kjt.shanxi' not in url:
                    continue
                if '.shtm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.shtm', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99104'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/text()').extract_first().strip()
                article_json["pub_date"] = ''
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_kjtshanxiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for kjt.shanxi.gov.cn; no extra processing is
    needed here (parsing happens in the ETL callback), so return an empty
    DealModel."""
    return DealModel()


def policy_kjtshanxiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for kjt.shanxi.gov.cn policy articles.

    Parses the downloaded article page, builds one row for ``policy_latest``
    and one for ``policy_fulltext_latest``, and writes attachment info
    (from get_file_info) back onto the source row's ``other_dicts`` column.

    Raises:
        Exception: if the fulltext container cannot be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the title on the article page; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="content"]/h1/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Issuing organ sits in the table cell following the "发布机构：" label.
    organ = ''.join(
        res.xpath('//strong[contains(text(),"发布机构：")]/parent::td[1]/following::td[1]/text()').extract()).strip()

    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # No fulltext container means the page layout changed or the download
        # failed; fail loudly with enough context to locate the page.
        raise Exception(f"fulltext not found: {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99104'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'KJTSHANXI'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'kjtshanxicngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['organ'] = organ
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment links found inside the fulltext container.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  山西省教育厅 (Shanxi Provincial Department of Education)
def policy_jytshanxilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Shanxi Provincial Department of Education
    (jyt.shanxi.gov.cn).

    On the first list page (page_index == 0) it fans out the remaining list
    pages as "before" insert tasks; on every page it extracts article links
    (two page layouts, selected by ``list_rawid``) and queues them as "next"
    article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page's JS ("countPage = N").
        max_count = re.findall(r'countPage = (\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: schedule insert tasks for every remaining list page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Relative hrefs resolve against the channel index page.
        base_url = f'http://jyt.shanxi.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
        if 'sjytxxgk' in callmodel.sql_model.list_rawid:
            # Information-disclosure layout: table rows (skip the header row).
            li_list = res.xpath('//table[@id="searchsection"]/tr')[1:]
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[1]/a/@href').extract_first()
                if not href:
                    continue
                url = parse.urljoin(base_url, href)
                if '.htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.htm', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99105'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[1]/a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('td[3]/span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # Notice/announcement layout: plain <ul>/<li> list.
            li_list = res.xpath('//div[@class="xwzx_ggtz"]/ul/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                if not href:
                    continue
                url = parse.urljoin(base_url, href)
                if '.htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.htm', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99105'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jytshanxiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for jyt.shanxi.gov.cn; no extra processing is
    needed here (parsing happens in the ETL callback), so return an empty
    DealModel."""
    return DealModel()


def policy_jytshanxiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for jyt.shanxi.gov.cn policy articles.

    Parses the downloaded article page (title, document number, index number,
    subject fields, issuing organ), builds one row for ``policy_latest`` and
    one for ``policy_fulltext_latest``, and writes attachment info (from
    get_file_info) back onto the source row's ``other_dicts`` column.

    Raises:
        Exception: if the fulltext container cannot be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Two possible detail layouts; fall back to the list-page title last.
    title = ''.join(res.xpath('//div[@class="xwzx_ggtz_xq"]/h1//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="xxgk_gkml_xq"]/h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata lives in a labelled table; each value is the text of the cell
    # that contains the corresponding label span.
    pub_no = ''.join(
        res.xpath('//table[@class="gkmltable1"]//span[contains(text(),"文号")]/parent::td[1]/text()').extract()).strip()
    index_no = ''.join(
        res.xpath('//table[@class="gkmltable1"]//span[contains(text(),"索引号")]/parent::td[1]/text()').extract()).strip()
    subject = ''.join(
        res.xpath('//table[@class="gkmltable1"]//span[contains(text(),"主题分类")]/parent::td[1]/text()').extract()).strip()
    subject_word = ''.join(
        res.xpath('//table[@class="gkmltable1"]//span[contains(text(),"主题词")]/parent::td[1]/text()').extract()).strip()
    written_date = ''.join(
        res.xpath('//table[@class="gkmltable1"]//span[contains(text(),"成文日期")]/parent::td[1]/text()').extract()).strip()
    organ = ''.join(
        res.xpath('//table[@class="gkmltable1"]//span[contains(text(),"发布机构")]/parent::td[1]/text()').extract()).strip()

    fulltext_xpath = '//div[@class="zw_nr"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # No fulltext container means the page layout changed or the download
        # failed; fail loudly with enough context to locate the page.
        raise Exception(f"fulltext not found: {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99105'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'JYTSHANXI'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'jytshanxicngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['subject_word'] = subject_word
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Attachments may sit inside the fulltext container or in the block that
    # immediately follows it; collect both.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, f'(//div[@class="zw_nr"]/following::div[1])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  山西省民政厅 (Shanxi Provincial Department of Civil Affairs)
def policy_mztshanxilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Shanxi Provincial Department of Civil
    Affairs (mzt.shanxi.gov.cn).

    On the first list page (page_index == 0) it fans out the remaining list
    pages as "before" insert tasks; on every page it extracts article links
    and queues them as "next" article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page's JS ("countPage = N").
        max_count = re.findall(r'countPage = (\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: schedule insert tasks for every remaining list page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Relative hrefs resolve against the channel index page.
        base_url = f'http://mzt.shanxi.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
        li_list = res.xpath('//div[@class="news"]/a')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('@href').extract_first()
            if not href:
                continue
            url = parse.urljoin(base_url, href)
            if '.htm' not in url:
                continue
            rawid = re.findall(r'(.*?)\.htm', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99106'
            article_json["url"] = url
            article_json["title"] = li.xpath('span/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('font/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_mztshanxiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for mzt.shanxi.gov.cn; no extra processing is
    needed here (parsing happens in the ETL callback), so return an empty
    DealModel."""
    return DealModel()


def policy_mztshanxiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for mzt.shanxi.gov.cn policy articles.

    Parses the downloaded article page, builds one row for ``policy_latest``
    and one for ``policy_fulltext_latest``, and writes attachment info
    (from get_file_info) back onto the source row's ``other_dicts`` column.

    Raises:
        Exception: if the fulltext container cannot be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the title on the article page; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="newsxxtitle"]/span//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@class="newscontent"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # No fulltext container means the page layout changed or the download
        # failed; fail loudly with enough context to locate the page.
        raise Exception(f"fulltext not found: {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99106'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'MZTSHANXI'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'mztshanxicngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment links found inside the fulltext container.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  山西省财政厅 (Shanxi Provincial Department of Finance)
def policy_cztshanxilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Shanxi Provincial Department of Finance
    (czt.shanxi.gov.cn).

    On the first list page (page_index == 0) it fans out the remaining list
    pages as "before" insert tasks; on every page it extracts article links
    (two page layouts, selected by ``list_rawid``) and queues them as "next"
    article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page's JS ("countPage = N").
        max_count = re.findall(r'countPage = (\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: schedule insert tasks for every remaining list page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Relative hrefs resolve against the channel index page.
        base_url = f'https://czt.shanxi.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
        if 'zfxxgk' in callmodel.sql_model.list_rawid:
            # Government-information-disclosure layout: list with a time div.
            li_list = res.xpath('//ul[@class="list list_down"]/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                if not href:
                    continue
                url = parse.urljoin(base_url, href)
                if '.shtm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.shtm', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99107'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('div[@class="time_item"]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # Regular news-list layout: <ul>/<li> with a date span.
            li_list = res.xpath('//div[@id="oldList"]/ul/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                if not href:
                    continue
                url = parse.urljoin(base_url, href)
                if '.shtm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.shtm', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99107'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cztshanxiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for czt.shanxi.gov.cn; no extra processing is
    needed here (parsing happens in the ETL callback), so return an empty
    DealModel."""
    return DealModel()


def policy_cztshanxiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Shanxi Department of Finance (czt.shanxi.gov.cn)
    article pages.

    Extracts the title and fulltext block from the downloaded HTML and
    builds one row each for the ``policy_latest`` and
    ``policy_fulltext_latest`` tables; attachment info (via
    ``get_file_info``) is written back onto the task row.

    Raises Exception when no fulltext container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: prefer on-page markup; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="contentTitle"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1[@class="title"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@class="art"]|//div[@class="infoDetailBox"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fix: include context instead of a bare, message-less Exception.
        raise Exception(f"fulltext not found: {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99107'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'CZTSHANXI'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'cztshanxicngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Attachment info (if any) is stored as JSON in other_dicts on the
    # originating task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  山西省人力资源和社会保障厅
def policy_rstshanxilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Shanxi Human Resources and Social
    Security Department (rst.shanxi.gov.cn).

    On the first page (page_index == 0) the remaining list pages are
    scheduled; every list entry is queued as one article task with its
    url/title/pub_date serialized into article_json.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page's JS ("countPage = N").
        # Fix: raw string -- '\d' is an invalid escape in a plain literal.
        max_count = re.findall(r'countPage = (\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="second_right_ul"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            # Skip entries without a link before building the URL.
            if not href:
                continue
            base_url = f'http://rst.shanxi.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            # Only keep on-site article links.
            if 'rst' not in url:
                continue
            if '.htm' not in url:
                continue
            # rawid is the file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99108'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/@title').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()|a/span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_rstshanxiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for rst.shanxi.gov.cn.

    Nothing to do between download and ETL; returns an empty DealModel.
    """
    return DealModel()


def policy_rstshanxiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for the Shanxi Human Resources and Social Security
    Department (rst.shanxi.gov.cn) article pages.

    Extracts the title and fulltext block from the downloaded HTML and
    builds one row each for the ``policy_latest`` and
    ``policy_fulltext_latest`` tables; attachment info is written back
    onto the task row.

    Raises Exception when no fulltext container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: prefer on-page markup; fall back to the list-page title.
    title = ''.join(res.xpath('//p[@class="a_title"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[contains(@class,"view")]|//div[@class="info_text"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fix: include context instead of a bare, message-less Exception.
        raise Exception(f"fulltext not found: {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99108'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'RSTSHANXI'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'rstshanxicngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Attachment info (if any) is stored as JSON in other_dicts on the
    # originating task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  山西省农业农村厅
def policy_nynctshanxilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Shanxi Department of Agriculture and
    Rural Affairs (nynct.shanxi.gov.cn).

    On the first page (page_index == 0) the remaining list pages are
    scheduled; every list entry is queued as one article task. The site
    has two list layouts ('sxnytzwgk' channels vs. the rest) that differ
    only in the list/pub_date XPaths, so the loop body is shared.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page's JS ("countPage = N").
        # Fix: raw string -- '\d' is an invalid escape in a plain literal.
        max_count = re.findall(r'countPage = (\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Page N is addressed by inserting "_N" before the extension.
                dic = {"page_info": list_json['page_info'].replace('.', f"_{page}.")}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # The two layouts differ only in where the list items and the
        # publication date live.
        if 'sxnytzwgk' in callmodel.sql_model.list_rawid:
            li_list = res.xpath('//ul[@class="infodisc-con_cntitem--list"]/li')
            pub_date_xpath = 'em/text()'
        else:
            li_list = res.xpath('//div[@class="glcon"]/ul/li')
            pub_date_xpath = 'span/text()|a/span/text()'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            # Skip entries without a link before building the URL.
            if not href:
                continue
            base_url = f'http://nynct.shanxi.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            # Only keep on-site article links.
            if 'nynct' not in url:
                continue
            if 'htm' not in url:
                continue
            # rawid is the file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99109'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/@title').extract_first().strip()
            article_json["pub_date"] = li.xpath(pub_date_xpath).extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_nynctshanxiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for nynct.shanxi.gov.cn.

    Nothing to do between download and ETL; returns an empty DealModel.
    """
    return DealModel()


def policy_nynctshanxiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for the Shanxi Department of Agriculture and Rural
    Affairs (nynct.shanxi.gov.cn) article pages.

    Extracts title, fulltext and the open-government metadata table
    (document number, index number, subject, written date, issuing
    organ) and builds one row each for ``policy_latest`` and
    ``policy_fulltext_latest``; attachment info is written back onto
    the task row.

    Raises Exception when no fulltext container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: prefer on-page markup; fall back to the list-page title.
    title = ''.join(res.xpath('//h1[@id="title"]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//dl[@class="infodisc-con_cntitle"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata table cells are located by their (split-character) labels;
    # the value is always the following <td>.
    pub_no = ''.join(
        res.xpath('//td[contains(text(),"文") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(
        res.xpath('//td[contains(text(),"索") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(
        res.xpath('//td[contains(text(),"主") and contains(text(),"分")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(
        res.xpath('//td[contains(text(),"成") and contains(text(),"日")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(
        res.xpath('//td[contains(text(),"发") and contains(text(),"机")]/following::td[1]/text()').extract()).strip()

    fulltext_xpath = '//div[@class="article"]|//div[@class="infodisc-con_cntbox"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fix: include context instead of a bare, message-less Exception.
        raise Exception(f"fulltext not found: {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99109'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'NYNCTSHANXI'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'nynctshanxicngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Attachment info (if any) is stored as JSON in other_dicts on the
    # originating task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  山西省住房和城乡建设厅
def policy_zjtshanxilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Shanxi Department of Housing and
    Urban-Rural Development (zjt.shanxi.gov.cn).

    On the first page (page_index == 0) the remaining list pages are
    scheduled; every list entry is queued as one article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page's JS ("countPage = N").
        # Fix: raw string -- '\d' is an invalid escape in a plain literal.
        max_count = re.findall(r'countPage = (\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[contains(@class,"towpage_ulli")]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('p[1]/a/@href').extract_first()
            # Skip entries without a link before building the URL.
            if not href:
                continue
            base_url = f'http://zjt.shanxi.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid is the file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99110'
            article_json["url"] = url
            article_json["title"] = li.xpath('p[1]/a/@title').extract_first().strip()
            article_json["pub_date"] = li.xpath('p[2]/a/span/text()|a/span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_zjtshanxiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for zjt.shanxi.gov.cn.

    Nothing to do between download and ETL; returns an empty DealModel.
    """
    return DealModel()


def policy_zjtshanxiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for the Shanxi Department of Housing and Urban-Rural
    Development (zjt.shanxi.gov.cn) article pages.

    Extracts the title and fulltext block from the downloaded HTML and
    builds one row each for ``policy_latest`` and
    ``policy_fulltext_latest``; attachment info is written back onto
    the task row.

    Raises Exception when no fulltext container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: prefer on-page markup; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="Article_a"]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="detail clearfix"]/h1/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@class="Article_c"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fix: include context instead of a bare, message-less Exception.
        raise Exception(f"fulltext not found: {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99110'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'ZJTSHANXI'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'zjtshanxicngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    # NOTE(review): pub_date was already run through clean_pubdate above;
    # the second pass is kept for behavior parity -- presumably idempotent.
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Attachment info (if any) is stored as JSON in other_dicts on the
    # originating task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  山西省卫生健康委员会
def policy_wjwshanxilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Shanxi Health Commission
    (wjw.shanxi.gov.cn).

    On the first page (page_index == 0) the remaining list pages are
    scheduled; every list entry is queued as one article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page's JS ("countPage = N").
        # Fix: raw string -- '\d' is an invalid escape in a plain literal.
        max_count = re.findall(r'countPage = (\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="demo-right"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            # Skip entries without a link before building the URL.
            if not href:
                continue
            base_url = f'http://wjw.shanxi.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid is the file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99111'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_wjwshanxiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for wjw.shanxi.gov.cn.

    Nothing to do between download and ETL; returns an empty DealModel.
    """
    return DealModel()


def policy_wjwshanxiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for the Shanxi Health Commission
    (wjw.shanxi.gov.cn) article pages.

    Extracts the title and fulltext block from the downloaded HTML and
    builds one row each for ``policy_latest`` and
    ``policy_fulltext_latest``; attachment info is written back onto
    the task row.

    Raises Exception when no fulltext container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: prefer on-page markup; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="boxC"]/h3//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@class="ze-art"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fix: include context instead of a bare, message-less Exception.
        raise Exception(f"fulltext not found: {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99111'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'WJWSHANXI'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'wjwshanxicngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Attachment info (if any) is stored as JSON in other_dicts on the
    # originating task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  山西省太原市
def policy_taiyuanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Taiyuan (太原) policy list page.

    On the first page only, fan out insert tasks for the remaining list
    pages; for every list entry, queue one article task tagged with the
    next stage's task_tag.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the pager text, e.g. ">/12页".
        max_count = re.findall(r'>/(\d+)页', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 schedules the remaining pages, so each list page
            # is inserted exactly once.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="box_list"]/ul/li|//div[@class="boxSc04 pd20"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li.xpath('a/@href').extract_first()
            if not href:
                continue
            base_url = f'http://www.taiyuan.gov.cn/{callmodel.sql_model.list_rawid}.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article URL's file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99112'
            # extract_first() returns None for missing nodes; fall back to ''
            # so .strip() cannot raise AttributeError.
            article_json = {
                "url": url,
                "title": (li.xpath('a/text()').extract_first() or '').strip(),
                "pub_date": (li.xpath('span/text()').extract_first() or '').strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_taiyuanlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Taiyuan JSON list response (``pageCount`` + ``list`` items).

    On the first page only, fan out insert tasks for the remaining pages;
    then queue one article task per item in the JSON list.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        data = para_dicts["data"]["1_1"]
        total_page = data['pageCount']

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 schedules the remaining pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"index_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in data['list']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            url = f'http://www.taiyuan.gov.cn{li["url"]}'
            # rawid is the URL's file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99112'
            article_json = {"url": url,
                            "title": li['title'],
                            "pub_date": li['publicTime']}
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_taiyuanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch stage for Taiyuan: no parsing here; the ETL step does the work."""
    return DealModel()


def policy_taiyuanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for one Taiyuan (太原) policy article page.

    Extracts the title, metadata fields and full text from the fetched HTML,
    stages one row each for ``policy_latest`` and ``policy_fulltext_latest``,
    and writes attachment info back to the source row's ``other_dicts``.

    Raises:
        Exception: when no full-text container is found on the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="mainCont"]/h1//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="section_zc"]/h2//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata cells are located via the label text of the preceding <td>.
    pub_no = ''.join(
        res.xpath('//td[contains(text(),"文") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(
        res.xpath('//td[contains(text(),"索") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(
        res.xpath('//td[contains(text(),"主") and contains(text(),"分")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(
        res.xpath('//td[contains(text(),"成") and contains(text(),"日")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(
        res.xpath('//td[contains(text(),"公开主体")]/following::td[1]/text()').extract()).strip()

    # The site abbreviates the issuing organ as "市..."; prefix the city name.
    if organ.startswith('市'):
        organ = '太原' + organ

    fulltext_xpath = '//div[@id="Zoom"]|//div[@class="arc_box"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the task is flagged rather than saving an empty record.
        raise Exception(f'fulltext container not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99112'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)

    data = dict()
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'TAIYUAN'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'taiyuancngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    # subject_word / legal_status are not published on this site.
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Persist attachment info (if any) back onto the originating task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  山西省大同市
def policy_dtlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Datong (大同) policy list page.

    For the 'dtszf/ghjh' column only, page 1 fans out insert tasks for the
    remaining pages; every list entry is queued as an article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1 and 'dtszf/ghjh' == callmodel.sql_model.list_rawid:
            # Page count appears in the pager script call: 'page_div',N
            max_count = re.findall(r"'page_div',(\d+)", para_dicts["data"]["1_1"]['html'])
            total_page = int(max_count[0]) if max_count else 1
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Two list layouts share one callback; the alternation covers both.
        li_list = res.xpath('//div[@class="list"]/ul/li|//ul[contains(@class,"list-cj-gl")]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li.xpath('div[@class="hmn2"]/a/@href|a/@href|@data-url').extract_first()
            if not href:
                continue
            base_url = f'http://www.dt.gov.cn/{callmodel.sql_model.list_rawid}/list_list_2.shtml'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article URL's file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99113'
            article_json = {
                "url": url,
                "title": li.xpath('div[@class="hmn2"]/a/text()|a/text()|@data-title').extract_first(),
                "pub_date": li.xpath('div[@class="hmn5"]/text()|span/text()|@data-time').extract_first(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_dtarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch stage for Datong: no parsing here; the ETL step does the work."""
    return DealModel()


def policy_dtarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for one Datong (大同) policy article page.

    Extracts the title, metadata fields and full text from the fetched HTML,
    stages one row each for ``policy_latest`` and ``policy_fulltext_latest``,
    and writes attachment info back to the source row's ``other_dicts``.

    Raises:
        Exception: when no full-text container is found on the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the on-page title; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//h1/ucaptitle/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Two page templates: "xl_con1" pages label metadata via <span> + <text>,
    # older pages use <strong> labels inside table cells.
    if 'class="xl_con1"' in html:
        pub_no = ''.join(res.xpath('//span[contains(text(),"文号：")]/following::text[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath('//span[contains(text(),"索 引 号：")]/following::text[1]/text()').extract()).strip()
        subject = ''.join(res.xpath('//span[contains(text(),"信息分类：")]/following::text[1]/text()').extract()).strip()
        organ = ''.join(res.xpath('//span[contains(text(),"发布机构：")]/following::text[1]/text()').extract()).strip()
    else:
        pub_no = ''.join(res.xpath('//strong[contains(text(),"文号：")]/parent::td[1]/following::td[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath('//strong[contains(text(),"索引号：")]/parent::td[1]/following::td[1]/text()').extract()).strip()
        subject = ''.join(res.xpath('//strong[contains(text(),"信息分类：")]/parent::td[1]/following::td[1]/text()').extract()).strip()
        organ = ''.join(res.xpath('//strong[contains(text(),"发布机构：")]/parent::td[1]/following::td[1]/text()').extract()).strip()

    if not organ:
        # Fall back to the "source" line when no explicit organ is given.
        organ = ''.join(res.xpath('//span[contains(text(),"来源:")]/ucapsource/text()').extract()).strip()

    fulltext_xpath = '//div[@id="content"]|//div[@id="detailCont2"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the task is flagged rather than saving an empty record.
        raise Exception(f'fulltext container not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99113'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)

    data = dict()
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'DT'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'dtcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    # written_date / subject_word / legal_status are not published on this site.
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Persist attachment info (if any) back onto the originating task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  山西省朔州市
def policy_shuozhoulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Shuozhou (朔州) policy list page.

    On the first page (index 0) fan out insert tasks for the remaining
    pages; every list entry is queued as an article task. The host part
    of the URL depends on the column being crawled.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count comes from either pager script variant.
        max_count = re.findall(r"countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall(r"createPageHTML\((\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only page 0 schedules the remaining pages (pages are 0-based,
            # hence range stops at total_page - 1).
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                if 'szfxxgk/fdzdgknr' in callmodel.sql_model.list_rawid:
                    dic = {"page_info": f"index_{page}", "url_part": "szxxgk"}
                else:
                    dic = {"page_info": f"index_{page}", "url_part": "www"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        if 'szfxxgk/fdzdgknr' in callmodel.sql_model.list_rawid:
            li_list = res.xpath('//div[@class="zfxxgk_zdgkc"]/ul/li|//dl[@class="sxinfo-pubfiles-item"]|//dl[@class="sxszf-mlists-items"]/ul/li')
        else:
            li_list = res.xpath('//div[@class="glcl"]/ul/li')
        # list_json is identical for every entry: load it once, outside the loop.
        list_json = json.loads(callmodel.sql_model.list_json)
        base_url = f'http://{list_json["url_part"]}.shuozhou.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li.xpath('dt/a/@href|a/@href').extract_first()
            if not href:
                # Without a guard, a missing href would resolve to base_url
                # and produce the bogus rawid "index".
                continue
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article URL's file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99114'
            article_json = dict()
            article_json["url"] = url
            article_json["title"] = li.xpath('dt/a/text()|a/text()').extract_first()
            if 'szfxxgk/fdzdgknr' in callmodel.sql_model.list_rawid:
                article_json["pub_date"] = ''.join(li.xpath('span/text()|dd/i[2]/text()').extract())
            else:
                article_json["pub_date"] = li.xpath('text()').extract_first()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_shuozhouarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch stage for Shuozhou: no parsing here; the ETL step does the work."""
    return DealModel()


def policy_shuozhouarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for one Shuozhou (朔州) policy article page.

    Extracts the title, publication date (with two on-page fallbacks),
    metadata fields and full text, stages one row each for ``policy_latest``
    and ``policy_fulltext_latest``, and writes attachment info back to the
    source row's ``other_dicts``.

    Raises:
        Exception: when no usable publication date or full-text container
            is found on the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the on-page title; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="bt"]/text()|//div[@class="partrt"]/h2/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # The list page sometimes carries no/placeholder date; recover it from
    # the article page itself.
    if not pub_date or '0000' in pub_date:
        pub_date_info = ''.join(res.xpath('//div[@class="fbxx"]/span[1]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    if not pub_date:
        pub_date_year = ''.join(res.xpath('//div[@class="partl1"]/div[@class="year"]/text()').extract()).strip()
        pub_date_info = ''.join(res.xpath('//div[@class="partl1"]/div[@class="time"]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_year + pub_date_info)
        pub_year = pub_date[:4]
    if not pub_date:
        raise Exception(f'pub_date not found: {provider_url}')
    # Metadata cells are located via the label text of the preceding <td>.
    pub_no = ''.join(res.xpath('//table[@class="affairs-detail-head mhide"]//td[contains(text(),"发") and contains(text(),"号：")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="affairs-detail-head mhide"]//td[contains(text(),"索") and contains(text(),"号：")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="affairs-detail-head mhide"]//td[contains(text(),"主") and contains(text(),"类：")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="affairs-detail-head mhide"]//td[contains(text(),"成") and contains(text(),"期：")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="affairs-detail-head mhide"]//td[contains(text(),"发") and contains(text(),"构：")]/following::td[1]/text()').extract()).strip()

    fulltext_xpath = '//div[@class="container"]|//div[@class="scroll_cont ScrollStyle"]|//div[@class="artcle-content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the task is flagged rather than saving an empty record.
        raise Exception(f'fulltext container not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99114'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)

    data = dict()
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'SHUOZHOU'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'shuozhoucngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    # subject_word / legal_status are not published on this site.
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Persist attachment info (if any) back onto the originating task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  山西省忻州市
def policy_sxxzlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Xinzhou (忻州) policy list page.

    On the first page (index 0) fan out insert tasks for the remaining
    pages; every list entry is queued as an article task. The host and the
    entry layout depend on the column being crawled.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r"countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only page 0 schedules the remaining pages (pages are 0-based,
            # hence range stops at total_page - 1).
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                if 'zwyw/tzgg' in callmodel.sql_model.list_rawid:
                    dic = {"page_info": f"index_{page}", "url_part": "www"}
                else:
                    dic = {"page_info": f"index_{page}", "url_part": "zwgk"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # The two columns differ only in host, list container and date node;
        # select those once and share the extraction loop.
        if 'zwyw/tzgg' in callmodel.sql_model.list_rawid:
            li_list = res.xpath('//ul[@class="chz-common-text-list-items"]/li')
            base_url = f'https://www.sxxz.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            date_xpath = 'span/text()'
        else:
            li_list = res.xpath('//ul[@class="govinfo-list-title"]/li')
            base_url = f'https://zwgk.sxxz.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            date_xpath = './/span[@class="list-title-time"]/text()'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li.xpath('a/@href').extract_first()
            if not href:
                # Without a guard, a missing href would resolve to base_url
                # and produce the bogus rawid "index".
                continue
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article URL's file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99115'
            # extract_first() returns None for missing nodes; fall back to ''
            # so .strip() cannot raise AttributeError.
            article_json = {
                "url": url,
                "title": (li.xpath('a/text()').extract_first() or '').strip(),
                "pub_date": (li.xpath(date_xpath).extract_first() or '').strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_sxxzarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch stage for Xinzhou: no parsing here; the ETL step does the work."""
    return DealModel()


def policy_sxxzarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for one Xinzhou (忻州) policy article page.

    Extracts the title, metadata fields and full text from the fetched HTML,
    stages one row each for ``policy_latest`` and ``policy_fulltext_latest``,
    and writes attachment info back to the source row's ``other_dicts``.

    Raises:
        Exception: when no full-text container is found on the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the on-page title; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//h1[@class="article-title"]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h3[@class="article-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata values follow their <span> label inside the same <li>.
    pub_no = ''.join(res.xpath('//span[contains(text(),"发") and contains(text(),"号：")]/parent::li[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//span[contains(text(),"索") and contains(text(),"号：")]/parent::li[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//span[contains(text(),"主") and contains(text(),"类：")]/parent::li[1]/text()').extract()).strip()
    subject_word = ''.join(res.xpath('//span[contains(text(),"主") and contains(text(),"词：")]/parent::li[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//span[contains(text(),"成") and contains(text(),"期：")]/parent::li[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//span[contains(text(),"发") and contains(text(),"关：")]/parent::li[1]/text()').extract()).strip()

    fulltext_xpath = '//div[@class="article-body"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the task is flagged rather than saving an empty record.
        raise Exception(f'fulltext container not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99115'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)

    data = dict()
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'SXXZ'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'sxxzcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['subject_word'] = subject_word
    # legal_status is not published on this site.
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Persist attachment info (if any) back onto the originating task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  山西省阳泉市
def policy_yqlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Yangquan (山西省阳泉市).

    On the first page (page_index == 0) reads the total page count from the
    HTML and schedules insert tasks for the remaining pagination pages; for
    the current page it extracts each article link and queues a next-stage
    task carrying url/title/pub_date in ``article_json``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string: "\d" in a plain literal is an invalid escape sequence.
        max_count = re.findall(r"countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # Pages appear to be 0-based (the current page is index_0), so the
            # remaining pages run from 1 to total_page - 1.
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                if 'zwgk' in callmodel.sql_model.list_rawid:
                    dic = {"page_info": f"index_{page}", "url_part": "www"}
                else:
                    dic = {"page_info": f"{list_json['page_info']}_{page}", "url_part": "xxgk"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # The www.yq.gov.cn and xxgk.yq.gov.cn sections use different list markup.
        if 'zwgk' in callmodel.sql_model.list_rawid:
            li_list = res.xpath('//ul[@class="main-listcon-items_box"]/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                base_url = f'http://www.yq.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue

                # rawid = detail file name without its extension.
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99116'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('span/text()|a/span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            li_list = res.xpath('//ul[@class="infodisc-con_cntitem--list"]/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                base_url = f'http://xxgk.yq.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue

                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99116'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('em/text()|span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_yqarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage hook for Yangquan; all parsing happens in the ETL stage,
    so this returns an empty DealModel."""
    return DealModel()


def policy_yqarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Yangquan (阳泉) article pages.

    Extracts title, metadata fields and the full-text HTML fragment from the
    detail page, stages one row each for the ``policy_latest`` and
    ``policy_fulltext_latest`` tables, and records attachment info (if any)
    into ``other_dicts`` of the source row.

    Raises:
        Exception: if the full text cannot be located by the known xpaths.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title: try the known page templates in turn, then fall back to the
    # title captured on the list page.
    title = ''.join(res.xpath('//div[@class="main-wrap"]//h2/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//dl[@class="infodisc-con_cntitle"]/dt/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[contains(@class,"article-title")]/h2/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Two metadata layouts: a table-based one ("affairs-detail-head") and a
    # <b>-label one; the split contains() matches tolerate full-width padding.
    if 'affairs-detail-head' in html:
        pub_no = ''.join(res.xpath('//td[contains(text(),"文") and contains(text(),"号：")]/following::td[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath('//td[contains(text(),"索") and contains(text(),"号：")]/following::td[1]/text()').extract()).strip()
        subject = ''.join(res.xpath('//td[contains(text(),"主题分类：")]/following::td[1]/text()').extract()).strip()
        subject_word = ''.join(res.xpath('//td[contains(text(),"主") and contains(text(),"词：")]/following::td[1]/text()').extract()).strip()
        written_date = ''.join(res.xpath('//td[contains(text(),"成文日期：")]/following::td[1]/text()').extract()).strip()
        organ = ''.join(res.xpath('//td[contains(text(),"发文机关：")]/following::td[1]/text()').extract()).strip()
    else:
        pub_no = ''.join(res.xpath('//b[contains(text(),"文") and contains(text(),"号：")]/parent::h4[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath('//b[contains(text(),"索") and contains(text(),"号：")]/parent::h4[1]/text()').extract()).strip()
        subject = ''.join(res.xpath('//b[contains(text(),"主") and contains(text(),"类：")]/parent::h4[1]/text()').extract()).strip()
        subject_word = ''.join(res.xpath('//b[contains(text(),"主") and contains(text(),"词：")]/parent::h4[1]/text()').extract()).strip()
        written_date = ''.join(res.xpath('//b[contains(text(),"成") and contains(text(),"期：")]/parent::h4[1]/text()').extract()).strip()
        organ = ''.join(res.xpath('//b[contains(text(),"发") and contains(text(),"构：")]/parent::h4[1]/text()').extract()).strip()

    # Qualify bare "市..." organ names with the city name.
    if organ.startswith('市'):
        organ = '阳泉' + organ

    fulltext_xpath = '//div[@class="content-body"]|//div[contains(@class,"article-body")]|//div[@class="infodisc-con_cntbox"]|//div[contains(@class,"affairs-detail-inner-cnt")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found for {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99116'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'YQ'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'yqcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['subject_word'] = subject_word
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Attachment info (files linked inside the full text) goes back onto the
    # source row as JSON in other_dicts.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  山西省吕梁市
def policy_lvlianglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Lvliang (山西省吕梁市).

    Schedules the remaining pagination pages on the first page and queues one
    next-stage task per article link on the current page.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string: "\d" in a plain literal is an invalid escape sequence.
        max_count = re.findall(r"countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # Pages appear to be 0-based (the current page is index_0), so the
            # remaining pages run from 1 to total_page - 1.
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"index_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # The notice channel (szdt/tzgg) uses a <ul> list; other channels use a table.
        if 'szdt/tzgg' in callmodel.sql_model.list_rawid:
            li_list = res.xpath('//div[@class="right_x"]/ul/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                base_url = f'http://www.lvliang.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue

                # rawid = detail file name without its extension.
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99117'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            li_list = res.xpath('//table[contains(@class,"right_cont_table")]/tbody/tr')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[1]/a/@href').extract_first()
                base_url = f'http://www.lvliang.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue

                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99117'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[1]/a/text()|td[1]/a/font/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('td[3]/text()|td[2]/span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_lvliangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage hook for Lvliang; all parsing happens in the ETL stage,
    so this returns an empty DealModel."""
    return DealModel()


def policy_lvliangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Lvliang (吕梁) article pages.

    Extracts title, metadata fields and the full-text HTML fragment, stages
    rows for ``policy_latest`` / ``policy_fulltext_latest``, and writes
    attachment info back to ``other_dicts`` of the source row.

    Raises:
        Exception: if the full text cannot be located by the known xpaths.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title: metadata table first, then <h2>, then the list-page title.
    title = ''.join(res.xpath('//span[contains(text(),"标") and contains(text(),"题")]/ancestor::td[1]/span[3]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h2//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # '0000' marks an unusable list-page date; re-read it from the detail page.
    if '0000' in pub_date:
        pub_date_info = res.xpath('//*[contains(text(),"发布日期")]/ancestor::td[1]/span[3]//text()').extract_first()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    pub_no = ''.join(res.xpath('//span[contains(text(),"发文字号")]/ancestor::td[1]/span[3]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//span[contains(text(),"索") and contains(text(),"号")]/ancestor::td[1]/span[3]//text()').extract()).strip()
    subject = ''.join(res.xpath('//span[contains(text(),"主题分类")]/ancestor::td[1]/span[3]//text()').extract()).strip()
    written_date = ''.join(res.xpath('//span[contains(text(),"成文日期")]/ancestor::td[1]/span[3]//text()').extract()).strip()
    organ = ''.join(res.xpath('//span[contains(text(),"发文机关")]/ancestor::td[1]/span[3]//text()').extract()).strip()
    # Qualify bare "市..." organ names with the city name.
    if organ.startswith('市'):
        organ = '吕梁' + organ

    fulltext_xpath = '//div[@id="contentText"]|//div[@class="n"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found for {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99117'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'LVLIANG'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'lvliangcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Attachment info goes back onto the source row as JSON in other_dicts.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  山西省晋中市
def policy_sxjzlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Jinzhong (山西省晋中市).

    Schedules the remaining pagination pages on the first page (this site's
    pages are 1-based) and queues one next-stage task per article link.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string: "\d" in a plain literal is an invalid escape sequence.
        max_count = re.findall(r"共(\d+)页", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # 1-based paging: remaining pages are 2..total_page inclusive.
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Three list templates depending on the channel.
        if 'zcjd' in callmodel.sql_model.list_rawid:
            li_list = res.xpath('//ul[@class="newsList"]/li')
        elif 'xwzx/tzgg' in callmodel.sql_model.list_rawid:
            li_list = res.xpath('//ul[@class="pageTPList"]/li')
        else:
            li_list = res.xpath('//ul[@class="infoList"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href|div[@class="title"]/a/@href').extract_first()
            base_url = 'https://www.sxjz.gov.cn'
            # hrefs here are absolute paths, so plain concatenation is used.
            url = base_url + href
            rawid = url.split('/')[-1]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99118'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()|div[@class="title"]/a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()|div[@class="con"]//span[@class="date"]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_sxjzarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage hook for Jinzhong; all parsing happens in the ETL stage,
    so this returns an empty DealModel."""
    return DealModel()


def policy_sxjzarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Jinzhong (晋中) article pages.

    Extracts title, metadata fields and the full-text HTML fragment, stages
    rows for ``policy_latest`` / ``policy_fulltext_latest``, and writes
    attachment info back to ``other_dicts`` of the source row.

    Raises:
        Exception: if the full text cannot be located by the known xpath.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title: metadata table first, then the ArticleTitle meta tag, then the
    # list-page title.
    title = ''.join(res.xpath('//em[contains(text(),"标") and contains(text(),"题")]/ancestor::td[1]/span[3]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//td[contains(text(),"文 号：")]/following::td[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//td[contains(text(),"索引号：")]/following::td[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//td[contains(text(),"发布单位：")]/following::td[1]//text()').extract()).strip()

    # Qualify bare "市..." organ names with the city name.
    if organ.startswith('市'):
        organ = '晋中' + organ

    fulltext_xpath = '//div[@class="conTxt"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found for {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99118'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'SXJZ'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'sxjzcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Attachment info goes back onto the source row as JSON in other_dicts.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  山西省长治市
def policy_changzhilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Changzhi (山西省长治市).

    Schedules the remaining pagination pages on the first page and queues one
    next-stage task per article link on the current page.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string: "\d" in a plain literal is an invalid escape sequence.
        max_count = re.findall(r"countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # Pages appear to be 0-based (the current page is index_0), so the
            # remaining pages run from 1 to total_page - 1.
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Three list templates depending on the channel.
        if 'xwzx/tzgg' in callmodel.sql_model.list_rawid:
            li_list = res.xpath('//ul[@id="searchcontent"]/li')
        elif 'zbwj_235747' in callmodel.sql_model.list_rawid or 'ndjh_3413' in callmodel.sql_model.list_rawid:
            li_list = res.xpath('//div[@id="ConList"]//ul/li')
        else:
            li_list = res.xpath('//ul[@class="govinfo-list-title"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'https://www.changzhi.gov.cn/{callmodel.sql_model.list_rawid}/index.shtml'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid = detail file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99119'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            # The tzgg / zbwj / ndjh channels share one date layout; the
            # govinfo channel carries the date in a dedicated span.
            if ('xwzx/tzgg' in callmodel.sql_model.list_rawid
                    or 'zbwj_235747' in callmodel.sql_model.list_rawid
                    or 'ndjh_3413' in callmodel.sql_model.list_rawid):
                pub_date = li.xpath('span/text()|a/span/text()').extract_first().strip()
            else:
                pub_date = li.xpath('span[@class="list-title-time"]/text()').extract_first().strip()
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_changzhiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage hook for Changzhi; all parsing happens in the ETL stage,
    so this returns an empty DealModel."""
    return DealModel()


def policy_changzhiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Changzhi (长治) article pages.

    Extracts title, metadata fields and the full-text HTML fragment, stages
    rows for ``policy_latest`` / ``policy_fulltext_latest``, and writes
    attachment info back to ``other_dicts`` of the source row.

    Raises:
        Exception: if the full text cannot be located by the known xpath.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    res = Selector(text=html)
    title = ''.join(res.xpath('//h1[@class="article-title"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # pub_date: page header first, then the metadata list, then the date
    # captured on the list page. clean_pubdate presumably returns a string
    # ('' or '...0000...' on failure) -- TODO confirm against its definition.
    pub_date_info = res.xpath('//p[@class="pubtime"]/text()|//span[contains(text(),"日期：")]/text()').extract_first()
    pub_date = clean_pubdate(pub_date_info)
    pub_year = pub_date[:4]
    if not pub_date or '0000' in pub_date:
        pub_date_info = res.xpath('//span[contains(text(),"发布日期：")]/parent::li[1]/text()').extract_first()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    if not pub_date:
        pub_date = clean_pubdate(article_json['pub_date'])
        pub_year = pub_date[:4]
    pub_no = ''.join(res.xpath('//span[contains(text(),"文") and contains(text(),"号：")]/parent::li[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//span[contains(text(),"索") and contains(text(),"号：")]/parent::li[1]/text()').extract()).strip()
    subject_word = ''.join(res.xpath('//span[contains(text(),"主") and contains(text(),"词：")]/parent::li[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//span[contains(text(),"成") and contains(text(),"间：")]/parent::li[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//span[contains(text(),"发") and contains(text(),"构：")]/parent::li[1]/text()').extract()).strip()

    # Qualify bare "市..." organ names with the city name.
    if organ.startswith('市'):
        organ = '长治' + organ

    fulltext_xpath = '//div[contains(@class,"article-body")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found for {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99119'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'CHANGZHI'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'changzhicngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject_word'] = subject_word
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Attachment info goes back onto the source row as JSON in other_dicts.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  山西省晋城市
def policy_jcgovlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Jincheng (Shanxi) gov site.

    On the first list page (page_index == 0) it schedules the remaining list
    pages via ``result.befor_dicts``; for every detail link found on the current
    page it emits one article task via ``result.next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page's JS; two known variants.
        max_count = re.findall(r"countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall(r"createPageHTM\((\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only the first page fans out the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # Pages are 0-based on this site: index.shtml, index_1 ... index_{n-1}.
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                if 'dtxx/gsgg' in callmodel.sql_model.list_rawid:
                    dic = {"page_info": f"index_{page}", "url_part": "www"}
                else:
                    dic = {"page_info": f"{list_json['page_info']}_{page}", "url_part": "xxgk"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # The 'dtxx/gsgg' channel is served from www.*, everything else from xxgk.*
        if 'dtxx/gsgg' in callmodel.sql_model.list_rawid:
            li_list = res.xpath('//div[@class="speech_box_content"]//ul/li')
            base_url = f'https://www.jcgov.gov.cn/{callmodel.sql_model.list_rawid}/index.shtml'
        else:
            li_list = res.xpath('//ul[@class="list-items-box-inner"]/li')
            base_url = f'https://xxgk.jcgov.gov.cn/{callmodel.sql_model.list_rawid}/index.shtml'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid is the detail page's file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99120'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()|a/font/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()|b/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jcgovarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Jincheng: nothing to do here — parsing is deferred to the ETL step."""
    return DealModel()


def policy_jcgovarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Jincheng (Shanxi) article pages.

    Parses title/metadata and the full-text container from the downloaded HTML,
    stages one row for ``policy_latest`` and one for ``policy_fulltext_latest``,
    and records attachment info (via ``get_file_info``) back onto the task row.

    Raises:
        Exception: when no full-text container matches the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the on-page title; fall back to the alternate layout, then the list-page title.
    title = ''.join(res.xpath('//div[@class="zwxxgk_ndbgwz"]//h1/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="doc_title_panel"]/p[@class="generalContent_box_p"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata table: label cell followed by value cell.
    pub_no = ''.join(res.xpath('//td[contains(text(),"文") and contains(text(),"号：")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//td[contains(text(),"索") and contains(text(),"号：")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//td[contains(text(),"主") and contains(text(),"类：")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//td[contains(text(),"成") and contains(text(),"期：")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//td[contains(text(),"发") and contains(text(),"构：")]/following::td[1]/text()').extract()).strip()

    # City-level organs are listed without the city name; prefix it.
    if organ.startswith('市'):
        organ = '晋城' + organ

    fulltext_xpath = '//div[@class="generalContent"]|//div[@id="Zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"no fulltext matched {fulltext_xpath} for {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99120'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'JCGOV'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'jcgovcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Persist attachment info (or an empty JSON object) back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  山西省临汾市 (Linfen, Shanxi Province)
def policy_linfenlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Linfen (Shanxi) gov site.

    On the first list page (page_index == 0) it schedules the remaining list
    pages via ``result.befor_dicts``; for every detail link found on the current
    page it emits one article task via ``result.next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page's JS.
        max_count = re.findall(r"countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only the first page fans out the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # Pages are 0-based on this site: index.html, index_1 ... index_{n-1}.
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Two list layouts share one loop; the alternative xpaths below cover both.
        li_list = res.xpath('//div[@class="con"]/ul|//div[@id="ConList_svxxgk"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('li[1]/a/@href|a/@href').extract_first()
            base_url = f'http://www.linfen.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid is the detail page's file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99121'
            article_json["url"] = url
            article_json["title"] = li.xpath('li[1]/a/text()|a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('li[@class="time"]/text()|a/span/text()|span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_linfenarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Linfen: nothing to do here — parsing is deferred to the ETL step."""
    return DealModel()


def policy_linfenarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Linfen (Shanxi) article pages.

    Parses title/metadata and the full-text container from the downloaded HTML,
    stages one row for ``policy_latest`` and one for ``policy_fulltext_latest``,
    and records attachment info (via ``get_file_info``) back onto the task row.

    Raises:
        Exception: when no full-text container matches the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the on-page title; fall back to the info table, then the list-page title.
    title = ''.join(res.xpath('//div[@class="cont w"]//h1/text()|//dl[@class="sxzzb-detail-title"]/dt[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//span[contains(text(),"标") and contains(text(),"题：")]/parent::td[1]/following::td[1]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata table: label span inside a cell, value in the following cell.
    pub_no = ''.join(res.xpath('//span[contains(text(),"文") and contains(text(),"号：")]/parent::td[1]/following::td[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//span[contains(text(),"索") and contains(text(),"号：")]/parent::td[1]/following::td[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//span[contains(text(),"主") and contains(text(),"类：")]/parent::td[1]/following::td[1]//text()').extract()).strip()
    subject_word = ''.join(res.xpath('//span[contains(text(),"主") and contains(text(),"词：")]/parent::td[1]/following::td[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath('//span[contains(text(),"成") and contains(text(),"期：")]/parent::td[1]/following::td[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//span[contains(text(),"发") and contains(text(),"构：")]/parent::td[1]/following::td[1]//text()').extract()).strip()

    fulltext_xpath = '//div[@class="pages_content"]|//div[@class="content"]|//div[@class="sxzzb-detail-box"]|//div[contains(@class,"TRS_UEDITOR")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"no fulltext matched {fulltext_xpath} for {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99121'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'LINFEN'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'linfencngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    # NOTE(review): pub_date was already cleaned above; this second clean_pubdate
    # call is kept to preserve behavior — presumably idempotent, confirm upstream.
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['subject_word'] = subject_word
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Persist attachment info (or an empty JSON object) back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  山西省运城市 (Yuncheng, Shanxi Province)
def policy_yunchenglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Yuncheng (Shanxi) gov site.

    Handles three list layouts: a JSON API (list_rawid contains 'pageSize'),
    the table-based 'zc/cslm/sjgfxwj' channel, and the generic tab list.
    On the first list page (page_index == 1) it schedules the remaining list
    pages via ``result.befor_dicts``; every detail link found becomes one
    article task in ``result.next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        if 'pageSize' in callmodel.sql_model.list_rawid:
            # JSON API response: page count is given directly.
            page_json = json.loads(para_dicts["data"]["1_1"]['html'])
            total_page = page_json['pageCount']
        else:
            # HTML pager of the form ">current/total".
            max_count = re.findall(r'>\d+/(\d+)', para_dicts["data"]["1_1"]['html'])
            max_count = int(max_count[0]) if max_count else 1
            total_page = max_count

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out the remaining (1-based) list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                if 'pageSize' in callmodel.sql_model.list_rawid:
                    dic = {"page_info": f"pageIndex={page}"}
                else:
                    dic = {"page_info": list_json['page_info'].replace('.', f'_{page}.')}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        if 'pageSize' in callmodel.sql_model.list_rawid:
            page_json = json.loads(para_dicts["data"]["1_1"]['html'])
            li_list = page_json['list']
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li['url']
                base_url = f'https://www.yuncheng.gov.cn'
                # Guard before concatenation: a missing url field must not crash.
                if not href:
                    continue
                url = base_url + href
                if 'htm' not in url:
                    continue
                # rawid is the detail page's file name without its extension.
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99122'
                article_json["url"] = url
                article_json["title"] = li['title']
                article_json["pub_date"] = li['writeTime']
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            res = Selector(text=para_dicts["data"]["1_1"]['html'])
            if 'zc/cslm/sjgfxwj' in callmodel.sql_model.list_rawid:
                # Table layout; the last row is the pager, so drop it.
                li_list = res.xpath('//div[@class="list_zhengce"]//tbody/tr')[:-1]
                for li in li_list:
                    temp = info_dicts.copy()
                    temp["task_tag"] = temp["task_tag_next"]
                    del temp["task_tag_next"]
                    article_json = dict()
                    href = li.xpath('td[2]/h4/a/@href').extract_first()
                    base_url = f'https://www.yuncheng.gov.cn'
                    # Guard before concatenation: a row without a link must not crash.
                    if not href:
                        continue
                    url = base_url + href
                    if 'htm' not in url:
                        continue
                    rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                    temp["rawid"] = rawid
                    temp["sub_db_id"] = '99122'
                    article_json["url"] = url
                    article_json["title"] = li.xpath('td[2]/h4/a/text()').extract_first().strip()
                    article_json["pub_date"] = ''
                    temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                    di_model_next.lists.append(temp)
            else:
                li_list = res.xpath('//div[@class="tab_list"]/ul/li')
                for li in li_list:
                    temp = info_dicts.copy()
                    temp["task_tag"] = temp["task_tag_next"]
                    del temp["task_tag_next"]
                    article_json = dict()
                    href = li.xpath('a/@href').extract_first()
                    base_url = f'https://www.yuncheng.gov.cn'
                    # Guard before concatenation: an item without a link must not crash.
                    if not href:
                        continue
                    url = base_url + href
                    if 'htm' not in url:
                        continue
                    rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                    temp["rawid"] = rawid
                    temp["sub_db_id"] = '99122'
                    article_json["url"] = url
                    article_json["title"] = li.xpath('a/text()').extract_first().strip()
                    article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
                    temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                    di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_yunchenglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for a Yuncheng JSON list endpoint whose response is
    already parsed into a dict (``pageCount`` + ``list``).

    On the first list page (page_index == 1) it schedules the remaining list
    pages; every detail link found becomes one article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        data = para_dicts["data"]["1_1"]
        total_page = data['pageCount']

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out the remaining (1-based) list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)

            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"index_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = data['list']
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li['url']
            base_url = f'https://www.yuncheng.gov.cn'
            # Guard before concatenation: a missing url field must not crash.
            if not href:
                continue
            url = base_url + href
            if 'htm' not in url:
                continue
            # rawid is the detail page's file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99122'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li['writeTime']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_yunchengarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Yuncheng: nothing to do here — parsing is deferred to the ETL step."""
    return DealModel()


def policy_yunchengarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Yuncheng (Shanxi) article pages.

    Parses title/metadata and the full-text container from the downloaded HTML,
    stages one row for ``policy_latest`` and one for ``policy_fulltext_latest``,
    and records attachment info (via ``get_file_info``) back onto the task row.

    Raises:
        Exception: when no full-text container matches the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Some list layouts carry no date; recover it from the detail page.
    if not pub_date:
        pub_date = ''.join(res.xpath('//span[@class="infoe_time"]/text()').extract()).strip()
        pub_year = pub_date[:4]
    # Prefer the on-page title; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="info_title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata table: label cell followed by value cell.
    pub_no = ''.join(
        res.xpath('//td[contains(text(),"文") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(
        res.xpath('//td[contains(text(),"索") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(
        res.xpath('//td[contains(text(),"主") and contains(text(),"分")]/following::td[1]/text()').extract()).strip()
    subject_word = ''.join(
        res.xpath('//td[contains(text(),"主") and contains(text(),"词")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(
        res.xpath('//td[contains(text(),"成") and contains(text(),"期")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(
        res.xpath('//td[contains(text(),"发") and contains(text(),"构")]/following::td[1]/text()').extract()).strip()

    # City-level organs are listed without the city name; prefix it.
    if organ.startswith('市'):
        organ = '运城' + organ

    fulltext_xpath = '//div[@id="info_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"no fulltext matched {fulltext_xpath} for {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99122'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'YUNCHENG'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'yunchengcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['subject_word'] = subject_word
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Persist attachment info (or an empty JSON object) back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  内蒙古自治区发展和改革委员会 (Inner Mongolia Autonomous Region Development and Reform Commission)
def policy_fgwnmglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Inner Mongolia DRC (fgw.nmg.gov.cn) site.

    Handles two list layouts: the 'xxgk/zxzx' channel (ul list whose titles are
    embedded in an inline script) and the table layout. On the first list page
    (page_index == 0) it schedules the remaining list pages; every detail link
    found becomes one article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page's JS.
        max_count = re.findall(r"countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only the first page fans out the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # Pages are 0-based on this site: index.html, index_1 ... index_{n-1}.
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        if 'xxgk/zxzx' in callmodel.sql_model.list_rawid:
            li_list = res.xpath('//ul[@class="yzgl_list_box"]/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                base_url = f'http://fgw.nmg.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                # Skip directory links and links leaving the nmg domain.
                if 'htm' not in url or url.endswith('/') or 'nmg' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99123'
                article_json["url"] = url
                # The title is assigned to a JS variable inside the anchor's script.
                title_info = li.xpath('a/script/text()').extract_first().strip()
                article_json["title"] = re.findall(r"baoti='(.*?)';", title_info)[0]
                article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            li_list = res.xpath('//table[@id="table1"]/tbody/tr')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[2]/a/@href').extract_first()
                base_url = f'http://fgw.nmg.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99123'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[2]/a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('td[last()]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_fgwnmgarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the Inner Mongolia DRC: nothing to do here — parsing is deferred to the ETL step."""
    return DealModel()


def policy_fgwnmgarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for article pages of the Inner Mongolia Development and
    Reform Commission (fgw.nmg.gov.cn, sub_db_id 99123).

    Parses the downloaded HTML into one ``policy_latest`` metadata row and one
    ``policy_fulltext_latest`` fulltext row, and writes attachment info back to
    the source row through ``other_dicts``.

    Raises:
        Exception: if the fulltext container cannot be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the on-page title; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//p[@class="d_biaoti"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="xl_page_bt"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata cells: document number, index number, subject category,
    # legal status and issuing organ.
    pub_no = ''.join(res.xpath('//table[@class="ql_detailbro_table"]//td[contains(text(),"文") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="ql_detailbro_table"]//td[contains(text(),"索") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="ql_detailbro_table"]//td[contains(text(),"主") and contains(text(),"分")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="ql_detailbro_table"]//td[contains(text(),"文") and contains(text(),"效")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="ql_detailbro_table"]//td[contains(text(),"发") and contains(text(),"构")]/following::td[1]/text()').extract()).strip()

    # The site omits the province prefix for "自治区..." organ names.
    if organ.startswith('自治区'):
        organ = '内蒙古' + organ

    fulltext_xpath = '//div[@id="d_show"]|//div[@class="xl_main"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99123'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'FGWNMG'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'fgwnmgcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (if any) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  内蒙古自治区工业和信息化厅
def policy_gxtnmglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for the Inner Mongolia Dept. of Industry and IT (gxt.nmg.gov.cn).

    On the first page (page_index == 0) it schedules the remaining list pages;
    it then extracts the current page's article links into the next stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page's paging script; default to 3 pages.
        max_count = re.findall(r"countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 3

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue all remaining list pages for this channel.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        if 'zcjd' in callmodel.sql_model.list_rawid:
            # Policy-interpretation channel uses a plain <ul> listing.
            li_list = res.xpath('//div[@class="list_03"]/ul/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp.pop("task_tag_next")
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                base_url = f'http://gxt.nmg.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                # Skip external links and non-article entries.
                if 'htm' not in url or 'nmg' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99124'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # Other channels render a tabular listing.
            li_list = res.xpath('//table[@id="table1"]/tbody/tr')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp.pop("task_tag_next")
                article_json = dict()
                href = li.xpath('td[2]/a/@href').extract_first()
                base_url = f'http://gxt.nmg.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99124'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[2]/a/text()|td[2]/a/p/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('td[3]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_gxtnmgarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for gxt.nmg.gov.cn; all parsing happens in the ETL step."""
    return DealModel()


def policy_gxtnmgarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for article pages of the Inner Mongolia Dept. of Industry
    and IT (gxt.nmg.gov.cn, sub_db_id 99124).

    Parses the downloaded HTML into one ``policy_latest`` metadata row and one
    ``policy_fulltext_latest`` fulltext row, and writes attachment info back to
    the source row through ``other_dicts``.

    Raises:
        Exception: if the fulltext container cannot be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the on-page title; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="show_title"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="BuXiLan"]//h1/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata cells: document number, index number, subject category,
    # legal status and issuing organ.
    pub_no = ''.join(res.xpath('//div[@class="BuConx"]//td[contains(text(),"文") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="BuConx"]//td[contains(text(),"索") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="BuConx"]//td[contains(text(),"主") and contains(text(),"分")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="BuConx"]//td[contains(text(),"文") and contains(text(),"效")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="BuConx"]//td[contains(text(),"发") and contains(text(),"构")]/following::td[1]/text()').extract()).strip()

    # The site omits the province prefix for "自治区..." organ names.
    if organ.startswith('自治区'):
        organ = '内蒙古' + organ

    fulltext_xpath = '//div[@id="text_article"]|//div[@id="font_size"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99124'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'GXTNMG'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'gxtnmgcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (if any) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  内蒙古自治区科学技术厅
def policy_kjtnmglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for the Inner Mongolia Dept. of Science and Technology (kjt.nmg.gov.cn).

    On the first page (page_index == 0) it schedules the remaining list pages;
    it then extracts the current page's article links into the next stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page's paging script; default to a single page.
        max_count = re.findall(r"countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue all remaining list pages for this channel.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        if 'kjdt/tzgg' in callmodel.sql_model.list_rawid:
            # Notices channel uses a plain <ul> listing.
            li_list = res.xpath('//div[@class="bd"]/ul/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp.pop("task_tag_next")
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                base_url = f'https://kjt.nmg.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                # Skip external links and non-article entries.
                if 'htm' not in url or 'nmg' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99125'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # Other channels render a tabular listing.
            li_list = res.xpath('//table[@id="table1"]/tbody/tr')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp.pop("task_tag_next")
                article_json = dict()
                href = li.xpath('td[2]/a/@href').extract_first()
                base_url = f'https://kjt.nmg.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99125'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[2]/a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('td[3]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_kjtnmgarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for kjt.nmg.gov.cn; all parsing happens in the ETL step."""
    return DealModel()


def policy_kjtnmgarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for article pages of the Inner Mongolia Dept. of Science
    and Technology (kjt.nmg.gov.cn, sub_db_id 99125).

    Parses the downloaded HTML into one ``policy_latest`` metadata row and one
    ``policy_fulltext_latest`` fulltext row, and writes attachment info back to
    the source row through ``other_dicts``.

    Raises:
        Exception: if no publication date or no fulltext container is found.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the on-page title; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="SubXi_left on"]//h1/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="SubXi"]//h1/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    if not pub_date:
        # List page had no date; extract it from the article's "发布日期" label.
        pub_date_info = ''.join(res.xpath('//span[contains(text(),"发布日期：")]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    if not pub_date:
        raise Exception(f'pub_date not found: {provider_url}')
    # Metadata cells: document number, index number, subject category,
    # legal status and issuing organ.
    pub_no = ''.join(res.xpath('//div[@class="SubXi"]//th[contains(text(),"文") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="SubXi"]//th[contains(text(),"索") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="SubXi"]//th[contains(text(),"主") and contains(text(),"分")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="SubXi"]//th[contains(text(),"文") and contains(text(),"效")]/following::td[1]/text()').extract()).strip()
    # "——" is this site's placeholder for an empty legal-status cell.
    legal_status = legal_status.replace('——', '')
    organ = ''.join(res.xpath('//div[@class="SubXi"]//th[contains(text(),"发") and contains(text(),"构")]/following::td[1]/text()').extract()).strip()

    # The site omits the province prefix for "自治区..." organ names.
    if organ.startswith('自治区'):
        organ = '内蒙古' + organ

    fulltext_xpath = '//div[@id="Zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99125'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'KJTNMG'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'kjtnmgcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (if any) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  内蒙古自治区教育厅
def policy_nmgovedulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for the Inner Mongolia Dept. of Education (www.nmgov.edu.cn).

    On the first page (page_index == 0) it schedules the remaining list pages;
    it then extracts the current page's article links into the next stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page's paging script; default to a single page.
        max_count = re.findall(r"countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue all remaining list pages for this channel.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//table[@id="table1"]/tbody/tr')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            article_json = dict()
            href = li.xpath('td[2]/a/@href').extract_first()
            base_url = f'https://www.nmgov.edu.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99126'
            article_json["url"] = url
            article_json["title"] = li.xpath('td[2]/a/text()').extract_first().strip()
            # Some channel tables carry the date in the 5th column, others in the 4th.
            pub_date = li.xpath('td[5]/text()').extract_first()
            if not pub_date:
                pub_date = li.xpath('td[4]/text()').extract_first()
            article_json["pub_date"] = pub_date.strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_nmgovedulist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for the JSON listing API of the Inner Mongolia Dept.
    of Education disclosure channel (jyt.nmg.gov.cn).

    The response is JSON rather than HTML. This API is 1-based, so the
    pagination fan-out happens on page_index == 1. Each entry's article link
    and metadata are pushed into the next stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        max_count = html_json['data']['total']
        # 10 records per page.
        total_page = math.ceil(max_count / 10)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: enqueue all remaining API pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = html_json['data']['data']
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            article_json = dict()
            href = li['docpuburl']
            base_url = 'https://jyt.nmg.gov.cn/zfxxgk/fdzdgknr/?gk=3'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99126'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li['docpubtime']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_nmgoveduarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for nmgov.edu.cn; all parsing happens in the ETL step."""
    return DealModel()


def policy_nmgoveduarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for article pages of the Inner Mongolia Dept. of Education
    (nmgov.edu.cn / jyt.nmg.gov.cn, sub_db_id 99126).

    Parses the downloaded HTML into one ``policy_latest`` metadata row and one
    ``policy_fulltext_latest`` fulltext row, and writes attachment info back to
    the source row through ``other_dicts``.

    Raises:
        Exception: if no publication date or no fulltext container is found.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the on-page title; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="SubXi_left on"]//h1/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="SubXi"]//h1/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//span[@id="docTitle"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    if not pub_date:
        # List page had no date; extract it from the article's "发布日期" label.
        pub_date_info = ''.join(res.xpath('//div[contains(text(),"发布日期：")]/text()|//span[contains(text(),"发布日期：")]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    if not pub_date:
        raise Exception(f'pub_date not found: {provider_url}')
    # Metadata cells: document number, index number, subject category,
    # issuing organ and the date of writing.
    pub_no = ''.join(res.xpath('//div[@class="dy_jydt"]//td[contains(text(),"文") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="dy_jydt"]//td[contains(text(),"索") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="dy_jydt"]//td[contains(text(),"主") and contains(text(),"分")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="dy_jydt"]//td[contains(text(),"发") and contains(text(),"构")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="dy_jydt"]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()

    # The site omits the province prefix for "自治区..." organ names.
    if organ.startswith('自治区'):
        organ = '内蒙古' + organ

    fulltext_xpath = '//div[@id="Zoom"]|//div[@id="pare"]|//div[@id="docContent"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99126'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'NMGOVEDU'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'nmgoveducngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (if any) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  Inner Mongolia Department of Civil Affairs (内蒙古自治区民政厅)
def policy_mztnmglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for mzt.nmg.gov.cn (sub_db_id 99127).

    On the first page (page_index == 0) it schedules the remaining list
    pages of the column, then extracts every article link on the current
    page and queues it for the article stage.  Two page layouts exist:
    the "mzzx/gzdt" column uses a <ul> list, the rest use <table id="table1">.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page's JS; two known variants.
        # Raw strings avoid the invalid-escape-sequence warning for "\d".
        max_count = re.findall(r"countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall(r"Pager\(\{size:(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue the remaining list pages of this column.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        if 'mzzx/gzdt' in callmodel.sql_model.list_rawid:
            # <ul>-style layout.
            for li in res.xpath('//div[@class="m-gzcjd"]/ul/li'):
                temp = info_dicts.copy()
                temp["task_tag"] = temp.pop("task_tag_next")
                href = li.xpath('a/@href').extract_first()
                base_url = f'http://mzt.nmg.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                # Skip rows that do not resolve to a .htm article page.
                if 'htm' not in url or url.endswith('/'):
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99127'
                article_json = {
                    "url": url,
                    "title": li.xpath('a/text()').extract_first().strip(),
                    "pub_date": li.xpath('span/text()').extract_first().strip(),
                }
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # <table id="table1"> layout.
            for li in res.xpath('//table[@id="table1"]/tbody/tr'):
                temp = info_dicts.copy()
                temp["task_tag"] = temp.pop("task_tag_next")
                href = li.xpath('td[2]/a/@href').extract_first()
                base_url = f'http://mzt.nmg.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                # Skip rows that do not resolve to an on-site .htm article.
                if 'htm' not in url or 'nmg' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99127'
                article_json = {
                    "url": url,
                    "title": li.xpath('td[2]/a/p/text()|td[2]/a/text()').extract_first().strip(),
                    "pub_date": li.xpath('td[last()]/text()').extract_first().strip(),
                }
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_mztnmgarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for mzt.nmg.gov.cn: no parsing happens here
    (it is deferred to the ETL callback), so an empty DealModel is returned."""
    return DealModel()


def policy_mztnmgarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for article pages of the Inner Mongolia Department of
    Civil Affairs (内蒙古自治区民政厅, sub_db_id 99127).

    Parses the downloaded detail page, extracts the policy metadata and
    full text, and queues rows for the `policy_latest` and
    `policy_fulltext_latest` tables.  Attachment info (if any) is written
    back to the task row via `other_dicts`.

    Raises:
        Exception: if the full-text container cannot be found in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title: prefer the on-page headline; fall back to the list-page title.
    title = ''.join(res.xpath('//h1[@class="ql_detailbro_title"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="bt f-fs20"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata table: each label cell is followed by its value cell.
    pub_no = ''.join(res.xpath('//table[@class="ql_detailbro_table"]//td[contains(text(),"文") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="ql_detailbro_table"]//td[contains(text(),"索") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="ql_detailbro_table"]//td[contains(text(),"主") and contains(text(),"分")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="ql_detailbro_table"]//td[contains(text(),"文") and contains(text(),"效")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="ql_detailbro_table"]//td[contains(text(),"发") and contains(text(),"构")]/following::td[1]/text()').extract()).strip()

    # Prefix bare "自治区..." organ names with the full region name.
    if organ.startswith('自治区'):
        organ = '内蒙古' + organ

    fulltext_xpath = '//div[@id="pare"]|//div[@id="Zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so a page-layout change is noticed in the task log.
        raise Exception(f"fulltext container not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99127'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = {
        'rawid': rawid,
        'rawid_mysql': rawid,
        'lngid': lngid,
        'keyid': lngid,
        'product': 'MZTNMG',
        'sub_db': 'POLICY',
        'sub_db_id': sub_db_id,
        'provider': 'CNGOV',
        'zt_provider': 'mztnmgcngovpolicy',
        'source_type': '16',
        'latest_date': down_date_str[:8],
        'batch': down_date_str,
        'vision': '1',
        'is_deprecated': '0',
        'country': 'CN',
        'language': 'ZH',
        'title': title,
        'provider_url': provider_url,
        'pub_date': clean_pubdate(pub_date),
        'pub_year': pub_year,
        'pub_no': pub_no,
        'organ': organ,
        'index_no': index_no,
        'subject': subject,
        'legal_status': legal_status,
    }
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = {
        'lngid': lngid,
        'keyid': lngid,
        'sub_db_id': sub_db_id,
        'source_type': '16',
        'latest_date': down_date_str[:8],
        'batch': down_date_str,
        'is_deprecated': '0',
        'filename': f"{lngid}.html",
        'fulltext_type': "html",
        'fulltext_addr': '',
        'fulltext_size': '',
        'fulltext_txt': fulltext,
        'page_cnt': "1",
        'pub_year': pub_year,
    }
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (if any) back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid,
                               "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  Inner Mongolia Department of Finance (内蒙古自治区财政厅)
def policy_cztnmglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for czt.nmg.gov.cn (sub_db_id 99128).

    On the first page (page_index == 0) it schedules the remaining list
    pages of the column, then extracts every article row from the
    <table id="table1"> layout and queues it for the article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page's JS.  Raw string avoids
        # the invalid-escape-sequence warning for "\d".
        max_count = re.findall(r"countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue the remaining list pages of this column.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        for li in res.xpath('//table[@id="table1"]/tbody/tr'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            href = li.xpath('td[2]/a/@href').extract_first()
            base_url = f'http://czt.nmg.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            # Skip rows that do not resolve to an on-site .htm article.
            if 'htm' not in url or 'nmg' not in url:
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99128'
            article_json = {
                "url": url,
                "title": li.xpath('td[2]/a/text()').extract_first().strip(),
                "pub_date": li.xpath('td[last()]/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cztnmgarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for czt.nmg.gov.cn: no parsing happens here
    (it is deferred to the ETL callback), so an empty DealModel is returned."""
    return DealModel()


def policy_cztnmgarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for article pages of the Inner Mongolia Department of
    Finance (内蒙古自治区财政厅, sub_db_id 99128).

    Parses the downloaded detail page, extracts the policy metadata and
    full text, and queues rows for the `policy_latest` and
    `policy_fulltext_latest` tables.  Attachment info (if any) is written
    back to the task row via `other_dicts`.

    Raises:
        Exception: if the full-text container cannot be found in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title: prefer the on-page headline; fall back to the list-page title.
    title = ''.join(res.xpath('//h1[@class="ql_detailbro_title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata table: each label cell is followed by its value cell.
    pub_no = ''.join(res.xpath('//table[@class="ql_detailbro_table"]//td[contains(text(),"文") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="ql_detailbro_table"]//td[contains(text(),"索") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="ql_detailbro_table"]//td[contains(text(),"主") and contains(text(),"分")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="ql_detailbro_table"]//td[contains(text(),"文") and contains(text(),"效")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="ql_detailbro_table"]//td[contains(text(),"发") and contains(text(),"构")]/following::td[1]/text()').extract()).strip()

    # Prefix bare "自治区..." organ names with the full region name.
    if organ.startswith('自治区'):
        organ = '内蒙古' + organ

    fulltext_xpath = '//div[@id="pare"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so a page-layout change is noticed in the task log.
        raise Exception(f"fulltext container not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99128'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = {
        'rawid': rawid,
        'rawid_mysql': rawid,
        'lngid': lngid,
        'keyid': lngid,
        'product': 'CZTNMG',
        'sub_db': 'POLICY',
        'sub_db_id': sub_db_id,
        'provider': 'CNGOV',
        'zt_provider': 'cztnmgcngovpolicy',
        'source_type': '16',
        'latest_date': down_date_str[:8],
        'batch': down_date_str,
        'vision': '1',
        'is_deprecated': '0',
        'country': 'CN',
        'language': 'ZH',
        'title': title,
        'provider_url': provider_url,
        'pub_date': clean_pubdate(pub_date),
        'pub_year': pub_year,
        'pub_no': pub_no,
        'organ': organ,
        'index_no': index_no,
        'subject': subject,
        'legal_status': legal_status,
    }
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = {
        'lngid': lngid,
        'keyid': lngid,
        'sub_db_id': sub_db_id,
        'source_type': '16',
        'latest_date': down_date_str[:8],
        'batch': down_date_str,
        'is_deprecated': '0',
        'filename': f"{lngid}.html",
        'fulltext_type': "html",
        'fulltext_addr': '',
        'fulltext_size': '',
        'fulltext_txt': fulltext,
        'page_cnt': "1",
        'pub_year': pub_year,
    }
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (if any) back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid,
                               "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  Inner Mongolia Department of Human Resources and Social Security (内蒙古自治区人力资源和社会保障厅)
def policy_rstnmglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for rst.nmg.gov.cn (sub_db_id 99129).

    On the first page (page_index == 0) it schedules the remaining list
    pages of the column, then extracts every article row from the
    <table id="table1"> layout and queues it for the article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page's JS.  Raw string avoids
        # the invalid-escape-sequence warning for "\d".
        max_count = re.findall(r"countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue the remaining list pages of this column.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        for li in res.xpath('//table[@id="table1"]/tbody/tr'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            href = li.xpath('td[2]/a/@href').extract_first()
            base_url = f'http://rst.nmg.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            # Skip rows that do not resolve to an on-site .htm article.
            if 'htm' not in url or 'nmg' not in url:
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99129'
            article_json = {
                "url": url,
                "title": li.xpath('td[2]/a/text()|td[2]/a/p/text()').extract_first().strip(),
                "pub_date": li.xpath('td[last()]/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_rstnmgarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for rst.nmg.gov.cn: no parsing happens here
    (it is deferred to the ETL callback), so an empty DealModel is returned."""
    return DealModel()


def policy_rstnmgarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for article pages of the Inner Mongolia Department of
    Human Resources and Social Security (内蒙古自治区人力资源和社会保障厅,
    sub_db_id 99129).

    Parses the downloaded detail page, extracts the policy metadata
    (including the written date) and full text, and queues rows for the
    `policy_latest` and `policy_fulltext_latest` tables.  Attachment info
    (if any) is written back to the task row via `other_dicts`.

    Raises:
        Exception: if the full-text container cannot be found in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title: prefer the on-page headline; fall back to the list-page title.
    title = ''.join(res.xpath('//p[@class="d_biaoti"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata table: each label cell is followed by its value cell.
    pub_no = ''.join(res.xpath('//table[@class="ql_detailbro_table"]//td[contains(text(),"文") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="ql_detailbro_table"]//td[contains(text(),"索") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="ql_detailbro_table"]//td[contains(text(),"主") and contains(text(),"分")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="ql_detailbro_table"]//td[contains(text(),"文") and contains(text(),"效")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="ql_detailbro_table"]//td[contains(text(),"成") and contains(text(),"期")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="ql_detailbro_table"]//td[contains(text(),"发") and contains(text(),"构")]/following::td[1]/text()').extract()).strip()

    # Prefix bare "自治区..." organ names with the full region name.
    if organ.startswith('自治区'):
        organ = '内蒙古' + organ

    fulltext_xpath = '//div[@id="d_show"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so a page-layout change is noticed in the task log.
        raise Exception(f"fulltext container not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99129'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = {
        'rawid': rawid,
        'rawid_mysql': rawid,
        'lngid': lngid,
        'keyid': lngid,
        'product': 'RSTNMG',
        'sub_db': 'POLICY',
        'sub_db_id': sub_db_id,
        'provider': 'CNGOV',
        'zt_provider': 'rstnmgcngovpolicy',
        'source_type': '16',
        'latest_date': down_date_str[:8],
        'batch': down_date_str,
        'vision': '1',
        'is_deprecated': '0',
        'country': 'CN',
        'language': 'ZH',
        'title': title,
        'provider_url': provider_url,
        'pub_date': clean_pubdate(pub_date),
        'pub_year': pub_year,
        'pub_no': pub_no,
        'organ': organ,
        'index_no': index_no,
        'written_date': clean_pubdate(written_date),
        'subject': subject,
        'legal_status': legal_status,
    }
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = {
        'lngid': lngid,
        'keyid': lngid,
        'sub_db_id': sub_db_id,
        'source_type': '16',
        'latest_date': down_date_str[:8],
        'batch': down_date_str,
        'is_deprecated': '0',
        'filename': f"{lngid}.html",
        'fulltext_type': "html",
        'fulltext_addr': '',
        'fulltext_size': '',
        'fulltext_txt': fulltext,
        'page_cnt': "1",
        'pub_year': pub_year,
    }
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (if any) back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid,
                               "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  Inner Mongolia Department of Agriculture and Animal Husbandry (内蒙古自治区农牧厅)
def policy_nmtnmglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for nmt.nmg.gov.cn (sub_db_id 99130).

    On the first page (page_index == 0) it schedules the remaining list
    pages of the column, then extracts every article row from the
    <table id="table1"> layout and queues it for the article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page's JS.  Raw string avoids
        # the invalid-escape-sequence warning for "\d".
        max_count = re.findall(r"countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue the remaining list pages of this column.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        for li in res.xpath('//table[@id="table1"]/tbody/tr'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            href = li.xpath('td[2]/a/@href').extract_first()
            base_url = f'http://nmt.nmg.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            # Skip rows that do not resolve to an on-site .htm article.
            if 'htm' not in url or 'nmg' not in url:
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99130'
            article_json = {
                "url": url,
                "title": li.xpath('td[2]/a/text()|td[2]/a/p/text()').extract_first().strip(),
                "pub_date": li.xpath('td[last()]/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_nmtnmgarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for nmt.nmg.gov.cn: no parsing happens here
    (it is deferred to the ETL callback), so an empty DealModel is returned."""
    return DealModel()


def policy_nmtnmgarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for article pages of the Inner Mongolia Department of
    Agriculture and Animal Husbandry (内蒙古自治区农牧厅, sub_db_id 99130).

    Parses the downloaded detail page, extracts the policy metadata
    (including the written date) and full text, and queues rows for the
    `policy_latest` and `policy_fulltext_latest` tables.  Attachment info
    (if any) is written back to the task row via `other_dicts`.

    Raises:
        Exception: if the full-text container cannot be found in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title: prefer the on-page headline; fall back to the list-page title.
    title = ''.join(res.xpath('//h1[@class="ql_detailbro_title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata table: each label cell is followed by its value cell.
    pub_no = ''.join(res.xpath('//table[@class="ql_detailbro_table"]//td[contains(text(),"文") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="ql_detailbro_table"]//td[contains(text(),"索") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="ql_detailbro_table"]//td[contains(text(),"主") and contains(text(),"分")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="ql_detailbro_table"]//td[contains(text(),"文") and contains(text(),"效")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="ql_detailbro_table"]//td[contains(text(),"成") and contains(text(),"期")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="ql_detailbro_table"]//td[contains(text(),"发") and contains(text(),"构")]/following::td[1]/text()').extract()).strip()

    # Prefix bare "自治区..." organ names with the full region name.
    if organ.startswith('自治区'):
        organ = '内蒙古' + organ

    fulltext_xpath = '//div[@id="pare"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so a page-layout change is noticed in the task log.
        raise Exception(f"fulltext container not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99130'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = {
        'rawid': rawid,
        'rawid_mysql': rawid,
        'lngid': lngid,
        'keyid': lngid,
        'product': 'NMTNMG',
        'sub_db': 'POLICY',
        'sub_db_id': sub_db_id,
        'provider': 'CNGOV',
        'zt_provider': 'nmtnmgcngovpolicy',
        'source_type': '16',
        'latest_date': down_date_str[:8],
        'batch': down_date_str,
        'vision': '1',
        'is_deprecated': '0',
        'country': 'CN',
        'language': 'ZH',
        'title': title,
        'provider_url': provider_url,
        'pub_date': clean_pubdate(pub_date),
        'pub_year': pub_year,
        'pub_no': pub_no,
        'organ': organ,
        'index_no': index_no,
        'written_date': clean_pubdate(written_date),
        'subject': subject,
        'legal_status': legal_status,
    }
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = {
        'lngid': lngid,
        'keyid': lngid,
        'sub_db_id': sub_db_id,
        'source_type': '16',
        'latest_date': down_date_str[:8],
        'batch': down_date_str,
        'is_deprecated': '0',
        'filename': f"{lngid}.html",
        'fulltext_type': "html",
        'fulltext_addr': '',
        'fulltext_size': '',
        'fulltext_txt': fulltext,
        'page_cnt': "1",
        'pub_year': pub_year,
    }
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (if any) back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid,
                               "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  内蒙古自治区住房和城乡建设厅
def policy_zjtnmglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for zjt.nmg.gov.cn (内蒙古自治区住房和城乡建设厅).

    On the first page (``page_index == 0``) the total page count is read from
    the embedded ``countPage = N`` script variable and crawl tasks for the
    remaining list pages are queued.  Every article link on the current page
    is then extracted and queued for the article-download stage.

    Returns:
        DealModel: carries the page fan-out inserts (``befor_dicts``) and the
        next-stage article tasks (``next_dicts``).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # Total page count is embedded as "countPage = N"; default to one page.
        pages = re.findall(r"countPage = (\d+)", html)
        total_page = int(pages[0]) if pages else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page only: fan out tasks for the remaining list pages
            # (0-based page indexing, so the last page is total_page - 1).
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)

        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        # Relative hrefs are resolved against the channel's index page.
        base_url = f'http://zjt.nmg.gov.cn/{callmodel.sql_model.list_rawid}/index.html'

        def _queue_items(items, link_xp, title_xp, date_xp, need_nmg=False):
            # Build one next-stage article task per list row.
            for li in items:
                temp = info_dicts.copy()
                temp["task_tag"] = temp.pop("task_tag_next")
                href = li.xpath(link_xp).extract_first()
                url = parse.urljoin(base_url, href)
                # Skip non-article links (and off-site links for news lists).
                if 'htm' not in url or (need_nmg and 'nmg' not in url):
                    continue
                # rawid = last path component without its file extension.
                temp["rawid"] = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["sub_db_id"] = '99131'
                article_json = {
                    "url": url,
                    "title": li.xpath(title_xp).extract_first().strip(),
                    "pub_date": li.xpath(date_xp).extract_first().strip(),
                }
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)

        if 'zfxxgkn' in callmodel.sql_model.list_rawid:
            # Information-disclosure channel: articles are table rows.
            _queue_items(res.xpath('//table[@id="table1"]/tbody/tr'),
                         'td[2]/a/@href', 'td[2]/a/text()', 'td[last()]/text()')
        else:
            # Regular news channel: articles are <ul> items.
            _queue_items(res.xpath('//div[@class="BuYuJue"]/ul/li'),
                         'h3/a/@href', 'h3/a/text()', 'span/text()',
                         need_nmg=True)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_zjtnmgarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-download stage for zjt.nmg.gov.cn.

    All parsing happens in the ETL stage, so this callback just returns an
    empty DealModel.
    """
    return DealModel()


def policy_zjtnmgarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for zjt.nmg.gov.cn (内蒙古住建厅) article pages.

    Parses the title and document metadata from the detail-page HTML,
    extracts the full text, and emits one row each for the ``policy_latest``
    and ``policy_fulltext_latest`` tables.  Attachment info found inside the
    full text is written back to the task row's ``other_dicts`` column.

    Raises:
        Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = []

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    title = ''.join(res.xpath('//div[@class="BuXiLan"]//h1/text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()

    def _meta(ch_a, ch_b):
        # Read the metadata cell whose <th> label contains both characters;
        # labels may carry full-width padding, so exact matching is avoided.
        xp = (f'//table[@class="ql_detailbro_table"]//th[contains(text(),"{ch_a}")'
              f' and contains(text(),"{ch_b}")]/following::td[1]/text()')
        return ''.join(res.xpath(xp).extract()).strip()

    pub_no = _meta('文', '号')
    index_no = _meta('索', '号')
    subject = _meta('主', '分')
    legal_status = _meta('文', '效')
    written_date = _meta('成', '期')
    organ = _meta('发', '构')
    # Normalize "自治区..." organ names to include the province prefix.
    if organ.startswith('自治区'):
        organ = '内蒙古' + organ

    fulltext_xpath = '//div[@id="pare"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found at {fulltext_xpath}: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99131'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)

    # Metadata row for policy_latest.
    data = {
        'rawid': rawid,
        'rawid_mysql': rawid,
        'lngid': lngid,
        'keyid': lngid,
        'product': 'ZJTNMG',
        'sub_db': 'POLICY',
        'sub_db_id': sub_db_id,
        'provider': 'CNGOV',
        'zt_provider': 'zjtnmgcngovpolicy',
        'source_type': '16',
        'latest_date': down_date_str[:8],
        'batch': down_date_str,
        'vision': '1',
        'is_deprecated': '0',
        'country': 'CN',
        'language': 'ZH',
        'title': title,
        'provider_url': provider_url,
        'pub_date': clean_pubdate(pub_date),
        'pub_year': pub_year,
        'pub_no': pub_no,
        'organ': organ,
        'index_no': index_no,
        'written_date': clean_pubdate(written_date),
        'subject': subject,
        'legal_status': legal_status,
    }
    save_data.append({'table': 'policy_latest', 'data': data})

    # Full-text row for policy_fulltext_latest.
    full_text_data = {
        'lngid': lngid,
        'keyid': lngid,
        'sub_db_id': sub_db_id,
        'source_type': '16',
        'latest_date': down_date_str[:8],
        'batch': down_date_str,
        'is_deprecated': '0',
        'filename': f"{lngid}.html",
        'fulltext_type': "html",
        'fulltext_addr': '',
        'fulltext_size': '',
        'fulltext_txt': fulltext,
        'page_cnt': "1",
        'pub_year': pub_year,
    }
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Persist attachment info (downloadable files inside the full text) into
    # the current task row's other_dicts column.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid,
                               "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  内蒙古自治区卫生健康委员会
def policy_wjwnmglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for wjw.nmg.gov.cn (内蒙古自治区卫生健康委员会).

    On the first page (``page_index == 0``) the total page count is read from
    the embedded ``countPage = N`` script variable and crawl tasks for the
    remaining list pages are queued.  Every article row in the list table is
    then extracted and queued for the article-download stage.

    Returns:
        DealModel: carries the page fan-out inserts (``befor_dicts``) and the
        next-stage article tasks (``next_dicts``).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # Total page count is embedded as "countPage = N"; default to one page.
        pages = re.findall(r"countPage = (\d+)", html)
        total_page = int(pages[0]) if pages else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page only: fan out tasks for the remaining list pages
            # (0-based page indexing, so the last page is total_page - 1).
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)

        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        # Relative hrefs are resolved against the channel's index page.
        base_url = f'http://wjw.nmg.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
        for li in res.xpath('//table[@id="table1"]/tbody/tr'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            href = li.xpath('td[2]/a/@href').extract_first()
            url = parse.urljoin(base_url, href)
            # Skip non-article links.
            if 'htm' not in url:
                continue
            # rawid = last path component without its file extension.
            temp["rawid"] = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["sub_db_id"] = '99132'
            article_json = {
                "url": url,
                "title": li.xpath('td[2]/a/text()').extract_first().strip(),
                "pub_date": li.xpath('td[last()]/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_wjwnmgarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-download stage for wjw.nmg.gov.cn.

    All parsing happens in the ETL stage, so this callback just returns an
    empty DealModel.
    """
    return DealModel()


def policy_wjwnmgarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for wjw.nmg.gov.cn (内蒙古卫健委) article pages.

    Parses the title and document metadata from the detail-page HTML,
    extracts the full text, and emits one row each for the ``policy_latest``
    and ``policy_fulltext_latest`` tables.  Attachment info found inside the
    full text is written back to the task row's ``other_dicts`` column.

    Raises:
        Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = []

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    title = ''.join(res.xpath('//h1[@class="ql_detailbro_title"]//text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()

    def _meta(ch_a, ch_b):
        # Read the metadata cell whose label <td> contains both characters;
        # labels may carry full-width padding, so exact matching is avoided.
        xp = (f'//table[@class="ql_detailbro_table"]//td[contains(text(),"{ch_a}")'
              f' and contains(text(),"{ch_b}")]/following::td[1]/text()')
        return ''.join(res.xpath(xp).extract()).strip()

    pub_no = _meta('文', '号')
    index_no = _meta('索', '号')
    subject = _meta('主', '分')
    legal_status = _meta('文', '效')
    written_date = _meta('成', '期')
    organ = _meta('发', '构')
    # Normalize "自治区..." organ names to include the province prefix.
    if organ.startswith('自治区'):
        organ = '内蒙古' + organ

    fulltext_xpath = '//div[@id="pare"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found at {fulltext_xpath}: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99132'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)

    # Metadata row for policy_latest.
    data = {
        'rawid': rawid,
        'rawid_mysql': rawid,
        'lngid': lngid,
        'keyid': lngid,
        'product': 'WJWNMG',
        'sub_db': 'POLICY',
        'sub_db_id': sub_db_id,
        'provider': 'CNGOV',
        'zt_provider': 'wjwnmgcngovpolicy',
        'source_type': '16',
        'latest_date': down_date_str[:8],
        'batch': down_date_str,
        'vision': '1',
        'is_deprecated': '0',
        'country': 'CN',
        'language': 'ZH',
        'title': title,
        'provider_url': provider_url,
        'pub_date': clean_pubdate(pub_date),
        'pub_year': pub_year,
        'pub_no': pub_no,
        'organ': organ,
        'index_no': index_no,
        'written_date': clean_pubdate(written_date),
        'subject': subject,
        'legal_status': legal_status,
    }
    save_data.append({'table': 'policy_latest', 'data': data})

    # Full-text row for policy_fulltext_latest.
    full_text_data = {
        'lngid': lngid,
        'keyid': lngid,
        'sub_db_id': sub_db_id,
        'source_type': '16',
        'latest_date': down_date_str[:8],
        'batch': down_date_str,
        'is_deprecated': '0',
        'filename': f"{lngid}.html",
        'fulltext_type': "html",
        'fulltext_addr': '',
        'fulltext_size': '',
        'fulltext_txt': fulltext,
        'page_cnt': "1",
        'pub_year': pub_year,
    }
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Persist attachment info (downloadable files inside the full text) into
    # the current task row's other_dicts column.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid,
                               "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  内蒙古自治区包头市
def policy_baotoulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for www.baotou.gov.cn (内蒙古自治区包头市).

    On the first page (``page_index == 1`` — this site paginates from 1) the
    total page count is parsed from the page's ``;current/total&`` pager
    fragment and crawl tasks for the remaining list pages are queued.  Every
    article link on the current page is then extracted and queued for the
    article-download stage.

    Returns:
        DealModel: carries the page fan-out inserts (``befor_dicts``) and the
        next-stage article tasks (``next_dicts``).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # Pager fragment looks like ";<current>/<total>&"; default to 1 page.
        pages = re.findall(r";\d+/(\d+)&", html)
        total_page = int(pages[0]) if pages else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: fan out tasks for pages 2..total_page
            # (1-based page indexing on this site).
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)

        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        base_url = 'https://www.baotou.gov.cn/'

        def _queue_items(items, link_xp, title_xp, date_xp):
            # Build one next-stage article task per list row.
            for li in items:
                temp = info_dicts.copy()
                temp["task_tag"] = temp.pop("task_tag_next")
                href = li.xpath(link_xp).extract_first()
                # Hrefs are site-relative with "../" prefixes; strip them and
                # join onto the site root.
                url = base_url + href.replace('../', '')
                # Skip non-article links.
                if 'htm' not in url:
                    continue
                # rawid = last path component without its file extension.
                temp["rawid"] = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["sub_db_id"] = '99134'
                article_json = {
                    "url": url,
                    "title": li.xpath(title_xp).extract_first().strip(),
                    "pub_date": li.xpath(date_xp).extract_first().strip(),
                }
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)

        list_rawid = callmodel.sql_model.list_rawid
        if any(key in list_rawid for key in ('5947', '5903', '5915', '5787')):
            # News-style channels: <ul> lists inside the content block.
            _queue_items(res.xpath('//div[@class="content"]//ul/li'),
                         'a/@href', 'a/text()', 'span/text()')
        else:
            # Disclosure-style channels: table rows (first two rows are headers).
            _queue_items(res.xpath('//table[@id="hr_table"]/tr')[2:],
                         'td[2]/a/@href', 'td[2]/a/text()', 'td[last()]/text()')
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_baotouarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-download stage for www.baotou.gov.cn.

    All parsing happens in the ETL stage, so this callback just returns an
    empty DealModel.
    """
    return DealModel()


def policy_baotouarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for www.baotou.gov.cn (包头市) article pages.

    Parses the title and document metadata from the detail-page HTML,
    extracts the full text, and emits one row each for the ``policy_latest``
    and ``policy_fulltext_latest`` tables.  Attachment info found on the page
    is written back to the task row's ``other_dicts`` column.

    Raises:
        Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = []

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    def _meta(ch_a, ch_b):
        # Metadata value: the first <span> following a <b> label that
        # contains both characters (labels may carry full-width padding).
        xp = (f'//b[contains(text(),"{ch_a}") and contains(text(),"{ch_b}")]'
              f'/following::span[1]/text()')
        return ''.join(res.xpath(xp).extract()).strip()

    title = ''.join(res.xpath('//div[@class="neirong_centop"]//text()').extract()).strip()
    if not title:
        # Disclosure pages carry the title in a labelled metadata field.
        title = _meta('名', '称')
    if not title:
        # Final fallback: title captured on the list page.
        title = article_json['title'].strip()
    pub_no = _meta('文', '号')
    index_no = _meta('索', '号')
    legal_status = _meta('文', '效')
    written_date = _meta('成', '期')
    organ = _meta('发', '构')
    # Normalize "市..." organ names to include the city prefix.
    if organ.startswith('市'):
        organ = '包头' + organ

    fulltext_xpath = '//div[contains(@id,"vsb_content")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found at {fulltext_xpath}: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99134'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)

    # Metadata row for policy_latest (no subject field on this site).
    data = {
        'rawid': rawid,
        'rawid_mysql': rawid,
        'lngid': lngid,
        'keyid': lngid,
        'product': 'BAOTOU',
        'sub_db': 'POLICY',
        'sub_db_id': sub_db_id,
        'provider': 'CNGOV',
        'zt_provider': 'baotoucngovpolicy',
        'source_type': '16',
        'latest_date': down_date_str[:8],
        'batch': down_date_str,
        'vision': '1',
        'is_deprecated': '0',
        'country': 'CN',
        'language': 'ZH',
        'title': title,
        'provider_url': provider_url,
        'pub_date': clean_pubdate(pub_date),
        'pub_year': pub_year,
        'pub_no': pub_no,
        'organ': organ,
        'index_no': index_no,
        'written_date': clean_pubdate(written_date),
        'legal_status': legal_status,
    }
    save_data.append({'table': 'policy_latest', 'data': data})

    # Full-text row for policy_fulltext_latest.
    full_text_data = {
        'lngid': lngid,
        'keyid': lngid,
        'sub_db_id': sub_db_id,
        'source_type': '16',
        'latest_date': down_date_str[:8],
        'batch': down_date_str,
        'is_deprecated': '0',
        'filename': f"{lngid}.html",
        'fulltext_type': "html",
        'fulltext_addr': '',
        'fulltext_size': '',
        'fulltext_txt': fulltext,
        'page_cnt': "1",
        'pub_year': pub_year,
    }
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Persist attachment info into the task row's other_dicts column.
    # NOTE(review): attachments are searched in //div[@id="pageContent"],
    # not in fulltext_xpath like the sibling callbacks — confirm intentional.
    file_info = get_file_info(data, res, f'(//div[@id="pageContent"])')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid,
                               "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  内蒙古自治区乌海市
def policy_wuhailist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for www.wuhai.gov.cn (内蒙古自治区乌海市).

    On the first page (``page_index == 1`` — this site paginates from 1) the
    total page count is parsed from the ``totalpage="N"`` attribute and crawl
    tasks for the remaining list pages are queued.  Every article link on the
    current page is then extracted and queued for the article-download stage.
    Three channel layouts are handled, selected by ``list_rawid``.

    Returns:
        DealModel: carries the page fan-out inserts (``befor_dicts``) and the
        next-stage article tasks (``next_dicts``).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        pages = re.findall(r'totalpage="(\d+)', html)
        list_rawid = callmodel.sql_model.list_rawid
        if '38dbb1e2' in list_rawid:
            # This channel repeats totalpage; the last occurrence is the real
            # count.  NOTE(review): raises IndexError when no match — same as
            # the original behavior; the framework presumably retries on error.
            total_page = int(pages[-1])
        else:
            total_page = int(pages[0]) if pages else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: fan out tasks for pages 2..total_page
            # (1-based page indexing on this site).
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)

        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        base_url = 'http://www.wuhai.gov.cn'

        def _queue_items(items, link_xp, title_xp, date_xp):
            # Build one next-stage article task per list row.
            for li in items:
                temp = info_dicts.copy()
                temp["task_tag"] = temp.pop("task_tag_next")
                href = li.xpath(link_xp).extract_first()
                # Hrefs are root-relative; simple concatenation suffices.
                url = base_url + href
                # Skip non-article links.
                if 'htm' not in url:
                    continue
                # rawid is the parent directory name of the article page.
                temp["rawid"] = url.split('/')[-2]
                temp["sub_db_id"] = '99135'
                article_json = {
                    "url": url,
                    "title": li.xpath(title_xp).extract_first().strip(),
                    "pub_date": li.xpath(date_xp).extract_first().strip(),
                }
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)

        if '220a8899' in list_rawid or '551bd9d9' in list_rawid:
            # Simple <ul> listing with a date span of class "d".
            _queue_items(res.xpath('//div[contains(@class,"gz_list")]/ul/li'),
                         'a/@href', 'a/text()',
                         'span[@class="d"]/text()|a/span[@class="d"]/text()')
        elif '38dbb1e2' in list_rawid:
            # Column-layout listing: link/title/date live in fixed spans.
            _queue_items(res.xpath('//div[contains(@class,"gz_list")]/ul/li'),
                         'div[1]/span[@class="span2"]/a/@href',
                         'div[1]/span[@class="span2"]/a/text()',
                         'div[1]/span[@class="span4 fbsj"]/text()')
        else:
            # Regulations / generic listings.
            _queue_items(res.xpath('//div[@class="xzfg_list"]/ul/li'
                                   '|//ul[@class="wh_thr_list"]/li'),
                         'a/@href', 'a/text()', 'span/text()|a/span/text()')
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_wuhaiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-download stage for www.wuhai.gov.cn.

    All parsing happens in the ETL stage, so this callback just returns an
    empty DealModel.
    """
    return DealModel()


def policy_wuhaiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Wuhai (乌海市) policy article pages (sub_db_id 99135).

    Parses the crawled article HTML, extracts the title, metadata-table
    fields and full text, and queues one row each for ``policy_latest``
    and ``policy_fulltext_latest``.  Attachment info found inside the
    full-text container is written back to the source row (``other_dicts``).

    :param callmodel: callback model carrying the crawled HTML and the SQL
        row (``article_json``, ``rawid``, ``task_tag``, ``task_name``).
    :return: populated :class:`EtlDealModel`.
    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the on-page title; fall back to the title captured at list time.
    title = ''.join(res.xpath('//div[@class="wh_xl_t"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata cells are located by characteristic characters of their labels
    # (e.g. "发文字号", "索引号") so whitespace inside the label is tolerated.
    pub_no = ''.join(res.xpath('//td[contains(text(),"文") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//td[contains(text(),"索") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//td[contains(text(),"效") and contains(text(),"性")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//td[contains(text(),"成") and contains(text(),"期")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//td[contains(text(),"发") and contains(text(),"构")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # Qualify bare "市…" organ names with the city name.
        organ = '乌海' + organ

    fulltext_xpath = '//div[@id="wh_x_c"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99135'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'WUHAI'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'wuhaicngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (or an empty dict) on the originating row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  内蒙古自治区赤峰市
def policy_chifenglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Chifeng (赤峰市) policy documents (sub_db_id 99136).

    On the first page, fans out one list task per remaining page; then
    extracts every article link on the current page and queues it for the
    article stage.

    :param callmodel: callback model with the crawled list HTML and SQL row.
    :return: DealModel holding the extra list tasks and the article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page's pagination script.
        max_count = re.findall(r"countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only page 0 schedules the remaining list pages (1 .. total_page-1).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        list_rawid = callmodel.sql_model.list_rawid
        # Relative hrefs are resolved against the channel index page.
        base_url = f'http://www.chifeng.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
        if 'szfwj' in list_rawid or 'zfbwj' in list_rawid or 'gfxwj' in list_rawid or 'jhgh' in list_rawid:
            # Table-style layout used by the document-library channels.
            li_list = res.xpath('//table[@id="table1"]/tbody/tr')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[2]/a/@href').extract_first()
                if not href:
                    # Without an href urljoin would fall back to base_url
                    # and queue the index page itself — skip the row.
                    continue
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                # rawid is the document filename without its extension.
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99136'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[2]/a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('td[last()]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # Plain <ul> list layout.
            li_list = res.xpath('//div[@class="listArea_pub"]/ul/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                if not href:
                    continue
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99136'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/p/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('span/text()|a/span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_chifengarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article callback for Chifeng: nothing extra to schedule after the fetch."""
    return DealModel()


def policy_chifengarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Chifeng (赤峰市) policy article pages (sub_db_id 99136).

    Parses the crawled article HTML, extracts the title, metadata-table
    fields and full text, and queues one row each for ``policy_latest``
    and ``policy_fulltext_latest``.  Attachment info found inside the
    full-text container is written back to the source row (``other_dicts``).

    :param callmodel: callback model carrying the crawled HTML and the SQL
        row (``article_json``, ``rawid``, ``task_tag``, ``task_name``).
    :return: populated :class:`EtlDealModel`.
    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the on-page title; fall back to the title captured at list time.
    title = ''.join(res.xpath('//div[@class="zwgk_xl_title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata cells are located by characteristic characters of their labels
    # so whitespace inside the label is tolerated.
    pub_no = ''.join(res.xpath('//table[@class="ql_detailbro_table"]//td[contains(text(),"文") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="ql_detailbro_table"]//td[contains(text(),"索") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="ql_detailbro_table"]//td[contains(text(),"主") and contains(text(),"分")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="ql_detailbro_table"]//td[contains(text(),"文") and contains(text(),"效")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="ql_detailbro_table"]//td[contains(text(),"成") and contains(text(),"期")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="ql_detailbro_table"]//td[contains(text(),"发") and contains(text(),"构")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # Qualify bare "市…" organ names with the city name.
        organ = '赤峰' + organ

    fulltext_xpath = '//div[@class="zwgk_xl_contentArea"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99136'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'CHIFENG'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'chifengcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (or an empty dict) on the originating row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  内蒙古自治区通辽市
def policy_tongliaolist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Tongliao (通辽市) policy documents (sub_db_id 99137).

    On the first page, fans out one list task per remaining page; then
    extracts every article link on the current page (table or <ul> layout)
    and queues it for the article stage.

    :param callmodel: callback model with the crawled list HTML and SQL row.
    :return: DealModel holding the extra list tasks and the article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page's pagination script.
        max_count = re.findall(r"countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only page 0 schedules the remaining list pages (1 .. total_page-1).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        list_rawid = callmodel.sql_model.list_rawid
        # Relative hrefs are resolved against the channel index page.
        base_url = f'https://www.tongliao.gov.cn/{list_rawid}/index.html'
        # Union xpath covers both the table layout and the <ul> layout.
        li_list = res.xpath('//table[@id="table1"]/tbody/tr|//div[contains(@class,"lis_list_part2_list")]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('td[2]/a/@href|a/@href').extract_first()
            if not href:
                # Without an href urljoin would fall back to base_url
                # and queue the index page itself — skip the row.
                continue
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid is the document filename without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99137'
            article_json["url"] = url
            article_json["title"] = li.xpath('td[2]/a/text()|a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('td[last()]/text()|a/span/text()|span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_tongliaoarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article callback for Tongliao: nothing extra to schedule after the fetch."""
    return DealModel()


def policy_tongliaoarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Tongliao (通辽市) policy article pages (sub_db_id 99137).

    Parses the crawled article HTML, extracts the title, metadata-table
    fields (only when the ``ql_detailbro_table`` layout is present) and full
    text, and queues one row each for ``policy_latest`` and
    ``policy_fulltext_latest``.  Attachment info found inside the full-text
    container is written back to the source row (``other_dicts``).

    :param callmodel: callback model carrying the crawled HTML and the SQL
        row (``article_json``, ``rawid``, ``task_tag``, ``task_name``).
    :return: populated :class:`EtlDealModel`.
    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Try the known on-page title containers in order; fall back to the
    # title captured at list time.
    title = ''.join(res.xpath('//h1[@class="textc"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="lis_list_part2_title"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    if 'ql_detailbro_table' in html:
        # Structured metadata table is present — pull the labelled cells.
        pub_no = ''.join(res.xpath('//table[contains(@class,"ql_detailbro_table")]//td[contains(text(),"文") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath('//table[contains(@class,"ql_detailbro_table")]//td[contains(text(),"索") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
        subject = ''.join(res.xpath('//table[contains(@class,"ql_detailbro_table")]//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
        subject_word = ''
        written_date = ''.join(res.xpath('//table[contains(@class,"ql_detailbro_table")]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
        organ = ''.join(res.xpath('//table[contains(@class,"ql_detailbro_table")]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
        legal_status = ''.join(res.xpath('//table[contains(@class,"ql_detailbro_table")]//td[contains(text(),"公文时效")]/following::td[1]/text()').extract()).strip()
    else:
        # Pages without the metadata table carry no structured fields.
        pub_no = ''
        index_no = ''
        subject = ''
        subject_word = ''
        written_date = ''
        organ = ''
        legal_status = ''
    if organ.startswith('市'):
        # Qualify bare "市…" organ names with the city name.
        organ = '通辽' + organ

    fulltext_xpath = '//div[@class="conzynrs"]|//div[@class="lis_list_part2_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99137'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'TONGLIAO'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'tongliaocngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['subject_word'] = subject_word
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (or an empty dict) on the originating row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  内蒙古自治区鄂尔多斯市
def policy_ordoslist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Ordos (鄂尔多斯市) policy documents (sub_db_id 99138).

    On the first page, fans out one list task per remaining page; then
    extracts every article link on the current page and queues it for the
    article stage.

    :param callmodel: callback model with the crawled list HTML and SQL row.
    :return: DealModel holding the extra list tasks and the article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # The page count appears in one of three pagination-script variants.
        max_count = re.findall(r"countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall(r"countPage =(\d+)", para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall(r"'page_div',(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only page 0 schedules the remaining list pages (1 .. total_page-1).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        list_rawid = callmodel.sql_model.list_rawid
        # Relative hrefs are resolved against the channel index page.
        base_url = f'http://www.ordos.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
        if 'ordoszf/4033' in list_rawid:
            # Table-style layout used by the government-document channel.
            li_list = res.xpath('//table[@id="table1"]/tbody/tr')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[2]/a/@href').extract_first()
                if not href:
                    # Without an href urljoin would fall back to base_url
                    # and queue the index page itself — skip the row.
                    continue
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                # rawid is the document filename without its extension.
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99138'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[2]/a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('td[6]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # Union xpath covers the three list layouts seen on other channels.
            li_list = res.xpath('//div[@class="d_zfxx_centext"]/dl/dd/p|//ul[@class="yzgl_right_list"]/li|//ul[@id="d_list_ul"]/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('div[2]/a/@href|a/@href').extract_first()
                if not href:
                    continue
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99138'
                article_json["url"] = url
                article_json["title"] = li.xpath('div[2]/a/text()|a/text()').extract_first().strip()
                article_json["pub_date"] = ''.join(li.xpath('div[3]/span/text()|span/text()').extract()).strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_ordosarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article callback for Ordos: nothing extra to schedule after the fetch."""
    return DealModel()


def policy_ordosarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Ordos (鄂尔多斯市) policy article pages (sub_db_id 99138).

    Parses the crawled article HTML, extracts the title, metadata fields and
    full text, and queues one row each for ``policy_latest`` and
    ``policy_fulltext_latest``.  Attachment info found inside the full-text
    container is written back to the source row (``other_dicts``).

    :param callmodel: callback model carrying the crawled HTML and the SQL
        row (``article_json``, ``rawid``, ``task_tag``, ``task_name``).
    :return: populated :class:`EtlDealModel`.
    :raises Exception: when no publish date or no full-text container is found.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Try the known on-page title containers in order; fall back to the
    # title captured at list time.
    title = ''.join(res.xpath('//div[@class="docTitleCls"]/p//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="xl_bt"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    if not pub_date:
        # List page carried no date — fall back to the on-page "成文日期" field.
        pub_date_info = ''.join(res.xpath('//td[contains(text(),"成") and contains(text(),"期")]/following::td[1]/text()|//li[contains(text(),"成") and contains(text(),"期")]/following::li[1]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    if not pub_date:
        raise Exception(f'pub_date not found: {provider_url}')
    # Metadata fields appear either in a <td>-based table or a <li>-based
    # list, so each xpath is a union of both layouts.
    pub_no = ''.join(res.xpath('//td[contains(text(),"文") and contains(text(),"号")]/following::td[1]/text()|//li[contains(text(),"文") and contains(text(),"号")]/following::li[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//td[contains(text(),"索") and contains(text(),"号")]/following::td[1]/text()|//li[contains(text(),"索") and contains(text(),"号")]/following::li[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//td[contains(text(),"信") and contains(text(),"类")]/following::td[1]/text()|//li[contains(text(),"信") and contains(text(),"类")]/following::li[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//td[contains(text(),"有") and contains(text(),"性")]/following::td[1]/text()|//span[contains(text(),"有效性")]/following::span[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//td[contains(text(),"成") and contains(text(),"期")]/following::td[1]/text()|//li[contains(text(),"成") and contains(text(),"期")]/following::li[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//td[contains(text(),"发") and contains(text(),"关")]/following::td[1]/text()|//li[contains(text(),"发") and contains(text(),"关")]/following::li[1]/text()').extract()).strip()
    if not organ:
        organ = ''.join(res.xpath('//span[@class="source"]//text()').extract()).strip()
    if organ.startswith('市'):
        # Qualify bare "市…" organ names with the city name.
        organ = '鄂尔多斯' + organ

    fulltext_xpath = '//div[@id="ContentRegion"]|//div[@id="xxcb_content"]|//div[@class="xl_zw"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99138'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'ORDOS'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'ordoscngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (or an empty dict) on the originating row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  内蒙古自治区呼伦贝尔市
def policy_hlbelist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Hulunbuir (呼伦贝尔市) government policy site.

    On the first page (page_index == 1) it reads the total page count from
    the markup and schedules the remaining list pages; on every page it
    extracts article links from the list table and queues them for the
    article stage (task_tag_next).

    Args:
        callmodel: platform callback model carrying the fetched HTML in
            para_dicts and the originating task row in sql_model.

    Returns:
        DealModel with follow-up list pages in befor_dicts and article
        tasks in next_dicts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the markup as pagecount="N".
        max_count = re.findall(r'pagecount="(\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out tasks for the remaining pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@id="xxgk_main"]//table/tbody/tr')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('td[2]/a[1]/@href').extract_first()
            if not href:
                # Row without a link (e.g. a header row) -- skip it.
                continue
            base_url = 'http://www.hlbe.gov.cn'
            url = base_url + href
            if 'htm' not in url:
                continue
            # rawid is the detail-page file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99139'
            article_json["url"] = url
            article_json["title"] = li.xpath('td[2]/a[1]/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('td[5]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_hlbearticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Hulunbuir; parsing happens in the ETL step."""
    return DealModel()


def policy_hlbearticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Hulunbuir (呼伦贝尔市) policy detail pages.

    Parses the downloaded article HTML into metadata for the
    ``policy_latest`` table and raw full text for the
    ``policy_fulltext_latest`` table, and writes attachment info (from
    get_file_info) back onto the source row as JSON in other_dicts.

    Raises:
        Exception: if the full-text container cannot be found in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    title = ''.join(res.xpath('//div[@class="text-center u-title"]//text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//b[contains(text(),"文号：")]/parent::td[1]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//b[contains(text(),"索引号：")]/parent::td[1]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//b[contains(text(),"主题分类：")]/parent::td[1]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//b[contains(text(),"成文日期：")]/parent::td[1]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//b[contains(text(),"发布机构：")]/parent::td[1]/following::td[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # The site abbreviates the issuing organ as "市..."; prefix the city.
        organ = '呼伦贝尔' + organ

    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99139'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'HLBE'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'hlbecngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Attachment info found inside the full-text area is stored on the
    # source row ("{}" when no attachments were detected).
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  内蒙古自治区巴彦淖尔市
def policy_bynrlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Bayannur (巴彦淖尔市) government policy site.

    On the first page (page_index == 0) it schedules the remaining list
    pages; on every page it extracts article links from the list and
    queues them for the article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page as "countPage = N".
        max_count = re.findall(r"countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # Pages are 0-based here (page 0 is the landing page), so the
            # follow-up pages run from 1 to total_page - 1.
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="wzgl_list yzgl_right_list"]/li|//ul[@class="djh_lbul"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href|div/a/@href').extract_first()
            if not href:
                # List item without a link -- skip it.
                continue
            base_url = f'https://www.bynr.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid is the detail-page file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99140'
            article_json["url"] = url
            article_json["title"] = ''.join(li.xpath('a//text()|div/a//text()').extract()).strip()
            article_json["pub_date"] = li.xpath('span/text()|div/span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_bynrarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Bayannur; parsing happens in the ETL step."""
    return DealModel()


def policy_bynrarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Bayannur (巴彦淖尔市) policy detail pages.

    Parses the downloaded article HTML into metadata for the
    ``policy_latest`` table and raw full text for the
    ``policy_fulltext_latest`` table, and writes attachment info (from
    get_file_info) back onto the source row as JSON in other_dicts.

    Raises:
        Exception: if the full-text container cannot be found in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    title = ''.join(res.xpath('//p[@class="mm_all_p"]//text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//li[contains(text(),"发文字号")]/following::li[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//li[contains(text(),"索 引 号")]/following::li[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//li[contains(text(),"信息分类")]/following::li[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath('//li[contains(text(),"成文日期")]/following::li[1]//text()').extract()).strip()
    legal_status = ''.join(res.xpath('//span[contains(text(),"有效性")]/following::span[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//li[contains(text(),"发文机关")]/following::li[1]//text()').extract()).strip()
    if organ.startswith('市'):
        # The site abbreviates the issuing organ as "市..."; prefix the city.
        organ = '巴彦淖尔' + organ

    fulltext_xpath = '//div[@class="mm_all_article"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99140'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'BYNR'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'bynrcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Attachment info found inside the full-text area is stored on the
    # source row ("{}" when no attachments were detected).
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  内蒙古自治区乌兰察布市
def policy_wulanchabulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Ulanqab (乌兰察布市); list pages are JSON.

    On the first page it fetches the site's pagination JSON to learn the
    total page count and schedules the remaining list pages; on every
    page it extracts article entries from the JSON payload.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        list_json = json.loads(callmodel.sql_model.list_json)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            headers = {
                "Accept": "*/*",
                "Accept-Encoding": "gzip, deflate, br",
                "Accept-Language": "zh-CN,zh;q=0.9",
                "Cache-Control": "no-cache",
                "Host": "www.wulanchabu.gov.cn",
                "Pragma": "no-cache",
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36",
                "X-Requested-With": "XMLHttpRequest",
            }
            url = f'https://www.wulanchabu.gov.cn/{list_json["page_info"]}/index.{callmodel.sql_model.list_rawid}.page.json'
            # timeout keeps a stalled pagination request from hanging the task.
            response = requests.get(url, headers=headers, timeout=60)
            total_page = json.loads(response.text)['pageTotal']
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = json.loads(para_dicts["data"]["1_1"]['html'])
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            # The document number doubles as the rawid and the file name.
            href = str(li['docno'])
            url = f'https://www.wulanchabu.gov.cn/{li["channelPath"]}/{href}.html'
            if not href:
                continue
            if 'htm' not in url:
                continue
            rawid = href
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99141'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li['publishTime']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_wulanchabulist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the static (HTML table) list pages of Ulanqab.

    On the first page (page_index == 0) it schedules the remaining list
    pages; on every page it extracts article rows from the list table
    and queues them for the article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count appears in the pager text as ">共N页".
        max_count = re.findall(r">共(\d+)页", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # Pages are 0-based (page 0 is the landing page), so the
            # follow-up pages run from 1 to total_page - 1.
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//table[@id="table1"]/tbody/tr')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('td[2]/a/@href').extract_first()
            if not href:
                # Guard BEFORE urljoin: urljoin on a None href would raise.
                continue
            base_url = 'https://www.wulanchabu.gov.cn/zcfg/index_1.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid is the detail-page file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99141'
            article_json["url"] = url
            article_json["title"] = ''.join(li.xpath('td[2]/a//text()').extract()).strip()
            article_json["pub_date"] = li.xpath('td[3]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_wulanchabuarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Ulanqab; parsing happens in the ETL step."""
    return DealModel()


def policy_wulanchabuarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Ulanqab (乌兰察布市) policy detail pages.

    Parses the downloaded article HTML into metadata for the
    ``policy_latest`` table and raw full text for the
    ``policy_fulltext_latest`` table; attachment info is collected both
    from the full-text area and from the download-button block.

    Raises:
        Exception: if the full-text container cannot be found in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)
    title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip().replace('\n', '').replace('\t', '')
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()

    pub_no = ''.join(res.xpath('//td[contains(text(),"文号：")]/following::td[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//td[contains(text(),"索引号：")]/following::td[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//td[contains(text(),"主题分类：")]/following::td[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath('//td[contains(text(),"成文日期：")]/following::td[1]//text()').extract()).strip()
    legal_status = ''.join(res.xpath('//td[contains(text(),"是否有效：")]/following::td[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//td[contains(text(),"发文机关：")]/following::td[1]//text()').extract()).strip()

    # The full text may live in any of these containers depending on the
    # page template.
    fulltext_xpath = '//div[@id="content"]|//div[@id="content3"]|//div[@id="content_txt"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99141'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'WULANCHABU'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'wulanchabucngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Attachments can appear both inside the full text and in a separate
    # download-button block; merge both lists before persisting.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, f'(//div[@class="btn_download"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  内蒙古自治区兴安盟
def policy_xamlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Hinggan League (兴安盟) government policy site.

    The site serves three different list layouts, selected here by
    substrings of list_rawid; each branch extracts article links for the
    article stage. On the first page (page_index == 1) the remaining
    list pages are scheduled.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the markup as totalpage="N".
        max_count = re.findall(r'totalpage="(\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        list_rawid = callmodel.sql_model.list_rawid
        if '3230752' in list_rawid:
            li_list = res.xpath('//ul[@class="third_list"]/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('span[1]/a/@href').extract_first()
                if not href:
                    continue
                base_url = 'http://www.xam.gov.cn/'
                url = base_url + href
                if 'htm' not in url:
                    continue
                # The detail URL ends .../<rawid>/<file>.htm.
                rawid = url.split('/')[-2]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99142'
                article_json["url"] = url
                article_json["title"] = ''.join(li.xpath('span[1]/a/text()').extract()).strip()
                article_json["pub_date"] = li.xpath('span[2]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        elif '4535463' in list_rawid or '4529286' in list_rawid:
            li_list = res.xpath('//div[@class="qgl_system_btn"]/div')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('div[2]/a/@href').extract_first()
                if not href:
                    continue
                base_url = 'http://www.xam.gov.cn/eportal/ui'
                url = base_url + href
                # rawid is the articleKey query parameter of the detail URL.
                rawid = re.findall(r'articleKey=(.*?)&', url)[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99142'
                article_json["url"] = url
                article_json["title"] = ''.join(li.xpath('div[2]/a/text()').extract()).strip()
                article_json["pub_date"] = li.xpath('div[4]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            li_list = res.xpath('//ul[@class="xxgkList"]/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                if not href:
                    continue
                base_url = 'http://www.xam.gov.cn'
                url = base_url + href
                if 'htm' not in url:
                    continue
                rawid = url.split('/')[-2]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99142'
                article_json["url"] = url
                article_json["title"] = ''.join(li.xpath('a/text()').extract()).strip()
                article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_xamarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Hinggan League; parsing happens in the ETL step."""
    return DealModel()


def policy_xamarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL step for Xing'an League (兴安盟, sub_db_id 99142) policy articles.

    Parses the downloaded detail-page HTML, extracts metadata (title, issue
    number, index number, subject, dates, legal status, issuing organ) and
    the full text, and fills ``result.save_data`` with rows for the
    ``policy_latest`` and ``policy_fulltext_latest`` tables. Attachment info
    found in the full text is written back to the source row's
    ``other_dicts`` column.

    Raises:
        Exception: when the full-text node cannot be found in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the title rendered on the detail page; fall back to the list title.
    title = ''.join(res.xpath('//h3[@class="detailTitle"]//text()|//h2[@class="detailTitle"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = ''.join(res.xpath('//td[contains(text(),"发文字号：")]/text()').extract()).strip()
    pub_no = pub_no.split('发文字号：')[-1].strip()
    index_no = ''.join(res.xpath('//td[contains(text(),"索引号：")]/text()').extract()).strip()
    index_no = index_no.split('索引号：')[-1].strip()
    subject = ''.join(res.xpath('//td[contains(text(),"信息分类：")]/text()').extract()).strip()
    subject = subject.split('信息分类：')[-1].strip()
    written_date = ''.join(res.xpath('//td[contains(text(),"成文日期：")]/text()').extract()).strip()
    written_date = written_date.split('成文日期：')[-1].strip()
    # The site encodes validity as "1" for in-force; anything else
    # (including a missing field) is treated as expired.
    legal_status = ''.join(res.xpath('//td[contains(text(),"有效性：")]/span/text()').extract()).strip()
    legal_status = '有效' if legal_status == '1' else '已失效'
    organ = ''.join(res.xpath('//td[contains(text(),"发文机构：")]/text()').extract()).strip()
    organ = organ.split('发文机构：')[-1].strip()
    # Organs given as "盟..." are league-level; prefix the full region name.
    if organ.startswith('盟'):
        organ = '内蒙古兴安' + organ

    fulltext_xpath = '//div[contains(@class,"detailContent")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Was a bare `raise Exception` -- include context for the task log.
        raise Exception(f"fulltext not found: {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99142'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'XAM'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'xamcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Attachment links found inside the full text go onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  内蒙古自治区锡林郭勒盟
def policy_xlgllist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Xilingol League (锡林郭勒盟) policy documents.

    On the first page, schedules the remaining list pages for download;
    always extracts the article links on the current page and queues them
    for the article (next) stage with sub_db_id 99143.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count from the pager text, e.g. ">共12页"; default 1.
        max_count = re.findall(r">共(\d+)页", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="mulu_box"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.xlgl.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid is the article filename stem, e.g. ".../abc.html" -> "abc".
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99143'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()|a/span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_xlglarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article download step for Xilingol League: no post-processing needed, return an empty DealModel."""
    return DealModel()


def policy_xlglarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL step for Xilingol League (锡林郭勒盟, sub_db_id 99143) policy articles.

    Parses the downloaded detail-page HTML, extracts metadata and the full
    text, and fills ``result.save_data`` with rows for the ``policy_latest``
    and ``policy_fulltext_latest`` tables. Attachment info found in the full
    text is written back to the source row's ``other_dicts`` column.

    Raises:
        Exception: when the full-text node cannot be found in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the title rendered on the detail page; fall back to the list title.
    title = ''.join(res.xpath('//div[@id="pdfDom"]//h1/text()|//div[@class="pt_title"]//text()|//div[@class="MuLuBotx"]/h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Publication-date fallbacks: 公开日期 field, then a 更新日期/发布日期 line.
    if not pub_date:
        pub_date = ''.join(res.xpath('//td[contains(text(),"公开日期：")]/text()').extract()).strip()
        pub_date = pub_date.split('公开日期：')[-1].strip()
        pub_year = pub_date[:4]
    if not pub_date:
        pub_date = ''.join(res.xpath('//div[@class="MuLuText"]/text()').extract()).strip()
        if '更新日期：' in pub_date:
            pub_date = pub_date.split('更新日期：')[-1].strip()
            pub_year = pub_date[:4]
        else:
            pub_date = pub_date.split('发布日期：')[-1].strip()
            pub_year = pub_date[:4]

    pub_no = ''.join(res.xpath('//td[contains(text(),"发文字号：")]//text()').extract()).strip()
    pub_no = pub_no.split('发文字号：')[-1].strip()
    index_no = ''.join(res.xpath('//td[contains(text(),"索引号：")]//text()').extract()).strip()
    index_no = index_no.split('索引号：')[-1].strip()
    subject = ''.join(res.xpath('//td[contains(text(),"信息分类：")]//text()').extract()).strip()
    subject = subject.split('信息分类：')[-1].strip()
    written_date = ''.join(res.xpath('//td[contains(text(),"成文日期：")]//text()').extract()).strip()
    written_date = written_date.split('成文日期：')[-1].strip()
    legal_status = ''.join(res.xpath('//td[contains(text(),"有效性：")]//text()').extract()).strip()
    # The validity cell contains a script call like comp("YYYYMMDD..."); the
    # document counts as expired once that date precedes the pub date.
    # NOTE(review): this is a lexical string compare -- only correct when
    # both sides share the same YYYYMMDD layout; confirm pub_date's format.
    date_info = re.findall(r'comp\("(.*?)"', legal_status)
    legal_status = '失效' if date_info and date_info[0][:8] < pub_date else '有效'
    organ = ''.join(res.xpath('//td[contains(text(),"发文机构：")]//text()').extract()).strip()
    organ = organ.split('发文机构：')[-1].strip()
    # Organs given as "盟..." are league-level; prefix the full region name.
    if organ.startswith('盟'):
        organ = '内蒙古锡林郭勒' + organ

    fulltext_xpath = '//div[@class="Custom_UnionStyle"]|//div[@id="Zoom"]|//div[@class="pt_content"]|//div[@id="para"]|//div[@class="MuLuBotContent"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Was a bare `raise Exception` -- include context for the task log.
        raise Exception(f"fulltext not found: {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99143'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'XLGL'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'xlglcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Attachment links found inside the full text go onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  内蒙古自治区阿拉善盟
def policy_alslist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Alxa League (阿拉善盟) policy documents.

    On the first page, schedules the remaining list pages for download;
    always extracts the article rows from the results table and queues them
    for the article (next) stage with sub_db_id 99144.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count from the pager markup, e.g. ";共12页&"; default 1.
        max_count = re.findall(r";共(\d+)页&", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Skip the header row of the results table.
        li_list = res.xpath('//table[@class="tb_title tb_search"]/parent::form[1]/following::table[1]/tr')[1:]
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('td[1]/a/@href').extract_first()
            # NOTE(review): no base URL is prepended here, so list hrefs are
            # assumed to be absolute already -- confirm against the live site.
            url = href
            if 'htm' not in url:
                continue
            # rawid is the article filename stem, e.g. ".../abc.html" -> "abc".
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99144'
            article_json["url"] = url
            article_json["title"] = li.xpath('td[1]/a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('td[3]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_alsarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article download step for Alxa League: no post-processing needed, return an empty DealModel."""
    return DealModel()


def policy_alsarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL step for Alxa League (阿拉善盟, sub_db_id 99144) policy articles.

    Parses the downloaded detail-page HTML, extracts metadata and the full
    text, and fills ``result.save_data`` with rows for the ``policy_latest``
    and ``policy_fulltext_latest`` tables. Attachment info found in the full
    text is written back to the source row's ``other_dicts`` column.

    Raises:
        Exception: when the full-text node cannot be found in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title fallbacks: page heading, then meta tag, then the list title.
    title = ''.join(res.xpath('//div[@class="main"]/h1/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata lives in a label/value table: read the cell after each label.
    pub_no = ''.join(res.xpath('//div[@class="main"]//td[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="main"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="main"]//td[contains(text(),"组配分类")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="main"]//td[contains(text(),"成文时间")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="main"]//td[contains(text(),"公文时效")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="main"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    # Organs given as "盟..." are league-level; prefix the full region name.
    if organ.startswith('盟'):
        organ = '内蒙古阿拉善' + organ

    fulltext_xpath = '//div[contains(@class,"zoomCon")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Was a bare `raise Exception` -- include context for the task log.
        raise Exception(f"fulltext not found: {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99144'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'ALS'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'alscngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Attachment links found inside the full text go onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  北京市发展和改革委员会
def policy_fgwbeijinglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Beijing Development and Reform Commission.

    On the first page (page_index 0 -- this site's paging is zero-based),
    schedules the remaining list pages; always queues the article links on
    the current page for the article (next) stage with sub_db_id 99145.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count from the pager script, e.g. "countPage = 12".
        max_count = re.findall(r"countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Zero-based paging: fan out pages 1 .. total_page-1.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://fgw.beijing.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid is the article filename stem, e.g. ".../abc.html" -> "abc".
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99145'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_fgwbeijingarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article download step for the Beijing DRC: no post-processing needed, return an empty DealModel."""
    return DealModel()


def policy_fgwbeijingarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL step for Beijing DRC (sub_db_id 99145) policy articles.

    Parses the downloaded detail-page HTML, extracts metadata and the full
    text, and fills ``result.save_data`` with rows for the ``policy_latest``
    and ``policy_fulltext_latest`` tables. Attachment info is collected from
    the full text, the appendix div, and an appendix fragment embedded in a
    script, then written back to the source row's ``other_dicts`` column.

    Raises:
        Exception: when the full-text node cannot be found in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the title rendered on the detail page; fall back to the list title.
    title = ''.join(res.xpath('//div[@class="xl_title"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata fields are "[label] value" list items; empty values render
    # as "-", which is stripped here.
    pub_no = ''.join(res.xpath('//li[contains(text(),"[发文字号]")]//span//text()').extract()).strip().replace('-', '')
    subject = ''.join(res.xpath('//li[contains(text(),"[主题分类]")]//span//text()').extract()).strip().replace('-', '')
    written_date = ''.join(res.xpath('//li[contains(text(),"[成文日期]")]//span//text()').extract()).strip().replace('-', '')
    impl_date = ''.join(res.xpath('//li[contains(text(),"[实施日期]")]//span//text()').extract()).strip().replace('-', '')
    invalid_date = ''.join(res.xpath('//li[contains(text(),"[废止日期]")]//span//text()').extract()).strip().replace('-', '')
    legal_status = ''.join(res.xpath('//li[contains(text(),"[效力]")]//span//text()').extract()).strip().replace('-', '')
    organ = ''.join(res.xpath('//li[contains(text(),"[制定机关]")]//span//text()').extract()).strip().replace('-', '')
    # Organs given as "市..." are municipal-level; prefix the city name.
    if organ.startswith('市'):
        organ = '北京' + organ

    fulltext_xpath = '//div[@class="xl_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Was a bare `raise Exception` -- include context for the task log.
        raise Exception(f"fulltext not found: {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99145'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'FGWBEIJING'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'fgwbeijingcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)
    data['impl_date'] = clean_pubdate(impl_date)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Attachments can appear in three places: the full text, the rendered
    # appendix div, and an appendix fragment embedded in a script string.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, f'(//div[@id="fj_appendix3"])')
    file_str = ''.join(re.findall('<div class="xl_appendix" id="fj_appendix3">.*?>\'', html))
    file_res = Selector(text=file_str)
    file_info3 = get_file_info(data, file_res, f'(//body)')
    file_info = file_info1 + file_info2 + file_info3
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  北京市经济和信息化局
def policy_jxjbeijinglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall("Pager\(\{size:(\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index+1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        list_rawid = callmodel.sql_model.list_rawid
        # if 'xxgkml' in list_rawid:
        li_list = res.xpath('//ul[@class="list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://jxj.beijing.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            # url = base_url + href
            if 'htm' not in url:
                continue
            rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99146'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().replace(']', '').replace('[', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jxjbeijingarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article download step for the Beijing BEIT: no post-processing needed, return an empty DealModel."""
    return DealModel()


def policy_jxjbeijingarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for 北京市教委 (jxj.beijing.gov.cn) article pages.

    Parses the downloaded article HTML, builds one metadata record for the
    ``policy_latest`` table and one fulltext record for
    ``policy_fulltext_latest``, and queues an update writing attachment info
    back onto the source row.

    :param callmodel: callback model carrying the downloaded page in
        ``para_dicts['data']['1_1']['html']`` and the article row in
        ``sql_model``.
    :returns: populated :class:`EtlDealModel`.
    :raises Exception: when the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the title printed on the page; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="title"]/h1/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata fields; a bare '-' on the page means "not provided".
    pub_no = ''.join(res.xpath('//li[contains(text(),"[发文字号]")]//span//text()').extract()).strip().replace('-', '')
    subject = ''.join(res.xpath('//li[contains(text(),"[主题分类]")]//span//text()').extract()).strip().replace('-', '')
    written_date = ''.join(res.xpath('//li[contains(text(),"[成文日期]")]//span//text()').extract()).strip().replace('-', '')
    impl_date = ''.join(res.xpath('//li[contains(text(),"[实施日期]")]//span//text()').extract()).strip().replace('-', '')
    invalid_date = ''.join(res.xpath('//li[contains(text(),"[废止日期]")]//span//text()').extract()).strip().replace('-', '')
    legal_status = ''.join(res.xpath('//li[contains(text(),"[有效性]")]//span//text()').extract()).strip().replace('-', '')
    organ = ''.join(res.xpath('//li[contains(text(),"[发文机构]")]//span//text()').extract()).strip().replace('-', '')
    if organ.startswith('市'):
        # Municipal organs are listed without the city prefix on this site.
        organ = '北京' + organ

    fulltext_xpath = '//div[@class="content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the page is retried/inspected rather than saving an
        # empty fulltext record.
        raise Exception(f'fulltext not found for {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99146'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)

    # policy_latest record: fixed provider/product fields first, then the
    # values extracted above.
    data = dict()
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'JXJBEIJING'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'jxjbeijingcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)
    data['impl_date'] = clean_pubdate(impl_date)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    # policy_fulltext_latest record: raw HTML fulltext stored alongside ids.
    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Attachments may live both inside the body and in a dedicated
    # <ul class="fujian"> list; collect from both.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, '(//ul[@class="fujian"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  北京市科学技术委员会
def policy_kwbeijinglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for 北京市科学技术委员会 (kw.beijing.gov.cn).

    On page 0 it reads the total page count from the embedded
    ``Pager({size:N`` script and fans out one list task per remaining page.
    For every page it extracts article entries from the page's JS arrays
    (``urls[i]``/``headers[i]``/``year[i]``/``month[i]``/``day[i]``) and
    inserts one article task per entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total number of pages; default to a single page when absent.
        max_count = re.findall(r"Pager\(\{size:(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only the first page schedules the remaining pages (0-indexed).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        # Entries are rendered client-side; scrape the JS array assignments.
        li_list = re.findall(r'urls\[i\]=.*?i\+\+;', para_dicts["data"]["1_1"]['html'])
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = re.findall(r"urls\[i\]='(.*?)'", li)[0]
            base_url = 'http://kw.beijing.gov.cn'
            # Links may be absolute or site-relative.
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue
            # rawid is the article filename without its .htm(l) extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99147'
            article_json["url"] = url
            article_json["title"] = re.findall(r'headers\[i\]="(.*?)"', li)[0].strip()
            year = re.findall(r"year\[i\]='(.*?)'", li)[0].strip()
            month = re.findall(r"month\[i\]='(.*?)'", li)[0].strip()
            day = re.findall(r"day\[i\]='(.*?)'", li)[0].strip()
            article_json["pub_date"] = f"{year}{month}{day}"
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_kwbeijingarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article callback for kw.beijing.gov.cn; a no-op — parsing happens in the ETL step."""
    return DealModel()


def policy_kwbeijingarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for 北京市科学技术委员会 (kw.beijing.gov.cn) article pages.

    Parses the downloaded article HTML, builds one metadata record for the
    ``policy_latest`` table and one fulltext record for
    ``policy_fulltext_latest``, and queues an update writing attachment info
    back onto the source row.

    :param callmodel: callback model carrying the downloaded page in
        ``para_dicts['data']['1_1']['html']`` and the article row in
        ``sql_model``.
    :returns: populated :class:`EtlDealModel`.
    :raises Exception: when the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the title printed on the page; fall back to the list-page title.
    title = ''.join(res.xpath('//td[@class="title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata fields; a bare '-' on the page means "not provided".
    pub_no = ''.join(res.xpath('//li[contains(text(),"[发文字号]")]//span//text()').extract()).strip().replace('-', '')
    subject = ''.join(res.xpath('//li[contains(text(),"[主题分类]")]//span//text()').extract()).strip().replace('-', '')
    written_date = ''.join(res.xpath('//li[contains(text(),"[成文日期]")]//span//text()').extract()).strip().replace('-', '')
    impl_date = ''.join(res.xpath('//li[contains(text(),"[实施日期]")]//span//text()').extract()).strip().replace('-', '')
    invalid_date = ''.join(res.xpath('//li[contains(text(),"[废止日期]")]//span//text()').extract()).strip().replace('-', '')
    legal_status = ''.join(res.xpath('//li[contains(text(),"[有效性]")]//span//text()').extract()).strip().replace('-', '')
    organ = ''.join(res.xpath('//li[contains(text(),"[发文机构]")]//span//text()').extract()).strip().replace('-', '')
    if organ.startswith('市'):
        # Municipal organs are listed without the city prefix on this site.
        organ = '北京' + organ

    fulltext_xpath = '//td[@class="bt_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the page is retried/inspected rather than saving an
        # empty fulltext record.
        raise Exception(f'fulltext not found for {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99147'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)

    # policy_latest record: fixed provider/product fields first, then the
    # values extracted above.
    data = dict()
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'KWBEIJING'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'kwbeijingcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)
    data['impl_date'] = clean_pubdate(impl_date)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    # policy_fulltext_latest record: raw HTML fulltext stored alongside ids.
    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Attachment info is collected from the fulltext body only on this site.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  北京市教育委员会
def policy_jwbeijinglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for 北京市教育委员会 (jw.beijing.gov.cn).

    On page 0 it reads the total page count from the embedded
    ``countPage = N`` script and fans out one list task per remaining page.
    For every page it extracts article links from the announcement <ul> and
    inserts one article task per entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total number of pages; default to a single page when absent.
        max_count = re.findall(r"countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only the first page schedules the remaining pages (0-indexed).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Two list layouts are in use across channels.
        li_list = res.xpath('//div[@class="announce_list a-hov-c"]/ul/li|//div[@class="zg z-mylist"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://jw.beijing.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid is the article filename without its .htm(l) extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99148'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()|em/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jwbeijingarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article callback for jw.beijing.gov.cn; a no-op — parsing happens in the ETL step."""
    return DealModel()


def policy_jwbeijingarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for 北京市教育委员会 (jw.beijing.gov.cn) article pages.

    Parses the downloaded article HTML, builds one metadata record for the
    ``policy_latest`` table and one fulltext record for
    ``policy_fulltext_latest``, and queues an update writing attachment info
    back onto the source row.

    :param callmodel: callback model carrying the downloaded page in
        ``para_dicts['data']['1_1']['html']`` and the article row in
        ``sql_model``.
    :returns: populated :class:`EtlDealModel`.
    :raises Exception: when the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the title printed on the page; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="tit_header"]/h3/text()|//div[@class="announce_content"]/h3/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata labels live in <span> elements inside div.detail; the value is
    # in the first following <em>. Labels are matched by two characters
    # because the site pads them with variable whitespace.
    pub_no = ''.join(res.xpath('//div[@class="detail"]//span[contains(text(),"文") and contains(text(),"号")]/following::em[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="detail"]//span[contains(text(),"索") and contains(text(),"号")]/following::em[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="detail"]//span[contains(text(),"题") and contains(text(),"类")]/following::em[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="detail"]//span[contains(text(),"生") and contains(text(),"期")]/following::em[1]//text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="detail"]//span[contains(text(),"效") and contains(text(),"性")]/following::em[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="detail"]//span[contains(text(),"发") and contains(text(),"构")]/following::em[1]//text()').extract()).strip()
    if organ.startswith('市'):
        # Municipal organs are listed without the city prefix on this site.
        organ = '北京' + organ

    fulltext_xpath = '//div[contains(@class,"mycontont")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the page is retried/inspected rather than saving an
        # empty fulltext record.
        raise Exception(f'fulltext not found for {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99148'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)

    # policy_latest record: fixed provider/product fields first, then the
    # values extracted above.
    data = dict()
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'JWBEIJING'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'jwbeijingcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    # policy_fulltext_latest record: raw HTML fulltext stored alongside ids.
    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Attachment info is collected from the fulltext body only on this site.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  北京市民政局
def policy_mzjbeijinglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for 北京市民政局 (mzj.beijing.gov.cn).

    On page 0 it reads the total page count from the embedded
    ``Pager({size:N`` script and fans out one list task per remaining page.
    For every page it extracts article entries from the page's JS arrays
    (``urls[i]``/``headers[i]``/``year[i]``/``month[i]``/``day[i]``) and
    inserts one article task per entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total number of pages; default to a single page when absent.
        max_count = re.findall(r"Pager\(\{size:(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only the first page schedules the remaining pages (0-indexed).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        # Entries are rendered client-side; scrape the JS array assignments
        # (re.S because each assignment block spans multiple lines here).
        li_list = re.findall(r'urls\[i\] =.*?i\+\+;', para_dicts["data"]["1_1"]['html'], re.S)
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = re.findall(r"urls\[i\] = '(.*?)'", li)[0]
            base_url = 'http://mzj.beijing.gov.cn'
            url = base_url + href
            if 'htm' not in url:
                continue
            # rawid is the article filename without its .htm(l) extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99149'
            article_json["url"] = url
            article_json["title"] = re.findall(r'headers\[i\] = "(.*?)"', li)[0].strip()
            year = re.findall(r"year\[i\] = '(.*?)'", li)[0].strip()
            month = re.findall(r"month\[i\] = '(.*?)'", li)[0].strip()
            day = re.findall(r"day\[i\] = '(.*?)'", li)[0].strip()
            article_json["pub_date"] = f"{year}{month}{day}"
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_mzjbeijingarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article callback for mzj.beijing.gov.cn; a no-op — parsing happens in the ETL step."""
    return DealModel()


def policy_mzjbeijingarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for 北京市民政局 (mzj.beijing.gov.cn) article pages.

    Parses the downloaded article HTML, builds one metadata record for the
    ``policy_latest`` table and one fulltext record for
    ``policy_fulltext_latest``, and queues an update writing attachment info
    back onto the source row.

    :param callmodel: callback model carrying the downloaded page in
        ``para_dicts['data']['1_1']['html']`` and the article row in
        ``sql_model``.
    :returns: populated :class:`EtlDealModel`.
    :raises Exception: when the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the title printed on the page; fall back to the list-page title.
    title = ''.join(res.xpath('//td[@class="title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata fields; a bare '-' on the page means "not provided".
    pub_no = ''.join(res.xpath('//li[contains(text(),"[发文字号]")]//span//text()').extract()).strip().replace('-', '')
    subject = ''.join(res.xpath('//li[contains(text(),"[主题分类]")]//span//text()').extract()).strip().replace('-', '')
    written_date = ''.join(res.xpath('//li[contains(text(),"[成文日期]")]//span//text()').extract()).strip().replace('-', '')
    impl_date = ''.join(res.xpath('//li[contains(text(),"[实施日期]")]//span//text()').extract()).strip().replace('-', '')
    invalid_date = ''.join(res.xpath('//li[contains(text(),"[废止日期]")]//span//text()').extract()).strip().replace('-', '')
    legal_status = ''.join(res.xpath('//li[contains(text(),"[有效性]")]//span//text()').extract()).strip().replace('-', '')
    organ = ''.join(res.xpath('//li[contains(text(),"[发文机构]")]//span//text()').extract()).strip().replace('-', '')
    if organ.startswith('市'):
        # Municipal organs are listed without the city prefix on this site.
        organ = '北京' + organ

    fulltext_xpath = '//td[@class="bt_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the page is retried/inspected rather than saving an
        # empty fulltext record.
        raise Exception(f'fulltext not found for {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99149'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)

    # policy_latest record: fixed provider/product fields first, then the
    # values extracted above.
    data = dict()
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'MZJBEIJING'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'mzjbeijingcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)
    data['impl_date'] = clean_pubdate(impl_date)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    # policy_fulltext_latest record: raw HTML fulltext stored alongside ids.
    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Attachment info is collected from the fulltext body only on this site.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  北京市财政局
def policy_czjbeijinglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for 北京市财政局 (czj.beijing.gov.cn).

    On page 1 it reads the total page count from the embedded
    ``pageCount = N`` script and fans out one list task per remaining page
    (this channel is 1-indexed; the page_info suffix stays 0-indexed).
    For every page it extracts article links from the list <ul> and inserts
    one article task per entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total number of pages; default to a single page when absent.
        max_count = re.findall(r"pageCount = (\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining pages (1-indexed).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # The URL page suffix is zero-based, hence page - 1.
                dic = {"page_info": f"{list_json['page_info']}_{page-1}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="ul-back"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://czj.beijing.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid is the article filename without its .htm(l) extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99150'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()|a/font/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()|a/span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_czjbeijinglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the 北京市财政局 regulation search site
    (fwxt.czj.beijing.gov.cn).

    On page 1 it reads the total page count from the " 第x / N 页" pager text
    and fans out one list task per remaining page, reusing the original
    ``list_json`` (pagination is carried by ``page_index`` here).
    For every page it extracts ``law_id`` article links from the result table
    and inserts one article task per entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total number of pages; default to a single page when absent.
        max_count = re.findall(r" 第.*?/ (\d+) 页", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining pages (1-indexed).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Skip the header row of the result table.
        li_list = res.xpath('//table[@class="entrytb"]/tr')[1:]
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href_info = li.xpath('td[2]//a/@href').extract_first()
            if not href_info:
                continue
            # The href is a JS call like open('/...?law_id=N'); take its arg.
            href = re.findall(r"\('(.*?)'\)", href_info)[0]
            base_url = 'http://fwxt.czj.beijing.gov.cn'
            url = base_url + href
            if 'law_id' not in url:
                continue
            rawid = url.split('law_id=')[-1]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99150'
            article_json["url"] = url
            article_json["title"] = li.xpath('td[2]//a/text()|td[2]//a/font/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('td[4]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_czjbeijingarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for czj.beijing.gov.cn; no follow-up scheduling is needed."""
    return DealModel()


def policy_czjbeijingarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Beijing Municipal Finance Bureau (czj.beijing.gov.cn) articles.

    Parses metadata and full text out of the downloaded HTML, queues rows for the
    'policy_latest' and 'policy_fulltext_latest' tables in result.save_data, and
    writes attachment info back onto the crawl row via a befor_dicts update.

    Raises:
        Exception: when the fulltext container cannot be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title preference: on-page header -> ArticleTitle meta tag -> list-page title.
    title = ''.join(res.xpath('//span[@id="lTitle"]//text()|//div[@class="header"]/h1//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//td[contains(text(),"【文　　号】")]/following::td[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//td[contains(text(),"【法规类别】")]/following::td[1]//text()').extract()).strip()
    legal_status = ''.join(res.xpath('//td[contains(text(),"【时 效 性】")]/following::td[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//td[contains(text(),"【颁布单位】")]/following::td[1]//text()').extract()).strip()
    # Municipal organs are listed without the city name on this site; prefix it.
    if organ.startswith('市'):
        organ = '北京' + organ

    fulltext_xpath = '//div[@id="mainText"]|//div[@id="lContent"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context instead of a message-less exception.
        raise Exception(f"fulltext not found: {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99150'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'CZJBEIJING'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'czjbeijingcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['subject'] = subject
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (if any) on the crawl row so downstream jobs can fetch files.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  北京市人力资源和社会保障局
def policy_rsjbeijinglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Beijing HR & Social Security Bureau (rsj.beijing.gov.cn).

    On the first page (page_index == 0) schedules the remaining list pages, then
    queues one article-stage task per list entry found on the current page.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page's JS; default to 1 when absent.
        max_count = re.findall(r"countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Pages are 0-based on this site, so the fan-out covers 1..total_page-1.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        list_rawid = callmodel.sql_model.list_rawid
        li_list = res.xpath('//ul[@class="list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://rsj.beijing.gov.cn/{list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid is the article filename without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99151'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()|a/span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_rsjbeijingarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for rsj.beijing.gov.cn; no follow-up scheduling is needed."""
    return DealModel()


def policy_rsjbeijingarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Beijing HR & Social Security Bureau (rsj.beijing.gov.cn) articles.

    Parses metadata and full text out of the downloaded HTML, queues rows for the
    'policy_latest' and 'policy_fulltext_latest' tables in result.save_data, and
    writes attachment info back onto the crawl row via a befor_dicts update.

    Raises:
        Exception: when the fulltext container cannot be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title preference: on-page header -> ArticleTitle meta tag -> list-page title.
    title = ''.join(res.xpath('//div[@class="header"]/h1//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = ''.join(res.xpath('//li[contains(text(),"[发文字号]")]//span//text()').extract()).strip()
    subject = ''.join(res.xpath('//li[contains(text(),"[主题分类]")]//span//text()').extract()).strip()
    written_date = ''.join(res.xpath('//li[contains(text(),"[成文日期]")]//span//text()').extract()).strip()
    impl_date = ''.join(res.xpath('//li[contains(text(),"[实施日期]")]//span//text()').extract()).strip()
    invalid_date = ''.join(res.xpath('//li[contains(text(),"[废止日期]")]//span//text()').extract()).strip()
    legal_status = ''.join(res.xpath('//li[contains(text(),"[有效性]")]//span//text()').extract()).strip()
    organ = ''.join(res.xpath('//li[contains(text(),"[发文机构]")]//span//text()').extract()).strip()
    # Municipal organs are listed without the city name on this site; prefix it.
    if organ.startswith('市'):
        organ = '北京' + organ

    fulltext_xpath = '//div[@id="mainTextZoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context instead of a message-less exception.
        raise Exception(f"fulltext not found: {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99151'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'RSJBEIJING'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'rsjbeijingcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)
    data['impl_date'] = clean_pubdate(impl_date)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (if any) on the crawl row so downstream jobs can fetch files.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  北京市农村农业局
def policy_nyncjbeijinglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Beijing Bureau of Agriculture and Rural Affairs (nyncj.beijing.gov.cn).

    On the first page (page_index == 1) schedules the remaining list pages, then
    queues one article-stage task per list entry found on the current page.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count comes from the totalpage attribute; default to 1 when absent.
        max_count = re.findall(r'totalpage="(\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Pages are 1-based on this site, so the fan-out covers 2..total_page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # Parsed only to validate that list_json is well-formed JSON.
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="mk_con"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = 'http://nyncj.beijing.gov.cn'
            url = base_url + href
            if 'htm' not in url:
                continue
            # rawid is the article's directory name in the URL.
            rawid = url.split('/')[-2]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99152'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_nyncjbeijingarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for nyncj.beijing.gov.cn; no follow-up scheduling is needed."""
    return DealModel()


def policy_nyncjbeijingarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Beijing Bureau of Agriculture and Rural Affairs (nyncj.beijing.gov.cn) articles.

    Parses metadata and full text out of the downloaded HTML, queues rows for the
    'policy_latest' and 'policy_fulltext_latest' tables in result.save_data, and
    writes attachment info back onto the crawl row via a befor_dicts update.

    Raises:
        Exception: when the fulltext container cannot be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title preference: on-page header -> list-page title.
    title = ''.join(res.xpath('//div[@class="easysite-news-title"]/h2//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata fields use '-' as a placeholder for "not available"; strip it out.
    pub_no = ''.join(res.xpath('//span[contains(text(),"[发文字号]")]/parent::li/div//text()').extract()).strip().replace('-', '')
    subject = ''.join(res.xpath('//span[contains(text(),"[主题分类]")]/parent::li/div//text()').extract()).strip().replace('-', '')
    written_date = ''.join(res.xpath('//span[contains(text(),"[成文日期]")]/parent::li/div//text()').extract()).strip().replace('-', '')
    impl_date = ''.join(res.xpath('//span[contains(text(),"[实施日期]")]/parent::li/div//text()').extract()).strip().replace('-', '')
    invalid_date = ''.join(res.xpath('//span[contains(text(),"[废止日期]")]/parent::li/div//text()').extract()).strip().replace('-', '')
    legal_status = ''.join(res.xpath('//span[contains(text(),"[有效性]")]/parent::li/div//text()').extract()).strip().replace('-', '')
    organ1 = ''.join(res.xpath('//span[contains(text(),"[发文机构]")]/parent::li/div//text()').extract()).strip().replace('-', '')
    organ2 = ''.join(res.xpath('//span[contains(text(),"[联合发文单位]")]/parent::li/div//text()').extract()).strip().replace('-', '')
    # Join the issuing organ with any co-issuing unit; fall back to the page's source line.
    organ = f"{organ1};{organ2}" if organ1 and organ2 else organ1
    if not organ:
        organ = ''.join(res.xpath('//span[contains(text(),"来源：")]/font/text()').extract()).strip()
    # Municipal organs are listed without the city name on this site; prefix it.
    if organ.startswith('市'):
        organ = '北京' + organ

    fulltext_xpath = '//div[@id="easysiteText"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context instead of a message-less exception.
        raise Exception(f"fulltext not found: {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99152'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'NYNCJBEIJING'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'nyncjbeijingcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)
    data['impl_date'] = clean_pubdate(impl_date)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (if any) on the crawl row so downstream jobs can fetch files.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  北京市住房和城乡建设委员会
def policy_zjwbeijinglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Beijing Commission of Housing and Urban-Rural Development (zjw.beijing.gov.cn).

    On the first page (page_index == 1) schedules the remaining list pages, then
    queues one article-stage task per list entry found on the current page.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count comes from the totalpage attribute; default to 1 when absent.
        max_count = re.findall(r'totalpage="(\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Pages are 1-based on this site, so the fan-out covers 2..total_page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # Parsed only to validate that list_json is well-formed JSON.
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="easysite-list-modelone"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('p/a/@href').extract_first()
            if not href:
                continue
            base_url = 'http://zjw.beijing.gov.cn'
            url = base_url + href
            if 'htm' not in url:
                continue
            # rawid is the article's directory name in the URL.
            rawid = url.split('/')[-2]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99153'
            article_json["url"] = url
            article_json["title"] = li.xpath('p/a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('p/span[@class="article-time"]/text()|p/a/span[@class="article-time"]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_zjwbeijingarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for zjw.beijing.gov.cn; no follow-up scheduling is needed."""
    return DealModel()


def policy_zjwbeijingarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Beijing Commission of Housing and Urban-Rural Development (zjw.beijing.gov.cn) articles.

    Parses metadata and full text out of the downloaded HTML, queues rows for the
    'policy_latest' and 'policy_fulltext_latest' tables in result.save_data, and
    writes attachment info back onto the crawl row via a befor_dicts update.

    Raises:
        Exception: when the fulltext container cannot be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # List-page titles may be truncated with '...': recover the full one from the meta tag.
    if '...' in title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata fields use '-' as a placeholder for "not available"; strip it out.
    pub_no = ''.join(res.xpath('//li[contains(text(),"发文字号")]/span//text()').extract()).strip().replace('-', '')
    index_no = ''.join(res.xpath('//li[contains(text(),"索 引 号")]/span//text()').extract()).strip().replace('-', '')
    subject = ''.join(res.xpath('//li[contains(text(),"主题分类")]/span//text()').extract()).strip().replace('-', '')
    written_date = ''.join(res.xpath('//li[contains(text(),"印发时间")]/span//text()').extract()).strip().replace('-', '')
    legal_status = ''.join(res.xpath('//li[contains(text(),"有 效 性")]/span//text()').extract()).strip().replace('-', '')
    organ = ''.join(res.xpath('//li[contains(text(),"发文机关")]/span//text()').extract()).strip().replace('-', '')
    # Municipal organs are listed without the city name on this site; prefix it.
    if organ.startswith('市'):
        organ = '北京' + organ

    fulltext_xpath = '//div[@id="Zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context instead of a message-less exception.
        raise Exception(f"fulltext not found: {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99153'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'ZJWBEIJING'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'zjwbeijingcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (if any) on the crawl row so downstream jobs can fetch files.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  北京市卫生健康委员会
def policy_wjwbeijinglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Beijing Municipal Health Commission (wjw.beijing.gov.cn).

    On the first page (page_index == 0) schedules the remaining list pages, then
    queues one article-stage task per list entry found on the current page.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page's JS; default to 1 when absent.
        max_count = re.findall(r'{size:(\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Pages are 0-based on this site, so the fan-out covers 1..total_page-1.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        list_rawid = callmodel.sql_model.list_rawid
        li_list = res.xpath('//div[@class="weinei_left_con"]/div')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('div[2]/a/@href').extract_first()
            base_url = f'http://wjw.beijing.gov.cn/{list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid is the article filename without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99154'
            article_json["url"] = url
            article_json["title"] = li.xpath('div[2]/a/@title').extract_first().strip()
            article_json["pub_date"] = li.xpath('div[3]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_wjwbeijingarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for wjw.beijing.gov.cn; no follow-up scheduling is needed."""
    return DealModel()


def policy_wjwbeijingarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Beijing Municipal Health Commission (wjw.beijing.gov.cn) policy articles.

    Parses the downloaded article HTML, emits one metadata row for the
    ``policy_latest`` table and one fulltext row for ``policy_fulltext_latest``
    (sub_db_id 99154), and writes any attachment info found inside the
    fulltext area back onto the crawl row via ``other_dicts``.

    Raises:
        Exception: if the fulltext container cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the on-page title; fall back to the one captured on the list page.
    title = ''.join(res.xpath('//div[@class="easysite-news-title"]/h2//text()|//div[@class="weinei_left_tit_sanji"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # '-' is the site's placeholder for "empty" (and the separator in dashed
    # dates), so it is stripped from every metadata field.
    pub_no = ''.join(res.xpath('//li[contains(text(),"[发文字号]")]/span//text()').extract()).strip().replace('-', '')
    subject = ''.join(res.xpath('//li[contains(text(),"[主题分类]")]/span//text()').extract()).strip().replace('-', '')
    written_date = ''.join(res.xpath('//li[contains(text(),"[成文日期]")]/span//text()').extract()).strip().replace('-', '')
    impl_date = ''.join(res.xpath('//li[contains(text(),"[实施日期]")]/span//text()').extract()).strip().replace('-', '')
    invalid_date = ''.join(res.xpath('//li[contains(text(),"[废止日期]")]/span//text()').extract()).strip().replace('-', '')
    legal_status = ''.join(res.xpath('//li[contains(text(),"[有效性]")]/span//text()').extract()).strip().replace('-', '')
    organ = ''.join(res.xpath('//li[contains(text(),"[发文单位]")]/span//text()').extract()).strip().replace('-', '')

    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the task is retried/flagged instead of saving an empty record.
        raise Exception(f"fulltext not found: {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99154'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'WJWBEIJING'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'wjwbeijingcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)
    data['impl_date'] = clean_pubdate(impl_date)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Attachment info (if any) from the fulltext area is stored back on the crawl row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  北京市东城区
def policy_bjdchlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Beijing Dongcheng district site (www.bjdch.gov.cn).

    On the first page (page_index == 0) it reads the total page count from a
    JS pager snippet ("{size:<N>") and fans out one list task per remaining
    page.  It then extracts every article link on the current page and
    enqueues an article task (sub_db_id 99155) carrying url/title/pub_date
    as article_json.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'{size:(\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue list tasks for pages 1 .. total_page-1 (0-based paging).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="middle_result_con"]/div|//ul[@class="list clearfix"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'https://www.bjdch.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article filename without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99155'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/h5/text()|a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('p[2]/span[1]/text()|span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_bjdcharticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-download stage for www.bjdch.gov.cn: nothing extra to do, return an empty DealModel."""
    return DealModel()


def policy_bjdcharticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Beijing Dongcheng district (www.bjdch.gov.cn) policy articles.

    Parses the downloaded article HTML, emits one metadata row for
    ``policy_latest`` and one fulltext row for ``policy_fulltext_latest``
    (sub_db_id 99155), and writes any attachment info found inside the
    fulltext area back onto the crawl row via ``other_dicts``.

    Raises:
        Exception: if the fulltext container cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title preference: on-page header, then the ArticleTitle meta tag,
    # then the title captured on the list page.
    title = ''.join(res.xpath('//div[contains(@class,"header")]/h1//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//li[contains(text(),"[发文字号]")]//span//text()').extract()).strip()
    index_no = ''.join(res.xpath('//li[contains(text(),"索") and contains(text(),"号")]//span//text()').extract()).strip()
    subject = ''.join(res.xpath('//li[contains(text(),"[主题分类]")]//span//text()').extract()).strip()
    written_date = ''.join(res.xpath('//li[contains(text(),"[成文日期]")]//span//text()').extract()).strip()
    legal_status = ''.join(res.xpath('//li[contains(text(),"[效") and contains(text(),"力")]//span//text()').extract()).strip()
    organ = ''.join(res.xpath('//li[contains(text(),"[发布机构]")]//span//text()').extract()).strip()
    # The site abbreviates the issuing organ as "区..."; qualify it with the district.
    if organ.startswith('区'):
        organ = '北京市东城' + organ

    fulltext_xpath = '//div[@id="mainText"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the task is retried/flagged instead of saving an empty record.
        raise Exception(f"fulltext not found: {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99155'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'BJDCH'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'bjdchcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Attachment info (if any) from the fulltext area is stored back on the crawl row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  北京市朝阳区
def policy_bjchylist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Beijing Chaoyang district site (www.bjchy.gov.cn).

    On the first page (page_index == 0) it reads the total page count from
    the pager text ">共N页" and fans out one list task per remaining page.
    It then extracts every article link on the current page and enqueues an
    article task (sub_db_id 99157) carrying url/title/pub_date as
    article_json.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'>共(\d+)页', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue list tasks for pages 1 .. total_page-1 (0-based paging).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="news_list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.bjchy.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article filename without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99157'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            # The date is rendered as "[YYYY-MM-DD]"; strip the brackets.
            article_json["pub_date"] = li.xpath('span[1]/text()').extract_first().replace('[', '').replace(']', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_bjchyarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-download stage for www.bjchy.gov.cn: nothing extra to do, return an empty DealModel."""
    return DealModel()


def policy_bjchyarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Beijing Chaoyang district (www.bjchy.gov.cn) policy articles.

    Parses the downloaded article HTML, emits one metadata row for
    ``policy_latest`` and one fulltext row for ``policy_fulltext_latest``
    (sub_db_id 99157), and writes any attachment info found inside the
    fulltext area back onto the crawl row via ``other_dicts``.

    Raises:
        Exception: if the fulltext container cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the on-page title; fall back to the one captured on the list page.
    title = ''.join(res.xpath('//div[@class="top"]/h1//text()|//h1[@class="content_title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # '-' is the site's placeholder for "empty" (and the separator in dashed
    # dates), so it is stripped from every metadata field.
    pub_no = ''.join(res.xpath('//li[contains(text(),"[发文字号]")]//span//text()').extract()).strip().replace('-', '')
    subject = ''.join(res.xpath('//li[contains(text(),"[主题分类]")]//span//text()').extract()).strip().replace('-', '')
    written_date = ''.join(res.xpath('//li[contains(text(),"[成文日期]")]//span//text()').extract()).strip().replace('-', '')
    impl_date = ''.join(res.xpath('//li[contains(text(),"[实施日期]")]//span//text()').extract()).strip().replace('-', '')
    invalid_date = ''.join(res.xpath('//li[contains(text(),"[废止日期]")]//span//text()').extract()).strip().replace('-', '')
    legal_status = ''.join(res.xpath('//li[contains(text(),"[有效性]")]//span//text()').extract()).strip().replace('-', '')
    organ = ''.join(res.xpath('//li[contains(text(),"[发文机构]")]//span//text()').extract()).strip().replace('-', '')
    # The site abbreviates the issuing organ as "区..."; qualify it with the district.
    if organ.startswith('区'):
        organ = '北京市朝阳' + organ

    fulltext_xpath = '//div[@class="content_article"]|//div[@class="con_txt"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the task is retried/flagged instead of saving an empty record.
        raise Exception(f"fulltext not found: {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99157'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'BJCHY'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'bjchycngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)
    data['impl_date'] = clean_pubdate(impl_date)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Attachment info (if any) from the fulltext area is stored back on the crawl row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  北京市丰台区
def policy_bjftlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Beijing Fengtai district site (www.bjft.gov.cn).

    Paging here is 1-based: on the first page (page_index == 1) the total
    page count is read from the "page_div',N" pager snippet and one list
    task is enqueued for each of pages 2 .. total_page.  Every article link
    on the current page is then enqueued as an article task (sub_db_id
    99158) carrying url/title/pub_date as article_json.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r"page_div',(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="lmgglist"]/li|//ul[@class="lmgglis"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = 'http://www.bjft.gov.cn'
            # Links may be absolute or site-root-relative.
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            if 'htm' not in url:
                continue

            # rawid is the article filename without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99158'
            article_json["url"] = url
            article_json["title"] = ''.join(li.xpath('a/span[@class="bttit"]/text()').extract()).strip()
            article_json["pub_date"] = li.xpath('a/span[@class="lmtime fr"]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_bjftarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-download stage for www.bjft.gov.cn: nothing extra to do, return an empty DealModel."""
    return DealModel()


def policy_bjftarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Beijing Fengtai district (www.bjft.gov.cn) policy articles.

    Parses the downloaded article HTML, emits one metadata row for
    ``policy_latest`` and one fulltext row for ``policy_fulltext_latest``
    (sub_db_id 99158), and writes any attachment info found inside the
    fulltext area back onto the crawl row via ``other_dicts``.

    Raises:
        Exception: if the fulltext container cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the on-page title; fall back to the one captured on the list page.
    title = ''.join(res.xpath('//div[@class="contitle"]/h1//text()|//div[@class="Contitle"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata lives in "[label]" <b> tags followed by a value <span>;
    # '-' is the site's placeholder for "empty" (and the separator in
    # dashed dates), so it is stripped from every field.
    pub_no = ''.join(res.xpath('//b[contains(text(),"[发文编号")]/following::span[1]//text()').extract()).strip().replace('-', '')
    subject = ''.join(res.xpath('//b[contains(text(),"[主题分类")]/following::span[1]//text()').extract()).strip().replace('-', '')
    written_date = ''.join(res.xpath('//b[contains(text(),"[成文日期]")]/following::span[1]//text()').extract()).strip().replace('-', '')
    impl_date = ''.join(res.xpath('//b[contains(text(),"[实施日期]")]/following::span[1]//text()').extract()).strip().replace('-', '')
    invalid_date = ''.join(res.xpath('//b[contains(text(),"[废止日期]")]/following::span[1]//text()').extract()).strip().replace('-', '')
    legal_status = ''.join(res.xpath('//b[contains(text(),"[有效性]")]/following::span[1]//text()').extract()).strip().replace('-', '')
    organ = ''.join(res.xpath('//b[contains(text(),"[发文机构]")]/following::span[1]//text()').extract()).strip().replace('-', '')
    # The site abbreviates the issuing organ as "区..."; qualify it with the district.
    if organ.startswith('区'):
        organ = '北京市丰台' + organ

    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the task is retried/flagged instead of saving an empty record.
        raise Exception(f"fulltext not found: {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99158'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'BJFT'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'bjftcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)
    data['impl_date'] = clean_pubdate(impl_date)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Attachment info (if any) from the fulltext area is stored back on the crawl row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  北京市石景山区
def policy_bjsjslist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Beijing Shijingshan district site (www.bjsjs.gov.cn).

    On the first page (page_index == 0) it reads the total page count from
    "countPage = N" in the page script and fans out one list task per
    remaining page.  Some list entries are emitted by document.write()
    scripts; those are unwrapped before the link/title are extracted.  Each
    article becomes a task with sub_db_id 99159 carrying url/title/pub_date
    as article_json.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r"countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue list tasks for pages 1 .. total_page-1 (0-based paging).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            if 'script' in li.extract():
                # Entry is produced by document.write('...'); parse the written HTML.
                href_info = re.findall(r"write\('(.*?')\)", li.extract())[0]
                href_res = Selector(text=href_info)
                href = href_res.xpath('//a/@href').extract_first()
                title = ''.join(href_res.xpath('//a//text()').extract()).strip()
            else:
                href = li.xpath('a/@href').extract_first()
                title = li.xpath('a/text()').extract_first().strip()
            base_url = f'http://www.bjsjs.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article filename without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99159'
            article_json["url"] = url
            article_json["title"] = title
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_bjsjsarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-download stage for www.bjsjs.gov.cn: nothing extra to do, return an empty DealModel."""
    return DealModel()


def policy_bjsjsarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Beijing Shijingshan district policies (sub_db_id 99159).

    Parses the downloaded article page, extracts policy metadata and the
    fulltext block, and queues rows for the ``policy_latest`` and
    ``policy_fulltext_latest`` tables.  Attachment info collected by
    ``get_file_info`` is written back onto the source row's ``other_dicts``
    column.

    Raises:
        Exception: if the fulltext container cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the title rendered on the article page; fall back to the title
    # captured on the list page.
    title = ''.join(res.xpath('//div[contains(@class,"titleBox")]/h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//li[contains(text(),"[文号]")]/span//text()').extract()).strip()
    subject = ''.join(res.xpath('//li[contains(text(),"[主题分类]")]/span//text()').extract()).strip()
    written_date = ''.join(res.xpath('//li[contains(text(),"[成文日期]")]/span//text()').extract()).strip()
    impl_date = ''.join(res.xpath('//li[contains(text(),"[实施日期]")]/span//text()').extract()).strip()
    invalid_date = ''.join(res.xpath('//li[contains(text(),"[废止日期]")]/span//text()').extract()).strip()
    legal_status = ''.join(res.xpath('//li[contains(text(),"[有效性]")]/span//text()').extract()).strip()
    organ1 = ''.join(res.xpath('//li[contains(text(),"[发布机构]")]/span//text()').extract()).strip()
    organ2 = ''.join(res.xpath('//li[contains(text(),"[联合发文机构]")]/span//text()').extract()).strip()
    # Join the issuing organ with any co-issuing organ.
    organ = f"{organ1};{organ2}" if organ1 and organ2 else organ1
    if organ.startswith('区'):
        # The site publishes district-level organs without the district prefix.
        organ = '北京市石景山' + organ

    fulltext_xpath = '//div[@id="mainText"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99159'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'BJSJS'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'bjsjscngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)
    data['impl_date'] = clean_pubdate(impl_date)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (files referenced inside the fulltext node) back
    # onto the source row; "{}" when no attachments were found.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  北京市海淀区
def policy_bjhdlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Beijing Haidian district (sub_db_id 99160).

    On the first list page it reads the total page count from the page
    source and fans out list tasks for the remaining pages; it then extracts
    article links from the current page and queues them as next-stage tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in an inline script ("countPage = N").
        max_count = re.findall(r"countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # NOTE(review): pagination looks 0-indexed here (pages 1..total_page-1
            # are generated, unlike the 1-indexed sibling sites) — confirm
            # against the site if coverage matters.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            sql_dict["page"] = total_page
            for page in range(page_index + 1, total_page):
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}", "url_part": list_json['url_part']}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="secBox_list"]/li|//ul[@class="list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            # Links are emitted via document.write(); parse the written HTML.
            href_info = re.findall(r"write\('(.*?')\)", li.extract())[0]
            href_res = Selector(text=href_info)
            href = href_res.xpath('//a/@href').extract_first()
            title = ''.join(href_res.xpath('//a//text()').extract_first()).strip()
            base_url = f'https://zyk.bjhd.gov.cn/{callmodel.sql_model.list_rawid}/index.shtml'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article filename without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99160'
            article_json["url"] = url
            article_json["title"] = title
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip().replace('[', '').replace(']', '')
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_bjhdarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Beijing Haidian; no extra work at this stage."""
    return DealModel()


def policy_bjhdarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Beijing Haidian district policies (sub_db_id 99160).

    Parses the downloaded article page, extracts policy metadata and the
    fulltext block, and queues rows for the ``policy_latest`` and
    ``policy_fulltext_latest`` tables.  Attachment info collected by
    ``get_file_info`` is written back onto the source row's ``other_dicts``
    column.

    Raises:
        Exception: if the fulltext container cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the title rendered on the article page (three known layouts);
    # fall back to the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="contentTit"]/h1//text()|//div[@class="conTit"]/h2//text()|//div[@class="header"]/h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata is laid out as label/value table cells.
    pub_no = ''.join(res.xpath('//td[contains(text(),"文号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
    subject_word = ''.join(res.xpath('//td[contains(text(),"关键词")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//td[contains(text(),"生成日期")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//td[contains(text(),"信息有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//td[contains(text(),"公开责任部门")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('区'):
        # The site publishes district-level organs without the district prefix.
        organ = '北京市海淀' + organ

    fulltext_xpath = '//div[@id="mainText"]|//div[@class="contentTxt"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99160'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'BJHD'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'bjhdcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['subject_word'] = subject_word
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info back onto the source row; "{}" when none found.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  北京市顺义区
def policy_bjshylist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Beijing Shunyi district (sub_db_id 99161).

    On the first list page it reads the total page count from the page
    source and fans out list tasks for pages 2..total_page; it then extracts
    article links from the current page and queues them as next-stage tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count comes from a totalpage="N" attribute in the page.
        max_count = re.findall(r'totalpage="(\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # page and list_json are the same for every generated page row.
            sql_dict["page"] = total_page
            sql_dict["list_json"] = callmodel.sql_model.list_json
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page_index"] = page
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="newsList"]/li|//ul[@class="list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.bjshy.gov.cn'
            url = base_url + href
            if 'htm' not in url:
                continue
            # rawid is the article's parent directory name in the URL.
            rawid = url.split('/')[-2]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99161'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span[1]/text()|a/span[1]/text()').extract_first().strip().replace('[', '').replace(']', '')
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_bjshyarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Beijing Shunyi; no extra work at this stage."""
    return DealModel()


def policy_bjshyarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Beijing Shunyi district policies (sub_db_id 99161).

    Parses the downloaded article page, extracts policy metadata and the
    fulltext block, and queues rows for the ``policy_latest`` and
    ``policy_fulltext_latest`` tables.  Attachment info collected by
    ``get_file_info`` is written back onto the source row's ``other_dicts``
    column.

    Raises:
        Exception: if the fulltext container cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the title rendered on the article page (two known layouts);
    # fall back to the title captured on the list page.
    title = ''.join(res.xpath('//h3[@class="detailTitle"]//text()|//div[@class="easysite-news-title"]/h2//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata is laid out as label/value span pairs.
    pub_no = ''.join(res.xpath('//span[contains(text(),"文号")]/following::span[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//span[contains(text(),"索引号")]/following::span[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//span[contains(text(),"主题分类")]/following::span[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//span[contains(text(),"生成日期")]/following::span[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//span[contains(text(),"信息有效性")]/following::span[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//span[contains(text(),"发文机构")]/following::span[1]/text()').extract()).strip()
    if organ.startswith('区'):
        # The site publishes district-level organs without the district prefix.
        organ = '北京市顺义' + organ

    fulltext_xpath = '//div[@id="easysiteText"]|//div[@class="artContent viewport"]|//div[@class="easysite-news-content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99161'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'BJSHY'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'bjshycngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info back onto the source row; "{}" when none found.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  北京市通州区
def policy_bjtzhlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Beijing Tongzhou district (sub_db_id 99162).

    On the first list page it reads the total page count from the page
    source and fans out list tasks for pages 2..total_page; it then extracts
    article links from the current page and queues them as next-stage tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is passed to a pager script as page_div',N.
        max_count = re.findall(r"page_div',(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            sql_dict["page"] = total_page
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="m-ulList"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.bjtzh.gov.cn'
            url = base_url + href
            if 'htm' not in url:
                continue
            # rawid is the article filename without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99162'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip().replace('[', '').replace(']', '')
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_bjtzharticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Beijing Tongzhou; no extra work at this stage."""
    return DealModel()


def policy_bjtzharticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Beijing Tongzhou district policies (sub_db_id 99162).

    Parses the downloaded article page, extracts policy metadata and the
    fulltext block, and queues rows for the ``policy_latest`` and
    ``policy_fulltext_latest`` tables.  Attachment info collected by
    ``get_file_info`` is written back onto the source row's ``other_dicts``
    column.

    Raises:
        Exception: if the fulltext container cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title fallbacks: on-page heading, then the ArticleTitle meta tag, then
    # the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="header"]/h1//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//li[contains(text(),"[发文字号]")]//span//text()').extract()).strip()
    subject = ''.join(res.xpath('//li[contains(text(),"[主题分类]")]//span//text()').extract()).strip()
    written_date = ''.join(res.xpath('//li[contains(text(),"[成文日期]")]//span//text()').extract()).strip()
    impl_date = ''.join(res.xpath('//li[contains(text(),"[实施日期]")]//span//text()').extract()).strip()
    invalid_date = ''.join(res.xpath('//li[contains(text(),"[废止日期]")]//span//text()').extract()).strip()
    legal_status = ''.join(res.xpath('//li[contains(text(),"[有效性]")]//span//text()').extract()).strip()
    organ = ''.join(res.xpath('//li[contains(text(),"[发文机构]")]//span//text()').extract()).strip()
    if organ.startswith('区'):
        # The site publishes district-level organs without the district prefix.
        organ = '北京市通州' + organ

    fulltext_xpath = '//div[@id="mainText"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99162'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'BJTZH'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'bjtzhcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)
    data['impl_date'] = clean_pubdate(impl_date)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info back onto the source row; "{}" when none found.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  北京市大兴区
def policy_bjdxlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall('totalpage="(\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index+1, total_page+1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # dic = {"page_info": f"{list_json['page_info']}_{page}"}
                # sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        list_rawid = callmodel.sql_model.list_rawid
        # if 'xxgkml' in list_rawid:
        li_list = res.xpath('//ul[contains(@class,"listNews")]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a[2]/@href|a/@href').extract_first()
            # base_url = f'https://zyk.bjhd.gov.cn/{callmodel.sql_model.list_rawid}/index.shtml'
            base_url = f'http://www.bjdx.gov.cn'
            # url = parse.urljoin(base_url, href)
            url = base_url + href
            if 'htm' not in url:
                continue
            # rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
            rawid = url.split('/')[-2]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99163'
            article_json["url"] = url
            article_json["title"] = li.xpath('a[2]/text()|a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()|a//span/text()').extract_first().strip().replace('发布日期：', '')
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_bjdxarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Beijing Daxing; no extra work at this stage."""
    return DealModel()


def policy_bjdxarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Beijing Daxing district (北京市大兴区) policy articles.

    Parses the downloaded article HTML, builds one metadata row for the
    ``policy_latest`` table and one fulltext row for ``policy_fulltext_latest``,
    and schedules an ``other_dicts`` (attachment info) update on the source
    task row.

    :param callmodel: callback model carrying the downloaded page
        (``para_dicts['data']['1_1']['html']``) and the task row (``sql_model``).
    :raises Exception: when the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the on-page title; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//div[contains(@class, "subArticleTitle")]/h2//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//b[contains(text(),"发文序号")]/parent::li[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//b[contains(text(),"索引号")]/parent::li[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//b[contains(text(),"主题分类")]/parent::li[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//b[contains(text(),"成文日期")]/parent::li[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//b[contains(text(),"信息有效性")]/parent::li[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//b[contains(text(),"发文单位")]/parent::li[1]/text()').extract()).strip()
    # The site abbreviates district organs (e.g. "区政府"); qualify with the district name.
    if organ.startswith('区'):
        organ = '北京市大兴' + organ

    fulltext_xpath = '//div[@class="article"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context instead of a bare, message-less Exception.
        raise Exception(f'fulltext not found at {fulltext_xpath}: {provider_url}')

    # Metadata row for policy_latest.
    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99163'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'BJDX'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'bjdxcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    # Fulltext row for policy_fulltext_latest.
    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (if any) back onto the originating task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  北京市房山区
def policy_bjfshlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Beijing Fangshan district (北京市房山区).

    On the first page (``page_index == 0``) the total page count is read from
    the ``countPage = N`` snippet in the HTML and the remaining list pages are
    enqueued; every article link on the current page is then queued for the
    article stage.

    :param callmodel: callback model with the downloaded list page and task row.
    :returns: DealModel with pagination inserts (``befor_dicts``) and
        article-task inserts (``next_dicts``).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string: \d inside a normal string is an invalid-escape warning.
        max_count = re.findall(r"countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            list_json = json.loads(callmodel.sql_model.list_json)
            # NOTE(review): range excludes total_page — confirm pages are 0-based.
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="secondList"]/li|//ul[@class="list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            # Some list entries are emitted via document.write(); unwrap the
            # embedded anchor markup before extracting href/title.
            if 'script' in li.extract():
                href_info = re.findall(r"write\('(.*?')\)", li.extract())[0]
                href_res = Selector(text=href_info)
                href = href_res.xpath('//a/@href').extract_first()
                title = ''.join(href_res.xpath('//a//text()').extract_first()).strip()
            else:
                href = li.xpath('a/@href').extract_first()
                title = li.xpath('a/text()').extract_first().strip()
            base_url = f'http://www.bjfsh.gov.cn/{callmodel.sql_model.list_rawid}/index.shtml'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article filename without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99164'
            article_json["url"] = url
            article_json["title"] = title
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip().replace('[', '').replace(']', '')
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_bjfsharticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Fangshan district: nothing to do here, so an
    empty DealModel is returned as-is."""
    return DealModel()


def policy_bjfsharticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Beijing Fangshan district (北京市房山区) policy articles.

    Parses the downloaded article HTML, builds the metadata row for
    ``policy_latest`` and the fulltext row for ``policy_fulltext_latest``,
    and schedules an ``other_dicts`` (attachment info) update on the
    source task row.

    :param callmodel: callback model carrying the downloaded page and task row.
    :raises Exception: when the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title fallbacks: page heading -> ArticleTitle meta tag -> list-page title.
    title = ''.join(res.xpath('//div[@class="txtContent"]/h1//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@id="mainText"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context instead of a bare, message-less Exception.
        raise Exception(f'fulltext not found at {fulltext_xpath}: {provider_url}')

    # Metadata row for policy_latest (this site exposes no pub_no/organ fields).
    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99164'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'BJFSH'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'bjfshcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    save_data.append({'table': 'policy_latest', 'data': data})

    # Fulltext row for policy_fulltext_latest.
    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (if any) back onto the originating task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  北京市门头沟区
def policy_bjmtglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Beijing Mentougou district (北京市门头沟区).

    On the first page (``page_index == 0``) the total page count is read from
    the ``page_div',N`` snippet and the remaining list pages are enqueued;
    every article link on the current page is then queued for the article
    stage.

    :param callmodel: callback model with the downloaded list page and task row.
    :returns: DealModel with pagination inserts (``befor_dicts``) and
        article-task inserts (``next_dicts``).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string: \d inside a normal string is an invalid-escape warning.
        max_count = re.findall(r"page_div',(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            list_json = json.loads(callmodel.sql_model.list_json)
            # NOTE(review): range excludes total_page — confirm pages are 0-based.
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[contains(@class,"list")]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href|h3/a/@href').extract_first()
            base_url = 'http://www.bjmtg.gov.cn'
            # hrefs here are site-absolute paths, so plain concatenation suffices.
            url = base_url + href
            if 'htm' not in url:
                continue

            # rawid is the article filename without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99165'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()|h3/a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip().replace('[', '').replace(']', '')
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_bjmtgarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Mentougou district: no extra processing,
    just hand back an empty DealModel."""
    return DealModel()


def policy_bjmtgarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Beijing Mentougou district (北京市门头沟区) policy articles.

    Parses the downloaded article HTML, builds the metadata row for
    ``policy_latest`` and the fulltext row for ``policy_fulltext_latest``,
    and schedules an ``other_dicts`` (attachment info) update on the source
    task row.

    :param callmodel: callback model carrying the downloaded page and task row.
    :raises Exception: when the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # List-page titles can be truncated ("..."); prefer the meta tag then.
    if '...' in title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # A well-formed date is 10 chars (YYYY-MM-DD); otherwise recover it from
    # the on-page "日期：" label.
    if len(pub_date) != 10:
        pub_date_info = ''.join(res.xpath('//p[@class="fl"]//text()').extract()).strip()
        pub_date = pub_date_info.replace('日期：', '').split(' ')[0].strip()
        pub_year = pub_date[:4]

    # Metadata fields; the site renders empty values as '-', hence the strips.
    pub_no = ''.join(res.xpath('//span[contains(text(),"发文字号")]/following::em[1]//text()').extract()).strip().replace('-', '')
    written_date = ''.join(res.xpath('//span[contains(text(),"成文日期")]/following::em[1]//text()').extract()).strip().replace('-', '')
    impl_date = ''.join(res.xpath('//span[contains(text(),"实施日期")]/following::em[1]//text()').extract()).strip().replace('-', '')
    invalid_date = ''.join(res.xpath('//span[contains(text(),"废止日期")]/following::em[1]//text()').extract()).strip().replace('-', '')
    legal_status = ''.join(res.xpath('//span[contains(text(),"文件有效性")]/following::em[1]//text()').extract()).strip().replace('-', '')
    organ = ''.join(res.xpath('//span[contains(text(),"发文单位")]/following::em[1]//text()').extract()).strip().replace('-', '')
    # The site abbreviates district organs (e.g. "区政府"); qualify with the district name.
    if organ.startswith('区'):
        organ = '北京市门头沟' + organ

    fulltext_xpath = '//div[@id="detailContent"]|//div[@id="content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context instead of a bare, message-less Exception.
        raise Exception(f'fulltext not found at {fulltext_xpath}: {provider_url}')

    # Metadata row for policy_latest.
    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99165'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'BJMTG'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'bjmtgcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)
    data['impl_date'] = clean_pubdate(impl_date)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    # Fulltext row for policy_fulltext_latest.
    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (if any) back onto the originating task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  北京市昌平区
def policy_bjchplist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Beijing Changping district (北京市昌平区).

    On the first page (``page_index == 1``; this site's pages are 1-based)
    the total page count is read from the ``totalpage="N"`` attribute and
    the remaining list pages are enqueued; every article link on the
    current page is then queued for the article stage.

    :param callmodel: callback model with the downloaded list page and task row.
    :returns: DealModel with pagination inserts (``befor_dicts``) and
        article-task inserts (``next_dicts``).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string: \d inside a normal string is an invalid-escape warning.
        max_count = re.findall(r'totalpage="(\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Paging is driven by page_index alone; list_json is carried through unchanged.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="news_list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = 'http://www.bjchp.gov.cn'
            # hrefs here are site-absolute paths, so plain concatenation suffices.
            url = base_url + href
            if 'htm' not in url:
                continue
            # Article URLs end in .../<rawid>/<filename>.htm — use the directory name.
            rawid = url.split('/')[-2]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99166'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip().replace('[', '').replace(']', '')
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_bjchparticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Changping district: a no-op that returns
    an empty DealModel."""
    return DealModel()


def policy_bjchparticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Beijing Changping district (北京市昌平区) policy articles.

    Parses the downloaded article HTML, builds the metadata row for
    ``policy_latest`` and the fulltext row for ``policy_fulltext_latest``,
    and schedules an ``other_dicts`` (attachment info) update on the source
    task row.

    :param callmodel: callback model carrying the downloaded page and task row.
    :raises Exception: when the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the on-page title; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//h1[@class="title_c"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//li[contains(text(),"发文字号")]/span/text()').extract()).strip()
    index_no = ''.join(res.xpath('//li[contains(text(),"索引号")]/span/text()').extract()).strip()
    subject = ''.join(res.xpath('//li[contains(text(),"主题分类")]/span/text()').extract()).strip()
    written_date = ''.join(res.xpath('//li[contains(text(),"成文日期")]/span/text()').extract()).strip()
    impl_date = ''.join(res.xpath('//li[contains(text(),"实施日期")]/span/text()').extract()).strip()
    invalid_date = ''.join(res.xpath('//li[contains(text(),"废止日期")]/span/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//li[contains(text(),"有效性")]/span/text()').extract()).strip()
    organ = ''.join(res.xpath('//li[contains(text(),"发文机构")]/span/text()').extract()).strip()
    # The site abbreviates district organs (e.g. "区政府"); qualify with the district name.
    if organ.startswith('区'):
        organ = '北京市昌平' + organ

    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context instead of a bare, message-less Exception.
        raise Exception(f'fulltext not found at {fulltext_xpath}: {provider_url}')

    # Metadata row for policy_latest.
    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99166'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'BJCHP'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'bjchpcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['impl_date'] = clean_pubdate(impl_date)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    # Fulltext row for policy_fulltext_latest.
    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (if any) back onto the originating task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  北京市平谷区
def policy_bjpglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Beijing Pinggu district (北京市平谷区).

    On the first page (``page_index == 1``; this site's pages are 1-based)
    the total page count is read from the ``totalpage="N"`` attribute and
    the remaining list pages are enqueued. The article links are then
    extracted; channels 521777/521780 use a different list markup than the
    rest of the site, hence the two parsing branches.

    :param callmodel: callback model with the downloaded list page and task row.
    :returns: DealModel with pagination inserts (``befor_dicts``) and
        article-task inserts (``next_dicts``).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string: \d inside a normal string is an invalid-escape warning.
        max_count = re.findall(r'totalpage="(\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Paging is driven by page_index alone; list_json is carried through unchanged.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        list_rawid = callmodel.sql_model.list_rawid
        if '521777' in list_rawid or '521780' in list_rawid:
            # Open-government-information channels use the xxgk-xxbox layout.
            li_list = res.xpath('//ul[@class="xxgk-xxbox"]/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('span[1]/a/@href').extract_first()
                base_url = 'http://www.bjpg.gov.cn/'
                url = base_url + href
                if 'htm' not in url:
                    continue
                # Article URLs end in .../<rawid>/<filename>.htm — use the directory name.
                rawid = url.split('/')[-2]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99167'
                article_json["url"] = url
                article_json["title"] = li.xpath('span[1]/a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('span[2]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            li_list = res.xpath('//ul[@class="commonTXTlist"]/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('div/a[2]/@href|a[3]/@href|a[2]/@href').extract_first()
                base_url = 'http://www.bjpg.gov.cn/'
                # Some entries already carry an absolute URL; don't re-prefix those.
                url = href if 'http' in href else base_url + href
                if 'htm' not in url:
                    continue
                rawid = url.split('/')[-2]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99167'
                article_json["url"] = url
                article_json["title"] = li.xpath('div/a[2]/text()|a[3]/text()|a[2]/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('div/span/text()|span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_bjpgarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Pinggu district: no work required, returns
    an empty DealModel."""
    return DealModel()


def policy_bjpgarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Beijing Pinggu (BJPG) policy article pages.

    Parses the downloaded article HTML, extracts metadata (title, document
    number, index number, dates, issuing organ, subject, legal status) and
    the fulltext container, and queues rows for the ``policy_latest`` and
    ``policy_fulltext_latest`` tables.  Attachment info (if any) is written
    back onto the crawl row via ``other_dicts``.

    Raises:
        Exception: if the fulltext container cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # List pages truncate long titles with "..."; recover the full title
    # from the article's meta tag, falling back to the list title.
    if '...' in title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata sits in label/value <span> pairs; "-" is the site's
    # placeholder for an empty value, so it is stripped out.
    pub_no = ''.join(res.xpath('//span[contains(text(),"发文序号")]/following::span[1]//text()').extract()).strip().replace('-', '')
    index_no = ''.join(res.xpath('//span[contains(text(),"索引号")]/following::span[1]//text()').extract()).strip().replace('-', '')
    subject = ''.join(res.xpath('//span[contains(text(),"主题分类")]/following::span[1]//text()').extract()).strip().replace('-', '')
    written_date = ''.join(res.xpath('//span[contains(text(),"成文日期")]/following::span[1]//text()').extract()).strip().replace('-', '')
    impl_date = ''.join(res.xpath('//span[contains(text(),"实施日期")]/following::span[1]//text()').extract()).strip().replace('-', '')
    invalid_date = ''.join(res.xpath('//span[contains(text(),"废止日期")]/following::span[1]//text()').extract()).strip().replace('-', '')
    legal_status = ''.join(res.xpath('//span[contains(text(),"有效性")]/following::span[1]//text()').extract()).strip().replace('-', '')
    organ = ''.join(res.xpath('//span[contains(text(),"发文单位")]/following::span[1]//text()').extract()).strip().replace('-', '')
    # District-level organs are published without the city prefix.
    if organ.startswith('区'):
        organ = '北京市平谷' + organ

    fulltext_xpath = '//div[@class="detailContent"]|//div[@id="easysiteText"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Raise with context (instead of a bare Exception) so the failing
        # page can be identified from the scheduler log.
        raise Exception(f"bjpg etl: fulltext container not found for {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99167'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'BJPG'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'bjpgcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['impl_date'] = clean_pubdate(impl_date)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Persist attachment/file info on the crawl row ("{}" when none found).
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   北京市密云区
def policy_bjmylist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Beijing Miyun (BJMY) policy documents.

    On the first page (page_index == 1) it reads the record count from the
    XML response (<totalrecord>), then queues one crawl row per window of
    three 25-record pages.  Every response's <record> entries are turned
    into article-level rows for the next task stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw strings avoid invalid-escape warnings for "\d" in the pattern.
        max_count = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 25)

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # The API serves 25 records per page; each queued row covers a
            # three-page (75-record) window via start/end indices.
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 25 + 1
                end = (page + 2) * 25
                if end >= max_count:
                    end = max_count
                dic = {"start": start, "end": end, "page_info": list_json["page_info"]}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = 'http://www.bjmy.gov.cn'
            url = parse.urljoin(base_url, href)
            # Skip anything that is not an article page (e.g. attachments).
            if 'htm' not in url:
                continue
            # rawid is the document filename without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99168'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first()
            article_json["pub_date"] = li.xpath('span/text()').extract_first()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_bjmyarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Beijing Miyun (BJMY).

    Intentionally a no-op: the downloaded article HTML is parsed later by
    ``policy_bjmyarticle_etl_callback``.
    """
    return DealModel()


def policy_bjmyarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Beijing Miyun (BJMY) policy article pages.

    Parses the downloaded article HTML, extracts metadata (title, document
    number, index number, dates, issuing organ, subject, legal status) and
    the fulltext container, and queues rows for the ``policy_latest`` and
    ``policy_fulltext_latest`` tables.  Attachment info (if any) is written
    back onto the crawl row via ``other_dicts``.

    Raises:
        Exception: if the fulltext container cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the on-page title; fall back to the title captured on the
    # list page (the original also pre-assigned it, which was dead code).
    title = ''.join(res.xpath('//div[@class="zwxxgk_ndbgwz"]/h1//text()|//div[@class="wz_tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata is laid out as <li>label<span>value</span></li> pairs.
    pub_no = ''.join(res.xpath('//li[contains(text(),"发文字号")]/span/text()').extract()).strip()
    index_no = ''.join(res.xpath('//li[contains(text(),"索引号")]/span/text()').extract()).strip()
    subject = ''.join(res.xpath('//li[contains(text(),"主题分类")]/span/text()').extract()).strip()
    written_date = ''.join(res.xpath('//li[contains(text(),"成文日期")]/span/text()').extract()).strip()
    impl_date = ''.join(res.xpath('//li[contains(text(),"实施日期")]/span/text()').extract()).strip()
    invalid_date = ''.join(res.xpath('//li[contains(text(),"废止日期")]/span/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//li[contains(text(),"有效性")]/span/text()').extract()).strip()
    organ = ''.join(res.xpath('//li[contains(text(),"发文机构")]/span/text()').extract()).strip()
    # District-level organs are published without the city prefix.
    if organ.startswith('区'):
        organ = '北京市密云' + organ

    fulltext_xpath = '//div[@class="scroll_cont ScrollStyle"]|//div[@class="wz_article"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Raise with context (instead of a bare Exception) so the failing
        # page can be identified from the scheduler log.
        raise Exception(f"bjmy etl: fulltext container not found for {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99168'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'BJMY'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'bjmycngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['impl_date'] = clean_pubdate(impl_date)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Persist attachment/file info on the crawl row ("{}" when none found).
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  北京市怀柔区
def policy_bjhrlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall('countPage = (\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index+1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        list_rawid = callmodel.sql_model.list_rawid
        if 'zwgk/zfwj' in list_rawid:
            li_list = res.xpath('//div[@class="hr_ls_li"]/ul/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a[2]/@href').extract_first()
                base_url = f'http://www.bjhr.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                # base_url = f'http://www.bjchp.gov.cn'
                url = parse.urljoin(base_url, href)
                # url = base_url + href
                if not href:
                    continue
                if 'htm' not in url:
                    continue
                rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
                # rawid = url.split('/')[-2]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99169'
                article_json["url"] = url
                article_json["title"] = li.xpath('a[2]/@title').extract_first().strip()
                article_json["pub_date"] = li.xpath('span/text()').extract_first().strip().replace('[', '').replace(']',
                                                                                                                    '')
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            li_list = res.xpath('//ul[@class="list"]/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                base_url = f'http://www.bjhr.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                # base_url = f'http://www.bjchp.gov.cn'
                url = parse.urljoin(base_url, href)
                # url = base_url + href
                if not href:
                    continue
                if 'htm' not in url:
                    continue
                rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
                # rawid = url.split('/')[-2]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99169'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/@title').extract_first().strip()
                article_json["pub_date"] = li.xpath('span/text()|a/span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_bjhrarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Beijing Huairou (BJHR).

    Intentionally a no-op: the downloaded article HTML is parsed later by
    ``policy_bjhrarticle_etl_callback``.
    """
    return DealModel()


def policy_bjhrarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Beijing Huairou (BJHR) policy article pages.

    Parses the downloaded article HTML, extracts metadata (title, document
    number, index number, dates, issuing organ, subject, legal status) and
    the fulltext container, and queues rows for the ``policy_latest`` and
    ``policy_fulltext_latest`` tables.  Attachment info (if any) is written
    back onto the crawl row via ``other_dicts``.

    Raises:
        Exception: if the fulltext container cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title chain: <title> tag (site suffix removed at "_"), then the page
    # heading, then the title captured on the list page.  The original
    # indexed findall(...)[0] unconditionally and could raise IndexError on
    # pages without a <title> tag.
    title_candidates = re.findall(r"<title>(.*?)</title>", html, re.S)
    title = title_candidates[0].split('_')[0].strip() if title_candidates else ''
    if not title:
        title = ''.join(res.xpath('//h1/p/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata is laid out as <li>label<span>value</span></li> pairs.
    pub_no = ''.join(res.xpath('//li[contains(text(),"发文字号")]/span/text()').extract()).strip()
    index_no = ''.join(res.xpath('//li[contains(text(),"索引号")]/span/text()').extract()).strip()
    subject = ''.join(res.xpath('//li[contains(text(),"主题分类")]/span/text()').extract()).strip()
    written_date = ''.join(res.xpath('//li[contains(text(),"生成日期")]/span/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//li[contains(text(),"有效性")]/span/text()').extract()).strip()
    organ = ''.join(res.xpath('//li[contains(text(),"发文机构")]/span/text()').extract()).strip()
    # District-level organs are published without the city prefix.
    if organ.startswith('区'):
        organ = '北京市怀柔' + organ

    fulltext_xpath = '//div[@id="mainText"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Raise with context (instead of a bare Exception) so the failing
        # page can be identified from the scheduler log.
        raise Exception(f"bjhr etl: fulltext container not found for {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99169'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'BJHR'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'bjhrcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Persist attachment/file info on the crawl row ("{}" when none found).
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  北京市延庆区
def policy_bjyqlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Beijing Yanqing (BJYQ) policy documents.

    Reads the page count from the listing HTML ('totalpage="N"'), queues
    the remaining list pages on the first page (page_index == 1), and
    extracts per-article rows from <ul class="list"> entries.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids invalid-escape warnings for "\d".
        max_count = re.findall(r'totalpage="(\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # The list_json payload is identical for every page; only
                # page_index changes.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            # Guard: extract_first() may return None, which would raise a
            # TypeError in the string concatenation below.
            if not href:
                continue
            base_url = 'http://www.bjyq.gov.cn'
            url = base_url + href
            if 'htm' not in url:
                continue
            # rawid is the parent directory segment of the article URL.
            rawid = url.split('/')[-2]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99170'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()|a/span/text()').extract_first().strip().replace('[', '').replace(']', '')
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_bjyqarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Beijing Yanqing (BJYQ).

    Intentionally a no-op: the downloaded article HTML is parsed later by
    ``policy_bjyqarticle_etl_callback``.
    """
    return DealModel()


def policy_bjyqarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Beijing Yanqing (BJYQ) policy article pages.

    Parses the downloaded article HTML, extracts metadata (title, document
    number, index number, dates, responsible department, subject, legal
    status) and the fulltext container, and queues rows for the
    ``policy_latest`` and ``policy_fulltext_latest`` tables.  Attachment
    info (if any) is written back onto the crawl row via ``other_dicts``.

    Raises:
        Exception: if the fulltext container cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # List pages truncate long titles with "..."; recover the full title
    # from the article's meta tag, falling back to the list title.
    if '...' in title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata sits in label/value <span> pairs; "-" is the site's
    # placeholder for an empty value, so it is stripped out.
    pub_no = ''.join(res.xpath('//span[contains(text(),"文号")]/following::span[1]//text()').extract()).strip().replace('-', '')
    index_no = ''.join(res.xpath('//span[contains(text(),"索引号")]/following::span[1]//text()').extract()).strip().replace('-', '')
    subject = ''.join(res.xpath('//span[contains(text(),"主题分类")]/following::span[1]//text()').extract()).strip().replace('-', '')
    written_date = ''.join(res.xpath('//span[contains(text(),"生成日期")]/following::span[1]//text()').extract()).strip().replace('-', '')
    impl_date = ''.join(res.xpath('//span[contains(text(),"实施日期")]/following::span[1]//text()').extract()).strip().replace('-', '')
    invalid_date = ''.join(res.xpath('//span[contains(text(),"废止日期")]/following::span[1]//text()').extract()).strip().replace('-', '')
    # Fixed the "///span" triple-slash typo: it is not a valid XPath
    # expression and made this lookup fail.
    legal_status = ''.join(res.xpath('//span[contains(text(),"有效性")]/following::span[1]//text()').extract()).strip().replace('-', '')
    organ = ''.join(res.xpath('//span[contains(text(),"公开责任部门")]/following::span[1]//text()').extract()).strip().replace('-', '')
    # District-level organs are published without the city prefix.
    if organ.startswith('区'):
        organ = '北京市延庆' + organ

    fulltext_xpath = '//div[@id="neirong"]|//div[@id="content-wrap"]|//div[@id="scrollbar1"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Raise with context (instead of a bare Exception) so the failing
        # page can be identified from the scheduler log.
        raise Exception(f"bjyq etl: fulltext container not found for {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99170'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'BJYQ'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'bjyqcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['impl_date'] = clean_pubdate(impl_date)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    data['legal_status'] = legal_status
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Persist attachment/file info on the crawl row ("{}" when none found).
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  天津市发展和改革委员会
def policy_fzggtjlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall('countPage = (\d+)', para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall("countPage: parseInt\('(\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index+1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        list_rawid = callmodel.sql_model.list_rawid
        # if 'zfxxgk1' in list_rawid:
        li_list = res.xpath('//ul[@class="xl-r2-list"]/li|//ul[@class="list-main-group"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                href = re.findall("isDownLoad\(this,'(.*?)'\)", li.extract())[0]
            base_url = f'https://fzgg.tj.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            # base_url = f'http://www.bjchp.gov.cn'
            url = parse.urljoin(base_url, href)
            # url = base_url + href
            if 'htm' not in url:
                continue
            rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
            # rawid = url.split('/')[-2]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99171'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/span[@class="list-main-title"]/text()|a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('a/span[@class="list-main-date"]/text()|div/span[@class="xl-r2li-s3"]/text()|a/div/span[@class="xl-r2li-s3"]/text()').extract_first().replace('发文日期：','').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_fzggtjarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the Tianjin DRC (fzggtj).

    Intentionally a no-op: the downloaded article HTML is parsed later by
    ``policy_fzggtjarticle_etl_callback``.
    """
    return DealModel()


def policy_fzggtjarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Tianjin Development & Reform Commission (fzgg.tj.gov.cn) articles.

    Parses the downloaded article HTML, builds one row for ``policy_latest`` and one
    for ``policy_fulltext_latest``, and writes attachment info (via ``get_file_info``)
    back into the task row's ``other_dicts``.

    Raises:
        Exception: when no fulltext container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title fallback chain: metadata table -> page heading -> <meta> tag -> list-page title.
    title = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"名") and contains(text(),"称")]/following::div[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="details-main-title"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"索") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"主") and contains(text(),"题")]/following::div[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"成") and contains(text(),"期")]/following::div[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"构")]/following::div[1]//text()').extract()).strip()
    if organ.startswith('市'):
        # Municipal organs are listed without the city prefix on this site.
        organ = '天津' + organ

    fulltext_xpath = '//div[@class="details-box"]|//div[@id="xlrllt"]|//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Keep the broad Exception type (callers catch it generically) but give a reason.
        raise Exception(f'fulltext not found for {provider_url}')

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99171'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    # Fixed product / provenance fields for this source.
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'FZGGTJ'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'fzggtjcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    # Parsed article fields.
    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (if any) back onto the originating task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  天津市工业与信息化局
def policy_gyxxhtjlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Tianjin Bureau of Industry and IT (gyxxh.tj.gov.cn).

    On the first page (page_index == 0) it enqueues the remaining list pages,
    then extracts each article link from the current page for the article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page's JS; two known variants.
        max_count = re.findall(r'countPage = (\d+)', para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall(r"countPage: parseInt\('(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: schedule the remaining list pages of this task.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="xl-r2-list"]/li|//ul[@class="news_list2 news_list3"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                # Entry without a link: skip instead of crashing in urljoin.
                continue
            base_url = f'http://gyxxh.tj.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            rawid_match = re.findall(r'(.*?)\.', url.split('/')[-1])
            if not rawid_match:
                continue
            temp["rawid"] = rawid_match[0]
            temp["sub_db_id"] = '99172'
            article_json["url"] = url
            title = li.xpath('a/text()').extract_first()
            if not title:
                # No visible title: cannot build a useful article record.
                continue
            article_json["title"] = title.strip()
            date_text = li.xpath('a/span[@class="time"]/text()|div/span[@class="xl-r2li-s3"]/text()').extract_first()
            if not date_text:
                continue
            article_json["pub_date"] = date_text.replace('发文日期：', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_gyxxhtjarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for gyxxh.tj.gov.cn: nothing to schedule, return an empty DealModel."""
    return DealModel()


def policy_gyxxhtjarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Tianjin Bureau of Industry and IT (gyxxh.tj.gov.cn) articles.

    Parses the downloaded article HTML, builds one row for ``policy_latest`` and one
    for ``policy_fulltext_latest``, and writes attachment info (via ``get_file_info``)
    back into the task row's ``other_dicts``.

    Raises:
        Exception: when no fulltext container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title fallback chain: metadata table -> page heading; a heading containing a
    # newline is considered garbled, so fall through to the <meta> tag, then the
    # title captured at list time.
    title = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"名") and contains(text(),"称")]/following::div[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="news_title titlep"]//text()').extract()).strip()
    if '\n' in title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"索") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"主") and contains(text(),"题")]/following::div[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"成") and contains(text(),"期")]/following::div[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"构")]/following::div[1]//text()').extract()).strip()
    if organ.startswith('市'):
        # Municipal organs are listed without the city prefix on this site.
        organ = '天津' + organ

    fulltext_xpath = '//div[@class="page_info"]|//div[@id="xlrllt"]|//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Keep the broad Exception type (callers catch it generically) but give a reason.
        raise Exception(f'fulltext not found for {provider_url}')

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99172'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    # Fixed product / provenance fields for this source.
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'GYXXHTJ'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'gyxxhtjcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    # Parsed article fields.
    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (if any) back onto the originating task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  天津市科学技术局
def policy_kxjstjlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Tianjin Municipal Science and Technology Bureau (kxjs.tj.gov.cn).

    On the first page (page_index == 0) it enqueues the remaining list pages,
    then extracts each article link from the current page for the article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page's JS; two known variants.
        max_count = re.findall(r'countPage = (\d+)', para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall(r"countPage: parseInt\('(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: schedule the remaining list pages of this task.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="xl-r2-list"]/li|//ul[@class="news_list news_list2"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                # Entry without a link: skip instead of crashing in urljoin.
                continue
            base_url = f'https://kxjs.tj.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            rawid_match = re.findall(r'(.*?)\.', url.split('/')[-1])
            if not rawid_match:
                continue
            temp["rawid"] = rawid_match[0]
            temp["sub_db_id"] = '99173'
            article_json["url"] = url
            title = li.xpath('a/text()').extract_first()
            if not title:
                # No visible title: cannot build a useful article record.
                continue
            article_json["title"] = title.strip()
            date_text = li.xpath('a/span[@class="time"]/text()|div/span[@class="xl-r2li-s3"]/text()').extract_first()
            if not date_text:
                continue
            article_json["pub_date"] = date_text.replace('发文日期：', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_kxjstjarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for kxjs.tj.gov.cn: nothing to schedule, return an empty DealModel."""
    return DealModel()


def policy_kxjstjarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Tianjin Municipal Science and Technology Bureau (kxjs.tj.gov.cn) articles.

    Parses the downloaded article HTML, builds one row for ``policy_latest`` and one
    for ``policy_fulltext_latest``, and writes attachment info (via ``get_file_info``)
    back into the task row's ``other_dicts``.

    Raises:
        Exception: when no fulltext container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title fallback chain: metadata table -> page heading -> list-page title.
    title = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"名") and contains(text(),"称")]/following::div[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="common-content-mainTitle"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"索") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"主") and contains(text(),"题")]/following::div[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"成") and contains(text(),"期")]/following::div[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"构")]/following::div[1]//text()').extract()).strip()
    if organ.startswith('市'):
        # Municipal organs are listed without the city prefix on this site.
        organ = '天津' + organ

    fulltext_xpath = '//div[@class="page_info"]|//div[@id="xlrllt"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Keep the broad Exception type (callers catch it generically) but give a reason.
        raise Exception(f'fulltext not found for {provider_url}')

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99173'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    # Fixed product / provenance fields for this source.
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'KXJSTJ'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'kxjstjcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    # Parsed article fields.
    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (if any) back onto the originating task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  天津市教育委员会
def policy_jytjlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Tianjin Municipal Education Commission (jy.tj.gov.cn).

    On the first page (page_index == 0) it enqueues the remaining list pages,
    then extracts each article link from the current page for the article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page's JS; two known variants.
        max_count = re.findall(r'countPage = (\d+)', para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall(r"countPage: parseInt\('(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: schedule the remaining list pages of this task.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="xl-r2-list"]/li|//ul[@class="common-list-right-list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            # Download-style entries carry the link inside an isDownLoad(this,'...') call.
            li_html = li.extract()
            if 'isDownLoad' in li_html:
                href_match = re.findall(r"\(this,'(.*?)'", li_html)
                href = href_match[0] if href_match else None
            else:
                href = li.xpath('a/@href').extract_first()
            if not href:
                # Entry without a link: skip instead of crashing in urljoin.
                continue
            base_url = f'https://jy.tj.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            rawid_match = re.findall(r'(.*?)\.', url.split('/')[-1])
            if not rawid_match:
                continue
            temp["rawid"] = rawid_match[0]
            temp["sub_db_id"] = '99174'
            article_json["url"] = url
            title = li.xpath('a/p/text()|a/text()').extract_first()
            if not title:
                # No visible title: cannot build a useful article record.
                continue
            article_json["title"] = title.strip()
            date_text = li.xpath('p[@class="list-date"]/text()|div/span[@class="xl-r2li-s3"]/text()').extract_first()
            if not date_text:
                continue
            article_json["pub_date"] = date_text.replace('发文日期：', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jytjarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for jy.tj.gov.cn: nothing to schedule, return an empty DealModel."""
    return DealModel()


def policy_jytjarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Tianjin Municipal Education Commission (jy.tj.gov.cn) articles.

    Parses the downloaded article HTML, builds one row for ``policy_latest`` and one
    for ``policy_fulltext_latest``, and writes attachment info (via ``get_file_info``)
    back into the task row's ``other_dicts``.

    Raises:
        Exception: when no fulltext container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title fallback chain: metadata table -> page heading variants -> list-page title.
    title = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"名") and contains(text(),"称")]/following::div[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="common-content-mainTitle"]//text()|//div[@class="qt-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"索") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"主") and contains(text(),"题")]/following::div[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"成") and contains(text(),"期")]/following::div[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"构")]/following::div[1]//text()').extract()).strip()
    if organ.startswith('市'):
        # Municipal organs are listed without the city prefix on this site.
        organ = '天津' + organ

    fulltext_xpath = '//div[@id="zoom"]|//div[@id="xlrllt"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Keep the broad Exception type (callers catch it generically) but give a reason.
        raise Exception(f'fulltext not found for {provider_url}')

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99174'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    # Fixed product / provenance fields for this source.
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'JYTJ'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'jytjcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    # Parsed article fields.
    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (if any) back onto the originating task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  天津市民政局
def policy_mztjlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Tianjin Civil Affairs Bureau (mz.tj.gov.cn).

    On the first page (page_index == 0) it enqueues the remaining list pages,
    then extracts each article link from the current page for the article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page links are named pagenav_<n>; the last one is the highest page number.
        max_count = re.findall(r'pagenav_(\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[-1]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: schedule the remaining list pages of this task.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="list-content"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('div[@class="title"]/a/@href').extract_first()
            if not href:
                # Entry without a link: skip instead of crashing in urljoin.
                continue
            base_url = f'https://mz.tj.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            rawid_match = re.findall(r'(.*?)\.', url.split('/')[-1])
            if not rawid_match:
                continue
            temp["rawid"] = rawid_match[0]
            temp["sub_db_id"] = '99175'
            article_json["url"] = url
            title = li.xpath('div[@class="title"]/a/text()').extract_first()
            if not title:
                # No visible title: cannot build a useful article record.
                continue
            article_json["title"] = title.strip()
            date_text = li.xpath('div[@class="date"]/text()').extract_first()
            if not date_text:
                continue
            article_json["pub_date"] = date_text.strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_mztjarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for mz.tj.gov.cn: nothing to schedule, return an empty DealModel."""
    return DealModel()


def policy_mztjarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for 天津市民政局 (Tianjin Civil Affairs Bureau) articles.

    Parses the downloaded article HTML, extracts policy metadata (title,
    document number, index number, subject, written date, issuing organ)
    and the fulltext container, and fills ``result.save_data`` with rows
    for the ``policy_latest`` and ``policy_fulltext_latest`` tables.
    Attachment info found in the fulltext is written back to the source
    row's ``other_dicts`` column.

    Raises:
        Exception: when no fulltext container can be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title: prefer the structured header block, then the plain article
    # title div, finally fall back to the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"名") and contains(text(),"称")]/following::div[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="article-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata fields from the xl-zw-top header block.
    pub_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"索") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"主") and contains(text(),"题")]/following::div[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"成") and contains(text(),"期")]/following::div[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"构")]/following::div[1]//text()').extract()).strip()
    if organ.startswith('市'):
        # Qualify municipal organ names with the city prefix.
        organ = '天津' + organ

    fulltext_xpath = '//div[@class="article-detail"]|//div[@id="xlrllt"]|//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the task can be retried/inspected instead of
        # silently saving an empty record.
        raise Exception(f"fulltext container not found: {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99175'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'MZTJ'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'mztjcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record any attachments found inside the fulltext container on the
    # source row so the file-download stage can pick them up.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  天津市财政局
def policy_cztjlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for 天津市财政局 (Tianjin Finance Bureau).

    On the first page (page_index == 0) it schedules the remaining list
    pages; on every page it extracts article links into the next stage.
    Two page layouts are handled: a JSON search API (when list_rawid
    contains 'search') and a plain HTML list page.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count: try the JS pager first, then the JSON field.
        max_count = re.findall(r"countPage: parseInt\('(\d+)", para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall(r'"totalPages":(\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue a list task for every remaining page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                if 'search' in callmodel.sql_model.list_rawid:
                    # Search API pages are 1-based query parameters.
                    dic = {"page_info": f"pageNumber={page+1}"}
                else:
                    dic = {"page_info": f"index_{page}.html"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        if 'search' in callmodel.sql_model.list_rawid:
            # JSON search API response.
            res_json = json.loads(para_dicts["data"]["1_1"]['html'])
            for li in res_json['page']['content']:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                url = li['DOCPUBURL']
                if 'htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99176'
                article_json["url"] = url
                article_json["title"] = li['DOCTITLE']
                article_json["pub_date"] = li['PUBDATE'][:10]
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # Plain HTML list page.
            res = Selector(text=para_dicts["data"]["1_1"]['html'])
            for li in res.xpath('//ul[@class="xl-r2-list"]/li'):
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                base_url = f'http://cz.tj.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99176'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('div/span[@class="xl-r2li-s3"]/text()').extract_first().replace('发文日期：','').strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cztjarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for 天津市财政局 pages.

    No per-article scheduling is needed; the downloaded HTML is consumed
    later by the ETL callback, so an empty DealModel is returned.
    """
    return DealModel()


def policy_cztjarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for 天津市财政局 (Tianjin Finance Bureau) articles.

    Parses the downloaded article HTML, extracts policy metadata (title,
    document number, index number, subject, written date, issuing organ)
    and the fulltext container, and fills ``result.save_data`` with rows
    for the ``policy_latest`` and ``policy_fulltext_latest`` tables.
    Attachment info found in the fulltext is written back to the source
    row's ``other_dicts`` column.

    Raises:
        Exception: when no fulltext container can be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title: prefer the structured header block, then the main title div,
    # then the meta tag when the scrape is multi-line, finally the title
    # captured on the list page.
    title = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"名") and contains(text(),"称")]/following::div[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="common-content-mainTitle"]//text()').extract()).strip()
    if '\n' in title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata fields from the xl-zw-top header block.
    pub_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"索") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"主") and contains(text(),"题")]/following::div[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"成") and contains(text(),"期")]/following::div[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"构")]/following::div[1]//text()').extract()).strip()
    if organ.startswith('市'):
        # Qualify municipal organ names with the city prefix.
        organ = '天津' + organ

    fulltext_xpath = '//div[@id="zoom"]|//div[@id="xlrllt"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the task can be retried/inspected instead of
        # silently saving an empty record.
        raise Exception(f"fulltext container not found: {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99176'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'CZTJ'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'cztjcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record any attachments found inside the fulltext container on the
    # source row so the file-download stage can pick them up.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  天津市人力资源和社会保障局
def policy_hrsstjlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for 天津市人力资源和社会保障局 (Tianjin HRSS Bureau).

    On the first page (page_index == 0) it schedules the remaining list
    pages using the "<page_info>_<n>" naming scheme; on every page it
    extracts article links into the next stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count from the JS pager; default to a single page.
        max_count = re.findall(r"countPage: parseInt\('(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[-1]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue a list task for every remaining page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        for li in res.xpath('//div[@class="fmultimedia-y fl"]/ul/li'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'https://hrss.tj.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99177'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/span[1]/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('a/span[@class="fr"]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_hrsstjarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for 天津市人力资源和社会保障局 pages.

    No per-article scheduling is needed; the downloaded HTML is consumed
    later by the ETL callback, so an empty DealModel is returned.
    """
    return DealModel()


def policy_hrsstjarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for 天津市人力资源和社会保障局 (Tianjin HRSS Bureau) articles.

    Parses the downloaded article HTML, extracts policy metadata (title,
    document number, index number, subject, written date, issuing organ)
    and the fulltext container, and fills ``result.save_data`` with rows
    for the ``policy_latest`` and ``policy_fulltext_latest`` tables.
    Attachment info found in the fulltext is written back to the source
    row's ``other_dicts`` column.

    Raises:
        Exception: when no fulltext container can be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title: prefer the structured header block, then the main title div,
    # finally the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"名") and contains(text(),"称")]/following::div[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="common-content-mainTitle"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata fields from the xl-zw-top header block.
    pub_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"索") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"主") and contains(text(),"题")]/following::div[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"成") and contains(text(),"期")]/following::div[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"构")]/following::div[1]//text()').extract()).strip()
    if organ.startswith('市'):
        # Qualify municipal organ names with the city prefix.
        organ = '天津' + organ

    fulltext_xpath = '//div[@id="zoom"]|//div[@id="xlrllt"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the task can be retried/inspected instead of
        # silently saving an empty record.
        raise Exception(f"fulltext container not found: {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99177'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'HRSSTJ'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'hrsstjcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record any attachments found inside the fulltext container on the
    # source row so the file-download stage can pick them up.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  天津市农业农村委员会
def policy_nynctjlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for 天津市农业农村委员会 (Tianjin Agriculture Commission).

    On the first page (page_index == 0) it schedules the remaining list
    pages using the "<page_info>_<n>" naming scheme; on every page it
    extracts article links into the next stage. Two list layouts are
    matched (common-list and xl-r2-list).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count: two JS pager variants are seen on this site.
        max_count = re.findall(r"countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall(r"countPage: parseInt\('(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[-1]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue a list task for every remaining page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        for li in res.xpath('//ul[@class="common-list mbt50"]/li|//ul[@class="xl-r2-list"]/li'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'https://nync.tj.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99178'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/div/text()|a/text()').extract_first().strip()
            # Skip entries without a publish date (evaluate the xpath once).
            raw_pub_date = li.xpath('a/span/text()|div/span[@class="xl-r2li-s3"]/text()').extract_first()
            if not raw_pub_date:
                continue
            article_json["pub_date"] = raw_pub_date.replace('发文日期：','').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_nynctjarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for 天津市农业农村委员会 pages.

    No per-article scheduling is needed; the downloaded HTML is consumed
    later by the ETL callback, so an empty DealModel is returned.
    """
    return DealModel()


def policy_nynctjarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for 天津市农业农村委员会 (Tianjin Agriculture Commission) articles.

    Parses the downloaded article HTML, extracts policy metadata (title,
    document number, index number, subject, written date, issuing organ)
    and the fulltext container, and fills ``result.save_data`` with rows
    for the ``policy_latest`` and ``policy_fulltext_latest`` tables.
    Attachments found in the fulltext and in the qt-attachments block are
    written back to the source row's ``other_dicts`` column.

    Raises:
        Exception: when no fulltext container can be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title: prefer the structured header block, then the visible title
    # divs, then the meta tag, finally the title from the list page.
    title = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"名") and contains(text(),"称")]/following::div[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="qt-title"]//text()|//div[@class="common-content-mainTitle"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata fields from the xl-zw-top header block.
    pub_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"索") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"主") and contains(text(),"题")]/following::div[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"成") and contains(text(),"期")]/following::div[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"构")]/following::div[1]//text()').extract()).strip()
    if organ.startswith('市'):
        # Qualify municipal organ names with the city prefix.
        organ = '天津' + organ

    fulltext_xpath = '//div[@id="zoom"]|//div[@id="xlrllt"]|//div[contains(@class,"article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the task can be retried/inspected instead of
        # silently saving an empty record.
        raise Exception(f"fulltext container not found: {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99178'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'NYNCTJ'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'nynctjcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Attachments can live inside the fulltext container or in a separate
    # qt-attachments block; merge both lists onto the source row.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, f'(//div[@class="qt-attachments qt-has"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  天津市住房和城乡建设委员会
def policy_zfcxjstjlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for 天津市住房和城乡建设委员会 (Tianjin Housing & Urban-Rural
    Construction Commission).

    On the first page (page_index == 0) it schedules the remaining list
    pages; on every page it extracts article links into the next stage.
    Two layouts are handled: JS "var item_..." objects embedded in the
    page (for the tzgg / wfwj channels) and plain HTML list pages.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count: two JS pager variants are seen on this site.
        max_count = re.findall(r"countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall(r"countPage: parseInt\('(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[-1]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue a list task for every remaining page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        list_rawid = callmodel.sql_model.list_rawid
        if 'xxgk_70/tzgg' == list_rawid or 'xxgk_70/zcwj/wfwj' == list_rawid:
            # Articles embedded as JS object literals ("var item_N = {...};").
            li_list = re.findall(r'var item_.*?\{.*?\};', para_dicts["data"]["1_1"]['html'], re.S)
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = re.findall(r'url:"(.*?)",', li)[0]
                base_url = f'https://zfcxjs.tj.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99179'
                article_json["url"] = url
                article_json["title"] = re.findall(r'title:"(.*?)"', li)[0].strip()
                article_json["pub_date"] = re.findall(r'doctime:"(.*?)"', li)[0].strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # Plain HTML list page (two list layouts).
            li_list = res.xpath('//ul[@class="pub-list"]/li|//ul[@class="xl-r2-list"]/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a[1]/@href').extract_first()
                base_url = f'https://zfcxjs.tj.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99179'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/div/text()|a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('span/text()|div/span[@class="xl-r2li-s3"]/text()').extract_first().replace('发文日期：','').strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_zfcxjstjarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-download callback for zfcxjs.tj.gov.cn; no post-processing is required."""
    return DealModel()


def policy_zfcxjstjarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL step for zfcxjs.tj.gov.cn (Tianjin Housing and Urban-Rural Construction) policy articles.

    Parses the downloaded article HTML, extracts policy metadata and the
    full-text container, and queues one row each for the `policy_latest` and
    `policy_fulltext_latest` tables. Attachment info found in the full text is
    written back into the originating task row's `other_dicts` column.

    Raises:
        Exception: if no full-text container can be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title preference: metadata-table cell -> page heading -> list-page title.
    title = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"名") and contains(text(),"称")]/following::div[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="big-title"]//text()|//div[@class="qt-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata cells are matched by character pairs so the lookup survives
    # arbitrary whitespace inside the label (e.g. "发 文 字 号").
    pub_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"索") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"主") and contains(text(),"题")]/following::div[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"成") and contains(text(),"期")]/following::div[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"构")]/following::div[1]//text()').extract()).strip()
    if organ.startswith('市'):
        # Qualify a bare "市..." organ name with the city.
        organ = '天津' + organ

    fulltext_xpath = '//div[@id="detail"]|//div[@id="xlrllt"]|//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext container not found: {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99179'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'ZFCXJSTJ'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'zfcxjstjcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    # impl_date / invalid_date / subject_word / legal_status are not exposed
    # by this site's article template.
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (if any) on the originating task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  天津市卫生健康委员会
def policy_wsjktjlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for wsjk.tj.gov.cn (Tianjin Municipal Health Commission).

    On the first page it fans out pagination tasks for the remaining list
    pages; on every page it extracts each article's url/title/pub_date and
    queues an article-stage task row.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # The total page count is embedded in the page's JS as "countPage = N".
        max_count = re.findall(r"countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[-1]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only the first page spawns tasks for the remaining pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="default_pgContainer"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            # The queued row belongs to the *next* (article) stage.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'https://wsjk.tj.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                # Skip non-article links (external redirects, attachments, ...).
                continue
            # rawid is the article filename without its ".htm(l)" extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99180'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_wsjktjarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-download callback for wsjk.tj.gov.cn; nothing extra to do here."""
    return DealModel()


def policy_wsjktjarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL step for wsjk.tj.gov.cn (Tianjin Municipal Health Commission) policy articles.

    Parses the downloaded article HTML, extracts the title and full text, and
    queues one row each for the `policy_latest` and `policy_fulltext_latest`
    tables. Attachment info found in the full text is written back into the
    originating task row's `other_dicts` column.

    Raises:
        Exception: if no full-text container can be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the on-page heading; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="pages"]//h3//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext container not found: {provider_url}")

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99180'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = 'WSJKTJ'
    data['sub_db'] = 'POLICY'
    data['sub_db_id'] = sub_db_id
    data['provider'] = 'CNGOV'
    data['zt_provider'] = 'wsjktjcngovpolicy'
    data['source_type'] = '16'
    data['latest_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    data['language'] = 'ZH'

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    # pub_no / organ / index_no / written_date / impl_date / invalid_date /
    # subject / subject_word / legal_status are not exposed by this template.
    save_data.append({'table': 'policy_latest', 'data': data})

    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '16'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    full_text_data['fulltext_size'] = ''
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})

    result.save_data = save_data

    # Record attachment info (if any) on the originating task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  内蒙古自治区呼和浩特市
def policy_huhhotarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL step for Hohhot (Inner Mongolia) municipal government policy articles.

    Handles two page templates (selected by the presence of
    `detialbox_table` in the HTML), extracts policy metadata and the
    full-text container, and queues rows for `policy_latest` and
    `policy_fulltext_latest`. Attachment info is written back into the
    originating task row's `other_dicts` column.

    Raises:
        Exception: if no full-text container can be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the on-page heading; fall back to the list-page title.
    title = ''.join(res.xpath('//h1//text()|//div[@class="container_tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # The document number is injected client-side via a getwh("...") JS call,
    # so it must be pulled from the raw HTML rather than the rendered DOM.
    # (Identical in both templates, hence extracted once here.)
    pub_no = re.findall(r'getwh\("(.*?)"', html)
    pub_no = pub_no[0] if pub_no else ''
    if 'detialbox_table' in html:
        # Template variant A: metadata td pairs under div.detialbox_table.
        index_no = ''.join(res.xpath('//div[@class="containers"]//div[@class="detialbox_table"]//td[contains(text(),"索 引 号")]/following::td[1]/text()').extract()).strip()
        subject = ''.join(res.xpath('//div[@class="containers"]//div[@class="detialbox_table"]//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
        written_date = ''.join(res.xpath('//div[@class="containers"]//div[@class="detialbox_table"]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
        legal_status = ''.join(res.xpath('//div[@class="containers"]//div[@class="detialbox_table"]//td[contains(text(),"公文时效")]/following::td[1]/text()').extract()).strip()
        organ = ''.join(res.xpath('//div[@class="containers"]//div[@class="detialbox_table"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    else:
        # Template variant B: th/td rows under div.zcwjk_cont_left.
        index_no = ''.join(res.xpath('//div[contains(@class,"zcwjk_cont_left")]//th[contains(text(),"索 引 号")]/following::td[1]/text()').extract()).strip()
        subject = ''.join(res.xpath('//div[contains(@class,"zcwjk_cont_left")]//th[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
        written_date = ''.join(res.xpath('//div[contains(@class,"zcwjk_cont_left")]//th[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
        legal_status = ''.join(res.xpath('//div[contains(@class,"zcwjk_cont_left")]//th[contains(text(),"公文时效")]/following::td[1]/text()').extract()).strip()
        organ = ''.join(res.xpath('//div[contains(@class,"zcwjk_cont_left")]//th[contains(text(),"发文机构")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # Qualify a bare "市..." organ name with the city.
        organ = '呼和浩特' + organ

    fulltext_xpath = '//div[@id="Zoom"]|//div[@id="contentText"]|//div[@id="para"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext container not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99133'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "HUHHOT"
    zt_provider = "huhhotcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    # pub_date was already cleaned above; clean_pubdate is presumed idempotent
    # — TODO confirm before removing this second call.
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (if any) on the originating task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  北京市西城区 (Beijing Xicheng District)
def policy_bjxcharticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL step for Beijing Xicheng District government policy articles.

    Extracts policy metadata from the page's `box_bq` label block and the
    article body from `div.xiangqing`, then queues rows for `policy_latest`
    and `policy_fulltext_latest`. Attachment info is written back into the
    originating task row's `other_dicts` column.

    Raises:
        Exception: if the article body yields no content.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    # Title comes from the list page; this template has no reliable heading.
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    pub_no = ''.join(res.xpath('//div[@class="box_bq"]//span[contains(text(),"发文字号")]/following::span[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="box_bq"]//span[contains(text(),"主题分类")]/following::span[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="box_bq"]//span[contains(text(),"成文日期")]/following::span[1]/text()').extract()).strip()
    impl_date = ''.join(res.xpath('//div[@class="box_bq"]//span[contains(text(),"实施日期")]/following::span[1]/text()').extract()).strip()
    invalid_date = ''.join(res.xpath('//div[@class="box_bq"]//span[contains(text(),"废止日期")]/following::span[1]/text()').extract()).strip()
    # NOTE(review): these two use a *child* span (label/span) instead of the
    # following-sibling pattern above — presumably matching the site markup;
    # confirm against live pages before "fixing".
    legal_status = ''.join(res.xpath('//div[@class="box_bq"]//span[contains(text(),"有效性")]/span/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="box_bq"]//span[contains(text(),"发文机构")]/span/text()').extract()).strip()
    if organ.startswith('区'):
        # Qualify a bare "区..." organ name with city and district.
        organ = '北京市西城' + organ

    # Take every child of the article container except the metadata footer.
    fulltext_xpath = '//div[@class="xiangqing"]/*[not(@class="othermessage clearfix")]'
    fulltext = ''.join(res.xpath(fulltext_xpath).extract())
    if not fulltext:
        raise Exception(f"fulltext container not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99156'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "BJXCH"
    zt_provider = "bjxchcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    # pub_date was already cleaned above; clean_pubdate is presumed idempotent
    # — TODO confirm before removing this second call.
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)
    data['impl_date'] = clean_pubdate(impl_date)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (if any) on the originating task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result