import datetime
import json
import time
import math
import re
import traceback
import urllib
from urllib import parse
import base64
import requests

from parsel import Selector
from re_common.baselibrary.database.mysql import json_update
from re_common.baselibrary.utils.basedict import BaseDicts
from re_common.baselibrary.utils.basetime import BaseTime
from re_common.baselibrary.utils.baseurl import BaseUrl
from re_common.vip.baseencodeid import BaseLngid

from apps.crawler_platform.core_platform.core_sql import CoreSqlValue
from apps.crawler_platform.core_platform.g_model import DealModel, CallBackModel, DealInsertModel, DealUpdateModel, \
    OperatorSqlModel, DealItemModel, \
    EtlDealModel, PolicyListModel, PolicyArticleModel

# Public registry of crawler callbacks exported by this module. Names follow
# the pattern policy_<site-or-city>{list,article,article_etl}_callback; a
# trailing digit (e.g. ...list1) marks an alternate list-page layout for the
# same site.
__all__ = [
    "policy_fzggwnxlist_callback",
    "policy_fzggwnxarticle_callback",
    "policy_fzggwnxarticle_etl_callback",
    "policy_gxtnxlist_callback",
    "policy_gxtnxarticle_callback",
    "policy_gxtnxarticle_etl_callback",
    "policy_kjtnxlist_callback",
    "policy_kjtnxarticle_callback",
    "policy_kjtnxarticle_etl_callback",
    "policy_jytnxlist_callback",
    "policy_jytnxarticle_callback",
    "policy_jytnxarticle_etl_callback",
    "policy_macnxlist_callback",
    "policy_macnxarticle_callback",
    "policy_macnxarticle_etl_callback",
    "policy_cztnxlist_callback",
    "policy_cztnxarticle_callback",
    "policy_cztnxarticle_etl_callback",
    "policy_hrssnxlist_callback",
    "policy_hrssnxarticle_callback",
    "policy_hrssnxarticle_etl_callback",
    "policy_nynctnxlist_callback",
    "policy_nynctnxarticle_callback",
    "policy_nynctnxarticle_etl_callback",
    "policy_jstnxlist_callback",
    "policy_jstnxarticle_callback",
    "policy_jstnxarticle_etl_callback",
    "policy_wsjkwnxlist_callback",
    "policy_wsjkwnxarticle_callback",
    "policy_wsjkwnxarticle_etl_callback",
    "policy_yinchuanlist_callback",
    "policy_yinchuanarticle_callback",
    "policy_yinchuanarticle_etl_callback",
    "policy_shizuishanlist_callback",
    "policy_shizuishanarticle_callback",
    "policy_shizuishanarticle_etl_callback",
    "policy_wuzhonglist_callback",
    "policy_wuzhongarticle_callback",
    "policy_wuzhongarticle_etl_callback",
    "policy_nxgylist_callback",
    "policy_nxgyarticle_callback",
    "policy_nxgyarticle_etl_callback",
    "policy_nxzwlist_callback",
    "policy_nxzwarticle_callback",
    "policy_nxzwarticle_etl_callback",
    "policy_xjdrcxinjianglist_callback",
    "policy_xjdrcxinjiangarticle_callback",
    "policy_xjdrcxinjiangarticle_etl_callback",
    "policy_gxtxinjianglist_callback",
    "policy_gxtxinjiangarticle_callback",
    "policy_gxtxinjiangarticle_etl_callback",
    "policy_kjtxinjianglist_callback",
    "policy_kjtxinjiangarticle_callback",
    "policy_kjtxinjiangarticle_etl_callback",
    "policy_jytxinjianglist_callback",
    "policy_jytxinjiangarticle_callback",
    "policy_jytxinjiangarticle_etl_callback",
    "policy_mztxinjianglist_callback",
    "policy_mztxinjiangarticle_callback",
    "policy_mztxinjiangarticle_etl_callback",
    "policy_cztxinjianglist_callback",
    "policy_cztxinjiangarticle_callback",
    "policy_cztxinjiangarticle_etl_callback",
    "policy_rstxinjianglist_callback",
    "policy_rstxinjiangarticle_callback",
    "policy_rstxinjiangarticle_etl_callback",
    "policy_nynctxinjianglist_callback",
    "policy_nynctxinjiangarticle_callback",
    "policy_nynctxinjiangarticle_etl_callback",
    "policy_zjtxinjianglist_callback",
    "policy_zjtxinjiangarticle_callback",
    "policy_zjtxinjiangarticle_etl_callback",
    "policy_wjwxinjianglist_callback",
    "policy_wjwxinjiangarticle_callback",
    "policy_wjwxinjiangarticle_etl_callback",
    "policy_urumqilist_callback",
    "policy_urumqiarticle_callback",
    "policy_urumqiarticle_etl_callback",
    "policy_klmylist_callback",
    "policy_klmyarticle_callback",
    "policy_klmyarticle_etl_callback",
    "policy_tlflist_callback",
    "policy_tlfarticle_callback",
    "policy_tlfarticle_etl_callback",
    "policy_hamilist_callback",
    "policy_hamiarticle_callback",
    "policy_hamiarticle_etl_callback",
    "policy_akslist_callback",
    "policy_aksarticle_callback",
    "policy_aksarticle_etl_callback",
    "policy_kashilist_callback",
    "policy_kashiarticle_callback",
    "policy_kashiarticle_etl_callback",
    "policy_xjhtlist_callback",
    "policy_xjhtarticle_callback",
    "policy_xjhtarticle_etl_callback",
    "policy_cjlist_callback",
    "policy_cjarticle_callback",
    "policy_cjarticle_etl_callback",
    "policy_xjbozlist_callback",
    "policy_xjbozarticle_callback",
    "policy_xjbozarticle_etl_callback",
    "policy_xjbzlist_callback",
    "policy_xjbzarticle_callback",
    "policy_xjbzarticle_etl_callback",
    "policy_xjkzlist_callback",
    "policy_xjkzlist1_callback",
    "policy_xjkzarticle_callback",
    "policy_xjkzarticle_etl_callback",
    "policy_xjyllist_callback",
    "policy_xjylarticle_callback",
    "policy_xjylarticle_etl_callback",
    "policy_xjtclist_callback",
    "policy_xjtcarticle_callback",
    "policy_xjtcarticle_etl_callback",
    "policy_xjaltlist_callback",
    "policy_xjaltlist1_callback",
    "policy_xjaltarticle_callback",
    "policy_xjaltarticle_etl_callback",
    "policy_drchljlist_callback",
    "policy_drchljlist1_callback",
    "policy_drchljarticle_callback",
    "policy_drchljarticle_etl_callback",
    "policy_gxthljlist_callback",
    "policy_gxthljarticle_callback",
    "policy_gxthljarticle_etl_callback",
    "policy_hljkjtlist_callback",
    "policy_hljkjtarticle_callback",
    "policy_hljkjtarticle_etl_callback",
    "policy_jythljlist_callback",
    "policy_jythljarticle_callback",
    "policy_jythljarticle_etl_callback",
    "policy_mzthljlist_callback",
    "policy_mzthljarticle_callback",
    "policy_mzthljarticle_etl_callback",
    "policy_czthljlist_callback",
    "policy_czthljarticle_callback",
    "policy_czthljarticle_etl_callback",
    "policy_hrsshljlist_callback",
    "policy_hrsshljlist1_callback",
    "policy_hrsshljarticle_callback",
    "policy_hrsshljarticle_etl_callback",
    "policy_nyncthljlist_callback",
    "policy_nyncthljarticle_callback",
    "policy_nyncthljarticle_etl_callback",
    "policy_zfcxjsthljlist_callback",
    "policy_zfcxjsthljarticle_callback",
    "policy_zfcxjsthljarticle_etl_callback",
    "policy_wsjkwhljlist_callback",
    "policy_wsjkwhljarticle_callback",
    "policy_wsjkwhljarticle_etl_callback",
    "policy_harbinlist_callback",
    "policy_harbinarticle_callback",
    "policy_harbinarticle_etl_callback",
    "policy_qqhrlist_callback",
    "policy_qqhrarticle_callback",
    "policy_qqhrarticle_etl_callback",
    "policy_jixilist_callback",
    "policy_jixiarticle_callback",
    "policy_jixiarticle_etl_callback",
    "policy_heganglist_callback",
    "policy_heganglist1_callback",
    "policy_hegangarticle_callback",
    "policy_hegangarticle_etl_callback",
    "policy_shuangyashanlist_callback",
    "policy_shuangyashanarticle_callback",
    "policy_shuangyashanarticle_etl_callback",
    "policy_daqinglist_callback",
    "policy_daqingarticle_callback",
    "policy_daqingarticle_etl_callback",
    "policy_yclist_callback",
    "policy_ycarticle_callback",
    "policy_ycarticle_etl_callback",
    "policy_jmslist_callback",
    "policy_jmsarticle_callback",
    "policy_jmsarticle_etl_callback",
    "policy_qthlist_callback",
    "policy_qtharticle_callback",
    "policy_qtharticle_etl_callback",
    "policy_mdjlist_callback",
    "policy_mdjarticle_callback",
    "policy_mdjarticle_etl_callback",
    "policy_heihelist_callback",
    "policy_heihearticle_callback",
    "policy_heihearticle_etl_callback",
    "policy_suihualist_callback",
    "policy_suihuaarticle_callback",
    "policy_suihuaarticle_etl_callback",
    "policy_dxallist_callback",
    "policy_dxalarticle_callback",
    "policy_dxalarticle_etl_callback",
    "policy_fgwlnlist_callback",
    "policy_fgwlnarticle_callback",
    "policy_fgwlnarticle_etl_callback",
    "policy_gxtlnlist_callback",
    "policy_gxtlnarticle_callback",
    "policy_gxtlnarticle_etl_callback",
    "policy_kjtlnlist_callback",
    "policy_kjtlnarticle_callback",
    "policy_kjtlnarticle_etl_callback",
    "policy_jytlnlist_callback",
    "policy_jytlnarticle_callback",
    "policy_jytlnarticle_etl_callback",
    "policy_mztlnlist_callback",
    "policy_mztlnarticle_callback",
    "policy_mztlnarticle_etl_callback",
    "policy_cztlnlist_callback",
    "policy_cztlnarticle_callback",
    "policy_cztlnarticle_etl_callback",
    "policy_rstlnlist_callback",
    "policy_rstlnarticle_callback",
    "policy_rstlnarticle_etl_callback",
    "policy_nynclnlist_callback",
    "policy_nynclnarticle_callback",
    "policy_nynclnarticle_etl_callback",
    "policy_zjtlnlist_callback",
    "policy_zjtlnarticle_callback",
    "policy_zjtlnarticle_etl_callback",
    "policy_wsjklnlist_callback",
    "policy_wsjklnarticle_callback",
    "policy_wsjklnarticle_etl_callback",
    "policy_shenyanglist_callback",
    "policy_shenyangarticle_callback",
    "policy_shenyangarticle_etl_callback",
    "policy_dllist_callback",
    "policy_dllist1_callback",
    "policy_dlarticle_callback",
    "policy_dlarticle_etl_callback",
    "policy_anshanlist_callback",
    "policy_anshanlist1_callback",
    "policy_anshanlist2_callback",
    "policy_anshanarticle_callback",
    "policy_anshanarticle_etl_callback",
    "policy_fushunlist_callback",
    "policy_fushunlist1_callback",
    "policy_fushunarticle_callback",
    "policy_fushunarticle_etl_callback",
    "policy_benxilist_callback",
    "policy_benxiarticle_callback",
    "policy_benxiarticle_etl_callback",
    "policy_dandonglist_callback",
    "policy_dandonglist1_callback",
    "policy_dandongarticle_callback",
    "policy_dandongarticle_etl_callback",
    "policy_jzlist_callback",
    "policy_jzarticle_callback",
    "policy_jzarticle_etl_callback",
    "policy_yingkoulist_callback",
    "policy_yingkoulist1_callback",
    "policy_yingkouarticle_callback",
    "policy_yingkouarticle_etl_callback",
    "policy_fuxinlist_callback",
    "policy_fuxinarticle_callback",
    "policy_fuxinarticle_etl_callback",
    "policy_liaoyanglist_callback",
    "policy_liaoyanglist1_callback",
    "policy_liaoyangarticle_callback",
    "policy_liaoyangarticle_etl_callback",
    "policy_panjinlist_callback",
    "policy_panjinarticle_callback",
    "policy_panjinarticle_etl_callback",
    "policy_tielinglist_callback",
    "policy_tielingarticle_callback",
    "policy_tielingarticle_etl_callback",
    "policy_chaoyanglist_callback",
    "policy_chaoyanglist1_callback",
    "policy_chaoyangarticle_callback",
    "policy_chaoyangarticle_etl_callback",
    "policy_hldlist_callback",
    "policy_hldarticle_callback",
    "policy_hldarticle_etl_callback",
]


def clean_pubdate(value):
    """Normalize a free-form date string to an 8-digit YYYYMMDD string.

    Strips every non-digit character, truncates to 8 digits and right-pads
    with zeros, then zeroes out the month+day (or day) portion when it is
    out of range. Returns '' for falsy input.
    """
    if not value:
        return ''
    # Raw string: '\D' is a regex escape, not a (deprecated) string escape.
    value = re.sub(r'\D', '', value)
    value = value[:8].ljust(8, '0')
    if int(value[4:6]) > 12:
        # Month is invalid -> keep only the year.
        value = value[:4] + '0000'
    if int(value[6:]) > 31:
        # Day is invalid -> keep year and month.
        value = value[:6] + '00'
    return value


def cleaned(value):
    """Collapse a string or list of strings into a single stripped string.

    A list is joined with single spaces after stripping each element;
    falsy input yields ''.
    """
    if not value:
        return ""
    # isinstance (not `type(...) is list`) is the idiomatic type check.
    if isinstance(value, list):
        return ' '.join(part.strip() for part in value).strip()
    return value.strip()


def judge_url(url):
    """Return True when *url* should be skipped as a non-downloadable link.

    Rejects over-long URLs, anchor/script/mailto pseudo-links, known search
    engine and social-media hosts, punctuation-corrupted URLs (full-width
    brackets/commas pasted into text), and page-like rather than file-like
    extensions. Returns False for plausibly downloadable attachment URLs.
    """
    if len(url) > 500:
        return True
    # A URL with no '/' beyond the scheme separator has no path at all.
    if '/' not in url.replace('//', ''):
        return True
    bad_fragments = (
        'mailt', 'data:image/', 'javascript:', '#', 'weixin.qq',
        '.baidu', '。', '@163', '.cn/）', '8080）', 'cn）',
        'cn，', 'com，', 'cn,', 'haosou.', 'www.so.', 'file://',
        'C:', 'baike.soso', 'weibo.com', 'baike.sogou', 'html）',
        'shtml）', 'phtml）', 'wx.qq.', 'bing.com',
    )
    if any(fragment in url for fragment in bad_fragments):
        return True
    # str.endswith accepts a tuple: one call instead of a chain of `or`s.
    if url.endswith(('/', '.net', '.asp', '.shtml', '/share', '.exe',
                     '.xml', 'pdf}', 'jpg}')):
        return True
    last_segment = url.split('/')[-1].lower()
    if not last_segment:
        return True
    if last_segment.endswith(('.htm', '.shtml', '.jhtml', '.org', 'xhtml',
                              '.phtml', '.cn', '.com', '.html', '.mht',
                              '.html%20')):
        return True
    # Short .jsp-style endings are dynamic pages, not files.
    if '.jsp' in last_segment and len(last_segment.split('.', 1)[1]) < 7:
        return True

    return False


def get_file_info(data, res, xpath):
    """Collect attachment (<a href>) and embedded (@src) links under *xpath*.

    Each link is resolved against data['provider_url'], filtered through
    judge_url, and de-duplicated while preserving first-seen order. Returns
    a list of {'url', 'name', 'pub_year', 'keyid'} dicts.
    """
    base_url = data['provider_url']
    pub_year = data['pub_year']
    keyid = data['keyid']
    seen_urls = set()
    file_info = []

    for tag in res.xpath(f'{xpath}//a'):
        href = tag.xpath('@href').extract_first()
        if not href or not href.strip():
            continue
        try:
            full_url = parse.urljoin(base_url, href.strip())
        except:
            continue
        if judge_url(full_url) or full_url in seen_urls:
            continue
        seen_urls.add(full_url)
        link_text = ''.join(tag.xpath('.//text()').extract()).strip()
        file_info.append({'url': full_url, 'name': link_text,
                          'pub_year': pub_year, 'keyid': keyid})

    for src in res.xpath(f'{xpath}//*/@src').extract():
        src = src.strip()
        if not src:
            continue
        img_url = parse.urljoin(base_url, src)
        if judge_url(img_url) or img_url in seen_urls:
            continue
        seen_urls.add(img_url)
        file_info.append({'url': img_url, 'name': src,
                          'pub_year': pub_year, 'keyid': keyid})

    return file_info


def deal_sql_dict(sql_dict):
    """Strip bookkeeping columns from a task-row dict before re-insertion.

    Removes id/timestamp/status columns in place (raising KeyError if one
    is missing, as before) and returns the same dict for chaining.
    """
    for column in ("id", "update_time", "create_time", "null_dicts",
                   "err_msg", "other_dicts", "state", "failcount"):
        sql_dict.pop(column)
    return sql_dict


def init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider):
    """Build the common metadata record shared by all policy articles.

    down_date_str is 'YYYYMMDD_HHMMSS'; its date part becomes latest_date.
    """
    return {
        'rawid': rawid,
        'rawid_mysql': rawid,
        'lngid': lngid,
        'keyid': lngid,
        'product': product,
        'sub_db': 'POLICY',
        'sub_db_id': sub_db_id,
        'provider': 'CNGOV',
        'zt_provider': zt_provider,
        'source_type': '16',
        'latest_date': down_date_str[:8],
        'batch': down_date_str,
        'vision': '1',
        'is_deprecated': '0',
        'country': 'CN',
        'language': 'ZH',
    }


def init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year):
    """Build the fulltext companion record for a policy article.

    The fulltext is stored inline as HTML; addr/size stay empty because no
    external file is written here.
    """
    return {
        'lngid': lngid,
        'keyid': lngid,
        'sub_db_id': sub_db_id,
        'source_type': '16',
        'latest_date': down_date_str[:8],
        'batch': down_date_str,
        'is_deprecated': '0',
        'filename': f"{lngid}.html",
        'fulltext_type': "html",
        'fulltext_addr': '',
        'fulltext_size': '',
        'fulltext_txt': fulltext,
        'page_cnt': "1",
        'pub_year': pub_year,
    }


# 宁夏回族自治区发展和改革委员会 (Ningxia Development and Reform Commission)
def policy_fzggwnxlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a fzggw.nx.gov.cn list page and schedule follow-up tasks.

    On the first page (page_index == 0) the total page count is read from
    the createPageHTML(...) pager script and one list task per remaining
    page is inserted; every <li> entry then becomes an article task whose
    url/title/pub_date are serialized into article_json.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string: '\(' and '\d' are regex escapes, not string escapes.
        max_count = re.findall(r'createPageHTML\((\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: fan out one list task per remaining page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="gl-list-l"]/li|//div[@class="zfxxgk_zdgkc"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://fzggw.nx.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the document file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99683'
            article_json["url"] = url
            # NOTE(review): extract_first() may return None for entries
            # without text, which would raise AttributeError here — confirm
            # all list layouts carry a/text() and span|b dates.
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()|b/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_fzggwnxarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article download stage needs no extra scheduling; return an empty plan."""
    return DealModel()


def policy_fzggwnxarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for 宁夏发改委 (fzggw.nx.gov.cn) article pages.

    Extracts title / pub_no / index_no / written_date / organ from the
    gk-xl-table metadata table, emits a policy_latest row plus the HTML
    fulltext as policy_fulltext_latest, and records attachment links in
    the task row's other_dicts.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page headline; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="article-tit"]//h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="gk-xl-table"]//th[contains(text(),"文号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="gk-xl-table"]//th[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="gk-xl-table"]//th[contains(text(),"生成日期")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="gk-xl-table"]//th[contains(text(),"所属机构")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('区'):
        # The site abbreviates the issuing organ; restore the full prefix.
        organ = '宁夏回族自治' + organ

    fulltext_xpath = '//div[@class="article-con"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Include the URL so task logs show which page failed to parse.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99683'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "FZGGWNX"
    zt_provider = "fzggwnxgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment/image links (or an empty marker) on the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 宁夏回族自治区工业和信息化厅 (Ningxia Dept. of Industry and Information Technology)
def policy_gxtnxlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a gxt.nx.gov.cn list page and schedule follow-up tasks.

    On the first page (page_index == 0) the total page count is read from
    the createPage(...) pager script and one list task per remaining page
    is inserted; every <li> entry then becomes an article task whose
    url/title/pub_date are serialized into article_json.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string: '\(' and '\d' are regex escapes, not string escapes.
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: fan out one list task per remaining page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="fl ul"]/li|//div[@class="wzlb_list"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'https://gxt.nx.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the document file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99684'
            article_json["url"] = url
            # NOTE(review): extract_first() may return None for entries
            # without text, which would raise AttributeError here — confirm
            # all list layouts carry a/text() and <time> dates.
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('time/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_gxtnxarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article download stage needs no extra scheduling; return an empty plan."""
    return DealModel()


def policy_gxtnxarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for 宁夏工信厅 (gxt.nx.gov.cn) article pages.

    Extracts title / pub_no / written_date / legal_status / organ from the
    td-based metadata table, emits a policy_latest row plus the HTML
    fulltext as policy_fulltext_latest, and records attachment links in
    the task row's other_dicts.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page headline; fall back to the list-page title.
    title = ''.join(res.xpath('//header//h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table//td[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table//td[contains(text(),"成文时间")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table//td[contains(text(),"所属机构")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('自治区'):
        # The site abbreviates the issuing organ; restore the full prefix.
        organ = '宁夏回族' + organ

    fulltext_xpath = '//div[@id="vsb_content_2"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Include the URL so task logs show which page failed to parse.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99684'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "GXTNX"
    zt_provider = "gxtnxgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment/image links (or an empty marker) on the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 宁夏回族自治区科学技术厅 (Ningxia Dept. of Science and Technology)
def policy_kjtnxlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a kjt.nx.gov.cn list page and schedule follow-up tasks.

    On the first page (page_index == 0) the total page count is read from
    the createPageHTML(...) pager script and one list task per remaining
    page is inserted; every <li> entry then becomes an article task whose
    url/title/pub_date are serialized into article_json.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string: '\(' and '\d' are regex escapes, not string escapes.
        max_count = re.findall(r'createPageHTML\((\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: fan out one list task per remaining page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="list_ul fs"]/li|//div[@class="zfxxgk_zdgkc"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'https://kjt.nx.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the document file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99685'
            article_json["url"] = url
            # NOTE(review): extract_first() may return None for entries
            # without text, which would raise AttributeError here — confirm
            # all list layouts carry a/text() and span|b dates.
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()|b/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_kjtnxarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article download stage needs no extra scheduling; return an empty plan."""
    return DealModel()


def policy_kjtnxarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for article pages of 宁夏回族自治区科学技术厅 (kjt.nx.gov.cn).

    Extracts title/metadata/fulltext from the downloaded HTML, queues rows
    for the policy_latest and policy_fulltext_latest tables, and writes
    attachment info back onto the source row (other_dicts).

    Raises:
        Exception: when the fulltext container cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//h1[@class="h1"]//text()|//h1[@class="title"]//text()|//div[@class="zwxxgk_ndbgwz"]//h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata sits in a "zm-table3" table laid out as <th>label</th><td>value</td>.
    pub_no = ''.join(res.xpath('//table[@class="zm-table3"]//th[contains(text(),"文号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="zm-table3"]//th[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="zm-table3"]//th[contains(text(),"生成日期")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="zm-table3"]//th[contains(text(),"是否有效")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="zm-table3"]//th[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('自治区'):
        organ = '宁夏回族' + organ

    fulltext_xpath = '//div[@id="Zoom"]|//div[@class="zz-xl-sec"]|//div[@class="infoinfo"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99685'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "KJTNX"
    zt_provider = "kjtnxgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record any attachment links found inside the fulltext on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   宁夏回族自治区教育厅
def policy_jytnxlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for 宁夏回族自治区教育厅 (jyt.nx.gov.cn).

    On page 0 it schedules fetches of the remaining list pages; on every
    page it extracts article links and queues them for the article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the createPageHTML(...) JS call.
        max_count = re.findall(r'createPageHTML\((\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue list tasks for pages 1..total_page-1.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="gl-list-l nx-bor2"]/li|//div[@class="zfxxgk_zdgkc"]/ul/li|//div[@class="zfxxgk_zd1"]')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://jyt.nx.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article's filename without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99686'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()|b/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jytnxarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for jyt.nx.gov.cn; all parsing is done in the ETL step."""
    return DealModel()


def policy_jytnxarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for article pages of 宁夏回族自治区教育厅 (jyt.nx.gov.cn).

    Extracts title/metadata/fulltext from the downloaded HTML, queues rows
    for the policy_latest and policy_fulltext_latest tables, and writes
    attachment info back onto the source row (other_dicts).

    Raises:
        Exception: when the fulltext container cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Some list pages carry no date; fall back to the on-page "生成日期" cell.
    if not pub_date:
        pub_date_info = ''.join(res.xpath('//table[@class="zm-table3"]//th[contains(text(),"生成日期")]/following::td[1]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]

    # Prefer the on-page title; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="xl-tit"]//h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata sits in an "article-info" table of <td>label</td><td>value</td> pairs.
    pub_no = ''.join(res.xpath('//table[@class="article-info"]//td[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="article-info"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="article-info"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('自治区'):
        organ = '宁夏回族' + organ

    fulltext_xpath = '//div[@id="ofdneed"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99686'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JYTNX"
    zt_provider = "jytnxgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record any attachment links found inside the fulltext on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   宁夏回族自治区民政厅
def policy_macnxlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for 宁夏回族自治区民政厅 (mca.nx.gov.cn).

    On page 0 it schedules fetches of the remaining list pages; on every
    page it extracts article links and queues them for the article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the createPageHTML(...) JS call.
        max_count = re.findall(r'createPageHTML\((\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue list tasks for pages 1..total_page-1.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="gl-con-l"]/li|//div[@class="zfxxgk_zdgkc"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://mca.nx.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article's filename without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99687'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            # The 政策解读 column splits the date across several nodes.
            if 'zwgk/zcfg/zcjd' in callmodel.sql_model.list_rawid:
                article_json["pub_date"] = ''.join(li.xpath('span/text()|b/text()').extract()).strip()
            else:
                article_json["pub_date"] = li.xpath('span/text()|b/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_macnxarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for mca.nx.gov.cn; all parsing is done in the ETL step."""
    return DealModel()


def policy_macnxarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for article pages of 宁夏回族自治区民政厅 (mca.nx.gov.cn).

    The site serves two metadata layouts (a "zm-table3" table and an older
    span-based table); both are handled. Queues rows for policy_latest and
    policy_fulltext_latest and writes attachment info back onto the source
    row (other_dicts).

    Raises:
        Exception: when the fulltext container cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="xl-tit"]//h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Some list pages carry no date; fall back to the on-page "生成日期" cell.
    if not pub_date:
        pub_date_info = ''.join(res.xpath('//table[@class="zm-table3"]//th[contains(text(),"生成日期")]/following::td[1]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    if 'zm-table3' in html:
        # Newer layout: <th>label</th><td>value</td> pairs.
        pub_no = ''.join(res.xpath('//table[@class="zm-table3"]//th[contains(text(),"文号")]/following::td[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath('//table[@class="zm-table3"]//th[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
        subject = ''
        subject_word = ''
        written_date = ''.join(res.xpath('//table[@class="zm-table3"]//th[contains(text(),"生成日期")]/following::td[1]/text()').extract()).strip()
        legal_status = ''.join(res.xpath('//table[@class="zm-table3"]//th[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
        organ = ''.join(res.xpath('//table[@class="zm-table3"]//th[contains(text(),"所属机构")]/following::td[1]/text()').extract()).strip()
    else:
        # Older layout: label in a <span>, value nested in td/p/span.
        pub_no = ''.join(res.xpath('//table//span[contains(text(),"文") and contains(text(),"号")]/following::td[1]/p/span/text()').extract()).strip()
        index_no = ''.join(res.xpath('//table//span[contains(text(),"索 引 号")]/following::td[1]/p/span/text()').extract()).strip()
        subject = ''.join(res.xpath('//table//span[contains(text(),"主题分类")]/following::td[1]/p/span/text()').extract()).strip()
        subject_word = ''.join(res.xpath('//table//span[contains(text(),"主") and contains(text(),"词")]/following::td[1]/p/span/text()').extract()).strip()
        written_date = ''
        legal_status = ''
        organ = ''.join(res.xpath('//table//span[contains(text(),"信息来源")]/following::td[1]/p/span/text()').extract()).strip()
    if organ.startswith('自治区'):
        organ = '宁夏回族' + organ

    fulltext_xpath = '//div[@class="article-con"]|//div[@class="gkxl-article"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99687'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "MACNX"
    zt_provider = "macnxgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record any attachment links found inside the fulltext on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   宁夏回族自治区财政厅
def policy_cztnxlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for 宁夏回族自治区财政厅 (czt.nx.gov.cn).

    On page 0 it schedules fetches of the remaining list pages; on every
    page it extracts article links and queues them for the article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the createPageHTML(...) JS call.
        max_count = re.findall(r'createPageHTML\((\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue list tasks for pages 1..total_page-1.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="gl-list"]/li|//div[@class="zfxxgk_zdgkc"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'https://czt.nx.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article's filename without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99688'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()|b/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cztnxarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for czt.nx.gov.cn; all parsing is done in the ETL step."""
    return DealModel()


def policy_cztnxarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for article pages of 宁夏回族自治区财政厅 (czt.nx.gov.cn).

    Extracts title/metadata/fulltext from the downloaded HTML, queues rows
    for the policy_latest and policy_fulltext_latest tables, and writes
    attachment info back onto the source row (other_dicts).

    Raises:
        Exception: when the fulltext container cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//h1[@class="gk-xl-t"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata sits in a "zm-table3" table laid out as <th>label</th><td>value</td>.
    pub_no = ''.join(res.xpath('//table[@class="zm-table3"]//th[contains(text(),"文号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="zm-table3"]//th[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="zm-table3"]//th[contains(text(),"生成日期")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="zm-table3"]//th[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="zm-table3"]//th[contains(text(),"所属机构")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('自治区'):
        organ = '宁夏回族' + organ

    fulltext_xpath = '//div[@class="gk-artcle-con"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99688'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CZTNX"
    zt_provider = "cztnxgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record any attachment links found inside the fulltext on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   宁夏回族自治区人力资源和社会保障厅
def policy_hrssnxlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for 宁夏回族自治区人力资源和社会保障厅 (hrss.nx.gov.cn).

    On page 0 it schedules fetches of the remaining list pages; on every
    page it extracts article links and queues them for the article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the createPageHTML(...) JS call.
        max_count = re.findall(r'createPageHTML\((\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue list tasks for pages 1..total_page-1.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="zfxxgk_zdgkc"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://hrss.nx.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article's filename without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99689'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('b/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_hrssnxarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for hrss.nx.gov.cn; all parsing is done in the ETL step."""
    return DealModel()


def policy_hrssnxarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for article pages of 宁夏回族自治区人力资源和社会保障厅 (hrss.nx.gov.cn).

    Extracts title/metadata/fulltext from the downloaded HTML, queues rows
    for the policy_latest and policy_fulltext_latest tables, and writes
    attachment info (from both the fulltext body and the attachment list
    below it) back onto the source row (other_dicts).

    Raises:
        Exception: when the fulltext container cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="xwnr_title"]//text()|//h1[@class="tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata sits in a "zm-table3" table laid out as <th>label</th><td>value</td>.
    pub_no = ''.join(res.xpath('//table[@class="zm-table3"]//th[contains(text(),"文号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="zm-table3"]//th[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="zm-table3"]//th[contains(text(),"生成日期")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="zm-table3"]//th[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="zm-table3"]//th[contains(text(),"所属机构")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('自治区'):
        organ = '宁夏回族' + organ

    fulltext_xpath = '//div[@id="info_content"]|//div[contains(@class,"scroll_cont")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99689'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "HRSSNX"
    zt_provider = "hrssnxgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments can appear both inside the fulltext and in a separate
    # "info_content_opt" block; merge both lists onto the source row.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, '(//div[@class="info_content_opt"]/p)')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   宁夏回族自治区农业农村厅
def policy_nynctnxlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for 宁夏回族自治区农业农村厅 (nynct.nx.gov.cn).

    On page 0 it schedules fetches of the remaining list pages; on every
    page it extracts article links and queues them for the article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the createPageHTML(...) JS call.
        max_count = re.findall(r'createPageHTML\((\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue list tasks for pages 1..total_page-1.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="zfxxgk_zdgkc"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://nynct.nx.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article's filename without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99690'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/p/text()|a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('b/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_nynctnxarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch stage is a no-op for this site; parsing happens in the ETL callback."""
    return DealModel()


def policy_nynctnxarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for 宁夏农业农村厅 (Ningxia Dept. of Agriculture and Rural Affairs) policy articles.

    Parses the downloaded article HTML, emits rows for the ``policy_latest``
    and ``policy_fulltext_latest`` tables, and queues an update writing the
    attachment info back onto the originating task row.

    Raises:
        Exception: when the full-text container cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the one scraped from the list page.
    title = ''.join(res.xpath('//div[@class="tjh_gkzn_contit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata spans look like "label：value"; keep only the value part.
    pub_no = ''.join(res.xpath('//div[@class="det"]//span[contains(text(),"文 号：")]/text()').extract()).strip()
    pub_no = pub_no.split('：')[-1].strip()
    index_no = ''.join(res.xpath('//div[@class="det"]//span[contains(text(),"索引号：")]/text()').extract()).strip()
    index_no = index_no.split('：')[-1].strip()
    written_date = ''.join(res.xpath('//div[@class="det"]//span[contains(text(),"生成日期：")]/text()').extract()).strip()
    written_date = written_date.split('：')[-1].strip()
    organ = ''.join(res.xpath('//div[@class="det"]//span[contains(text(),"所属机构：")]/text()').extract()).strip()
    organ = organ.split('：')[-1].strip()
    if organ.startswith('自治区'):
        # The site abbreviates the region; restore the full prefix.
        organ = '宁夏回族' + organ

    fulltext_xpath = '//div[@class="tjh_gkzn_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found by {fulltext_xpath}: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99690'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "NYNCTNX"
    zt_provider = "nynctnxgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (if any) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    other_dicts = json.dumps(file_info, ensure_ascii=False) if file_info else "{}"
    di_model_bef.update.update({"other_dicts": other_dicts})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   宁夏回族自治区住房和城乡建设厅 (Ningxia Dept. of Housing and Urban-Rural Development)
def policy_jstnxlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback: on page 0, enqueue tasks for the remaining list
    pages; always enqueue one article task per entry found on this page.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in a createPageHTML(...) JS call.
        max_count = re.findall(r'createPageHTML\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: fan out list tasks for pages 1..total_page-1.
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="sj-list mt10"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li.xpath('a/@href').extract_first()
            if not href:
                # Entries without a link cannot become article tasks.
                continue
            base_url = f'https://jst.nx.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid = document file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99691'
            article_json = {
                "url": url,
                "title": li.xpath('a/text()').extract_first().strip(),
                "pub_date": li.xpath('a/time/text()|time/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jstnxarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch stage is a no-op for this site; parsing happens in the ETL callback."""
    return DealModel()


def policy_jstnxarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for 宁夏住房和城乡建设厅 (Ningxia Dept. of Housing and Urban-Rural
    Development) policy articles.

    Only title/url/date metadata is available on this site; the detail pages
    expose no document-number/organ table, so those fields stay at their
    ``init_data`` defaults.

    Raises:
        Exception: when the full-text container cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the one scraped from the list page.
    title = ''.join(res.xpath('//header//h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@id="ofdneed"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found by {fulltext_xpath}: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99691'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JSTNX"
    zt_provider = "jstnxgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (if any) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    other_dicts = json.dumps(file_info, ensure_ascii=False) if file_info else "{}"
    di_model_bef.update.update({"other_dicts": other_dicts})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   宁夏回族自治区卫生健康委员会 (Ningxia Health Commission)
def policy_wsjkwnxlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback: on page 0, enqueue tasks for the remaining list
    pages; always enqueue one article task per entry found on this page.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count comes from a JS pagination call; two variants exist on this site.
        max_count = re.findall(r'createPageHTML\((\d+)', para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: fan out list tasks for pages 1..total_page-1.
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="zfxxgk_zd1"]|//div[@class="zfxxgk_zd2"]/ul/li|//div[@class="zfxxgk_zdgkc"]/ul/li|//ul[@class="gl-list-l nx-bor2"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li.xpath('a/@href').extract_first()
            if not href:
                # Entries without a link cannot become article tasks.
                continue
            base_url = f'http://wsjkw.nx.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid = document file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99692'
            article_json = {
                "url": url,
                "title": li.xpath('a/text()').extract_first().strip(),
                "pub_date": li.xpath('span/text()|b/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_wsjkwnxarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch stage is a no-op for this site; parsing happens in the ETL callback."""
    return DealModel()


def policy_wsjkwnxarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for 宁夏卫生健康委员会 (Ningxia Health Commission) policy articles.

    Parses the downloaded article HTML, emits rows for the ``policy_latest``
    and ``policy_fulltext_latest`` tables, and queues an update writing the
    attachment info back onto the originating task row.

    Raises:
        Exception: when the full-text container cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the one scraped from the list page.
    title = ''.join(res.xpath('//div[@class="xl-tit"]//h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Prefer the publish time shown on the page; fall back to the list-page date.
    # NOTE(review): extract_first() may be None here — assumes clean_pubdate
    # tolerates None; confirm against its implementation.
    pub_date_info = res.xpath('//span[contains(text(),"发表时间：")]/text()').extract_first()
    pub_date = clean_pubdate(pub_date_info)
    if not pub_date:
        pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]

    # Metadata lives in the "article-info" table as label/value cell pairs.
    pub_no = ''.join(res.xpath('//table[@class="article-info"]//td[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="article-info"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="article-info"]//td[contains(text(),"生成日期")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="article-info"]//td[contains(text(),"是否有效")]/following::td[1]/text()').extract()).strip()
    subject_word = ''.join(res.xpath('//table[@class="article-info"]//td[contains(text(),"关键词")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="article-info"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('自治区'):
        # The site abbreviates the region; restore the full prefix.
        organ = '宁夏回族' + organ

    fulltext_xpath = '//div[contains(@class,"article-con")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found by {fulltext_xpath}: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99692'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "WSJKWNX"
    zt_provider = "wsjkwnxgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (if any) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    other_dicts = json.dumps(file_info, ensure_ascii=False) if file_info else "{}"
    di_model_bef.update.update({"other_dicts": other_dicts})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   宁夏回族自治区银川市 (Yinchuan municipal government)
def policy_yinchuanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback: on page 0, enqueue tasks for the remaining list
    pages; always enqueue one article task per entry found on this page.

    Two page layouts exist: regulation channels (``xzgfxwj``/``qtzfwj``)
    render a table, everything else renders a plain list.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count comes from a JS pagination call; two variants exist on this site.
        max_count = re.findall(r'createPageHTML\((\d+)', para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall(r'countPage = (\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: fan out list tasks for pages 1..total_page-1.
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        if 'xzgfxwj' in callmodel.sql_model.list_rawid or 'qtzfwj' in callmodel.sql_model.list_rawid:
            # Table layout: one document per row, link in the first cell.
            for row in res.xpath('//table[@class="tab"]/tbody/tr'):
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                href = row.xpath('td[1]/a/@href').extract_first()
                if not href:
                    # Header/separator rows carry no link.
                    continue
                base_url = f'http://www.yinchuan.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                # rawid = document file name without its extension.
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99693'
                article_json = {"url": url,
                                "title": row.xpath('td[1]/a/text()').extract_first().strip()}
                # The date column position differs on the bmwj_7733 channel.
                if 'bmwj_7733' in callmodel.sql_model.list_rawid:
                    article_json["pub_date"] = row.xpath('td[3]/text()').extract_first().strip()
                else:
                    article_json["pub_date"] = row.xpath('td[5]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # Plain-list layout.
            for li in res.xpath('//div[@class="gl-right"]/ul/li|//div[@class="zfxxgk_zdgkc"]/ul/li'):
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                href = li.xpath('a[1]/@href').extract_first()
                if not href:
                    continue
                base_url = f'http://www.yinchuan.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99693'
                article_json = {
                    "url": url,
                    "title": li.xpath('a[1]/font/text()|a[1]/text()').extract_first().strip(),
                    "pub_date": li.xpath('span/text()|a/b/text()|b/text()').extract_first().strip(),
                }
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_yinchuanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch stage is a no-op for this site; parsing happens in the ETL callback."""
    return DealModel()


def policy_yinchuanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for 银川市 (Yinchuan) policy articles.

    Parses the downloaded article HTML, emits rows for the ``policy_latest``
    and ``policy_fulltext_latest`` tables, and queues an update writing the
    attachment info back onto the originating task row.

    Raises:
        Exception: when the full-text container cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title precedence: metadata table -> page heading -> list-page capture.
    title = ''.join(res.xpath('//table[@class="cm-table-fixed szf_zw-table"]//th[contains(text(),"名　称：")]/following::td[1]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="article"]//h1//text()|//div[@class="com"]//h3//text()|//div[@class="xl"]/h2//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata lives in the szf_zw table as label(th)/value(td) pairs.
    pub_no = ''.join(res.xpath('//table[@class="cm-table-fixed szf_zw-table"]//th[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="cm-table-fixed szf_zw-table"]//th[contains(text(),"索 引 号：")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="cm-table-fixed szf_zw-table"]//th[contains(text(),"成文日期：")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="cm-table-fixed szf_zw-table"]//th[contains(text(),"效力状态：")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="cm-table-fixed szf_zw-table"]//th[contains(text(),"发布机构：")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # The site abbreviates the city; restore the full prefix.
        organ = '银川' + organ

    fulltext_xpath = '//div[@class="xxgk-xl-text"]|//div[@class="con"]|//div[@class="text-con"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found by {fulltext_xpath}: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99693'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "YINCHUAN"
    zt_provider = "yinchuangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (if any) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    other_dicts = json.dumps(file_info, ensure_ascii=False) if file_info else "{}"
    di_model_bef.update.update({"other_dicts": other_dicts})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   宁夏回族自治区石嘴山市 (Shizuishan municipal government)
def policy_shizuishanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback: on page 0, enqueue tasks for the remaining list
    pages; always enqueue one article task per entry found on this page.

    The ``szfwj/`` channel renders a table; all other channels render lists.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in a createPageHTML(...) JS call.
        max_count = re.findall(r'createPageHTML\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: fan out list tasks for pages 1..total_page-1.
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        if 'szfwj/' in callmodel.sql_model.list_rawid:
            # Table layout: one document per row, link in the first cell.
            for row in res.xpath('//table[@class="tab"]/tbody/tr'):
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                href = row.xpath('td[1]/a/@href').extract_first()
                if not href:
                    # Header/separator rows carry no link.
                    continue
                base_url = f'http://www.shizuishan.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                # rawid = document file name without its extension.
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99694'
                article_json = {
                    "url": url,
                    "title": row.xpath('td[1]/a/text()').extract_first().strip(),
                    "pub_date": row.xpath('td[5]/text()').extract_first().strip(),
                }
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # Plain-list layout.
            for li in res.xpath('//ul[contains(@class,"xj-list")]/li|//div[@class="zfxxgk_zdgkc"]/ul/li'):
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                href = li.xpath('a/@href').extract_first()
                if not href:
                    continue
                base_url = f'http://www.shizuishan.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99694'
                article_json = {
                    "url": url,
                    "title": li.xpath('a/text()').extract_first().strip(),
                    "pub_date": ''.join(li.xpath('span/text()|b/text()').extract()).strip(),
                }
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_shizuishanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch stage is a no-op for this site; parsing happens in the ETL callback."""
    return DealModel()


def policy_shizuishanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for 石嘴山市 (Shizuishan) policy articles.

    Parses the downloaded article HTML, emits rows for the ``policy_latest``
    and ``policy_fulltext_latest`` tables, and queues an update writing the
    attachment info back onto the originating task row.

    Raises:
        Exception: when the full-text container cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the one scraped from the list page.
    title = ''.join(res.xpath('//h1[@class="title"]//text()|//h3[@class="art-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Prefer the list-page date; fall back to the metadata table's 生成日期.
    pub_date = clean_pubdate(article_json['pub_date'])
    if not pub_date:
        pub_date_info = ''.join(res.xpath('//table[@class="zm-table3"]//th[contains(text(),"生成日期")]/following::td[1]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
    pub_year = pub_date[:4]

    # Metadata lives in the zm-table3 table as label(th)/value(td) pairs.
    pub_no = ''.join(res.xpath('//table[@class="zm-table3"]//th[contains(text(),"文号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="zm-table3"]//th[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="zm-table3"]//th[contains(text(),"生成日期")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="zm-table3"]//th[contains(text(),"是否有效")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="zm-table3"]//th[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # The site abbreviates the city; restore the full prefix.
        organ = '石嘴山' + organ

    fulltext_xpath = '//div[@class="art-cont"]|//div[@class="zz-top-shadow"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found by {fulltext_xpath}: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99694'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "SHIZUISHAN"
    zt_provider = "shizuishangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (if any) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    other_dicts = json.dumps(file_info, ensure_ascii=False) if file_info else "{}"
    di_model_bef.update.update({"other_dicts": other_dicts})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   宁夏回族自治区吴忠市
def policy_wuzhonglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Wuzhong (吴忠市) government policy list page.

    On the first page (page_index == 0) the remaining list pages are
    queued for crawling; on every page the article links are extracted
    and queued under the next task tag.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # The total page count is embedded in a createPageHTML(<n>, ...) JS call.
        page_counts = re.findall(r'createPageHTML\((\d+)', html)
        total_page = int(page_counts[0]) if page_counts else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: schedule list pages 1 .. total_page-1.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        base_url = f'http://www.wuzhong.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
        if 'xxgk/gz/' in callmodel.sql_model.list_rawid:
            # Table-style listing.
            li_list = res.xpath('//table[@class="tab"]/tbody/tr')
            href_xp, title_xp, date_xp = 'td[1]/a/@href', 'td[1]/a/text()', 'td[5]/text()'
        else:
            # <ul>-style listing.
            li_list = res.xpath('//ul[contains(@class,"news-list")]/li|//div[@class="zfxxgk_zdgkc"]/ul/li')
            href_xp, title_xp, date_xp = 'a/@href|p/a/@href', 'p/a/text()|a/text()', 'span/text()|b/text()'
        for li in li_list:
            href = li.xpath(href_xp).extract_first()
            if not href:
                # Row without a link; same guard the other list callbacks use.
                continue
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            temp = info_dicts.copy()
            # Articles are processed under the *next* task tag.
            temp["task_tag"] = temp.pop("task_tag_next")
            temp["rawid"] = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["sub_db_id"] = '99695'
            article_json = {
                "url": url,
                "title": li.xpath(title_xp).extract_first().strip(),
                "pub_date": li.xpath(date_xp).extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_wuzhongarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article download stage for Wuzhong: no post-processing, return an empty DealModel."""
    return DealModel()


def policy_wuzhongarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for a Wuzhong (吴忠市) policy article page.

    Extracts the metadata table and the full text, builds rows for the
    policy_latest / policy_fulltext_latest tables, and writes any
    attachment info back onto the task row.

    Raises:
        Exception: when no full-text container is found on the page.
    """
    result = EtlDealModel()
    save_data = []

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the list-page title.
    title = ''.join(res.xpath('//h1[@class="title"]//text()|//h1[@class="article-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="zm-table3"]//th[contains(text(),"文号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="zm-table3"]//th[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="zm-table3"]//th[contains(text(),"生成日期")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="zm-table3"]//th[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="zm-table3"]//th[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # Qualify bare "市..." organ names with the city name.
        organ = '吴忠' + organ

    fulltext_xpath = '//div[@class="fl-ar-cont"]|//div[@class="zz-xl-ct"]|//div[@class="xl-con-jd"]|//div[@id="zoomcon"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the task is flagged instead of saving an empty record.
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99695'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "WUZHONG"
    zt_provider = "wuzhonggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   宁夏回族自治区固原市
def policy_nxgylist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Guyuan (固原市) government policy list page.

    On the first page (page_index == 0) the remaining list pages are
    queued for crawling; on every page the article links are extracted
    and queued under the next task tag.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # The total page count is embedded in a createPageHTML(<n>, ...) JS call.
        page_counts = re.findall(r'createPageHTML\((\d+)', html)
        total_page = int(page_counts[0]) if page_counts else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: schedule list pages 1 .. total_page-1.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        base_url = f'http://www.nxgy.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
        if 'zwgk/zcwj/qtwj' in callmodel.sql_model.list_rawid:
            # Table-style listing.
            li_list = res.xpath('//tr[@class="xxgk_nav_con"]')
            href_xp, title_xp, date_xp = 'td[1]/a[1]/@href', 'td[1]/a[1]/text()', 'td[5]/text()'
        else:
            # <ul>-style listing.
            li_list = res.xpath('//ul[contains(@class,"news-list")]/li|//ul[@class="pageList newsList"]/li')
            href_xp, title_xp, date_xp = 'p/a/@href|a/@href', 'p/a/text()|a/text()', 'span/text()'
        for li in li_list:
            href = li.xpath(href_xp).extract_first()
            if not href:
                # Row without a link; same guard the other list callbacks use.
                continue
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            temp = info_dicts.copy()
            # Articles are processed under the *next* task tag.
            temp["task_tag"] = temp.pop("task_tag_next")
            temp["rawid"] = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["sub_db_id"] = '99696'
            article_json = {
                "url": url,
                "title": li.xpath(title_xp).extract_first().strip(),
                "pub_date": li.xpath(date_xp).extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_nxgyarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article download stage for Guyuan: no post-processing, return an empty DealModel."""
    return DealModel()


def policy_nxgyarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for a Guyuan (固原市) policy article page.

    Extracts the metadata table and the full text, builds rows for the
    policy_latest / policy_fulltext_latest tables, and writes any
    attachment info back onto the task row.

    Raises:
        Exception: when no full-text container is found on the page.
    """
    result = EtlDealModel()
    save_data = []

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the list-page title.
    title = ''.join(res.xpath('//h1[@class="title"]//text()|//div[contains(@class,"xl_con_tit")]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="xl_tab2"]//td[contains(text(),"文 号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="xl_tab2"]//td[contains(text(),"索 引 号")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="xl_tab2"]//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="xl_tab2"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # Qualify bare "市..." organ names with the city name.
        organ = '固原' + organ

    fulltext_xpath = '//div[@class="xl_con_con"]|//div[@class="xl-content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the task is flagged instead of saving an empty record.
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99696'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "NXGY"
    zt_provider = "nxgygovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   宁夏回族自治区中卫市
def policy_nxzwlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Zhongwei (中卫市) government policy list page.

    On the first page (page_index == 0) the remaining list pages are
    queued for crawling; on every page the article links are extracted
    and queued under the next task tag.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # The total page count is embedded in a createPageHTML(<n>, ...) JS call.
        page_counts = re.findall(r'createPageHTML\((\d+)', html)
        total_page = int(page_counts[0]) if page_counts else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: schedule list pages 1 .. total_page-1.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        base_url = f'http://www.nxzw.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
        list_rawid = callmodel.sql_model.list_rawid
        if 'gfxwj/' in list_rawid or 'szfbwj' in list_rawid or 'qtwj' in list_rawid:
            # Table-style listing used by the document sections.
            li_list = res.xpath('//table[@class="tab"]/tbody/tr')
            href_xp, title_xp, date_xp = 'td[1]/a/@href', 'td[1]/a/text()', 'td[5]/text()'
        else:
            # Card/<ul>-style listing.
            li_list = res.xpath('//div[@class="ztzl-tabItem"]/div|//div[@class="zfxxgk_zdgkc"]/ul/li')
            href_xp, title_xp, date_xp = 'a/@href', 'a/text()', 'span/text()|a/b/text()|b/text()'
        for li in li_list:
            href = li.xpath(href_xp).extract_first()
            if not href:
                # Row without a link; same guard the other list callbacks use.
                continue
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            temp = info_dicts.copy()
            # Articles are processed under the *next* task tag.
            temp["task_tag"] = temp.pop("task_tag_next")
            temp["rawid"] = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["sub_db_id"] = '99697'
            article_json = {
                "url": url,
                "title": li.xpath(title_xp).extract_first().strip(),
                "pub_date": li.xpath(date_xp).extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_nxzwarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article download stage for Zhongwei: no post-processing, return an empty DealModel."""
    return DealModel()


def policy_nxzwarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for a Zhongwei (中卫市) policy article page.

    Extracts the metadata table and the full text, builds rows for the
    policy_latest / policy_fulltext_latest tables, and writes any
    attachment info back onto the task row.

    Raises:
        Exception: when no full-text container is found on the page.
    """
    result = EtlDealModel()
    save_data = []

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the list-page title.
    title = ''.join(res.xpath('//p[@class="title"]//text()|//h1[@class="title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="zm-table3"]//th[contains(text(),"文号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="zm-table3"]//th[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="zm-table3"]//th[contains(text(),"生成日期")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="zm-table3"]//th[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="zm-table3"]//th[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # Qualify bare "市..." organ names with the city name.
        organ = '中卫' + organ

    fulltext_xpath = '//div[@class="tyxl-content"]|//div[@class="zz-xl-sec"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the task is flagged instead of saving an empty record.
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99697'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "NXZW"
    zt_provider = "nxzwgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   新疆维吾尔自治区发展和改革委员会
def policy_xjdrcxinjianglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Xinjiang DRC (发展和改革委员会) policy list page.

    Pagination here is 1-based: the first page has page_index == 1 and
    schedules pages 2 .. total_page. Article links are queued under the
    next task tag on every page.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # The pager container id varies between 'page-div' and 'page_div'.
        page_counts = re.findall(r"createPageHTML\('page-div',(\d+)", html)
        if not page_counts:
            page_counts = re.findall(r"createPageHTML\('page_div',(\d+)", html)
        total_page = int(page_counts[0]) if page_counts else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: schedule list pages 2 .. total_page.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        base_url = 'http://xjdrc.xinjiang.gov.cn'
        li_list = res.xpath('//ul[@class="list"]/li|//div[@class="gknr_list"]/dl/dd')
        for li in li_list:
            href = li.xpath('a/@href').extract_first()
            if not href:
                # Row without a link; same guard the sibling Xinjiang
                # list callback uses.
                continue
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue
            temp = info_dicts.copy()
            # Articles are processed under the *next* task tag.
            temp["task_tag"] = temp.pop("task_tag_next")
            temp["rawid"] = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["sub_db_id"] = '99698'
            article_json = {
                "url": url,
                "title": li.xpath('a/text()').extract_first().strip(),
                "pub_date": li.xpath('span/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_xjdrcxinjiangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article download stage for Xinjiang DRC: no post-processing, return an empty DealModel."""
    return DealModel()


def policy_xjdrcxinjiangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for a Xinjiang DRC (发展和改革委员会) policy article page.

    Extracts the metadata box and the full text, builds rows for the
    policy_latest / policy_fulltext_latest tables, and writes any
    attachment info back onto the task row.

    Raises:
        Exception: when no full-text container is found on the page.
    """
    result = EtlDealModel()
    save_data = []

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="syhbox"]//h2//text()|//h1[@class="data-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="syhbox"]//font[contains(text(),"发文字号：")]/parent::li[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="syhbox"]//font[contains(text(),"索引号：")]/parent::li[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="syhbox"]//font[contains(text(),"主题分类：")]/parent::li[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="syhbox"]//font[contains(text(),"成文日期：")]/parent::li[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="syhbox"]//font[contains(text(),"有效性：")]/parent::li[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="syhbox"]//font[contains(text(),"发布机构：")]/parent::li[1]/text()').extract()).strip()
    if organ.startswith('自治区'):
        # Qualify bare "自治区..." organ names with the region name.
        organ = '新疆维吾尔' + organ

    fulltext_xpath = '//div[@class="gknbxq_detail"]|//div[@id="NewsContent"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the task is flagged instead of saving an empty record.
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99698'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "XJDRCXINJIANG"
    zt_provider = "xjdrcxinjianggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   新疆维吾尔自治区工业和信息化厅
def policy_gxtxinjianglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Xinjiang MIIT (工业和信息化厅) policy list page.

    Pagination here is 1-based: the first page has page_index == 1 and
    schedules pages 2 .. total_page. Article links are queued under the
    next task tag on every page.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # The pager container id varies between 'page-div' and 'page_div'.
        page_counts = re.findall(r"createPageHTML\('page-div',(\d+)", html)
        if not page_counts:
            page_counts = re.findall(r"createPageHTML\('page_div',(\d+)", html)
        total_page = int(page_counts[0]) if page_counts else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: schedule list pages 2 .. total_page.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        base_url = 'http://gxt.xinjiang.gov.cn'
        li_list = res.xpath('//ul[@class="list pt20"]/li|//div[@class="gknr_list"]/dl/dd')
        for li in li_list:
            href = li.xpath('a/@href').extract_first()
            if not href:
                # Row without a link.
                continue
            url = href if 'http' in href else base_url + href
            # Keep only article-style URLs (this site uses .shtml pages).
            if 'shtm' not in url:
                continue
            temp = info_dicts.copy()
            # Articles are processed under the *next* task tag.
            temp["task_tag"] = temp.pop("task_tag_next")
            temp["rawid"] = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["sub_db_id"] = '99699'
            article_json = {
                "url": url,
                "title": li.xpath('a/text()').extract_first().strip(),
                "pub_date": li.xpath('span/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_gxtxinjiangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article download stage for Xinjiang MIIT: no post-processing, return an empty DealModel."""
    return DealModel()


def policy_gxtxinjiangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for article pages from gxt.xinjiang.gov.cn.

    Parses the downloaded article HTML into a ``policy_latest`` record and a
    ``policy_fulltext_latest`` record, and writes attachment info back onto
    the source row via ``other_dicts``.

    Raises:
        Exception: when the full-text container cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    def _meta(label):
        # Pull the "<label>value" line out of the syhbox metadata list and
        # return the value after the full-width colon.
        raw = ''.join(res.xpath(f'//div[@class="syhbox"]//li[contains(text(),"{label}")]/text()').extract()).strip()
        return raw.split('：')[-1].strip()

    # Prefer the on-page title; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="syhbox"]//h2//text()|//div[@class="p20 contentbox"]//h3//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = _meta("发文字号：")
    index_no = _meta("索引号：")
    subject = _meta("主题分类：")
    written_date = _meta("成文日期：")
    organ = _meta("发文机关：")
    if organ.startswith('自治区'):
        # Expand the abbreviated region prefix to the full region name.
        organ = '新疆维吾尔' + organ

    fulltext_xpath = '//div[@class="gknbxq_detail"]|//div[@id="news_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext container not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99699'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "GXTXINJIANG"
    zt_provider = "gxtxinjianggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # debug trace of the generated record id

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (or an empty JSON object) back onto the article row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   新疆维吾尔自治区科学技术厅
def policy_kjtxinjianglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for kjt.xinjiang.gov.cn (科学技术厅).

    On the first list page, reads the total page count from the site's
    ``createPageHTML`` JS call and queues the remaining list pages; then
    extracts each article link (rawid, url, title, pub_date) and queues it
    for the article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # The paging container id varies between page templates.
        max_count = re.findall(r"createPageHTML\('page-div',(\d+)", html)
        if not max_count:
            max_count = re.findall(r"createPageHTML\('page_div',(\d+)", html)
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        li_list = res.xpath('//ul[@class="list"]/li|//div[@class="gknr_list"]/dl/dd')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                # Entries without a link (separators, decorations) would
                # otherwise raise TypeError on the membership test below.
                continue
            base_url = 'http://kjt.xinjiang.gov.cn'
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99700'
            article_json["url"] = url
            article_json["title"] = (li.xpath('a/text()').extract_first() or '').strip()
            article_json["pub_date"] = (li.xpath('span/text()').extract_first() or '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_kjtxinjiangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-download callback for kjt.xinjiang.gov.cn; nothing to do at
    this stage, so an empty deal model is returned."""
    return DealModel()


def policy_kjtxinjiangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for article pages from kjt.xinjiang.gov.cn.

    This site exposes no structured metadata block (pub_no / index_no /
    subject / organ are unavailable), so only title, pub_date and the full
    text are extracted into ``policy_latest`` / ``policy_fulltext_latest``.

    Raises:
        Exception: when the full-text container cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="gknbxq_top"]//h2//text()|//div[@class="detail"]//h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@class="gknbxq_detail"]|//div[@id="NewsContent"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext container not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99700'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "KJTXINJIANG"
    zt_provider = "kjtxinjianggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # debug trace of the generated record id

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (or an empty JSON object) back onto the article row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   新疆维吾尔自治区教育厅
def policy_jytxinjianglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for jyt.xinjiang.gov.cn (教育厅).

    On the first list page, reads the total page count from the site's
    ``createPageHTML`` JS call and queues the remaining list pages; then
    extracts each article link (rawid, url, title, pub_date) and queues it
    for the article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # The paging container id varies between page templates.
        max_count = re.findall(r"createPageHTML\('page-div',(\d+)", html)
        if not max_count:
            max_count = re.findall(r"createPageHTML\('page_div',(\d+)", html)
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        li_list = res.xpath('//div[@class="tab"]/ul/li|//div[@class="gknr_list"]/dl/dd')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                # Entries without a link would otherwise raise TypeError on
                # the membership test below.
                continue
            base_url = 'http://jyt.xinjiang.gov.cn'
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99701'
            article_json["url"] = url
            article_json["title"] = (li.xpath('a/text()').extract_first() or '').strip()
            article_json["pub_date"] = (li.xpath('div/text()|span/text()').extract_first() or '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jytxinjiangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-download callback for jyt.xinjiang.gov.cn; nothing to do at
    this stage, so an empty deal model is returned."""
    return DealModel()


def policy_jytxinjiangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for article pages from jyt.xinjiang.gov.cn.

    Parses the downloaded article HTML into a ``policy_latest`` record and a
    ``policy_fulltext_latest`` record, and writes attachment info back onto
    the source row via ``other_dicts``.

    Raises:
        Exception: when the full-text container cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    def _meta(label):
        # Pull the "<label>value" line out of the syhbox metadata list and
        # return the value after the full-width colon.
        raw = ''.join(res.xpath(f'//div[@class="syhbox"]//li[contains(text(),"{label}")]/text()').extract()).strip()
        return raw.split('：')[-1].strip()

    # Prefer the on-page title; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="gknbxq_top"]//h2//text()|//div[@class="list_newxq"]//h3//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = _meta("发文字号：")
    index_no = _meta("索引号：")
    subject = _meta("主题分类：")
    written_date = _meta("成文日期：")
    organ = _meta("发文机关：")
    if organ.startswith('自治区'):
        # Expand the abbreviated region prefix to the full region name.
        organ = '新疆维吾尔' + organ

    fulltext_xpath = '//div[@class="gknbxq_detail"]|//div[@class="neirong"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext container not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99701'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JYTXINJIANG"
    zt_provider = "jytxinjianggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # debug trace of the generated record id

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (or an empty JSON object) back onto the article row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   新疆维吾尔自治区民政厅
def policy_mztxinjianglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for mzt.xinjiang.gov.cn (民政厅).

    On the first list page, reads the total page count from the site's
    ``createPageHTML`` JS call and queues the remaining list pages; then
    extracts each article link (rawid, url, title, pub_date) and queues it
    for the article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # The paging container id varies between page templates.
        max_count = re.findall(r"createPageHTML\('page-div',(\d+)", html)
        if not max_count:
            max_count = re.findall(r"createPageHTML\('page_div',(\d+)", html)
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        li_list = res.xpath('//div[@class="list_con_content"]/ul/li|//div[@class="panel-list-right-main"]/ul/li|//div[@id="wzlm"]/dl/dd')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                # Entries without a link would otherwise make urljoin raise.
                continue
            # Relative links on this site resolve against the section index page.
            base_url = f'http://mzt.xinjiang.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99702'
            article_json["url"] = url
            article_json["title"] = (li.xpath('a/text()').extract_first() or '').strip()
            article_json["pub_date"] = ''.join(li.xpath('span/text()').extract()).strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_mztxinjiangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-download callback for mzt.xinjiang.gov.cn; nothing to do at
    this stage, so an empty deal model is returned."""
    return DealModel()


def policy_mztxinjiangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for article pages from mzt.xinjiang.gov.cn.

    This site exposes no structured metadata block (pub_no / index_no /
    subject / organ are unavailable), so only title, pub_date and the full
    text are extracted into ``policy_latest`` / ``policy_fulltext_latest``.

    Raises:
        Exception: when the full-text container cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="content_main_title"]//text()|//div[@class="gknbxq_top"]/h2//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@class="content_main_con"]|//div[@class="gknbxq_detail"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext container not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99702'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "MZTXINJIANG"
    zt_provider = "mztxinjianggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # debug trace of the generated record id

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (or an empty JSON object) back onto the article row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   新疆维吾尔自治区财政厅
def policy_cztxinjianglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for czt.xinjiang.gov.cn (财政厅).

    On the first list page, reads the total page count from the site's
    ``createPageHTML`` JS call and queues the remaining list pages; then
    extracts each article link (rawid, url, title, pub_date) and queues it
    for the article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # The paging container id varies between page templates.
        max_count = re.findall(r"createPageHTML\('page-div',(\d+)", html)
        if not max_count:
            max_count = re.findall(r"createPageHTML\('page_div',(\d+)", html)
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        li_list = res.xpath('//div[@class="rightlist fleft"]/ul/li|//div[@class="gknr_list"]/dl/dd')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('div/a/@href|a/@href').extract_first()
            if not href:
                # Entries without a link would otherwise raise TypeError on
                # the membership test below.
                continue
            base_url = 'http://czt.xinjiang.gov.cn'
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99703'
            article_json["url"] = url
            article_json["title"] = (li.xpath('div/a/text()|a/text()').extract_first() or '').strip()
            article_json["pub_date"] = (li.xpath('span/text()').extract_first() or '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cztxinjiangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-download callback for czt.xinjiang.gov.cn; nothing to do at
    this stage, so an empty deal model is returned."""
    return DealModel()


def policy_cztxinjiangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for article pages from czt.xinjiang.gov.cn.

    Unlike the sibling callbacks, pub_date is taken from the article page
    itself (the list page date was not reliable here); parsing fails hard
    when no date can be found on the page.

    Raises:
        Exception: when the publish date or the full-text container cannot
            be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    res = Selector(text=html)

    def _meta(label):
        # Pull the "<label>value" line out of the syhbox metadata list and
        # return the value after the full-width colon.
        raw = ''.join(res.xpath(f'//div[@class="syhbox"]//li[contains(text(),"{label}")]/text()').extract()).strip()
        return raw.split('：')[-1].strip()

    # Prefer the on-page title; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//h1[@class="article-title"]//text()|//div[@id="gknbxq_box"]//h2//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # pub_date comes from the article page only; no fallback to the list page.
    pub_date_info = ''.join(res.xpath('//div[@class="article"]/h3/text()|//p[contains(text(),"发布时间：")]/text()').extract()).strip()
    pub_date = clean_pubdate(pub_date_info)
    if not pub_date:
        raise Exception(f"pub_date not found: {provider_url}")
    pub_year = pub_date[:4]
    pub_no = _meta("发文字号：")
    index_no = _meta("索引号：")
    subject = _meta("主题分类：")
    written_date = _meta("成文日期：")
    organ = _meta("发文机关：")
    if organ.startswith('自治区'):
        # Expand the abbreviated region prefix to the full region name.
        organ = '新疆维吾尔' + organ

    fulltext_xpath = '//div[@class="gknbxq_detail"]|//div[@class="TRS_Editor"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext container not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99703'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CZTXINJIANG"
    zt_provider = "cztxinjianggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # debug trace of the generated record id

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (or an empty JSON object) back onto the article row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   新疆维吾尔自治区人力资源和社会保障厅
def policy_rstxinjianglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for rst.xinjiang.gov.cn (人力资源和社会保障厅).

    On the first list page, reads the total page count from the site's
    ``createPageHTML`` JS call and queues the remaining list pages; then
    extracts each article link (rawid, url, title, pub_date) and queues it
    for the article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # The paging container id varies between page templates.
        max_count = re.findall(r"createPageHTML\('page-div',(\d+)", html)
        if not max_count:
            max_count = re.findall(r"createPageHTML\('page_div',(\d+)", html)
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        li_list = res.xpath('//ul[@class="list_right_main_ul"]/li|//ul[@class="com-pic-news-list"]/li|//div[@id="wzlm"]/dl/dd')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('div/a/@href|a[1]/@href').extract_first()
            if not href:
                # Entries without a link would otherwise make urljoin raise.
                continue
            # Relative links on this site resolve against the section index page.
            base_url = f'http://rst.xinjiang.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99704'
            article_json["url"] = url
            article_json["title"] = (li.xpath('div/a/text()|a[@class="cpn-title"]/text()|a[1]/text()').extract_first() or '').strip()
            article_json["pub_date"] = (li.xpath('.//span[@class="cpn-t cpn-date"]/text()|span/text()').extract_first() or '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_rstxinjiangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for 人社厅 pages: no follow-up tasks are queued."""
    return DealModel()


def policy_rstxinjiangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for 新疆维吾尔自治区人力资源和社会保障厅 article pages.

    Parses title and pub_date from the article html (falling back to the values
    captured on the list page), extracts the fulltext container, and emits rows
    for policy_latest / policy_fulltext_latest plus an attachment-info update.

    Raises:
        Exception: when no known fulltext container matches the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']

    res = Selector(text=html)

    # Prefer the publish time embedded in the page; fall back to the list-page date.
    pub_date = ''.join(res.xpath('//publishtime//text()|//div[@class="gknbxq_top"]/p/text()').extract()).strip()
    if not pub_date:
        pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]

    # Prefer the on-page title; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="h2"]//text()|//div[@class="gknbxq_top"]/h2//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@id="NewsContent"]|//div[@class="gknbxq_detail"]|//div[@class="article-detail"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context so the failing URL can be diagnosed/retried.
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99704'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "RSTXINJIANG"
    zt_provider = "rstxinjianggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record any attachment links found inside the fulltext container.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   新疆维吾尔自治区农业农村厅
def policy_nynctxinjianglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for 新疆维吾尔自治区农业农村厅.

    On page 1 it derives the total page count from the site's
    createPageHTML(...) JS call and queues the remaining list pages; it then
    queues one article-stage task per list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count appears as createPageHTML('page-div',N,... (or 'page_div').
        max_count = re.findall(r"createPageHTML\('page-div',(\d+)", para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall(r"createPageHTML\('page_div',(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Queue the remaining list pages exactly once (from page 2 onward).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="list pt20"]/li|//div[@class="gknr_list"]/dl/dd|//ul[@class="innerList"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                # Skip entries without a link (decorative <li>/<dd> nodes).
                continue
            base_url = 'http://nynct.xinjiang.gov.cn'
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue
            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99705'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_nynctxinjiangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for 农业农村厅 pages: no follow-up tasks are queued."""
    return DealModel()


def policy_nynctxinjiangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for 新疆维吾尔自治区农业农村厅 article pages.

    Parses the title from the article html (falling back to the list-page
    title), extracts the fulltext container, and emits rows for
    policy_latest / policy_fulltext_latest plus an attachment-info update.

    Raises:
        Exception: when no known fulltext container matches the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="p20 contentbox"]//h3//text()|//div[@class="gknbxq_top"]//h2//text()|//p[@class="detail-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@id="news_content"]|//div[@class="gknbxq_detail"]|//div[@class="detail-content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context so the failing URL can be diagnosed/retried.
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99705'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "NYNCTXINJIANG"
    zt_provider = "nynctxinjianggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments can appear in the fulltext body or in a detail-files box.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, f'(//div[@class="detail-files"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   新疆维吾尔自治区住房和城乡建设厅
def policy_zjtxinjianglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for 新疆维吾尔自治区住房和城乡建设厅.

    On page 1 it derives the total page count from the site's
    createPageHTML(...) JS call and queues the remaining list pages; it then
    queues one article-stage task per list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count appears as createPageHTML('page-div',N,... (or 'page_div').
        max_count = re.findall(r"createPageHTML\('page-div',(\d+)", para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall(r"createPageHTML\('page_div',(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Queue the remaining list pages exactly once (from page 2 onward).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="list pt20"]/li|//ul[@class="innerList"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                # Skip entries without a link (decorative <li> nodes).
                continue
            base_url = 'http://zjt.xinjiang.gov.cn'
            url = href if 'http' in href else base_url + href
            if 'shtm' not in url:
                continue
            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99706'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_zjtxinjiangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for 住建厅 pages: no follow-up tasks are queued."""
    return DealModel()


def policy_zjtxinjiangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for 新疆维吾尔自治区住房和城乡建设厅 article pages.

    Parses the title plus document metadata (pub_no / index_no / written_date /
    legal_status / organ) from the zwgkTableDetail table, extracts the fulltext
    container, and emits rows for policy_latest / policy_fulltext_latest plus
    an attachment-info update.

    Raises:
        Exception: when no known fulltext container matches the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="p20 contentbox"]//h3//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="zwgkTableDetail"]//td[contains(text(),"文号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="zwgkTableDetail"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="zwgkTableDetail"]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="zwgkTableDetail"]//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="zwgkTableDetail"]//td[contains(text(),"发布机关")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('自治区'):
        # Qualify bare "自治区..." organ names with the province prefix.
        organ = '新疆维吾尔' + organ

    fulltext_xpath = '//div[@id="NewsContent"]|//div[@id="zoomcon"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context so the failing URL can be diagnosed/retried.
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99706'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "ZJTXINJIANG"
    zt_provider = "zjtxinjianggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments can appear in the fulltext body or in a detail-files box.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, f'(//div[@class="detail-files"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   新疆维吾尔自治区卫生健康委员会
def policy_wjwxinjianglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for 新疆维吾尔自治区卫生健康委员会.

    On page 1 it derives the total page count from the site's
    createPageHTML(...) JS call and queues the remaining list pages; it then
    queues one article-stage task per list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count appears as createPageHTML('page-div',N,... (or 'page_div').
        max_count = re.findall(r"createPageHTML\('page-div',(\d+)", para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall(r"createPageHTML\('page_div',(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Queue the remaining list pages exactly once (from page 2 onward).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="gknr_list"]/dl/dd')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                # Skip entries without a link (decorative <dd> nodes).
                continue
            base_url = 'http://wjw.xinjiang.gov.cn'
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue
            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99707'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_wjwxinjiangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for 卫健委 pages: no follow-up tasks are queued."""
    return DealModel()


def policy_wjwxinjiangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for 新疆维吾尔自治区卫生健康委员会 article pages.

    Parses the title from the article html (falling back to the list-page
    title), extracts the fulltext container, and emits rows for
    policy_latest / policy_fulltext_latest plus an attachment-info update.

    Raises:
        Exception: when no known fulltext container matches the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="d_m_top"]//h2//text()|//div[@class="gknbxq_top"]//h2//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@class="d_m_centre"]|//div[@class="gknbxq_detail"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context so the failing URL can be diagnosed/retried.
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99707'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "WJWXINJIANG"
    zt_provider = "wjwxinjianggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record any attachment links found inside the fulltext container.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   新疆维吾尔自治区乌鲁木齐市
def policy_urumqilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for 新疆维吾尔自治区乌鲁木齐市.

    On page 1 it derives the total page count from the "共N页" marker in the
    html and queues the remaining list pages (re-using the original list_json
    unchanged); it then queues one article-stage task per list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Two known markups for the total page count: ";共N页&" or ">共 N 页<".
        max_count = re.findall(r";共(\d+)页&", para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall(r">共 (\d+) 页<", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Queue the remaining list pages exactly once (from page 2 onward).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[contains(@class,"commonList_dot")]/li|//div[@class="section"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                # Skip entries without a link (decorative <li> nodes).
                continue
            base_url = 'http://www.urumqi.gov.cn'
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue
            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99708'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_urumqiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for 乌鲁木齐市 pages: no follow-up tasks are queued."""
    return DealModel()


def policy_urumqiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for 乌鲁木齐市 article pages.

    Parses title, pub_date, and document metadata (pub_no / index_no / subject
    / subject_word / organ) from the article html, extracts the fulltext
    container, and emits rows for policy_latest / policy_fulltext_latest plus
    an attachment-info update.

    Raises:
        Exception: when no known fulltext container matches the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//article[contains(@class,"am-padding-horizontal-lg")]//h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Prefer the on-page meta date; fall back to the list-page date. pub_year is
    # derived only after the fallback so it always matches pub_date.
    pub_date_info = ''.join(res.xpath('//p[@class="am-article-meta am-text-sm am-fl"]/text()').extract()).strip()
    pub_date = clean_pubdate(pub_date_info)
    if not pub_date:
        pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    pub_no = ''.join(res.xpath('//article[contains(@class,"am-padding-horizontal-lg")]//table//td[contains(text(),"文") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//article[contains(@class,"am-padding-horizontal-lg")]//table//td[contains(text(),"索") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//article[contains(@class,"am-padding-horizontal-lg")]//table//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
    subject_word = ''.join(res.xpath('//article[contains(@class,"am-padding-horizontal-lg")]//table//td[contains(text(),"主") and contains(text(),"词")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//article[contains(@class,"am-padding-horizontal-lg")]//table//td[contains(text(),"发文单位")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # Qualify bare "市..." organ names with the city prefix.
        organ = '乌鲁木齐' + organ

    fulltext_xpath = '//article[contains(@class,"am-padding-horizontal-lg")]|//div[@id="info_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context so the failing URL can be diagnosed/retried.
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99708'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "URUMQI"
    zt_provider = "urumqigovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['subject_word'] = subject_word

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record any attachment links found inside the fulltext container.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   新疆维吾尔自治区克拉玛依市
def policy_klmylist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Karamay (克拉玛依) government policy site.

    Parses the JSON list response; on the first page, fans out tasks for the
    remaining list pages; queues one article task per list entry.

    Args:
        callmodel: framework callback context (fetched pages + source row).

    Returns:
        DealModel with page fan-out inserts and next-stage article inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        max_count = html_json['data']['total']
        # The site serves 15 entries per list page.
        total_page = math.ceil(max_count / 15)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['data']['results']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li['url']
            base_url = f'https://www.klmy.gov.cn'
            url = parse.urljoin(base_url, href)
            # Force https on the scheme only. The previous
            # url.replace('http', 'https') corrupted URLs that were already
            # https (producing "httpss://...").
            if url.startswith('http://'):
                url = 'https://' + url[len('http://'):]
            if 'htm' not in url:
                continue
            # rawid = detail-page file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99709'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li['publishedTimeStr']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_klmyarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for Karamay; extraction is done in the ETL step."""
    return DealModel()


def policy_klmyarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Karamay (克拉玛依) policy article pages.

    Extracts metadata and the full-text container from the fetched HTML and
    stages rows for ``policy_latest`` and ``policy_fulltext_latest``, plus an
    attachment-info update on the source row.

    Returns:
        EtlDealModel carrying ``save_data`` rows and a before-stage update.

    Raises:
        Exception: if the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the list-page title.
    # NOTE(review): the third alternative was "/h1[...]" (document root),
    # which can never match parsed HTML — corrected to "//h1[...]".
    title = ''.join(res.xpath('//h1[@class="right-hd"]//text()|//div[@class="ewb-news-hd"]//h3//text()|//h1[@class="article_title"]/ucaptitle//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="meta_main"]//label[text()="文"]/parent::li[1]/div/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="meta_main"]//label[text()="索"]/parent::li[1]/div/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="meta_main"]//label[contains(text(),"成文日期：")]/parent::li[1]/div/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="syhbox"]//font[contains(text(),"有效性：")]/parent::li[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="meta_main"]//label[contains(text(),"发文机关：")]/parent::li[1]/div/text()').extract()).strip()
    # Qualify bare "市..." organ names with the city name.
    if organ.startswith('市'):
        organ = '克拉玛依' + organ

    fulltext_xpath = '//div[contains(@class,"ewb-news-bd")]|//div[@id="zoomcon"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99709'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "KLMY"
    zt_provider = "klmygovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (if any) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   新疆维吾尔自治区吐鲁番市
def policy_tlflist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Turpan (吐鲁番) government policy site.

    Reads the page count from the ``createPageHTML`` pagination call, fans out
    the remaining list pages on page 1, and queues one article task per entry.

    Returns:
        DealModel with page fan-out inserts and next-stage article inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # The pagination container id differs between templates
        # ('page-div' vs 'page_div'); try both.
        max_count = re.findall(r"createPageHTML\('page-div',(\d+)", para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall(r"createPageHTML\('page_div',(\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Three list layouts share this callback; the XPath unions cover all.
        li_list = res.xpath('//ul[@class="list"]/li|//tbody[@id="zc_list"]/tr|//div[@class="gknr_list"]/dl/dd')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('div[@class="coninfo"]/div[1]/a/@href|th[@class="bt"]/a/@href|a/@href').extract_first()
            if not href:
                # Rows without a link (e.g. header rows) would otherwise
                # raise TypeError on the substring check below.
                continue
            base_url = f'http://www.tlf.gov.cn'
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue
            # rawid = detail-page file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99710'
            article_json["url"] = url
            article_json["title"] = li.xpath('div[@class="coninfo"]/div[1]/a/text()|th[@class="bt"]/a/text()|a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('.//span[@class="time"]/text()|th[@class="fbrq"]/text()|span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_tlfarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for Turpan; extraction is done in the ETL step."""
    return DealModel()


def policy_tlfarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Turpan (吐鲁番) policy article pages.

    Extracts metadata from the ``syhbox`` metadata list and the full text from
    ``<ucapcontent>``, staging rows for ``policy_latest`` and
    ``policy_fulltext_latest`` plus an attachment-info update.

    Returns:
        EtlDealModel carrying ``save_data`` rows and a before-stage update.

    Raises:
        Exception: if the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the list-page title.
    title = ''.join(res.xpath('//ucaptitle//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata items are "label：value" strings; keep only the value part.
    pub_no = ''.join(res.xpath('//div[@class="syhbox"]//li[contains(text(),"发文字号：")]/text()').extract()).strip()
    pub_no = pub_no.split('：')[-1].strip()
    index_no = ''.join(res.xpath('//div[@class="syhbox"]//li[contains(text(),"索引号：")]/text()').extract()).strip()
    index_no = index_no.split('：')[-1].strip()
    # NOTE(review): this reads the "公开日期" (disclosure date) item but is
    # stored as written_date below — confirm the field mapping is intended.
    written_date = ''.join(res.xpath('//div[@class="syhbox"]//li[contains(text(),"公开日期：")]/text()').extract()).strip()
    written_date = written_date.split('：')[-1].strip()
    organ = ''.join(res.xpath('//div[@class="syhbox"]//li[contains(text(),"发布机构：")]/text()').extract()).strip()
    organ = organ.split('：')[-1].strip()
    # Qualify bare "市..." organ names with the city name.
    if organ.startswith('市'):
        organ = '吐鲁番' + organ

    fulltext_xpath = '//ucapcontent'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99710'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "TLF"
    zt_provider = "tlfgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (if any) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   新疆维吾尔自治区哈密市
def policy_hamilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Hami (哈密) government policy site.

    The site uses three list layouts (notices, government/normative documents,
    and a generic table), each handled by its own branch. Page 0 fans out the
    remaining list pages; older pages are numbered in descending order.

    Returns:
        DealModel with page fan-out inserts and next-stage article inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the pagination markup; two formats.
        max_count = re.findall(r";\d+/(\d+)", para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall(r'p_next p_fun"><a href="(\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Older pages count down: /<total-1>.htm ... /1.htm
                dic = {"page_info": f"/{total_page - page}.htm"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Relative links resolve against the page that was actually fetched.
        if page_index == 0:
            base_url = f'http://www.hami.gov.cn/{callmodel.sql_model.list_rawid}.htm'
        else:
            base_url = f'http://www.hami.gov.cn/{callmodel.sql_model.list_rawid}/1.htm'
        if 'zxzx1/tzgg1' in callmodel.sql_model.list_rawid:
            # Notices layout: <li> items with a click-counter span.
            li_list = res.xpath('//span[contains(@id,"dynclicks")]/parent::li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a[1]/@href').extract_first()
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99711'
                article_json["url"] = url
                article_json["title"] = li.xpath('a[1]/text()').extract_first().strip()
                pub_date_info = ''.join(li.xpath('text()').extract()).strip()
                article_json["pub_date"] = re.findall(r"\d{4}/\d{2}/\d{2}", pub_date_info)[0].strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        elif 'zfwj1' in callmodel.sql_model.list_rawid or 'gfxwj' in callmodel.sql_model.list_rawid:
            # Government / normative documents layout: table rows.
            li_list = res.xpath('//div/table[contains(@class,"winstyle")]/tbody/tr')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[2]/a/@href').extract_first()
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99711'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[2]/a/text()').extract_first().strip()
                # The publish-date column differs between the two sub-layouts.
                if 'gfxwj' in callmodel.sql_model.list_rawid:
                    article_json["pub_date"] = li.xpath('td[3]/text()').extract_first().strip()
                else:
                    article_json["pub_date"] = li.xpath('td[4]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # Generic table layout.
            li_list = res.xpath('//table[contains(@class,"winstyle54655")]/tbody/tr')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[2]/a/@href').extract_first()
                if not href:
                    continue
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99711'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[2]/a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('td[3]/span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_hamiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for Hami; extraction is done in the ETL step."""
    return DealModel()


def policy_hamiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Hami (哈密) policy article pages.

    Extracts metadata and the full-text container from the fetched HTML and
    stages rows for ``policy_latest`` and ``policy_fulltext_latest``, plus an
    attachment-info update. Attachments are also collected from a separate
    "附件" table cell.

    Returns:
        EtlDealModel carrying ``save_data`` rows and a before-stage update.

    Raises:
        Exception: if no publish date or full-text container can be found.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: page heading, then the ArticleTitle meta tag, then list title.
    title = ''.join(res.xpath('//h1[@class="right-hd"]//text()|//div[@class="ewb-news-hd"]//h3//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    if not pub_date:
        # Fall back to the on-page timestamp when the list gave no date.
        pub_date_info = ''.join(res.xpath('//span[contains(@class,"timestyle59463")]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    if not pub_date:
        raise Exception(f'pub_date not found: {provider_url}')
    index_no = ''.join(res.xpath('//table//tr/td[contains(text(),"索") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table//tr/td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table//tr/td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    # Qualify bare "市..." organ names with the city name.
    if organ.startswith('市'):
        organ = '哈密' + organ

    fulltext_xpath = '//div[contains(@id,"vsb_content")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99711'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "HAMI"
    zt_provider = "hamigovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments can live in the full text and/or a dedicated "附件" cell.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, '(//span[contains(text(),"附件")]/ancestor::td[1])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   新疆维吾尔自治区阿克苏地区
def policy_akslist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Aksu (阿克苏) prefecture policy site.

    The list data is a JSON array embedded in the HTML; every ``infolist``
    group is flattened into one article task per entry. No page fan-out here.

    Returns:
        DealModel with next-stage article inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        # Pull the embedded JSON payload out of the page source.
        data_str = re.findall(r'\[\{"infolist":.*?\}\]\}\]', para_dicts["data"]["1_1"]['html'])[0]
        for li_list in json.loads(data_str):
            for li in li_list['infolist']:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li['url']
                if not href:
                    continue
                base_url = 'http://www.aks.gov.cn'
                url = href if 'http' in href else base_url + href
                if 'htm' not in url:
                    continue
                # rawid = detail-page file name without its extension.
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99712'
                article_json["url"] = url
                article_json["title"] = li['title']
                # releasetime is an epoch timestamp in milliseconds.
                date_stamp = datetime.datetime.fromtimestamp(int(li['releasetime']) / 1000)
                article_json["pub_date"] = datetime.datetime.strftime(date_stamp, "%Y-%m-%d %H:%M:%S")
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_aksarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for Aksu; extraction is done in the ETL step."""
    return DealModel()


def policy_aksarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Aksu (阿克苏) policy article pages.

    Extracts metadata from the ``tablec tb_web`` info table and the full text
    from ``#zoomc``/``#zoom``, staging rows for ``policy_latest`` and
    ``policy_fulltext_latest`` plus an attachment-info update.

    Returns:
        EtlDealModel carrying ``save_data`` rows and a before-stage update.

    Raises:
        Exception: if the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # NOTE(review): pub_no and written_date are extracted but not written to
    # `data` below (their assignments are disabled) — confirm if intentional.
    pub_no = ''.join(res.xpath('//table[@class="tablec tb_web"]//td[contains(text(),"文 号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="tablec tb_web"]//td[contains(text(),"索 引 号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="tablec tb_web"]//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="tablec tb_web"]//td[contains(text(),"成文时间")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="tablec tb_web"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    # Qualify bare "地区..." organ names with the prefecture name.
    if organ.startswith('地区'):
        organ = '阿克苏' + organ

    # The full-text container id varies between page templates.
    if 'zoomc' in html:
        fulltext_xpath = '//div[@id="zoomc"]'
    else:
        fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99712'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "AKS"
    zt_provider = "aksgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (if any) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   新疆维吾尔自治区喀什地区
def policy_kashilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Kashgar (喀什) prefecture policy site.

    Reads the page count from the ``createPageHTML`` pagination call, fans out
    the remaining list pages on page 1, and parses one of two list layouts
    depending on the list's rawid.

    Returns:
        DealModel with page fan-out inserts and next-stage article inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r"createPageHTML\('page-div',(\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        if 'ksdqxzgs/c106694' in callmodel.sql_model.list_rawid:
            # Card-style list layout.
            li_list = res.xpath('//ul[@class="xlist"]/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                if not href:
                    # Items without a link would TypeError on the check below.
                    continue
                base_url = f'http://www.kashi.gov.cn'
                url = href if 'http' in href else base_url + href
                if 'htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99713'
                article_json["url"] = url
                article_json["title"] = li.xpath('a//p[@class="xlist-right-title"]/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('a//p[@class="xlist-right-meta"]/span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # Table layout; slice off the header row.
            li_list = res.xpath('//div[@id="zwgkTable"]/table/tbody/tr')[1:]
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[2]/a/@href').extract_first()
                if not href:
                    continue
                base_url = f'http://www.kashi.gov.cn'
                url = href if 'http' in href else base_url + href
                if 'htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99713'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[2]/a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('td[4]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_kashiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Kashgar (喀什) policy pages; schedules nothing further."""
    return DealModel()


def policy_kashiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Kashgar (喀什地区) policy article pages.

    Parses the downloaded article HTML, extracts metadata (title, document
    number, index number, legal status, issuing organ) plus the full text,
    and returns an EtlDealModel whose ``save_data`` holds one row each for
    the ``policy_latest`` and ``policy_fulltext_latest`` tables. Attachment
    info is written back onto the source row via ``other_dicts``.

    Raises:
        Exception: if the full-text container cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured at list stage.
    title = ''.join(res.xpath('//h1[@class="xxgk-tt-title"]//text()|//h2[@class="detail-right-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="zwgkTableDetail"]//td[contains(text(),"文号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="zwgkTableDetail"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="zwgkTableDetail"]//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="zwgkTableDetail"]//td[contains(text(),"信息来源")]/following::td[1]/text()').extract()).strip()
    # The site abbreviates "喀什地区..." to "地区..."; restore the prefix.
    if organ.startswith('地区'):
        organ = '喀什' + organ

    fulltext_xpath = '//div[@class="detail-right-content"]|//div[@id="zoomcon"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context rather than a message-less Exception.
        raise Exception(f'fulltext not found for {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99713'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "KASHI"
    zt_provider = "kashigovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or "{}" when none) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   新疆维吾尔自治区和田地区
def policy_xjhtlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Hotan prefecture (和田地区, www.xjht.gov.cn).

    On page 1 it reads the total page count from the pager text in the
    listing HTML and schedules the remaining list pages; on every page it
    extracts the article links (rawid comes from the ``itemid=`` query
    parameter) and queues them for the article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Pager text looks like "...条/NN页"; default to a single page.
        max_count = re.findall(r"条/(\d+)页", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # The list_json payload is identical for every page here.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="ulist"]/dl/dd')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a[1]/@href').extract_first()
            base_url = 'https://www.xjht.gov.cn'
            url = href if 'http' in href else base_url + href
            # Only detail pages carry an itemid; skip anything else.
            if 'itemid=' not in url:
                continue
            rawid = url.split('itemid=')[-1]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99714'
            article_json["url"] = url
            article_json["title"] = li.xpath('a[1]/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_xjhtarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Hotan (和田) policy pages; schedules nothing further."""
    return DealModel()


def policy_xjhtarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Hotan prefecture (和田地区) policy article pages.

    Extracts the title and full text from the downloaded HTML and emits
    rows for ``policy_latest`` and ``policy_fulltext_latest``; attachment
    info is written back to the source row via ``other_dicts``. This site
    exposes no structured metadata table, so only title/date are captured.

    Raises:
        Exception: if the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured at list stage.
    title = ''.join(res.xpath('//p[@id="title"]//text()|//h2[@id="title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@id="content"]|//div[@class="_detail"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context rather than a message-less Exception.
        raise Exception(f'fulltext not found for {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99714'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "XJHT"
    zt_provider = "xjhtgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or "{}" when none) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   新疆维吾尔自治区昌吉回族自治州
def policy_cjlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Changji prefecture (昌吉回族自治州, www.cj.gov.cn).

    On page 1 it derives the page count from the ``total:`` figure in the
    listing HTML (15 items per page) and schedules the remaining pages;
    on every page the article links are queued for the article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            max_count = re.findall(r'total: (\d+)', para_dicts["data"]["1_1"]['html'])
            max_count = int(max_count[0]) if max_count else 1
            # The site paginates 15 items per listing page.
            total_page = math.ceil(max_count / 15)
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="section-main-nav"]/ul/li|//div[@class="xxgkzd"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('div[@class="zctext"]/a/@href|a/@href').extract_first()
            # Relative links resolve against the concrete list page URL.
            base_url = f'http://www.cj.gov.cn/{callmodel.sql_model.list_rawid}.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid is the detail page's file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99715'
            article_json["url"] = url
            article_json["title"] = li.xpath('div[@class="zctext"]/a/text()|a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('div[@class="zcdate"]/text()|a/span/text()|span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cjarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Changji (昌吉) policy pages; schedules nothing further."""
    return DealModel()


def policy_cjarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Changji prefecture (昌吉回族自治州) policy article pages.

    Handles the two page layouts observed on the site (a ``div.Article_xx``
    metadata block vs a plain metadata table), extracts metadata plus the
    full text, and emits rows for ``policy_latest`` and
    ``policy_fulltext_latest``.

    Raises:
        Exception: if the publication date is malformed or the full-text
            container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    if len(pub_date) < 8:
        # clean_pubdate should yield at least YYYYMMDD; anything shorter is bad input.
        raise Exception(f'invalid pub_date {pub_date!r} for {provider_url}')
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured at list stage.
    title = ''.join(res.xpath('//div[@id="divTitle"]//text()|//div[@class="title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Older pages wrap metadata in div.Article_xx; newer ones use a table.
    if 'class="Article_xx"' in html:
        pub_no = ''.join(res.xpath('//div[@class="Article_xx"]//strong[contains(text(),"文") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath('//div[@class="Article_xx"]//strong[contains(text(),"索") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
        subject = ''.join(res.xpath('//div[@class="Article_xx"]//strong[contains(text(),"信息分类")]/following::td[1]/text()').extract()).strip()
        subject_word = ''
        organ = ''.join(res.xpath('//div[@class="Article_xx"]//strong[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    else:
        pub_no = ''.join(res.xpath('//table[contains(@class,"table")]//td[contains(text(),"文") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath('//table[contains(@class,"table")]//td[contains(text(),"索") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
        subject = ''.join(res.xpath('//table[contains(@class,"table")]//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
        subject_word = ''.join(res.xpath('//table[contains(@class,"table")]//td[contains(text(),"主") and contains(text(),"词")]/following::td[1]/text()').extract()).strip()
        organ = ''.join(res.xpath('//table[contains(@class,"table")]//td[contains(text(),"发文单位")]/following::td[1]/text()').extract()).strip()
    # The site abbreviates the prefecture name to "州..."; restore the prefix.
    if organ.startswith('州'):
        organ = '新疆维吾尔自治区昌吉回族自治' + organ

    fulltext_xpath = '//div[@class="cent_nr_box"]|//div[@id="info_content"]|//font[@id="Zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found for {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99715'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CJ"
    zt_provider = "cjgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['subject_word'] = subject_word

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or "{}" when none) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   新疆维吾尔自治区博尔塔拉蒙古自治州
def policy_xjbozlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Bortala prefecture (博尔塔拉蒙古自治州, www.xjboz.gov.cn).

    The site numbers its older list pages in descending order
    (``<total-1>.htm`` down to ``1.htm``); on the first crawl
    (page_index == 0) the remaining pages are scheduled accordingly.
    Article links on every page are queued for the article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Pager text looks like ">/NN页"; default to a single page.
        max_count = re.findall(r'>/(\d+)页', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Pages count down: scheduled page N maps to "<total-N>.htm".
                dic = {"page_info": f"/{total_page - page}.htm"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="gblist"]//table/tr|//section[@class="n_list liebiao"]/ul/li|//div[@class="gblist"]//tr')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('td[1]/a/@href|a/@href').extract_first()
            # Relative links resolve against the concrete list page URL,
            # which differs between the first page and the numbered pages.
            if page_index == 0:
                base_url = f'http://www.xjboz.gov.cn/{callmodel.sql_model.list_rawid}.htm'
            else:
                base_url = f'http://www.xjboz.gov.cn/{callmodel.sql_model.list_rawid}/1.htm'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid is the detail page's file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99716'
            article_json["url"] = url
            article_json["title"] = li.xpath('td[1]/a/text()|a/font/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('td[2]/text()|a/span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_xjbozarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Bortala (博尔塔拉) policy pages; schedules nothing further."""
    return DealModel()


def policy_xjbozarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Bortala prefecture (博尔塔拉蒙古自治州) policy article pages.

    Extracts title, document number, written/invalid dates and the full
    text from the downloaded HTML, emitting rows for ``policy_latest``
    and ``policy_fulltext_latest``.

    Raises:
        Exception: if the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title fallbacks: page heading -> ArticleTitle meta -> list-stage title.
    title = ''.join(res.xpath('//div[@id="divTitle"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table/tr/td[contains(text(),"文") and contains(text(),"号")]/following::td[1]/span/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table/tr/td[contains(text(),"成文日期")]/following::td[1]/span/text()').extract()).strip()
    invalid_date = ''.join(res.xpath('//table/tr/td[contains(text(),"失效日期")]/following::td[1]/span/text()').extract()).strip()

    fulltext_xpath = '//div[contains(@id,"vsb_content")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context rather than a message-less Exception.
        raise Exception(f'fulltext not found for {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99716'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "XJBOZ"
    zt_provider = "xjbozgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['written_date'] = clean_pubdate(written_date)
    data['invalid_date'] = clean_pubdate(invalid_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or "{}" when none) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   新疆维吾尔自治区巴音郭楞蒙古自治州
def policy_xjbzlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Bayingolin prefecture (巴音郭楞蒙古自治州, www.xjbz.gov.cn).

    On page 1 it reads the page count from the ``createPageHTML`` pager
    script (two known variants) and schedules the remaining pages. Two
    listing layouts are parsed: the "xjbz/bz" gazette layout and the
    generic ul/dl list layout; extracted article links are queued for the
    article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'createPageHTML\((\d+)', para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall(r"createPageHTML\('page-div',(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Subsequent pages append "_<page>" to the base page_info.
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        if 'xjbz/bz' in callmodel.sql_model.list_rawid:
            # Gazette layout: each entry is a nested ul with title/date cells.
            li_list = res.xpath('//ul[@class="out_list"]/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('ul/li[@class="xxmc"]/a[2]/@href').extract_first()
                base_url = 'http://www.xjbz.gov.cn'
                url = href if 'http' in href else base_url + href
                if 'htm' not in url:
                    continue
                # rawid is the detail page's file name without its extension.
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99717'
                article_json["url"] = url
                article_json["title"] = li.xpath('ul/li[@class="xxmc"]/a[2]/text()').extract_first().strip()
                article_json["pub_date"] = ''.join(li.xpath('ul/li[@class="fwrq"]/text()').extract()).strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            li_list = res.xpath('//ul[@class="list"]/li|//div[@class="gknr_list"]/dl/dd')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                base_url = 'http://www.xjbz.gov.cn'
                url = href if 'http' in href else base_url + href
                if 'htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99717'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_xjbzarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Bayingolin (巴音郭楞) policy pages; schedules nothing further."""
    return DealModel()


def policy_xjbzarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Bayingolin prefecture (巴音郭楞蒙古自治州) policy articles.

    Extracts the title, publication metadata (from the ``zfxxgkysj`` info
    block, where each value follows a full-width colon) and the full text,
    emitting rows for ``policy_latest`` and ``policy_fulltext_latest``.

    Raises:
        Exception: if the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title fallbacks: page heading -> ArticleTitle meta -> list-stage title.
    title = ''.join(res.xpath('//h2[@class="title"]//text()|//h1[@class="title"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    if not pub_date:
        # The list page carried no date; read it from the article page instead.
        pub_date_info = ''.join(res.xpath('//span[@class="publishedTime"]/text()|//div[@data-article="publishedTime"]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    # Metadata lines look like "标签：值"; keep the part after the full-width colon.
    pub_no = ''.join(res.xpath('//div[@data-article="zfxxgkysj"]//li[contains(text(),"发文字号：")]//text()').extract()).strip()
    pub_no = pub_no.split('：')[-1].strip()
    index_no = ''.join(res.xpath('//div[@data-article="zfxxgkysj"]//li[contains(text(),"索引号：")]//text()').extract()).strip()
    index_no = index_no.split('：')[-1].strip()
    subject = ''.join(res.xpath('//div[@data-article="zfxxgkysj"]//li[contains(text(),"主题分类：")]//text()').extract()).strip()
    subject = subject.split('：')[-1].strip()
    written_date = ''.join(res.xpath('//div[@data-article="zfxxgkysj"]//li[contains(text(),"成文日期：")]//text()').extract()).strip()
    written_date = written_date.split('：')[-1].strip()
    legal_status = ''.join(res.xpath('//div[@data-article="zfxxgkysj"]//li[contains(text(),"有效性：")]//text()').extract()).strip()
    legal_status = legal_status.split('：')[-1].strip()
    organ = ''.join(res.xpath('//div[@data-article="zfxxgkysj"]//li[contains(text(),"发文机关：")]//text()').extract()).strip()
    organ = organ.split('：')[-1].strip()
    # The site abbreviates the prefecture name to "州..."; restore the prefix.
    if organ.startswith('州'):
        organ = '新疆维吾尔自治区巴音郭楞蒙古自治' + organ

    fulltext_xpath = '//p[@class="content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context rather than a message-less Exception.
        raise Exception(f'fulltext not found for {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99717'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "XJBZ"
    zt_provider = "xjbzgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or "{}" when none) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   新疆维吾尔自治区克孜勒苏柯尔克孜自治州
def policy_xjkzlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Kizilsu Kirghiz Autonomous Prefecture (xjkz.gov.cn, JSON API).

    On page 1 it fans out one list task per page reported by the JSON payload;
    for every page it emits one next-stage (article) task per result item.

    Returns:
        DealModel carrying the pagination inserts (befor_dicts) and the
        article-task inserts (next_dicts).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        total_page = html_json['totalPage']
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: enqueue every page as its own list task.
            # insert-ignore keeps reruns idempotent (page 1 itself included).
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        base_url = 'https://www.xjkz.gov.cn'
        for li in html_json['results']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            # Some records publish an absolute URL in 'docpuburl'; otherwise
            # fall back to the site-relative 'url'.
            href = li.get('docpuburl', '') or li['url']
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue
            # rawid = file name without extension, e.g. ".../abc123.html" -> "abc123".
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99718'
            article_json = {
                "url": url,
                "title": li['title'],
                "pub_date": li['publishedTime'],
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_xjkzlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for xjkz.gov.cn HTML lists paginated via createPageHTML().

    The total page count is scraped from the ``createPageHTML('page_div', N, ...)``
    call embedded in the page; page 1 fans out the remaining pages, and every
    page yields one article task per list item.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        pages_found = re.findall(r"createPageHTML\('page_div',(\d+)", html)
        total_page = int(pages_found[0]) if pages_found else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: enqueue pages 2..total_page as list tasks.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Subsequent pages are addressed as "<page_info>_<page>".
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        li_list = res.xpath('//div[@class="section"]/ul/li|//ul[@class="am-list news-list"]/li')
        base_url = 'https://www.xjkz.gov.cn'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li.xpath('a/@href').extract_first()
            if not href:
                # Defensive: a <li> with no anchor would crash the membership test.
                continue
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99718'
            article_json = {
                "url": url,
                "title": li.xpath('a/text()').extract_first().strip(),
                "pub_date": li.xpath('span/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_xjkzarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for xjkz.gov.cn; derives no further tasks."""
    return DealModel()


def policy_xjkzarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Kizilsu (xjkz.gov.cn) article pages.

    Parses the article HTML into a ``policy_latest`` row plus a
    ``policy_fulltext_latest`` row, and writes attachment info back onto the
    task row via an update model.

    Raises:
        Exception: when the full-text container cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the on-page title; fall back to the one captured on the list page.
    title = ''.join(res.xpath('//h1[@id="title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Structured metadata lives in the #gk-info disclosure table.
    pub_no = ''.join(res.xpath('//table[@id="gk-info"]//td[contains(text(),"文") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@id="gk-info"]//td[contains(text(),"索") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@id="gk-info"]//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
    subject_word = ''.join(res.xpath('//table[@id="gk-info"]//td[contains(text(),"主") and contains(text(),"词")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@id="gk-info"]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@id="gk-info"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('州'):
        # Site abbreviates the prefecture-level issuer; restore the full name.
        organ = '新疆维吾尔自治区克孜勒苏柯尔克孜自治' + organ

    fulltext_xpath = '//div[@id="text"]|//div[@id="info_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext container not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99718'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "XJKZ"
    zt_provider = "xjkzgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date  # already normalized by clean_pubdate above
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['subject_word'] = subject_word

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment metadata (or an explicit empty dict) on the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   新疆维吾尔自治区伊犁哈萨克自治州 (Ili Kazakh Autonomous Prefecture, Xinjiang)
def policy_xjyllist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Ili Kazakh Autonomous Prefecture (xjyl.gov.cn).

    Page count comes from the embedded ``createPageHTML('page-div', N, ...)``
    call; page 1 fans out the remaining pages, and each page emits one article
    task per list item.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        pages_found = re.findall(r"createPageHTML\('page-div',(\d+)", html)
        total_page = int(pages_found[0]) if pages_found else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: enqueue pages 2..total_page as list tasks.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        li_list = res.xpath('//ul[@class="newsList"]/li|//div[@class="row"]/ul/li')
        base_url = 'http://www.xjyl.gov.cn'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li.xpath('a[@class="m"]/@href').extract_first()
            if not href:
                # Defensive: rows without the expected anchor would crash below.
                continue
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue

            # rawid = file name without extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99719'
            article_json = {
                "url": url,
                "title": li.xpath('a[@class="m"]/text()').extract_first().strip(),
                "pub_date": ''.join(li.xpath('span/text()').extract()).strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_xjylarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for xjyl.gov.cn; derives no further tasks."""
    return DealModel()


def policy_xjylarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Ili (xjyl.gov.cn) article pages.

    Only title/date/URL are available on this site's article pages (no
    disclosure-metadata table), so just those fields plus the full text are
    written to ``policy_latest`` / ``policy_fulltext_latest``.

    Raises:
        Exception: when the full-text container cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the one captured on the list page.
    title = ''.join(res.xpath('//h1[@class="data-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # NOTE(review): pub_no / index_no / organ etc. are not exposed in a usable
    # form on this site's article pages, so they are intentionally not set.

    fulltext_xpath = '//div[@id="NewsContent"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext container not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99719'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "XJYL"
    zt_provider = "xjylgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment metadata (or an explicit empty dict) on the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   新疆维吾尔自治区塔城地区 (Tacheng Prefecture, Xinjiang)
def policy_xjtclist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Tacheng Prefecture (xjtc.gov.cn).

    Page count is scraped from the "/共N页" pager text; page 1 fans out the
    remaining pages, and each page emits one article task per list item.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        pages_found = re.findall(r'/共(\d+)页', para_dicts["data"]["1_1"]['html'])
        total_page = int(pages_found[0]) if pages_found else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: enqueue pages 2..total_page as list tasks, reusing
            # the original list_json unchanged (pagination is URL-driven here).
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        base_url = f'http://www.xjtc.gov.cn/{callmodel.sql_model.list_rawid}'
        li_list = res.xpath('//ul[@class="infoList"]/li|//ul[@class="newsList"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li.xpath('a[last()]/@href').extract_first()
            if not href:
                # Defensive: urljoin(base_url, None) would raise TypeError.
                continue

            url = parse.urljoin(base_url, href)
            if 'xjtc' not in url:
                continue
            # rawid = last path segment without any fragment part.
            rawid = url.split('/')[-1].split('#')[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99720'
            article_json = {
                "url": url,
                "title": li.xpath('a[last()]/text()').extract_first().strip(),
                "pub_date": li.xpath('span[@class="date"]/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_xjtcarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for xjtc.gov.cn; derives no further tasks."""
    return DealModel()


def policy_xjtcarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Tacheng (xjtc.gov.cn) article pages.

    The site uses two metadata-table layouts (``div.ueditortable`` vs
    ``table.zwnrbox``); the matching container is chosen by sniffing the HTML
    and the same cell-extraction logic is applied to either.

    Raises:
        Exception: when the full-text container cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title fallbacks: page h1, then any titled h2, then the list-page title.
    title = ''.join(res.xpath('//h1[@class="nr-title"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h2[contains(@class,"title")]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    is_ueditor = 'class="ueditortable"' in html
    meta_root = '//div[@class="ueditortable"]' if is_ueditor else '//table[@class="zwnrbox"]'

    def _meta(label_cond):
        """Extract the cell following the label cell matching *label_cond*."""
        return ''.join(
            res.xpath(f'{meta_root}//td[{label_cond}]/following::td[1]/text()').extract()
        ).strip()

    index_no = _meta('contains(text(),"索") and contains(text(),"号")')
    subject = _meta('contains(text(),"主题分类")')
    organ = _meta('contains(text(),"发布机构")')
    # Validity status only exists in the ueditor layout.
    legal_status = _meta('contains(text(),"有") and contains(text(),"性")') if is_ueditor else ''
    if organ.startswith('地区'):
        # Site abbreviates the prefecture-level issuer; restore the full name.
        organ = '新疆维吾尔自治区塔城' + organ

    fulltext_xpath = '//div[contains(@id,"vsb_content")]|//div[@class="conTxt"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext container not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99720'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "XJTC"
    zt_provider = "xjtcgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment metadata (or an explicit empty dict) on the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   新疆维吾尔自治区阿勒泰地区 (Altay Prefecture, Xinjiang)
def policy_xjaltlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Altay Prefecture (xjalt.gov.cn, JSON API, 15/page).

    Pagination here is 0-indexed: page 0 is the seed task, so it fans out
    pages 1..total_page-1; every page emits one article task per item.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        tcount = html_json['custom']['total']
        total_page = math.ceil(tcount / 15)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Seed page (index 0): enqueue the remaining 0-indexed pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        base_url = 'http://www.xjalt.gov.cn'
        for li in html_json['custom']['data']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li['infourl']
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue
            # rawid = file name without extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99721'
            article_json = {
                "url": url,
                "title": li['title'],
                "pub_date": li['infodate'],
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_xjaltlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for xjalt.gov.cn HTML lists (pager text "total: N", 20/page).

    Page 1 fans out pages 2..total_page (page number stored as page_info);
    every page emits one article task per list item.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        totals_found = re.findall(r"total: (\d+)", html)
        total_count = int(totals_found[0]) if totals_found else 1
        total_page = math.ceil(total_count / 20)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Subsequent pages are addressed purely by their page number.
                dic = {"page_info": f"{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        li_list = res.xpath('//ul[@class="ewb-list"]/li')
        base_url = 'http://www.xjalt.gov.cn'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li.xpath('a/@href').extract_first()
            if not href:
                # Defensive: rows without an anchor would crash below.
                continue
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99721'
            article_json = {
                "url": url,
                "title": li.xpath('a/text()').extract_first().strip(),
                "pub_date": li.xpath('span/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_xjaltarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for xjalt.gov.cn; derives no further tasks."""
    return DealModel()


def policy_xjaltarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Altay (xjalt.gov.cn) article pages.

    Metadata comes from the ``div.ewb-detail-table`` disclosure table; the row
    is saved to ``policy_latest`` / ``policy_fulltext_latest`` and attachment
    info is written back onto the task row.

    Raises:
        Exception: when the full-text container cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the on-page title; fall back to the one captured on the list page.
    title = ''.join(res.xpath('//h3[@class="ewb-article-tt"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # NOTE: the "文  号" label really contains two spaces on this site.
    pub_no = ''.join(res.xpath('//div[@class="ewb-detail-table"]//td[contains(text(),"文  号")]/following::td[1]/span/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="ewb-detail-table"]//td[contains(text(),"索引号")]/following::td[1]/span/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="ewb-detail-table"]//td[contains(text(),"主题分类")]/following::td[1]/span/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="ewb-detail-table"]//td[contains(text(),"生成日期")]/following::td[1]/span/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="ewb-detail-table"]//td[contains(text(),"发布机构")]/following::td[1]/span/text()').extract()).strip()
    if organ.startswith('地区'):
        # Site abbreviates the prefecture-level issuer; restore the full name.
        organ = '新疆维吾尔自治区阿勒泰' + organ

    fulltext_xpath = '//div[@class="ewb-article-content"]|//div[@class="ewb-article-info"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext container not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99721'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "XJALT"
    zt_provider = "xjaltgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date  # already normalized by clean_pubdate above
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment metadata (or an explicit empty dict) on the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   黑龙江省发展和改革委员会 (Heilongjiang Provincial Development and Reform Commission)
def policy_drchljlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall('<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 25)

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 25 + 1
                end = (page + 2) * 25
                if end >= max_count:
                    end = max_count
                dic = {"start": start, "end": end, "page_info": list_json["page_info"], "url_part": list_json["url_part"]}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('.//a/@href').extract_first()
            base_url = f'http://drc.hlj.gov.cn'
            # url = parse.urljoin(base_url, href)
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue
            rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99722'
            article_json["url"] = url
            article_json["title"] = li.xpath('.//a/text()').extract_first().strip()
            if '510' == callmodel.sql_model.list_rawid:
                pub_date = li.xpath('span/text()').extract_first().strip()
            elif '146' == callmodel.sql_model.list_rawid or '518' == callmodel.sql_model.list_rawid:
                pub_date = li.xpath('td[4]/text()').extract_first().strip()
                article_json["pub_no"] = ''.join(li.xpath('td[3]/text()').extract()).strip()
            else:
                pub_date = li.xpath('td[2]/text()').extract_first().strip()
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_drchljlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Heilongjiang DRC JSON search API.

    The payload is JSON with ``data.total`` / ``data.results`` (15 records
    per page). On the first page, one follow-up list task is enqueued per
    page; every result row becomes an article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        tcount = html_json['data']['total']
        total_page = math.ceil(tcount / 15)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: enqueue every list page of this channel.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # The request body is identical for every page; paging is
                # driven by page_index downstream.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['data']['results']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            article_json = dict()
            href = li['url']
            base_url = 'http://drc.hlj.gov.cn'
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue
            # rawid = document file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99722'
            article_json["url"] = url
            article_json["title"] = li['title']
            # Some rows omit publishedTimeStr (see the mzt sibling); default to ''.
            article_json["pub_date"] = li.get('publishedTimeStr', '')
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_drchljarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the Heilongjiang DRC site.

    No per-article processing is required here; parsing happens in the
    ETL callback, so an empty DealModel is returned.
    """
    return DealModel()


def policy_drchljarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Heilongjiang DRC articles.

    Extracts title / pub_no / index_no and the full text from the article
    HTML, stages rows for ``policy_latest`` and ``policy_fulltext_latest``,
    and records attachment info (other_dicts) for the source row.

    Raises:
        Exception: when no full-text container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the title rendered on the page; fall back to the list title.
    title = ''.join(res.xpath('//div[@class="wzy-title"]//text()|//div[@class="sp_title"]//text()|//h1[@class="article_title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[contains(@class,"detail_xxgk_info")]//label[text()="文"]//following::div[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[contains(@class,"detail_xxgk_info")]//label/b[text()="索"]//following::div[1]/text()').extract()).strip()

    fulltext_xpath = '//div[@id="zoom"]|//div[@id="zoomcon"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99722'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "DRCHLJ"
    zt_provider = "drchljgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments found inside the full-text container go back onto the
    # source row as a JSON blob in other_dicts.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 黑龙江省工业和信息化厅 (Heilongjiang Department of Industry and Information Technology)
def policy_gxthljlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Heilongjiang MIIT JSON search API.

    The payload is JSON with ``data.total`` / ``data.rows`` / ``data.results``.
    On the first page, one follow-up list task is enqueued per page; every
    result row becomes an article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        tcount = html_json['data']['total']
        # This API reports its own page size in data.rows.
        total_page = math.ceil(tcount / html_json['data']['rows'])
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: enqueue every list page of this channel.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['data']['results']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            article_json = dict()
            href = li['url']
            base_url = 'http://gxt.hlj.gov.cn'
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue
            # rawid = document file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99723'
            article_json["url"] = url
            article_json["title"] = li['title']
            # Some rows omit publishedTimeStr (see the mzt sibling); default to ''.
            article_json["pub_date"] = li.get('publishedTimeStr', '')
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_gxthljarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the Heilongjiang MIIT site.

    No per-article processing is required here; parsing happens in the
    ETL callback, so an empty DealModel is returned.
    """
    return DealModel()


def policy_gxthljarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Heilongjiang MIIT articles.

    Extracts the title and index_no plus the full text from the article
    HTML, stages rows for ``policy_latest`` and ``policy_fulltext_latest``,
    and records attachment info (other_dicts) for the source row.

    Raises:
        Exception: when no full-text container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the title rendered on the page; fall back to the list title.
    title = ''.join(res.xpath('//h1[@class="article_title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    index_no = ''.join(res.xpath('//div[@class="mes syh"]/text()').extract()).strip()

    fulltext_xpath = '//div[@id="zoomcon"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99723'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "GXTHLJ"
    zt_provider = "gxthljgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['index_no'] = index_no

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments found inside the full-text container go back onto the
    # source row as a JSON blob in other_dicts.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 黑龙江省科学技术厅 (Heilongjiang Department of Science and Technology)
def policy_hljkjtlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Heilongjiang S&T department JSON API.

    The payload is JSON with ``data.total`` / ``data.results`` (15 records
    per page). On the first page, one follow-up list task is enqueued per
    page; every result row becomes an article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        tcount = html_json['data']['total']
        total_page = math.ceil(tcount / 15)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: enqueue every list page of this channel.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['data']['results']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            article_json = dict()
            href = li['url']
            base_url = 'http://kjt.hlj.gov.cn'
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue
            # rawid = document file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99724'
            article_json["url"] = url
            article_json["title"] = li['title']
            # Some rows omit publishedTimeStr (see the mzt sibling); default to ''.
            article_json["pub_date"] = li.get('publishedTimeStr', '')
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_hljkjtarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the Heilongjiang S&T department site.

    No per-article processing is required here; parsing happens in the
    ETL callback, so an empty DealModel is returned.
    """
    return DealModel()


def policy_hljkjtarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Heilongjiang S&T department articles.

    Extracts title / pub_no / index_no and the full text from the article
    HTML, stages rows for ``policy_latest`` and ``policy_fulltext_latest``,
    and records attachment info (other_dicts) for the source row. On this
    site attachments may also live in a separate download list
    (``dl.fjxz``), which is scanned in addition to the full-text container.

    Raises:
        Exception: when no full-text container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the title rendered on the page; fall back to the list title.
    title = ''.join(res.xpath('//p[@id="biaoti"]//text()|//div[@class="Content_title"]//h1//text()|//h1[@class="article_title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[contains(@class,"detail_xxgk_info")]//label[text()="文"]//following::div[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[contains(@class,"detail_xxgk_info")]//label/b[text()="索"]//following::div[1]/text()').extract()).strip()

    fulltext_xpath = '//div[@class="xlynr"]|//div[@id="acontent"]|//div[@id="zoomcon"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99724'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "HLJKJT"
    zt_provider = "hljkjtgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments: full-text container plus the standalone download list.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, f'(//dl[@class="fjxz"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 黑龙江省教育厅 (Heilongjiang Department of Education)
def policy_jythljlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Heilongjiang education department JSON API.

    The payload is JSON with ``data.total`` / ``data.results`` (15 records
    per page). On the first page, one follow-up list task is enqueued per
    page; every result row becomes an article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        tcount = html_json['data']['total']
        total_page = math.ceil(tcount / 15)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: enqueue every list page of this channel.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['data']['results']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            article_json = dict()
            href = li['url']
            base_url = 'http://jyt.hlj.gov.cn'
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue
            # rawid = document file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99725'
            article_json["url"] = url
            article_json["title"] = li['title']
            # Some rows omit publishedTimeStr (see the mzt sibling); default to ''.
            article_json["pub_date"] = li.get('publishedTimeStr', '')
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jythljarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the Heilongjiang education department site.

    No per-article processing is required here; parsing happens in the
    ETL callback, so an empty DealModel is returned.
    """
    return DealModel()


def policy_jythljarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Heilongjiang education department articles.

    Extracts title / pub_no / index_no and the full text from the article
    HTML, stages rows for ``policy_latest`` and ``policy_fulltext_latest``,
    and records attachment info (other_dicts) for the source row.

    Raises:
        Exception: when no full-text container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the title rendered on the page; fall back to the list title.
    title = ''.join(res.xpath('//h1[@class="article_title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[contains(@class,"detail_xxgk_info")]//label[text()="文"]//following::div[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[contains(@class,"detail_xxgk_info")]//label/b[text()="索"]//following::div[1]/text()').extract()).strip()

    fulltext_xpath = '//div[@class="xlynr"]|//div[@id="acontent"]|//div[@id="zoomcon"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99725'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JYTHLJ"
    zt_provider = "jythljgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments found inside the full-text container go back onto the
    # source row as a JSON blob in other_dicts.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 黑龙江省民政厅 (Heilongjiang Department of Civil Affairs)
def policy_mzthljlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Heilongjiang civil-affairs JSON API.

    The payload is JSON with ``data.total`` / ``data.rows`` / ``data.results``.
    On the first page, one follow-up list task is enqueued per page; every
    result row becomes an article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        # This API reports its own page size in data.rows.
        rows = html_json['data']['rows']
        total = html_json['data']['total']
        total_page = math.ceil(total / rows)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: enqueue every list page of this channel.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['data']['results']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            article_json = dict()
            href = li['url']
            base_url = 'http://mzt.hlj.gov.cn'
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue
            # rawid = document file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99726'
            article_json["url"] = url
            article_json["title"] = li['title']
            # Some rows omit publishedTimeStr; default to '' instead of raising.
            article_json["pub_date"] = li.get('publishedTimeStr', '')
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_mzthljarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the Heilongjiang civil-affairs site.

    No per-article processing is required here; parsing happens in the
    ETL callback, so an empty DealModel is returned.
    """
    return DealModel()


def policy_mzthljarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Heilongjiang civil-affairs articles.

    Extracts title / index_no / organ and the full text from the article
    HTML, stages rows for ``policy_latest`` and ``policy_fulltext_latest``,
    and records attachment info (other_dicts) for the source row.

    Raises:
        Exception: when no full-text container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the title rendered on the page; fall back to the list title.
    title = ''.join(res.xpath('//ucaptitle//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    index_no = ''.join(res.xpath('//div[@class="detail_xxgk_info zcwj"]//b[contains(text(),"索")]/following::div[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="detail_xxgk_info zcwj"]//label[contains(text(),"发布机构")]/following::div[1]/text()').extract()).strip()
    # Pages often abbreviate the issuing organ to "省..."; restore the province name.
    if organ.startswith('省'):
        organ = '黑龙江' + organ

    fulltext_xpath = '//ucapcontent'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99726'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "MZTHLJ"
    zt_provider = "mzthljgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['organ'] = organ
    data['index_no'] = index_no

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments found inside the full-text container go back onto the
    # source row as a JSON blob in other_dicts.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   黑龙江省财政厅
def policy_czthljlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Heilongjiang Department of Finance (czt.hlj.gov.cn).

    On the first page, seeds one list task per result page so the rest of the
    pagination gets crawled; for every list item, queues an article task
    carrying url/title/pub_date in ``article_json``.

    Returns:
        DealModel with the page fan-out inserts (befor_dicts) and the
        per-article next-stage inserts (next_dicts).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        tcount = html_json['data']['total']
        total_page = math.ceil(tcount / 15)  # the site serves 15 items per list page
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Fan out: insert one copy of this list task for every page.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['data']['results']:
            temp = info_dicts.copy()
            # Article tasks run under the *next* stage's tag.
            temp["task_tag"] = temp.pop("task_tag_next")
            href = li['url']
            base_url = 'http://czt.hlj.gov.cn'
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                # Skip links that are not article detail pages.
                continue
            # Raw string avoids the invalid "\." escape warning; extracts the
            # filename stem (e.g. ".../abc123.html" -> "abc123") as the rawid.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            article_json = {
                "url": url,
                "title": li['title'],
                "pub_date": li['publishedTimeStr'],
            }
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99727'
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_czthljarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback: nothing to post-process, return an empty result."""
    return DealModel()


def policy_czthljarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Heilongjiang Department of Finance (czt.hlj.gov.cn) articles.

    Extracts the title, metadata fields and full-text fragment from the detail
    page HTML, stages rows for ``policy_latest`` / ``policy_fulltext_latest``,
    and records attachment info (``other_dicts``) back onto the source row.

    Raises:
        Exception: when no full-text node matches any known page template.
    """
    result = EtlDealModel()
    save_data = []

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured at list time.
    title = ''.join(res.xpath('//div[@class="gkzn_title"]//text()|//div[@class="content_nr"]//h4//text()|//h1[@class="article_title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = ''.join(res.xpath('//div[contains(@class,"detail_xxgk_info")]//label[text()="文"]//following::div[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[contains(@class,"detail_xxgk_info")]//label/b[text()="索"]//following::div[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//ul[@class="ul_table"]//span[contains(text(),"分类")]/following::span[1]/text()').extract()).strip()
    subject_word = ''.join(res.xpath('//ul[@class="ul_table"]//span[contains(text(),"关键字")]/following::span[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//ul[@class="ul_table"]//span[contains(text(),"有效性")]/following::span[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//ul[@class="ul_table"]//span[contains(text(),"发布机构")]/following::span[1]/text()').extract()).strip()
    if organ.startswith('省'):
        # Qualify bare provincial organ names with the province.
        organ = '黑龙江' + organ

    fulltext_xpath = '//div[@class="gkzn_word"]|//div[@class="neirong"]|//div[@id="zoomcon"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # A detail page without a recognizable body is a hard failure.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99727'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CZTHLJ"
    zt_provider = "czthljgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # debug trace of the generated record id

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    # written_date / impl_date / invalid_date are not exposed by this site.
    data['subject'] = subject
    data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Store attachment info (files linked from the body) on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   黑龙江省人力资源和社会保障厅
def policy_hrsshljlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Heilongjiang Department of Human Resources
    and Social Security (hrss.hlj.gov.cn).

    On the first page, seeds one list task per result page; for every list
    item, queues an article task carrying url/title/pub_date.

    Returns:
        DealModel with the page fan-out inserts (befor_dicts) and the
        per-article next-stage inserts (next_dicts).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        tcount = html_json['data']['total']
        total_page = math.ceil(tcount / 15)  # the site serves 15 items per list page
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Fan out: insert one copy of this list task for every page.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['data']['results']:
            temp = info_dicts.copy()
            # Article tasks run under the *next* stage's tag.
            temp["task_tag"] = temp.pop("task_tag_next")
            href = li['url']
            base_url = 'http://hrss.hlj.gov.cn'
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                # Skip links that are not article detail pages.
                continue
            # Raw string avoids the invalid "\." escape warning; extracts the
            # filename stem (e.g. ".../abc123.html" -> "abc123") as the rawid.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            article_json = {
                "url": url,
                "title": li['title'],
                "pub_date": li['publishedTimeStr'],
            }
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99728'
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_hrsshljlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Secondary list-page callback for hrss.hlj.gov.cn (JSON API, 10 per page).

    On the first page, seeds one list task per result page; every record
    becomes an article task whose detail URL is built from the record id.

    Returns:
        DealModel with the page fan-out inserts (befor_dicts) and the
        per-article next-stage inserts (next_dicts).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        total_page = math.ceil(html_json['data']['total'] / 10)  # 10 records per page
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Fan out: insert one copy of this list task for every page.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['data']['records']:
            temp = info_dicts.copy()
            # Article tasks run under the *next* stage's tag.
            temp["task_tag"] = temp.pop("task_tag_next")
            rawid = li['id']
            article_json = {
                "url": f'http://hrss.hlj.gov.cn/hrss/login/seeZwxxDetail.ks?id={rawid}',
                "title": li['title'],
                "pub_date": li['fbdate'],
            }
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99728'
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_hrsshljarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback: nothing to post-process, return an empty result."""
    return DealModel()


def policy_hrsshljarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Heilongjiang HR & Social Security (hrss.hlj.gov.cn) articles.

    Extracts the title, metadata fields and full-text fragment from the detail
    page HTML, stages rows for ``policy_latest`` / ``policy_fulltext_latest``,
    and records attachment info (``other_dicts``) back onto the source row.

    Raises:
        Exception: when no full-text node matches any known page template.
    """
    result = EtlDealModel()
    save_data = []

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured at list time.
    title = ''.join(res.xpath('//div[@class="news-title"]//text()|//div[@class="dbt"]//text()|//h1[@class="article_title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = ''.join(res.xpath(
        '//div[contains(@class,"detail_xxgk_info")]//label[text()="文"]//following::div[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath(
        '//div[contains(@class,"detail_xxgk_info")]//label/b[text()="索"]//following::div[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//ul[@class="dbtsf"]//span[contains(text(),"分类：")]/following::span[1]/text()').extract()).strip()
    subject_word = ''.join(res.xpath('//ul[@class="dbtsf"]//span[contains(text(),"关键字：")]/following::span[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//ul[@class="dbtsf"]//span[contains(text(),"时效：")]/following::span[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//ul[@class="dbtsf"]//span[contains(text(),"发文机构：")]/following::span[1]/text()').extract()).strip()
    if organ.startswith('省'):
        # Qualify bare provincial organ names with the province.
        organ = '黑龙江' + organ

    fulltext_xpath = '//div[@class="zwnr"]|//div[@class="news-con"]|//div[@id="zoomcon"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # A detail page without a recognizable body is a hard failure.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99728'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "HRSSHLJ"
    zt_provider = "hrsshljgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # debug trace of the generated record id

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    # written_date / impl_date / invalid_date are not exposed by this site.
    data['subject'] = subject
    data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Store attachment info (files linked from the body) on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   黑龙江省农业农村厅
def policy_nyncthljlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Heilongjiang Department of Agriculture and
    Rural Affairs (nynct.hlj.gov.cn).

    On the first page, seeds one list task per result page; for every list
    item, queues an article task carrying url/title/pub_date.

    Returns:
        DealModel with the page fan-out inserts (befor_dicts) and the
        per-article next-stage inserts (next_dicts).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        tcount = html_json['data']['total']
        total_page = math.ceil(tcount / 15)  # the site serves 15 items per list page
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Fan out: insert one copy of this list task for every page.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['data']['results']:
            temp = info_dicts.copy()
            # Article tasks run under the *next* stage's tag.
            temp["task_tag"] = temp.pop("task_tag_next")
            href = li['url']
            base_url = 'http://nynct.hlj.gov.cn'
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                # Skip links that are not article detail pages.
                continue
            # Raw string avoids the invalid "\." escape warning; extracts the
            # filename stem (e.g. ".../abc123.html" -> "abc123") as the rawid.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            article_json = {
                "url": url,
                "title": li['title'],
                "pub_date": li['publishedTimeStr'],
            }
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99729'
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_nyncthljarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback: nothing to post-process, return an empty result."""
    return DealModel()


def policy_nyncthljarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Heilongjiang Agriculture and Rural Affairs
    (nynct.hlj.gov.cn) articles.

    Extracts the title, metadata fields and full-text fragment from the detail
    page HTML, stages rows for ``policy_latest`` / ``policy_fulltext_latest``,
    and records attachment info (``other_dicts``) back onto the source row.

    Raises:
        Exception: when no full-text node matches any known page template.
    """
    result = EtlDealModel()
    save_data = []

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured at list time.
    title = ''.join(res.xpath('//div[@class="l-n2"]//text()|//div[@class="content-body"]//h3//text()|//h1[@class="article_title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = ''.join(res.xpath(
        '//div[contains(@class,"detail_xxgk_info")]//label[text()="文"]//following::div[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath(
        '//div[contains(@class,"detail_xxgk_info")]//label/b[text()="索"]//following::div[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="content-head"]//li[contains(text(),"分") and contains(text(),"类：")]/following::li[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="content-head"]//li[contains(text(),"发文机构")]/following::li[1]/text()').extract()).strip()
    if organ.startswith('省'):
        # Qualify bare provincial organ names with the province.
        organ = '黑龙江' + organ

    fulltext_xpath = '//div[@class="content-body"]|//div[@class="l-n6 f-div"]|//div[@id="zoomcon"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # A detail page without a recognizable body is a hard failure.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99729'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "NYNCTHLJ"
    zt_provider = "nyncthljgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # debug trace of the generated record id

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    # written_date / impl_date / invalid_date / subject_word / legal_status
    # are not exposed by this site.
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Store attachment info (files linked from the body) on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   黑龙江省住房和城乡建设厅
def policy_zfcxjsthljlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Heilongjiang Department of Housing and
    Urban-Rural Development (zfcxjst.hlj.gov.cn).

    On the first page, seeds one list task per result page; for every list
    item, queues an article task carrying url/title/pub_date.

    Returns:
        DealModel with the page fan-out inserts (befor_dicts) and the
        per-article next-stage inserts (next_dicts).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        tcount = html_json['data']['total']
        total_page = math.ceil(tcount / 15)  # the site serves 15 items per list page
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Fan out: insert one copy of this list task for every page.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['data']['results']:
            temp = info_dicts.copy()
            # Article tasks run under the *next* stage's tag.
            temp["task_tag"] = temp.pop("task_tag_next")
            href = li['url']
            base_url = 'http://zfcxjst.hlj.gov.cn'
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                # Skip links that are not article detail pages.
                continue
            # Raw string avoids the invalid "\." escape warning; extracts the
            # filename stem (e.g. ".../abc123.html" -> "abc123") as the rawid.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            article_json = {
                "url": url,
                "title": li['title'],
                "pub_date": li['publishedTimeStr'],
            }
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99730'
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_zfcxjsthljarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback: nothing to post-process, return an empty result."""
    return DealModel()


def policy_zfcxjsthljarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Heilongjiang Housing and Urban-Rural Development
    (zfcxjst.hlj.gov.cn) articles.

    Extracts the title, metadata fields and full-text fragment from the detail
    page HTML, stages rows for ``policy_latest`` / ``policy_fulltext_latest``,
    and records attachment info (``other_dicts``) back onto the source row.

    Raises:
        Exception: when no full-text node matches any known page template.
    """
    result = EtlDealModel()
    save_data = []

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured at list time.
    title = ''.join(res.xpath('//div[@class="dbt"]//text()|//div[@class="xly"]//h2//text()|//h1[@class="article_title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = ''.join(res.xpath(
        '//div[contains(@class,"detail_xxgk_info")]//label[text()="文"]//following::div[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath(
        '//div[contains(@class,"detail_xxgk_info")]//label/b[text()="索"]//following::div[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//ul[@class="dbtsf"]//span[contains(text(),"发布机构：")]/following::span[1]/text()').extract()).strip()
    if organ.startswith('省'):
        # Qualify bare provincial organ names with the province.
        organ = '黑龙江' + organ

    fulltext_xpath = '//div[@class="zwnr"]|//div[@class="main"]|//div[@id="zoomcon"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # A detail page without a recognizable body is a hard failure.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99730'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "ZFCXJSTHLJ"
    zt_provider = "zfcxjsthljgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # debug trace of the generated record id

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    # written_date / impl_date / invalid_date / subject / subject_word /
    # legal_status are not exposed by this site.

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Store attachment info (files linked from the body) on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   黑龙江省卫生健康委员会
def policy_wsjkwhljlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Heilongjiang Health Commission
    (wsjkw.hlj.gov.cn).

    On the first page, seeds one list task per result page; for every list
    item, queues an article task carrying url/title/pub_date.

    Returns:
        DealModel with the page fan-out inserts (befor_dicts) and the
        per-article next-stage inserts (next_dicts).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        tcount = html_json['data']['total']
        total_page = math.ceil(tcount / 15)  # the site serves 15 items per list page
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Fan out: insert one copy of this list task for every page.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['data']['results']:
            temp = info_dicts.copy()
            # Article tasks run under the *next* stage's tag.
            temp["task_tag"] = temp.pop("task_tag_next")
            href = li['url']
            base_url = 'http://wsjkw.hlj.gov.cn'
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                # Skip links that are not article detail pages.
                continue
            # Raw string avoids the invalid "\." escape warning; extracts the
            # filename stem (e.g. ".../abc123.html" -> "abc123") as the rawid.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            article_json = {
                "url": url,
                "title": li['title'],
                "pub_date": li['publishedTimeStr'],
            }
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99731'
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_wsjkwhljarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback: nothing to post-process, return an empty result."""
    return DealModel()


def policy_wsjkwhljarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for wsjkw.hlj.gov.cn article pages.

    Parses title, document metadata and fulltext out of the fetched HTML,
    builds the ``policy_latest`` / ``policy_fulltext_latest`` rows, and
    writes any attachment info back onto the source row via ``other_dicts``.

    Raises:
        Exception: when none of the fulltext containers match the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: prefer the one embedded in the page, fall back to the
    # list-page title carried in article_json.
    title = ''.join(res.xpath('//div[@class="gknb_top"]/p//text()|//div[@class="main"]//h4//text()|//h1[@class="article_title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = ''.join(res.xpath(
        '//div[contains(@class,"detail_xxgk_info")]//label[text()="文"]//following::div[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath(
        '//div[contains(@class,"detail_xxgk_info")]//label/b[text()="索"]//following::div[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//ul[@class="ul_table"]//span[contains(text(),"主题分类")]/following::span[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[contains(@class,"detail_xxgk_info")]//label[text()="发布机构："]//following::div[1]/text()').extract()).strip()
    # The site abbreviates the issuing organ ("省..."); restore the province prefix.
    if organ.startswith('省'):
        organ = '黑龙江' + organ

    fulltext_xpath = '//div[@class="gknb_content"]|//div[@class="main"]|//div[@id="zoomcon"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail with context instead of a bare ``raise Exception``.
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99731'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "WSJKWHLJ"
    zt_provider = "wsjkwhljgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (if any) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Harbin City, Heilongjiang Province
def policy_harbinlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for www.harbin.gov.cn policy search results.

    On the first page, fans out insert tasks for every list page; for each
    JSON result row, queues an article-level task carrying url/title/pub_date
    in ``article_json``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        tcount = html_json['data']['total']
        # The site serves 15 records per page.
        total_page = math.ceil(tcount / 15)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining page tasks.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = html_json['data']['results']
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li['url']
            base_url = 'https://www.harbin.gov.cn'
            url = href if 'http' in href else base_url + href
            # Skip anything that is not a detail page (.htm/.html).
            if 'htm' not in url:
                continue
            # rawid = file name without extension; raw string avoids the
            # invalid "\." escape warning of the original pattern.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99732'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li['publishedTimeStr']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_harbinarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for www.harbin.gov.cn; extraction is done
    in the corresponding ETL callback."""
    return DealModel()


def policy_harbinarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for www.harbin.gov.cn article pages.

    Extracts title, publish date, document metadata and fulltext from the
    fetched HTML (two page templates are handled), emits the
    ``policy_latest`` / ``policy_fulltext_latest`` rows and records
    attachment info back via ``other_dicts``.

    Raises:
        Exception: when none of the fulltext containers match the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: page heading -> <meta ArticleTitle> -> list-page title.
    title = ''.join(res.xpath('//h2[@class="title"]/text()|//div[@class="sp_title"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Publish-date fallbacks when the list page carried no date.
    if not pub_date:
        pub_date_info = ''.join(res.xpath('//li[contains(text(),"发布日期：")]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    if not pub_date:
        pub_date_info = ''.join(res.xpath('//table[@class="xx_tab"]//td[contains(text(),"发文日期：")]/following::td[1]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    # Two page templates: the "detail detail_info" layout vs the xx_tab table.
    if 'class="detail detail_info"' in html:
        pub_no = ''.join(res.xpath(
            '//div[contains(@class,"detail_info")]//label[text()="文"]//following::div[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath(
            '//div[contains(@class,"detail_info")]//label/b[text()="索"]//following::div[1]/text()').extract()).strip()
        subject = ''.join(res.xpath(
            '//div[contains(@class,"detail_info")]//label/b[text()="主题分类："]//following::div[1]/text()').extract()).strip()
        legal_status = ''.join(res.xpath(
            '//div[contains(@class,"detail_info")]//label/b[text()="时"]//following::div[1]/text()').extract()).strip()
    else:
        pub_no = ''.join(res.xpath('//table[@class="xx_tab"]//td[contains(text(),"文号：")]/following::td[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath('//table[@class="xx_tab"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
        subject = ''.join(res.xpath('//table[@class="xx_tab"]//td[contains(text(),"主题分类：")]/following::td[1]/text()').extract()).strip()
        legal_status = ''.join(res.xpath('//table[@class="xx_tab"]//td[contains(text(),"时效：")]/following::td[1]/text()').extract()).strip()

    fulltext_xpath = '//div[@id="zoom"]|//div[@id="detailContent"]|//div[@id="zoomcon"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail with context instead of a bare ``raise Exception``.
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99732'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "HARBIN"
    zt_provider = "harbingovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (if any) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Qiqihar City, Heilongjiang Province
def policy_qqhrlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for www.qqhr.gov.cn.

    Derives the total page count from the ``changePage('N`` snippet in the
    HTML, fans out the remaining page tasks on page 1, and queues one
    article task per list row. ``list_rawid == '8'`` uses a different table
    layout from the other list pages.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string: '\(' and '\d' are invalid escapes in a plain string.
        max_count = re.findall(r"changePage\('(\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[-1]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 schedules pages 2..N (page 1 itself is already fetched).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"messagekey": list_json["messagekey"], "menuid": list_json["menuid"],
                       "departId": list_json["departId"], "list_url": list_json["list_url"]}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        if '8' == callmodel.sql_model.list_rawid:
            li_list = res.xpath('//span[@class="circle"]/ancestor::tr[1]')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[2]/a/@href').extract_first()
                base_url = 'http://www.qqhr.gov.cn/'
                url = href if 'http' in href else base_url + href
                # rawid is the trailing query-string value of the detail URL.
                rawid = url.split('=')[-1].strip()
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99733'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[2]/a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('td[3]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            li_list = res.xpath('//tbody[@id="list-content"]/tr|//div[@class="rules-content"]/a')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[2]/a/@href|@href').extract_first()
                base_url = 'http://www.qqhr.gov.cn/'
                url = href if 'http' in href else base_url + href
                rawid = url.split('=')[-1].strip()
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99733'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[2]/a/text()|span[1]/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('td[5]/text()|span[2]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_qqhrarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for www.qqhr.gov.cn; parsing is deferred
    to the ETL callback."""
    return DealModel()


def policy_qqhrarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for www.qqhr.gov.cn article pages.

    Extracts title, metadata (from the ``dbtsf`` list) and fulltext, emits
    the ``policy_latest`` / ``policy_fulltext_latest`` rows and records
    attachment info back via ``other_dicts``.

    Raises:
        Exception: when none of the fulltext containers match the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: page heading with fallback to the list-page title.
    title = ''.join(res.xpath('//div[@class="dbt"]//text()|//div[@class="title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//ul[@class="dbtsf"]//span[contains(text(),"文号：")]/following::span[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//ul[@class="dbtsf"]//span[contains(text(),"索引号：")]/following::span[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//ul[@class="dbtsf"]//span[contains(text(),"分类：")]/following::span[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//ul[@class="dbtsf"]//span[contains(text(),"成文日期：")]/following::span[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//ul[@class="dbtsf"]//span[contains(text(),"有效性：")]/following::span[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//ul[@class="dbtsf"]//span[contains(text(),"发布机构：")]/following::span[1]/text()').extract()).strip()
    # The site abbreviates the organ ("市..."); restore the city prefix.
    if organ.startswith('市'):
        organ = '齐齐哈尔' + organ

    fulltext_xpath = '//div[@id="counet"]|//div[@class="file"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail with context instead of a bare ``raise Exception``.
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99733'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "QQHR"
    zt_provider = "qqhrgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (if any) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Jixi City, Heilongjiang Province
def policy_jixilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for www.jixi.gov.cn search results.

    Computes the page count from the JSON payload's total/rows, fans out
    the page tasks on page 1, and queues one article task per result row.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        rows = html_json['data']['rows']
        total = html_json['data']['total']
        total_page = math.ceil(total / rows)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining page tasks.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = html_json['data']['results']
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li['url']
            base_url = 'http://www.jixi.gov.cn'
            url = href if 'http' in href else base_url + href
            # Skip anything that is not a detail page (.htm/.html).
            if 'htm' not in url:
                continue

            # rawid = file name without extension; raw string avoids the
            # invalid "\." escape warning of the original pattern.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99734'
            article_json["url"] = url
            article_json["title"] = li['title']
            # Some rows lack a publish date; default to empty.
            pub_date = li.get('publishedTimeStr', '')
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jixiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for www.jixi.gov.cn; extraction happens in
    the corresponding ETL callback."""
    return DealModel()


def policy_jixiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for www.jixi.gov.cn article pages.

    Extracts title, metadata and fulltext, emits the ``policy_latest`` /
    ``policy_fulltext_latest`` rows, and records attachment info (from both
    the fulltext container and the ``linkCon`` block) via ``other_dicts``.

    Raises:
        Exception: when none of the fulltext containers match the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: page heading with fallback to the list-page title.
    title = ''.join(res.xpath('//div[@class="articleTitle"]//h2//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="detail_xxgk_info zcwj"]//label[text()="文"]/following::div[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="detail_xxgk_info zcwj"]//b[contains(text(),"索")]/following::div[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="detail_xxgk_info zcwj"]//label[text()="成文日期："]/following::div[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="detail_xxgk_info zcwj"]//label[text()="时"]/following::div[1]/text()').extract()).strip()

    fulltext_xpath = '//div[@class="testCon"]|//div[@class="articleCon"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail with context instead of a bare ``raise Exception``.
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99734'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JIXI"
    zt_provider = "jixigovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments can live in the fulltext container or in the linkCon block.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, f'(//div[@class="linkCon"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Hegang City, Heilongjiang Province
def policy_heganglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for www.hegang.gov.cn (ul/li list layout).

    Reads the page count from the ``showListPage(N, M`` snippet, fans out
    pages 2..N on page 1 (encoding the page number into ``page_info``),
    and queues one article task per list item.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string: '\(' and '\d' are invalid escapes in a plain string.
        max_count = re.findall(r"showListPage\(\d+, (\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 schedules pages 2..N (page 1 itself is already fetched).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f'{list_json["page_info"]}_{page}'}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="lb_right"]/ul/li|//div[@class="xxgk-list"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = 'http://www.hegang.gov.cn'
            url = href if 'http' in href else base_url + href
            # rawid = file name without extension; raw string avoids the
            # invalid "\." escape warning of the original pattern.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99735'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('a/span/text()|span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_heganglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for www.hegang.gov.cn (table layout variant).

    Reads the page count from the "共&nbsp;N&" snippet, fans out pages
    2..N on page 1 (reusing the original ``list_json``), and queues one
    article task per table row.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string: '\d' is an invalid escape in a plain string.
        max_count = re.findall(r"共&nbsp;(\d+)&", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 schedules pages 2..N (page 1 itself is already fetched).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//table/tbody/tr')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('td[2]/a/@href').extract_first()
            base_url = 'http://www.hegang.gov.cn'
            url = href if 'http' in href else base_url + href
            # rawid = file name without extension; raw string avoids the
            # invalid "\." escape warning of the original pattern.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99735'
            article_json["url"] = url
            article_json["title"] = li.xpath('td[2]/a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('td[5]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_hegangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for www.hegang.gov.cn; parsing is handled
    by the ETL callback."""
    return DealModel()


def policy_hegangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Hegang (鹤岗) government policy article pages.

    Parses the downloaded article HTML, extracts the policy metadata
    (title, document number, index number, subject, legal status, organ)
    and the fulltext, stages rows for the ``policy_latest`` and
    ``policy_fulltext_latest`` tables, and records attachment info back
    onto the source row via ``other_dicts``.

    :param callmodel: callback model carrying the downloaded page and the
        article row produced at the list stage.
    :raises Exception: when no fulltext container can be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="xxgk-right xxgk-end"]//h1//text()|//div[@class="nr_con"]//h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//ul[@class="xxgk-end-info"]//span[contains(text(),"文号：")]/following::span[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//ul[@class="xxgk-end-info"]//span[contains(text(),"索引号：")]/following::span[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//ul[@class="xxgk-end-info"]//span[contains(text(),"分类：")]/following::span[1]/text()').extract()).strip()
    subject_word = ''.join(res.xpath('//ul[@class="xxgk-end-info"]//span[contains(text(),"关键字：")]/following::span[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//ul[@class="xxgk-end-info"]//span[contains(text(),"时效：")]/following::span[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//ul[@class="xxgk-end-info"]//span[contains(text(),"发布机构：")]/following::span[1]/text()').extract()).strip()
    # Pages often abbreviate the issuing organ as "市..."; prefix the city name.
    if organ.startswith('市'):
        organ = '鹤岗' + organ

    fulltext_xpath = '//div[@class="xxgk-end-body"]|//div[@class="nr_p"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Missing fulltext usually means the page layout changed; fail loudly.
        raise Exception(f'hegang fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99735'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "HEGANG"
    zt_provider = "heganggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info found inside the fulltext area on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   黑龙江省双鸭山市
def policy_shuangyashanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Shuangyashan (双鸭山) policy pages.

    On the first page, fans out insert rows for every remaining list page;
    for every page, extracts the article links and stages them as rows for
    the next (article) task stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # The pager renders the total as ">current/total<".
        max_count = re.findall(r">\d+/(\d+)<", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 fans out the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="newslist"]/li|//div[@class="rightList"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if '2' == callmodel.sql_model.list_rawid:
                base_url = 'http://www.shuangyashan.gov.cn'
                url = href if 'http' in href else base_url + href
                art_url = url
            else:
                # zwgk links carry an id parameter; build both the display URL
                # and the crawlable article-detail URL from it.
                url = f'http://zwgk.shuangyashan.gov.cn/NewCMS/index/html/zwgkx/index_zfwj.jsp?id={href.split("=")[-1].strip()}'
                art_url = f'http://zwgk.shuangyashan.gov.cn/NewCMS/index/html/zwgkx/index/detail_wj.jsp?id={href.split("=")[-1].strip()}'
            rawid = url.split('=')[-1].strip()
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99736'
            article_json["url"] = url
            article_json["art_url"] = art_url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('a/span/text()|span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_shuangyashanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Shuangyashan: nothing to do, return an
    empty DealModel."""
    return DealModel()


def policy_shuangyashanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Shuangyashan (双鸭山) government policy article pages.

    Extracts metadata and fulltext from the downloaded HTML, stages rows
    for ``policy_latest`` / ``policy_fulltext_latest``, and writes the
    attachment info back to the source row via ``other_dicts``.

    :raises Exception: when no fulltext container can be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//h1[@class="article_title"]//text()|//h1[@class="rightTextTitle"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="rightIndexes"]//b[contains(text(),"文号：")]/parent::li[1]/text()').extract()).strip()
    # The label may be captured with the value; keep only the value part and
    # treat a bare "-" placeholder as empty.
    pub_no = pub_no.split('：')[-1].replace('-', '').strip()
    index_no = ''.join(res.xpath('//div[@class="rightIndexes"]//b[contains(text(),"索引号：")]/parent::li[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="rightIndexes"]//b[contains(text(),"分类：")]/parent::li[1]/text()').extract()).strip()
    subject_word = ''.join(res.xpath('//div[@class="rightIndexes"]//b[contains(text(),"主 题 词：")]/parent::li[1]/text()').extract()).strip()
    subject_word = subject_word.split('：')[-1].replace('-', '').strip()
    # NOTE(review): legal_status was extracted here before, but its data
    # assignment was commented out; the unused extraction has been dropped.
    organ = ''.join(res.xpath('//div[@class="rightIndexes"]//b[contains(text(),"发布机构：")]/parent::li[1]/text()').extract()).strip()
    # Pages often abbreviate the issuing organ as "市..."; prefix the city name.
    if organ.startswith('市'):
        organ = '双鸭山' + organ

    fulltext_xpath = '//div[@id="rightTextP"]|//div[@class="article"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Missing fulltext usually means the page layout changed; fail loudly.
        raise Exception(f'shuangyashan fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99736'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "SHUANGYASHAN"
    zt_provider = "shuangyashangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['subject_word'] = subject_word

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info found inside the fulltext area on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   黑龙江省大庆市
def policy_daqinglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Daqing (大庆) policy search API responses.

    The response is JSON; on the first page, fans out insert rows for all
    list pages, and on every page stages the article links for the next
    (article) task stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        tcount = html_json['data']['total']
        # The search API serves 15 results per page.
        total_page = math.ceil(tcount / 15)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = html_json['data']['results']
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li['url']
            base_url = 'https://www.daqing.gov.cn'
            url = href if 'http' in href else base_url + href
            # Only static article pages (*.htm/*.html) are crawlable.
            if 'htm' not in url:
                continue
            # rawid = the filename without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99737'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li['publishedTimeStr']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_daqingarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Daqing: nothing to do, return an empty
    DealModel."""
    return DealModel()


def policy_daqingarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Daqing (大庆) government policy article pages.

    Handles two page layouts (a newer "meta_main" layout and the older
    table layout), extracts metadata and fulltext, stages rows for
    ``policy_latest`` / ``policy_fulltext_latest``, and writes attachment
    info back to the source row via ``other_dicts``.

    :raises Exception: when no fulltext container can be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: metadata table cell, then the ArticleTitle meta tag, then the
    # list-page title as a last resort.
    title = ''.join(res.xpath('//strong[contains(text(),"名称：")]/parent::td[1]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    if 'meta_main' in html:
        # The "meta_main" layout only exposes document and index numbers.
        pub_no = ''.join(res.xpath(
            '//div[contains(@class,"meta_main")]//label[text()="文"]//following::div[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath(
            '//div[contains(@class,"meta_main")]//label/b[text()="索"]//following::div[1]/text()').extract()).strip()
        subject = ''
        subject_word = ''
        legal_status = ''
        organ = ''
    else:
        pub_no = ''.join(res.xpath('//strong[contains(text(),"文号：")]/parent::td[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath('//strong[contains(text(),"索引号:")]/parent::td[1]/text()').extract()).strip()
        subject = ''.join(res.xpath('//strong[contains(text(),"分类：")]/parent::td[1]/text()').extract()).strip()
        subject_word = ''.join(res.xpath('//strong[contains(text(),"关键字：")]/parent::td[1]/text()').extract()).strip()
        legal_status = ''.join(res.xpath('//strong[contains(text(),"时效：")]/parent::td[1]/text()').extract()).strip()
        organ = ''.join(res.xpath('//strong[contains(text(),"发布机构：")]/parent::td[1]/text()').extract()).strip()
    # Pages often abbreviate the issuing organ as "市..."; prefix the city name.
    if organ.startswith('市'):
        organ = '大庆' + organ

    fulltext_xpath = '//div[contains(@class,"pic1")]|//td[@class="sBiyTnrzZe"]|//div[contains(@class,"social-share")]/ancestor::table[1]/ancestor::table[1]|//div[@id="zoomcon"]'
    fulltext = ''.join(res.xpath(fulltext_xpath).extract())
    if not fulltext:
        # Missing fulltext usually means the page layout changed; fail loudly.
        raise Exception(f'daqing fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99737'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    # NOTE(review): "DAQIANG" looks like a misspelling of "DAQING", but it is
    # a stored data value — confirm downstream usage before changing it.
    product = "DAQIANG"
    zt_provider = "daqinggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info found inside the fulltext area on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   黑龙江省伊春市
def policy_yclist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Yichun (伊春) policy search API responses.

    The response is JSON; on the first page, fans out insert rows for all
    list pages, and on every page stages the article links for the next
    (article) task stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        tcount = html_json['data']['total']
        # The search API serves 15 results per page.
        total_page = math.ceil(tcount / 15)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = html_json['data']['results']
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li['url']
            base_url = 'https://www.yc.gov.cn'
            url = href if 'http' in href else base_url + href
            # Only static article pages (*.htm/*.html) are crawlable.
            if 'htm' not in url:
                continue
            # rawid = the filename without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99738'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li['publishedTimeStr']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_ycarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Yichun: nothing to do, return an empty
    DealModel."""
    return DealModel()


def policy_ycarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Yichun (伊春) government policy article pages.

    Extracts metadata and fulltext from the downloaded HTML, repairing a
    missing/placeholder publish date from the page itself, stages rows for
    ``policy_latest`` / ``policy_fulltext_latest``, and writes attachment
    info back to the source row via ``other_dicts``.

    :raises Exception: when no fulltext container can be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: on-page headline, then the ArticleTitle meta tag, then the
    # list-page title as a last resort.
    title = ''.join(res.xpath('//div[@class="txtbox"]//h1//text()|//div[@class="content"]//h1//text()|//div[@class="title_zn1"][1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # The list API sometimes delivers an empty or "0000"-placeholder date;
    # recover it from the article page in that case.
    if not pub_date or '0000' in pub_date:
        pub_date_info = ''.join(res.xpath('//div[contains(text(),"发布日期：")]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    pub_no = ''.join(res.xpath('//div[contains(@class,"table-item")]//span[contains(text(),"发文字号")]/following::div[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[contains(@class,"table-item")]//span[contains(text(),"成文日期")]/following::div[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[contains(@class,"table-item")]//span[contains(text(),"效：")]/following::div[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[contains(@class,"table-item")]//span[contains(text(),"发文机关")]/following::div[1]/text()').extract()).strip()
    # Pages often abbreviate the issuing organ as "市..."; prefix the city name.
    if organ.startswith('市'):
        organ = '伊春' + organ

    fulltext_xpath = '//div[@class="guide"]|//div[@class="con"]|//div[@id="zoomcon"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Missing fulltext usually means the page layout changed; fail loudly.
        raise Exception(f'yichun fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99738'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "YC"
    zt_provider = "ycgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info found inside the fulltext area on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   黑龙江省佳木斯市
def policy_jmslist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Jiamusi (佳木斯) policy pages.

    On the first page, fans out insert rows for every remaining list page;
    for every page, extracts the article links and stages them as rows for
    the next (article) task stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # The pager renders the total as ">/total<".
        max_count = re.findall(r">/(\d+)<", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 fans out the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="jms_listUl"]/li|//div[@class="rightList"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if '000100050009' in callmodel.sql_model.list_rawid:
                base_url = 'https://www.jms.gov.cn'
                url = href if 'http' in href else base_url + href
                art_url = url
            else:
                base_url = 'https://zejm.jms.gov.cn'
                url = href if 'http' in href else base_url + href
                # The crawlable article page lives under content/cn/ with an
                # "op_cn_" prefix.
                art_url = url.replace('content/', 'content/cn/').replace('op_', 'op_cn_')
            # Only static article pages (*.htm/*.html) are crawlable.
            if 'htm' not in url:
                continue
            # rawid = the filename without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99739'
            article_json["url"] = url
            article_json["art_url"] = art_url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            # Skip entries without a publish date (evaluate the xpath once).
            date_text = li.xpath('span/text()').extract_first()
            if not date_text:
                continue
            article_json["pub_date"] = date_text.strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jmsarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Jiamusi: nothing to do, return an empty
    DealModel."""
    return DealModel()


def policy_jmsarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Jiamusi (佳木斯) government policy article pages.

    Extracts metadata and fulltext from the downloaded HTML, stages rows
    for ``policy_latest`` / ``policy_fulltext_latest``, and writes the
    attachment info back to the source row via ``other_dicts``.

    :raises Exception: when no fulltext container can be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="jms_contentT"]//h1//text()|//h1[@class="rightTextTitle"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="rightIndexes"]//b[contains(text(),"文号：")]/parent::li[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="rightIndexes"]//b[contains(text(),"索引号：")]/parent::li[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="rightIndexes"]//b[contains(text(),"分类：")]/parent::li[1]/text()').extract()).strip()
    subject_word = ''.join(res.xpath('//div[@class="rightIndexes"]//b[contains(text(),"关键词：")]/parent::li[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="rightIndexes"]//b[contains(text(),"成文日期：")]/parent::li[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="rightIndexes"]//b[contains(text(),"时效：")]/parent::li[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="rightIndexes"]//b[contains(text(),"发布机构：")]/parent::li[1]/text()').extract()).strip()
    # Pages often abbreviate the issuing organ as "市..."; prefix the city name.
    if organ.startswith('市'):
        organ = '佳木斯' + organ

    fulltext_xpath = '//div[@id="rightTextP"]|//div[@class="jms_contentC"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Missing fulltext usually means the page layout changed; fail loudly.
        raise Exception(f'jiamusi fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99739'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JMS"
    zt_provider = "jmsgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info found inside the fulltext area on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   黑龙江省七台河市
def policy_qthlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Qitaihe (七台河) policy pages.

    On the first page (index 0), fans out insert rows for the remaining
    list pages (their URLs carry a ``_<n>`` suffix); for every page,
    extracts the article links and stages them for the next task stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count comes from the createPageHTML(<total>, ...) pager call.
        max_count = re.findall(r"createPageHTML\((\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # Page 0 has no suffix, so fan out pages 1..total_page-1.
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f'{list_json["page_info"]}_{page}'}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//td[@class="list12"]/parent::tr|//ul[@id="_fill"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('td[2]/a/@href|a/@href').extract_first()
            if not href:
                continue
            base_url = f'http://www.qth.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            # Only static article pages (*.htm/*.html) are crawlable.
            if 'htm' not in url:
                continue
            # rawid = the filename without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99740'
            article_json["url"] = url
            article_json["title"] = li.xpath('td[2]/a/@title|a/@title').extract_first().strip()
            article_json["pub_date"] = li.xpath('td[3]/text()|.//span[@class="timeSpan"]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_qtharticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Qitaihe (七台河); no extra scheduling is
    needed at this stage, so an empty DealModel is returned."""
    return DealModel()


def policy_qtharticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Qitaihe (七台河) policy articles.

    Parses the article html, extracts metadata (pub_no, index_no, subject,
    organ, ...) and the fulltext, and builds the ``policy_latest`` /
    ``policy_fulltext_latest`` rows plus the attachment-info ("other_dicts")
    update for the source row.

    Raises:
        Exception: if no fulltext container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    # Title and publish date come from the list page, not the article html.
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    pub_no = ''.join(res.xpath('//ul[@class="dbtsf"]//span[contains(text(),"文号：")]/following::span[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//ul[@class="dbtsf"]//span[contains(text(),"索引号：")]/following::span[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//ul[@class="dbtsf"]//span[contains(text(),"分类：")]/following::span[1]/text()').extract()).strip()
    subject_word = ''.join(res.xpath('//ul[@class="dbtsf"]//span[contains(text(),"关键字：")]/following::span[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//ul[@class="dbtsf"]//span[contains(text(),"时效：")]/following::span[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//ul[@class="dbtsf"]//span[contains(text(),"发布机构：")]/following::span[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # Qualify bare "市..." organ names with the city name.
        organ = '七台河' + organ

    fulltext_xpath = '//span[@id="zoom"]|//div[@class="zwnr"]|//div[@class="wz_zoom  scroll_cont ScrollStyle"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context instead of a message-less Exception.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99740'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "QTH"
    zt_provider = "qthgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (if any) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   黑龙江省牡丹江市
def policy_mdjlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Mudanjiang (牡丹江) government policy pages.

    On page 0 it computes the total page count from the list html and
    schedules the remaining list pages; on every page it extracts article
    links and queues them for the article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        if 'xzgfxwj_8188' in callmodel.sql_model.list_rawid:
            # This column exposes a record count; 15 records per page.
            max_count = re.findall(r'm_nRecordCount = "(\d+)', para_dicts["data"]["1_1"]['html'])
            max_count = int(max_count[0]) if max_count else 1
            total_page = math.ceil(max_count / 15)
        else:
            # Other columns expose the page count directly via createPageHTML().
            max_count = re.findall(r"createPageHTML\((\d+)", para_dicts["data"]["1_1"]['html'])
            max_count = int(max_count[0]) if max_count else 1
            total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only the first page fans out the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f'{list_json["page_info"]}_{page}', "url_part": list_json["url_part"]}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        if 'xzgfxwj_8188' in callmodel.sql_model.list_rawid:
            li_list = res.xpath('//div[@id="documentContainer"]/div[@class="row"]')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('li[@class="WJmc"]//a/@href').extract_first()
                if not href:
                    # Skip rows without a link (e.g. header rows).
                    continue
                base_url = f'http://zwgk.mdj.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                rawid_match = re.findall(r'(.*?)\.', url.split('/')[-1])
                if not rawid_match:
                    # No file extension in the last path segment: no rawid.
                    continue
                temp["rawid"] = rawid_match[0]
                temp["sub_db_id"] = '99741'
                article_json["url"] = url
                article_json["title"] = (li.xpath('li[@class="WJmc"]//a/text()').extract_first() or '').strip()
                article_json["pub_date"] = (li.xpath('li[@class="WJfbrq"]/text()').extract_first() or '').strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            li_list = res.xpath('//div[@class="min_h500 cont_list"]/ul/li|//ul[@id="_fill"]/li|//ul[@class="mt10"]/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                if not href:
                    continue
                # The tztg column lives on the main portal; everything else on zwgk.
                if 'shizheng/tztg' in callmodel.sql_model.list_rawid:
                    base_url = f'http://www.mdj.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                else:
                    base_url = f'http://zwgk.mdj.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                rawid_match = re.findall(r'(.*?)\.', url.split('/')[-1])
                if not rawid_match:
                    continue
                temp["rawid"] = rawid_match[0]
                temp["sub_db_id"] = '99741'
                article_json["url"] = url
                article_json["title"] = (li.xpath('a/text()').extract_first() or '').strip()
                article_json["pub_date"] = (li.xpath('a/span/text()|span/text()').extract_first() or '').strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_mdjarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Mudanjiang (牡丹江); nothing to schedule,
    so an empty DealModel is returned."""
    return DealModel()


def policy_mdjarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Mudanjiang (牡丹江) policy articles.

    Extracts metadata and fulltext from the article page and builds the
    ``policy_latest`` / ``policy_fulltext_latest`` rows plus the
    attachment-info ("other_dicts") update for the source row.

    Raises:
        Exception: if no fulltext container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the title rendered on the page; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="end_tit"]//h1//text()|//div[@class="docTitleCls"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="headInfo"]//b[contains(text(),"文　　号:")]/following::span[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="headInfo"]//b[contains(text(),"索 引 号:")]/parent::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="headInfo"]//b[contains(text(),"分类:")]/following::span[1]/text()').extract()).strip()
    subject_word = ''.join(res.xpath('//div[@class="headInfo"]//b[contains(text(),"主 题 词:")]/following::span[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="headInfo"]//b[contains(text(),"发布机构:")]/following::span[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # Qualify bare "市..." organ names with the city name.
        organ = '牡丹江' + organ

    fulltext_xpath = '//div[@id="ContentRegion"]|//div[@class="end_txt"]|//div[@id="end_txt"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context instead of a message-less Exception.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99741'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "MDJ"
    zt_provider = "mdjgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['subject_word'] = subject_word

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (if any) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   黑龙江省黑河市
def policy_heihelist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for Heihe (黑河); the list endpoint returns JSON.

    On page 1 it computes the total page count and schedules all pages; on
    every page it extracts article records and queues them for the article
    stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        tcount = html_json['data']['total']
        # 15 records per page.
        total_page = math.ceil(tcount / 15)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = html_json['data']['results']
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li['url']
            base_url = f'http://www.heihe.gov.cn'
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue
            rawid_match = re.findall(r'(.*?)\.', url.split('/')[-1])
            if not rawid_match:
                # No file extension in the last path segment: no rawid.
                continue
            temp["rawid"] = rawid_match[0]
            temp["sub_db_id"] = '99742'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li['publishedTimeStr']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_heihearticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Heihe (黑河); nothing to schedule, so an
    empty DealModel is returned."""
    return DealModel()


def policy_heihearticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Heihe (黑河) policy articles.

    Extracts metadata and fulltext from the article page and builds the
    ``policy_latest`` / ``policy_fulltext_latest`` rows plus the
    attachment-info ("other_dicts") update for the source row.

    Raises:
        Exception: if no fulltext container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the title rendered on the page; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="content-title"]//h1//text()|//div[@class="gblist"]//h2//text()|//h1[@class="article_title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[contains(@class,"detail_xxgk_info")]//label[text()="文"]//following::div[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[contains(@class,"detail_xxgk_info")]//label/b[text()="索"]//following::div[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//ul[@class="xxgk-fbt"]//span[contains(text(),"主题分类：")]/following::span[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//ul[@class="xxgk-fbt"]//span[contains(text(),"成文日期：")]/following::span[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//ul[@class="xxgk-fbt"]//span[contains(text(),"发文机关：")]/following::span[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # Qualify bare "市..." organ names with the city name.
        organ = '黑河' + organ

    fulltext_xpath = '//div[contains(@id,"vsb_content")]|//div[@id="zoomcon"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context instead of a message-less Exception.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99742'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "HEIHE"
    zt_provider = "heihegovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments can appear both inside the fulltext container and in the
    # paragraph right after it; collect both.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, f'(//div[@id="vsb_content"]/following::p[1])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   黑龙江省绥化市
def policy_suihualist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for Suihua (绥化); the list endpoint returns JSON.

    On page 1 it computes the total page count and schedules all pages; on
    every page it extracts article records and queues them for the article
    stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        tcount = html_json['data']['total']
        # 15 records per page.
        total_page = math.ceil(tcount / 15)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = html_json['data']['results']
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li['url']
            base_url = f'https://www.suihua.gov.cn'
            url = href if 'http' in href else base_url + href
            # Collapse duplicated slashes in the path, then restore the
            # scheme separator the first replace also collapsed.
            url = url.replace('//', '/').replace(':/', '://')
            if 'htm' not in url:
                continue
            rawid_match = re.findall(r'(.*?)\.', url.split('/')[-1])
            if not rawid_match:
                # No file extension in the last path segment: no rawid.
                continue
            temp["rawid"] = rawid_match[0]
            temp["sub_db_id"] = '99743'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li['publishedTimeStr']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_suihuaarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Suihua (绥化); nothing to schedule, so an
    empty DealModel is returned."""
    return DealModel()


def policy_suihuaarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Suihua (绥化) policy articles.

    Extracts metadata and fulltext from the article page and builds the
    ``policy_latest`` / ``policy_fulltext_latest`` rows plus the
    attachment-info ("other_dicts") update for the source row.

    Raises:
        Exception: if no fulltext container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the title rendered on the page; fall back to the list-page title.
    title = ''.join(res.xpath(
        '//div[@class="gkzn_title"]//text()|//div[@class="content_nr"]//h4//text()|//h1[@class="article_title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = ''.join(res.xpath('//div[contains(@class,"meta_main")]//label[text()="文"]//following::div[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[contains(@class,"meta_main")]//label/b[text()="索"]//following::div[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[contains(@class,"meta_main")]//label[text()="成文日期："]//following::div[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[contains(@class,"meta_main")]//label[text()="时"]//following::div[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[contains(@class,"meta_main")]//label[text()="发布机构："]//following::div[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # Qualify bare "市..." organ names with the city name.
        organ = '绥化' + organ

    fulltext_xpath = '//div[@class="gkzn_word"]|//div[@class="neirong"]|//div[@id="zoomcon"]|//div[@class="ListEnd_con"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context instead of a message-less Exception.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99743'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "SUIHUA"
    zt_provider = "suihuagovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    # pub_date was already cleaned above; the second clean_pubdate call is
    # kept as-is (assumed idempotent — TODO confirm).
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (if any) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   黑龙江省大兴安岭地区
def policy_dxallist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for Daxing'anling (大兴安岭); the list endpoint returns JSON.

    On page 1 it computes the total page count and schedules all pages; on
    every page it extracts article records and queues them for the article
    stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        tcount = html_json['data']['total']
        # 15 records per page.
        total_page = math.ceil(tcount / 15)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = html_json['data']['results']
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li['url']
            base_url = f'http://www.dxal.gov.cn'
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue
            rawid_match = re.findall(r'(.*?)\.', url.split('/')[-1])
            if not rawid_match:
                # No file extension in the last path segment: no rawid.
                continue
            temp["rawid"] = rawid_match[0]
            temp["sub_db_id"] = '99744'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li['publishedTimeStr']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_dxalarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Daxing'anling (大兴安岭); nothing to
    schedule, so an empty DealModel is returned."""
    return DealModel()


def policy_dxalarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Daxing'anling (大兴安岭) policy articles.

    Extracts metadata and fulltext from the article page and builds the
    ``policy_latest`` / ``policy_fulltext_latest`` rows plus the
    attachment-info ("other_dicts") update for the source row.

    Raises:
        Exception: if no fulltext container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the title rendered on the page; fall back to the list-page title.
    title = ''.join(res.xpath('//h2[@class="title"]//text()|//h1[@class="article_title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[contains(@class,"meta_main")]//label[text()="文"]//following::div[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[contains(@class,"meta_main")]//label/b[text()="索"]//following::div[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[contains(@class,"meta_main")]//label[text()="成文日期："]//following::div[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[contains(@class,"meta_main")]//label[text()="时"]//following::div[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[contains(@class,"meta_main")]//label[text()="发布机构："]//following::div[1]/text()').extract()).strip()
    if organ == '超级管理员':
        # CMS default account name, not a real publishing organ.
        organ = ''
    if organ.startswith('地区'):
        # Qualify bare "地区..." organ names with the prefecture name.
        organ = '大兴安岭' + organ

    fulltext_xpath = '//div[@class="conTxt"]|//div[@id="zoomcon"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context instead of a message-less Exception.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99744'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "DXAL"
    zt_provider = "dxalgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (if any) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   辽宁省发展和改革委员会 (Liaoning Development & Reform Commission)
def policy_fgwlnlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Liaoning DRC.

    On the first page it enqueues tasks for every remaining pagination page;
    on every page it inserts one next-stage (article) task per list entry.

    Returns:
        DealModel carrying the pagination inserts (befor_dicts) and the
        article-task inserts (next_dicts).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count advertised in the page footer, e.g. ">共12页".
        max_count = re.findall(">共(\d+)页", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # Parsed only to validate list_json; the raw string is reused below.
            list_json = json.loads(callmodel.sql_model.list_json)
            # BUGFIX: pages are 1-based on this site, so the last page is
            # total_page itself; the previous upper bound of `total_page`
            # silently dropped the final page (siblings use total_page + 1).
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="cont_main"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://fgw.ln.gov.cn/{callmodel.sql_model.list_rawid}.html'
            url = parse.urljoin(base_url, href)
            # Skip off-site or non-HTML links.
            if 'ln' not in url or 'htm' not in url:
                continue
            # Detail URLs end in ".../<rawid>/<file>.html"; use the directory part.
            rawid = url.split('/')[-2]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99745'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_fgwlnarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the Liaoning DRC; nothing extra to do here."""
    return DealModel()


def policy_fgwlnarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Liaoning DRC article detail pages.

    Extracts the title and full text from the downloaded HTML, builds one
    ``policy_latest`` row and one ``policy_fulltext_latest`` row, and writes
    attachment metadata back onto the task row via ``other_dicts``.

    Raises:
        Exception: if the full-text container cannot be found in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page headline; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="news-content-main"]//h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@id="ContTextSize"]|//div[@class="TRS_Editor"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # BUGFIX: was a bare, message-less `raise Exception` — include context
        # so the failing article can be identified from the error alone.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99745'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "FGWLN"
    zt_provider = "fgwlngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment metadata (or an empty JSON object) onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   辽宁省工业和信息化厅 (Liaoning Industry & IT Department)
def policy_gxtlnlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Liaoning Industry & IT Department.

    Two layouts exist: the ``zfxxgk_145234`` channel renders a plain <ul>
    list; every other channel renders a table of rows whose title lives in
    an inline <script> call.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall("countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Pages are 0-based here, so 0 .. total_page-1 covers them all.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f'{list_json["page_info"]}_{page}'}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        if 'zfxxgk_145234' in callmodel.sql_model.list_rawid:
            li_list = res.xpath('//ul[@class="xxgk_rulzd"]/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                base_url = f'http://gxt.ln.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99746'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            li_list = res.xpath('//a[@class="xgxw"]/ancestor::tr[1]')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[2]/a/@href').extract_first()
                base_url = f'http://gxt.ln.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99746'
                article_json["url"] = url
                # BUGFIX: the previous '//a[@class="xgxw"]/ancestor::tr[1]/...'
                # expression was document-absolute, so every row extracted the
                # FIRST row's title script. Query relative to the current row.
                title_info = li.xpath('td[2]/a/script/text()').extract_first().strip()
                article_json["title"] = re.findall("\('(.*?)', 50", title_info)[0]
                article_json["pub_date"] = li.xpath('td[3]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_gxtlnarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the Liaoning Industry & IT Department; no-op."""
    return DealModel()


def policy_gxtlnarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Liaoning Industry & IT Department article pages.

    Extracts the title and full text from the downloaded HTML, builds one
    ``policy_latest`` row and one ``policy_fulltext_latest`` row, and writes
    attachment metadata back onto the task row via ``other_dicts``.

    Raises:
        Exception: if the full-text container cannot be found in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page headline; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="zf_xxgktitle1"]//text()|//td[@class="nr_bt"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@id="zoom"]|//div[@id="content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # BUGFIX: was a bare, message-less `raise Exception` — include context
        # so the failing article can be identified from the error alone.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99746'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "GXTLN"
    zt_provider = "gxtlngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment metadata (or an empty JSON object) onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   辽宁省科学技术厅 (Liaoning Science & Technology Department)
def policy_kjtlnlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Liaoning Science & Technology Department.

    Enqueues the remaining pagination pages when handling page 1 and inserts
    one next-stage article task per qualifying list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_info = {"task_name": callmodel.sql_model.task_name,
                 "task_tag": callmodel.sql_model.task_tag,
                 "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result

    page_html = para_dicts["data"]["1_1"]['html']
    found = re.findall(" 共(\d+)页 ", page_html)
    total_page = int(found[0]) if found else 1
    page_index = int(callmodel.sql_model.page_index)
    if page_index == 1:
        # First page: schedule every remaining pagination page.
        sql_dict = deal_sql_dict(callmodel.sql_model.dict())
        di_model_bef = DealInsertModel()
        di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
        list_json = json.loads(callmodel.sql_model.list_json)
        for page in range(page_index + 1, total_page + 1):
            sql_dict["page"] = total_page
            sql_dict["page_index"] = page
            sql_dict["list_json"] = callmodel.sql_model.list_json
            di_model_bef.lists.append(sql_dict.copy())
        result.befor_dicts.insert.append(di_model_bef)

    di_model_next = DealInsertModel()
    di_model_next.insert_pre = CoreSqlValue.insert_ig_it
    res = Selector(text=page_html)
    base_url = f'http://kjt.ln.gov.cn/{callmodel.sql_model.list_rawid}.html'
    for item in res.xpath('//ul[@class="govCListBox"]/li'):
        url = parse.urljoin(base_url, item.xpath('a/@href').extract_first())
        # Skip off-site or non-HTML links.
        if 'ln' not in url or 'htm' not in url:
            continue
        row = base_info.copy()
        row["task_tag"] = row.pop("task_tag_next")
        row["rawid"] = url.split('/')[-2]
        row["sub_db_id"] = '99747'
        article_json = {
            "url": url,
            "title": item.xpath('a/text()').extract_first().strip(),
            "pub_date": item.xpath('span/text()').extract_first().strip(),
        }
        row["article_json"] = json.dumps(article_json, ensure_ascii=False)
        di_model_next.lists.append(row)
    result.next_dicts.insert.append(di_model_next)

    return result


def policy_kjtlnarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the Liaoning Science & Technology Department; no-op."""
    return DealModel()


def policy_kjtlnarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Liaoning Science & Technology Department article pages.

    Extracts the title and full text from the downloaded HTML, builds one
    ``policy_latest`` row and one ``policy_fulltext_latest`` row, and writes
    attachment metadata back onto the task row via ``other_dicts``.

    Raises:
        Exception: if the full-text container cannot be found in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page headline; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//p[@class="govXLTitle"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@class="govXLText"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # BUGFIX: was a bare, message-less `raise Exception` — include context
        # so the failing article can be identified from the error alone.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99747'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "KJTLN"
    zt_provider = "kjtlngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment metadata (or an empty JSON object) onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   辽宁省教育厅 (Liaoning Education Department)
def policy_jytlnlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Liaoning Education Department.

    Enqueues the remaining pagination pages when handling page 1 and inserts
    one next-stage article task per list entry (two list layouts supported).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_info = {"task_name": callmodel.sql_model.task_name,
                 "task_tag": callmodel.sql_model.task_tag,
                 "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result

    page_html = para_dicts["data"]["1_1"]['html']
    found = re.findall(">共(\d+)页<", page_html)
    total_page = int(found[0]) if found else 1
    page_index = int(callmodel.sql_model.page_index)
    if page_index == 1:
        # First page: schedule every remaining pagination page.
        sql_dict = deal_sql_dict(callmodel.sql_model.dict())
        di_model_bef = DealInsertModel()
        di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
        list_json = json.loads(callmodel.sql_model.list_json)
        for page in range(page_index + 1, total_page + 1):
            sql_dict["page"] = total_page
            sql_dict["page_index"] = page
            sql_dict["list_json"] = callmodel.sql_model.list_json
            di_model_bef.lists.append(sql_dict.copy())
        result.befor_dicts.insert.append(di_model_bef)

    di_model_next = DealInsertModel()
    di_model_next.insert_pre = CoreSqlValue.insert_ig_it
    res = Selector(text=page_html)
    base_url = f'https://jyt.ln.gov.cn/{callmodel.sql_model.list_rawid}index.shtml'
    for item in res.xpath('//div[@class="l-gl-main cl"]/ul/li|//ul[@class="xxgk_rulzd"]/li'):
        url = parse.urljoin(base_url, item.xpath('a/@href').extract_first())
        row = base_info.copy()
        row["task_tag"] = row.pop("task_tag_next")
        row["rawid"] = url.split('/')[-2]
        row["sub_db_id"] = '99748'
        article_json = {
            "url": url,
            "title": item.xpath('a/text()').extract_first().strip(),
            "pub_date": item.xpath('span/text()').extract_first().strip(),
        }
        row["article_json"] = json.dumps(article_json, ensure_ascii=False)
        di_model_next.lists.append(row)
    result.next_dicts.insert.append(di_model_next)

    return result


def policy_jytlnarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the Liaoning Education Department; no-op."""
    return DealModel()


def policy_jytlnarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Liaoning Education Department article pages.

    Extracts the title and full text from the downloaded HTML, builds one
    ``policy_latest`` row and one ``policy_fulltext_latest`` row, and writes
    attachment metadata (from both the body and dedicated attachment blocks)
    back onto the task row via ``other_dicts``.

    Raises:
        Exception: if the full-text container cannot be found in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page headline; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//h2[@class="d_title"]//text()|//div[@class="zf_xxgktitle1"]//h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@id="content"]|//div[@class="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # BUGFIX: was a bare, message-less `raise Exception` — include context
        # so the failing article can be identified from the error alone.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99748'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JYTLN"
    zt_provider = "jytlngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments may sit inside the body or in dedicated attachment blocks;
    # collect both and persist them (or an empty JSON object) onto the task row.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, f'(//div[@class="news-attachment"]|//div[@class="gov_downloadsbox"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   辽宁省民政厅 (Liaoning Civil Affairs Department)
def policy_mztlnlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Liaoning Civil Affairs Department.

    Enqueues the remaining pagination pages when handling page 1 (rewriting
    the "-1" page marker inside ``page_info``) and inserts one next-stage
    article task per qualifying list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_info = {"task_name": callmodel.sql_model.task_name,
                 "task_tag": callmodel.sql_model.task_tag,
                 "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result

    page_html = para_dicts["data"]["1_1"]['html']
    found = re.findall('totalpage="(\d+)', page_html)
    total_page = int(found[0]) if found else 1
    page_index = int(callmodel.sql_model.page_index)
    if page_index == 1:
        sql_dict = deal_sql_dict(callmodel.sql_model.dict())
        di_model_bef = DealInsertModel()
        di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
        list_json = json.loads(callmodel.sql_model.list_json)
        for page in range(page_index + 1, total_page + 1):
            sql_dict["page"] = total_page
            sql_dict["page_index"] = page
            page_info = list_json["page_info"].replace("-1", f"-{page}")
            sql_dict["list_json"] = json.dumps({"page_info": page_info}, ensure_ascii=False)
            di_model_bef.lists.append(sql_dict.copy())
        result.befor_dicts.insert.append(di_model_bef)

    di_model_next = DealInsertModel()
    di_model_next.insert_pre = CoreSqlValue.insert_ig_it
    res = Selector(text=page_html)
    base_url = f'https://mzt.ln.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
    for item in res.xpath('//ul[@class="article_list"]/li'):
        url = parse.urljoin(base_url, item.xpath('a/@href').extract_first())
        # Skip non-HTML links.
        if 'htm' not in url:
            continue
        rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
        if rawid == 'index':
            rawid = url.split('/')[-2]
        row = base_info.copy()
        row["task_tag"] = row.pop("task_tag_next")
        row["rawid"] = rawid
        row["sub_db_id"] = '99749'
        article_json = {
            "url": url,
            "title": item.xpath('a/text()').extract_first().strip(),
            # The lzyj channel carries no date column on the list page.
            "pub_date": ('' if '/lzyj/' in callmodel.sql_model.list_rawid
                         else item.xpath('span/text()').extract_first().strip()),
        }
        row["article_json"] = json.dumps(article_json, ensure_ascii=False)
        di_model_next.lists.append(row)
    result.next_dicts.insert.append(di_model_next)

    return result


def policy_mztlnarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the Liaoning Civil Affairs Department; no-op."""
    return DealModel()


def policy_mztlnarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for article pages of the Liaoning Department of Civil
    Affairs (辽宁省民政厅, mzt.ln.gov.cn).

    Extracts title, publication date and full text from the crawled HTML,
    emits rows for ``policy_latest`` and ``policy_fulltext_latest`` and
    records attachment info on the source task row.

    Raises:
        Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the title rendered on the article page; fall back to the
    # title captured from the list page.
    title = ''.join(res.xpath('//div[@class="article_content"]//h1//text()|//div[@class="zf_xxgktitle1"]//text()|//h1[@id="article_content_h1"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    if not pub_date:
        # Some list pages (e.g. /lzyj/) carry no date; recover it from the
        # article page's info bar.
        pub_date_info = ''.join(res.xpath('//div[@class="news-info"]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]

    fulltext_xpath = '//div[@id="content"]|//div[@id="fontzoom"]|//div[@id="ContTextSize"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context so the task can be retried/inspected
        # instead of saving an empty record.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99749'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "MZTLN"
    zt_provider = "mztlngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments can appear inside the body or in the dedicated attachment blocks.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, '(//div[@class="news-attachment"]|//div[@class="articleAttachments"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# Liaoning Provincial Department of Finance (辽宁省财政厅)
def policy_cztlnlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for czt.ln.gov.cn.

    On the first page (page_index == 1) it schedules crawl tasks for every
    remaining list page, then turns each article link on the current page
    into a next-stage task record.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page markup as totalpage="N".
        max_count = re.findall(r'totalpage="(\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 fans out the remaining pages, so each page is
            # scheduled exactly once.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="mod-newslist"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'https://czt.ln.gov.cn/{callmodel.sql_model.list_rawid}-1.shtml'
            url = parse.urljoin(base_url, href)
            # Article URLs end in .../<rawid>/<file>; the directory name is the id.
            rawid = url.split('/')[-2]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99750'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            # The list page shows no date; it is recovered later from the article page.
            article_json["pub_date"] = ''
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cztlnarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for czt.ln.gov.cn; nothing to schedule, so an
    empty :class:`DealModel` is returned."""
    return DealModel()


def policy_cztlnarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for article pages of the Liaoning Provincial Department
    of Finance (辽宁省财政厅, czt.ln.gov.cn).

    Extracts title, publication date and full text, emits rows for
    ``policy_latest`` and ``policy_fulltext_latest`` and records attachment
    info on the source task row.

    Raises:
        Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the title rendered on the article page; fall back to the
    # list-page title.
    title = ''.join(res.xpath('//div[@class="news-content-main"]//h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    if not pub_date:
        # The list page carries no date for this site; recover it from the
        # article page's info bar.
        pub_date_info = ''.join(res.xpath('//div[@class="news-info"]//li[contains(text(),"发布时间")]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]

    fulltext_xpath = '//div[@id="ContTextSize"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context so the task can be retried/inspected.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99750'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CZTLN"
    zt_provider = "cztlngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments can appear inside the body or in the attachment block.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, '(//div[@class="news-attachment"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   辽宁省人力资源和社会保障厅 (Liaoning Dept. of Human Resources and Social Security)
def policy_rstlnlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for rst.ln.gov.cn.

    On page 0 it fans out crawl tasks for the remaining list pages, then turns
    every article entry on the current page into a next-stage task record.
    Two page layouts exist: a table layout under /lrs/ and plain <ul> lists
    elsewhere.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count comes from the site's createPageHTML(<n>, ...) pager call.
        max_count = re.findall("createPageHTML\((\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only page 0 schedules the rest, so each page is queued once.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # Pages 1..total_page-1 use the "<page_info>_<page>" suffix naming.
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f'{list_json["page_info"]}_{page}'}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        if '/lrs/' in callmodel.sql_model.list_rawid:
            # Table layout: skip the header row; columns carry title/no/dates.
            li_list = res.xpath('//table[@class="dataList"]/tr')[1:]
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[2]/a/@href').extract_first()
                base_url = f'http://rst.ln.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                # rawid is the file name without its extension.
                rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99751'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[2]/a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('td[5]/text()').extract_first().strip()
                article_json["pub_no"] = ''.join(li.xpath('td[3]/text()').extract()).strip()
                article_json["written_date"] = ''.join(li.xpath('td[4]/text()').extract()).strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # Plain <ul> list layouts.
            li_list = res.xpath('//div[@class="rsw_dcwj"]//ul/li|//ul[@class="xxgk_rulzd"]/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                base_url = f'http://rst.ln.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99751'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/text()').extract_first().strip()
                # Entries without a date <span> are not articles; skip them.
                if not li.xpath('span/text()').extract_first():
                    continue
                article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_rstlnarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for rst.ln.gov.cn; nothing to schedule, so an
    empty :class:`DealModel` is returned."""
    return DealModel()


def policy_rstlnarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for article pages of the Liaoning Department of Human
    Resources and Social Security (辽宁省人力资源和社会保障厅, rst.ln.gov.cn).

    Extracts title, publication date, document number and written date,
    emits rows for ``policy_latest`` and ``policy_fulltext_latest`` and
    records attachment info on the source task row.

    Raises:
        Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the title rendered on the article page; fall back to the
    # list-page title.
    title = ''.join(res.xpath('//div[@class="news-content-main"]//h1//text()|//div[@class="zf_xxgktitle1"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Document number / written date were captured from the /lrs/ table
    # layout on the list page; absent for the plain-list layout.
    pub_no = article_json.get('pub_no', '')
    written_date = article_json.get('written_date', '')

    fulltext_xpath = '//div[@id="content"]|//div[@id="ContTextSize"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context so the task can be retried/inspected.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99751'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "RSTLN"
    zt_provider = "rstlngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments can appear inside the body or in the attachment blocks.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, '(//div[@class="news-attachment"]|//div[@class="articleAttachments"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   辽宁省农业农村厅 (Liaoning Department of Agriculture and Rural Affairs)
def policy_nynclnlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for nync.ln.gov.cn.

    On page 0 it fans out crawl tasks for the remaining list pages, then turns
    every article entry on the current page into a next-stage task record.
    The notice channel (tzgg) uses a different <li> markup than the rest.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count is embedded in the page script as "countPage = N".
        max_count = re.findall("countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only page 0 schedules the rest, so each page is queued once.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # Pages 1..total_page-1 use the "<page_info>_<page>" suffix naming.
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f'{list_json["page_info"]}_{page}'}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if '/lrs/' in callmodel.sql_model.list_rawid:
        li_list = res.xpath('//div[@class="workmenu"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('span/a/@href|a/@href').extract_first()
            base_url = f'http://nync.ln.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            # rawid is the file name without its extension.
            rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99752'
            article_json["url"] = url
            if 'tzgg' in callmodel.sql_model.list_rawid:
                # Notice channel: title and date are sibling <span>s.
                article_json["title"] = li.xpath('span[1]/a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('span[2]/text()').extract_first().strip()
            else:
                # Other channels: title and date are <span>s inside the <a>.
                article_json["title"] = li.xpath('a/span[1]/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('a/span[2]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_nynclnarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for nync.ln.gov.cn; nothing to schedule, so an
    empty :class:`DealModel` is returned."""
    return DealModel()


def policy_nynclnarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for article pages of the Liaoning Department of
    Agriculture and Rural Affairs (辽宁省农业农村厅, nync.ln.gov.cn).

    Extracts title, publication date and full text, emits rows for
    ``policy_latest`` and ``policy_fulltext_latest`` and records attachment
    info on the source task row.

    Raises:
        Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the title rendered on the article page; fall back to the
    # list-page title.
    title = ''.join(res.xpath('//p[@class="article_title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@class="article_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context so the task can be retried/inspected.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99752'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "NYNCLN"
    zt_provider = "nynclngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments for this site only appear inside the article body.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# Liaoning Department of Housing and Urban-Rural Development (辽宁省住房和城乡建设厅)
def policy_zjtlnlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for zjt.ln.gov.cn.

    On the first page (page_index == 1) it schedules crawl tasks for every
    remaining list page, then turns each article entry on the current page
    (several list layouts) into a next-stage task record.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page markup as totalpage="N".
        max_count = re.findall(r'totalpage="(\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 fans out the remaining pages, so each page is
            # scheduled exactly once.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Three list layouts share this callback; the xpath unions cover all.
        li_list = res.xpath('//ul[@class="wjzy-lb-mode"]/li|//ul[@class="tp-lb-mode"]/li|//ul[@class="xxgk_rulzd"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('p[@class="wjzy-lb-wjm"]/a/@href|p[@class="tp-lb-title"]/a/@href|a/@href').extract_first()
            base_url = f'https://zjt.ln.gov.cn/{callmodel.sql_model.list_rawid}-1.shtml'
            url = parse.urljoin(base_url, href)
            # Article URLs end in .../<rawid>/<file>; the directory name is the id.
            rawid = url.split('/')[-2]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99753'
            article_json["url"] = url
            article_json["title"] = li.xpath('p[@class="wjzy-lb-wjm"]/a/text()|p[@class="tp-lb-title"]/a/text()|a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('p[@class="wjzy-lb-fbrq"]/text()|p[@class="tp-lb-time"]/text()|span/text()').extract_first().strip()
            article_json["pub_no"] = ''.join(li.xpath('p[@class="wjzy-lb-wjh"]/text()').extract()).strip()
            # The written-date cell shows placeholder "年月日" when empty; drop it.
            article_json["written_date"] = ''.join(li.xpath('p[@class="wjzy-lb-cwrq"]/text()').extract()).replace('年月日','').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_zjtlnarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for zjt.ln.gov.cn; nothing to schedule, so an
    empty :class:`DealModel` is returned."""
    return DealModel()


def policy_zjtlnarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for article pages of the Liaoning Department of Housing
    and Urban-Rural Development (辽宁省住房和城乡建设厅, zjt.ln.gov.cn).

    Extracts title, publication date, document number and written date,
    emits rows for ``policy_latest`` and ``policy_fulltext_latest`` and
    records attachment info on the source task row.

    Raises:
        Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the title rendered on the article page; fall back to the
    # list-page title.
    title = ''.join(res.xpath('//div[@class="xqy-xl-title"]//text()|//div[@class="zf_xxgktitle1"]//h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Document number / written date were captured from the list page;
    # absent for some layouts.
    pub_no = article_json.get('pub_no', '')
    written_date = article_json.get('written_date', '')

    fulltext_xpath = '//div[@id="content"]|//div[@class="xqy-xl-room"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context so the task can be retried/inspected.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99753'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "ZJTLN"
    zt_provider = "zjtlngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments can appear inside the body or in the attachment blocks.
    # BUG FIX: the attachment selector previously read @cclass="xqy-fjlist",
    # which can never match; corrected to @class.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, '(//div[@class="xqy-fjlist"]|//div[@class="news-attachment"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   辽宁省卫生健康委员会
def policy_wsjklnlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Liaoning Provincial Health Commission site.

    On the first page (page_index == 0) it fans out the remaining list pages
    using the page count embedded in the page's ``countPage = N`` script; on
    every page it extracts one article task per list item.

    :param callmodel: callback model carrying the fetched list HTML and task row.
    :return: DealModel whose befor_dicts holds new list-page tasks and whose
        next_dicts holds the per-article tasks.
    """
    deal = DealModel()
    params = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_info = {"task_name": callmodel.sql_model.task_name,
                 "task_tag": callmodel.sql_model.task_tag,
                 "task_tag_next": task_info.task_tag_next}
    if "1_1" not in params["data"]:
        return deal

    html = params["data"]["1_1"]['html']
    found = re.findall('countPage = (\d+)', html)
    total_page = int(found[0]) if found else 1
    page_index = int(callmodel.sql_model.page_index)
    if page_index == 0:
        # Only the first page enqueues the follow-up list pages.
        row = deal_sql_dict(callmodel.sql_model.dict())
        page_insert = DealInsertModel()
        page_insert.insert_pre = CoreSqlValue.insert_ig_it
        list_json = json.loads(callmodel.sql_model.list_json)
        for page_no in range(page_index + 1, total_page):
            row["page"] = total_page
            row["page_index"] = page_no
            row["list_json"] = json.dumps(
                {"page_info": f'{list_json["page_info"]}_{page_no}'},
                ensure_ascii=False)
            # copy() so each queued row keeps its own page values
            page_insert.lists.append(row.copy())
        deal.befor_dicts.insert.append(page_insert)

    article_insert = DealInsertModel()
    article_insert.insert_pre = CoreSqlValue.insert_ig_it
    selector = Selector(text=html)
    items = selector.xpath('//div[@class="jgsz_right_con"]/ul/li|//ul[@class="xxgk_rulzd"]/li')
    for item in items:
        record = base_info.copy()
        record["task_tag"] = record.pop("task_tag_next")
        href = item.xpath('a/@href').extract_first()
        base_url = f'http://wsjk.ln.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
        url = parse.urljoin(base_url, href)
        # rawid = file name without extension, used for dedup downstream.
        record["rawid"] = re.findall('(.*?)\.', url.split('/')[-1])[0]
        record["sub_db_id"] = '99754'
        if 'fdzdgknr' in callmodel.sql_model.list_rawid:
            pub_date = item.xpath('span/text()').extract_first().strip()
        else:
            pub_date = ''
        article_json = dict()
        article_json["url"] = url
        article_json["title"] = item.xpath('a/text()').extract_first().strip()
        article_json["pub_date"] = pub_date
        record["article_json"] = json.dumps(article_json, ensure_ascii=False)
        article_insert.lists.append(record)
    deal.next_dicts.insert.append(article_insert)

    return deal


def policy_wsjklnarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback; all parsing is deferred to the ETL stage."""
    return DealModel()


def policy_wsjklnarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Liaoning Provincial Health Commission article pages.

    Parses title and publish date from the article HTML, emits the metadata
    row and the full-text row, and records attachment info back on the task.

    Fixes over the previous revision: removed a dead store of ``title`` that
    was unconditionally overwritten, gave the failure ``raise`` a message,
    and dropped a placeholder-less f-string plus commented-out dead code.

    :param callmodel: callback model carrying the fetched HTML (para_dicts)
        and the queued task row (sql_model).
    :return: EtlDealModel with save_data and the befor_dicts update.
    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the one captured on the list page.
    title = ''.join(res.xpath('//div[@class="news-content-main"]//h1//text()|//div[@class="zf_xxgktitle1"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    if not pub_date:
        # The list page had no date: pull it from the article's info bar.
        pub_date_info = ''.join(res.xpath('//div[@class="news-info"]//text()').extract()).strip()
        pub_date_info = pub_date_info.split('发布时间')[-1]
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]

    fulltext_xpath = '//div[@id="content"]|//div[@id="ContTextSize"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99754'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "WSJKLN"
    zt_provider = "wsjklngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Collect attachments from both the article body and the attachment box.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, '(//div[@class="news-attachment"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   辽宁省沈阳市
def policy_shenyanglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Shenyang municipal government site.

    On the first page (page_index == 0) it enqueues the remaining list pages
    based on the page count emitted by the site's createPageHTML(...) JS call;
    on every page it extracts one article task per list entry.

    :param callmodel: callback model carrying the fetched list HTML and task row.
    :return: DealModel whose befor_dicts holds new list-page tasks and whose
        next_dicts holds the per-article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count comes from the site's createPageHTML(N, ...) call.
        max_count = re.findall('createPageHTML\((\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only the first page fans out the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # NOTE(review): range(..., total_page) never emits the last page
            # index — confirm whether this matches the site's 0-based paging.
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f'{list_json["page_info"]}_{page}'}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                # copy() so each queued row keeps its own page values
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if '/tfwj/' in callmodel.sql_model.list_rawid:
        # Three list layouts are supported: "list-sp" cards and two <ul> styles.
        li_list = res.xpath('//div[@class="list-sp"]/div|//ul[@class="xxgk_rul"]/li|//ul[@class="xsy_rul1"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('div[@class="title"]/a/@href|a/@href').extract_first()
            base_url = f'http://www.shenyang.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            # Skip entries that do not point at a static .htm article page.
            if 'htm' not in url:
                continue
            # rawid = file name without extension, used for dedup downstream.
            rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99755'
            article_json["url"] = url
            article_json["title"] = li.xpath('div[@class="title"]/a/text()|a/text()').extract_first().strip()
            pub_date = li.xpath('a/span/text()|span/text()').extract_first()
            if not pub_date:
                # Card layout keeps the date in a dedicated "time_pub" div.
                pub_date = ''.join(li.xpath('div[@class="time_pub"]/text()').extract())
            article_json["pub_date"] = pub_date.strip()
            article_json["pub_no"] = ''.join(li.xpath('div[@class="futi"]/text()').extract()).strip()
            article_json["written_date"] = ''.join(li.xpath('div[@class="time_gen"]/text()').extract()).strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_shenyangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback; all parsing is deferred to the ETL stage."""
    return DealModel()


def policy_shenyangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Shenyang municipal government article pages.

    Parses title/publish date from the article HTML (pub_no and written_date
    were captured on the list page), emits the metadata and full-text rows,
    and records attachment info back on the task row.

    Fixes over the previous revision: removed a dead store of ``title`` that
    was unconditionally overwritten, gave the failure ``raise`` a message,
    and dropped a placeholder-less f-string plus commented-out dead code.

    :param callmodel: callback model carrying the fetched HTML (para_dicts)
        and the queued task row (sql_model).
    :return: EtlDealModel with save_data and the befor_dicts update.
    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the one captured on the list page.
    title = ''.join(res.xpath('//div[@class="dlist_title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    if not pub_date:
        # The list page had no date: pull it from the article's info bar.
        pub_date_info = ''.join(res.xpath('//div[@class="lysj"]/span[contains(text(),"时间：")]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]

    # pub_no / written_date were collected on the list page, if present.
    pub_no = article_json.get('pub_no', '')
    written_date = article_json.get('written_date', '')

    fulltext_xpath = '//div[contains(@class,"dlist_con")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99755'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "SHENYANG"
    zt_provider = "shenyanggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Collect attachments from both the article body and the attachment box.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, '(//div[@class="news-attachment"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   辽宁省大连市
def policy_dllist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Dalian site's XML record feed.

    The feed reports the total record count in <totalrecord>; records are
    requested in windows of three 15-record pages, with 1-based start/end
    offsets stored in list_json. Every <record> element yields one article
    task.

    :param callmodel: callback model carrying the fetched feed XML and task row.
    :return: DealModel whose befor_dicts holds new list-page tasks and whose
        next_dicts holds the per-article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall('<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        # 15 records per page; pages are requested three at a time below.
        total_page = math.ceil(max_count / 15)

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out the remaining request windows.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # Step by 3: each queued task covers a 3-page (45-record) window.
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # 1-based record offsets for this window, clamped to max_count.
                start = (page - 1) * 15 + 1
                end = (page + 2) * 15
                if end >= max_count:
                    end = max_count
                dic = {"start": start, "end": end, "page_info": list_json['page_info']}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # copy() so each queued row keeps its own window values
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'https://www.dl.gov.cn/col/col966/index.html'
            url = parse.urljoin(base_url, href)
            # Skip entries that do not point at a static .htm article page.
            if 'htm' not in url:
                continue
            # rawid = file name without extension, used for dedup downstream.
            rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99756'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/div[1]/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('a/div[2]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_dllist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Dalian site's paged HTML listing.

    On the first page (page_index == 1) it re-queues every list page (the
    original list_json is reused verbatim); on every page it extracts one
    article task per <li> entry.

    :param callmodel: callback model carrying the fetched list HTML and task row.
    :return: DealModel whose befor_dicts holds new list-page tasks and whose
        next_dicts holds the per-article tasks.
    """
    deal = DealModel()
    params = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_info = {"task_name": callmodel.sql_model.task_name,
                 "task_tag": callmodel.sql_model.task_tag,
                 "task_tag_next": task_info.task_tag_next}
    if "1_1" not in params["data"]:
        return deal

    html = params["data"]["1_1"]['html']
    found = re.findall('共&nbsp;(\d+)&nbsp;页', html)
    total_page = int(found[0]) if found else 1
    page_index = int(callmodel.sql_model.page_index)
    if page_index == 1:
        # Only the first page fans out the full set of list pages.
        row = deal_sql_dict(callmodel.sql_model.dict())
        page_insert = DealInsertModel()
        page_insert.insert_pre = CoreSqlValue.insert_ig_it
        list_json = json.loads(callmodel.sql_model.list_json)
        for page_no in range(1, total_page + 1):
            row["page"] = total_page
            row["page_index"] = page_no
            row["list_json"] = callmodel.sql_model.list_json
            # copy() so each queued row keeps its own page values
            page_insert.lists.append(row.copy())
        deal.befor_dicts.insert.append(page_insert)

    article_insert = DealInsertModel()
    article_insert.insert_pre = CoreSqlValue.insert_ig_it
    selector = Selector(text=html)
    for item in selector.xpath('//div[@class="zfxxgk_zdgkc"]/ul/li'):
        record = base_info.copy()
        record["task_tag"] = record.pop("task_tag_next")
        href = item.xpath('a/@href').extract_first()
        # Absolute links are kept as-is; relative ones get the site prefix.
        url = href if 'http' in href else 'https://www.dl.gov.cn' + href
        if 'htm' not in url:
            continue
        record["rawid"] = re.findall('(.*?)\.', url.split('/')[-1])[0]
        record["sub_db_id"] = '99756'
        article_json = dict()
        article_json["url"] = url
        article_json["title"] = item.xpath('a/text()').extract_first().strip()
        article_json["pub_date"] = item.xpath('b/text()').extract_first().strip()
        record["article_json"] = json.dumps(article_json, ensure_ascii=False)
        article_insert.lists.append(record)
    deal.next_dicts.insert.append(article_insert)

    return deal


def policy_dlarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback; all parsing is deferred to the ETL stage."""
    return DealModel()


def policy_dlarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Dalian municipal government article pages.

    Extracts title, document metadata (pub_no, index_no, written_date,
    legal_status, organ) and the full text, then emits the metadata and
    full-text rows plus an attachment-info update for the task row.

    Fixes over the previous revision: the ``organ`` xpath began with '///',
    which is invalid XPath and raised on every article; ``pub_date`` was run
    through clean_pubdate twice; ``title`` had a dead initial store; the
    failure ``raise`` carried no message.

    :param callmodel: callback model carrying the fetched HTML (para_dicts)
        and the queued task row (sql_model).
    :return: EtlDealModel with save_data and the befor_dicts update.
    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the one captured on the list page.
    title = ''.join(res.xpath('//div[@class="htitle"]//text()|//div[@class="article-content"]//h3//text()|//div[@class="article-body"]/h2//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Document metadata lives in the "gk-bq" info list.
    pub_no = ''.join(res.xpath('//div[@class="gk-bq"]//li[contains(text(),"文件编号:")]/following::li[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="gk-bq"]//li[contains(text(),"索引号:")]/following::li[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="gk-bq"]//li[contains(text(),"成文日期:")]/following::li[1]/span/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="gk-bq"]//li[contains(text(),"有效性:")]/following::li[1]/text()').extract()).strip()
    # BUG FIX: this expression previously started with '///', which is not
    # valid XPath (lxml raises "Invalid expression") — '//' is the intent.
    organ = ''.join(res.xpath('//div[@class="gk-bq"]//li[contains(text(),"发布机构:")]/following::li[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # Qualify bare "市..." issuer names with the city name.
        organ = '大连' + organ

    fulltext_xpath = '//div[@class="art_con"]|//div[@id="zoom"]|//div[@class="article-body"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99756'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "DL"
    zt_provider = "dlgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    # pub_date was already normalised above; no need to clean it twice.
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment links found inside the article body on the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   辽宁省鞍山市
def policy_anshanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Anshan municipal government list (ul layout).

    On the first page (page_index == 0) it fans out the remaining list pages
    using the page count from the pager text; on every page it extracts one
    article task per list item.

    :param callmodel: callback model carrying the fetched list HTML and task row.
    :return: DealModel whose befor_dicts holds new list-page tasks and whose
        next_dicts holds the per-article tasks.
    """
    deal = DealModel()
    params = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_info = {"task_name": callmodel.sql_model.task_name,
                 "task_tag": callmodel.sql_model.task_tag,
                 "task_tag_next": task_info.task_tag_next}
    if "1_1" not in params["data"]:
        return deal

    html = params["data"]["1_1"]['html']
    found = re.findall("第 \d+/(\d+) 页", html)
    total_page = int(found[0]) if found else 1
    page_index = int(callmodel.sql_model.page_index)
    if page_index == 0:
        # Only the first page enqueues the follow-up list pages.
        row = deal_sql_dict(callmodel.sql_model.dict())
        page_insert = DealInsertModel()
        page_insert.insert_pre = CoreSqlValue.insert_ig_it
        list_json = json.loads(callmodel.sql_model.list_json)
        for page_no in range(page_index + 1, total_page):
            row["page"] = total_page
            row["page_index"] = page_no
            row["list_json"] = json.dumps(
                {"page_info": f'{list_json["page_info"]}{page_no}'},
                ensure_ascii=False)
            # copy() so each queued row keeps its own page values
            page_insert.lists.append(row.copy())
        deal.befor_dicts.insert.append(page_insert)

    article_insert = DealInsertModel()
    article_insert.insert_pre = CoreSqlValue.insert_ig_it
    selector = Selector(text=html)
    for item in selector.xpath('//ul[@class="zf-listwrap1"]/li'):
        record = base_info.copy()
        record["task_tag"] = record.pop("task_tag_next")
        href = item.xpath('a/@href').extract_first()
        base_url = f'http://www.anshan.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
        url = parse.urljoin(base_url, href)
        # rawid = file name without extension, used for dedup downstream.
        record["rawid"] = re.findall('(.*?)\.', url.split('/')[-1])[0]
        record["sub_db_id"] = '99757'
        article_json = dict()
        article_json["url"] = url
        article_json["title"] = item.xpath('a/text()').extract_first().strip()
        article_json["pub_date"] = item.xpath('span/text()').extract_first().strip()
        record["article_json"] = json.dumps(article_json, ensure_ascii=False)
        article_insert.lists.append(record)
    deal.next_dicts.insert.append(article_insert)

    return deal


def policy_anshanlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Anshan's jmesa policy table.

    On the first page (page_index == 0) it re-queues the remaining list pages
    (the original list_json is reused verbatim); on every page it extracts one
    article task per table row.

    :param callmodel: callback model carrying the fetched list HTML and task row.
    :return: DealModel whose befor_dicts holds new list-page tasks and whose
        next_dicts holds the per-article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count comes from the "第X/N页" pager text.
        max_count = re.findall("第\d+/(\d+)页", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only the first page fans out the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # NOTE(review): range(..., total_page) never emits the last page
            # index — confirm whether this matches the site's paging scheme.
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # dic = {"page_ino": f'{list_json["page_ino"]}_{page}'}
                # sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                sql_dict["list_json"] = callmodel.sql_model.list_json
                # copy() so each queued row keeps its own page values
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if '/zc/' in callmodel.sql_model.list_rawid:
        li_list = res.xpath('//table[@id="jmesa"]/tbody/tr')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            # Column layout used below: td[2]=title link, td[3]=doc number,
            # td[4]=written date, td[5]=publish date.
            href = li.xpath('td[2]/a/@href').extract_first()
            base_url = f'http://www.anshan.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            # Skip entries that do not point at a static .htm article page.
            if 'htm' not in url:
                continue
            # rawid = file name without extension, used for dedup downstream.
            rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99757'
            article_json["url"] = url
            article_json["title"] = li.xpath('td[2]/a/span/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('td[5]/text()').extract_first().strip()
            article_json["pub_no"] = ''.join(li.xpath('td[3]/text()').extract()).strip()
            article_json["written_date"] = ''.join(li.xpath('td[4]/text()').extract()).strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_anshanlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Anshan's policy CMS listing ("xiug-infore" layout).

    On the first page (page_index == 1) it re-queues the remaining list pages
    (the original list_json is reused verbatim); on every page it extracts one
    article task per list entry. Publish dates are not present on this list
    and are left empty for the article stage.

    :param callmodel: callback model carrying the fetched list HTML and task row.
    :return: DealModel whose befor_dicts holds new list-page tasks and whose
        next_dicts holds the per-article tasks.
    """
    deal = DealModel()
    params = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_info = {"task_name": callmodel.sql_model.task_name,
                 "task_tag": callmodel.sql_model.task_tag,
                 "task_tag_next": task_info.task_tag_next}
    if "1_1" not in params["data"]:
        return deal

    html = params["data"]["1_1"]['html']
    found = re.findall(">共(\d+)页<", html)
    total_page = int(found[0]) if found else 1
    page_index = int(callmodel.sql_model.page_index)
    if page_index == 1:
        # Only the first page enqueues the follow-up list pages.
        row = deal_sql_dict(callmodel.sql_model.dict())
        page_insert = DealInsertModel()
        page_insert.insert_pre = CoreSqlValue.insert_ig_it
        list_json = json.loads(callmodel.sql_model.list_json)
        for page_no in range(page_index + 1, total_page):
            row["page"] = total_page
            row["page_index"] = page_no
            row["list_json"] = callmodel.sql_model.list_json
            # copy() so each queued row keeps its own page values
            page_insert.lists.append(row.copy())
        deal.befor_dicts.insert.append(page_insert)

    article_insert = DealInsertModel()
    article_insert.insert_pre = CoreSqlValue.insert_ig_it
    selector = Selector(text=html)
    for item in selector.xpath('//div[@class="xiug-infore"]/ul/li'):
        record = base_info.copy()
        record["task_tag"] = record.pop("task_tag_next")
        href = item.xpath('div[2]/h1/a/@href').extract_first()
        url = parse.urljoin(f'http://cms.anshan.gov.cn/policy/policyList.ct', href)
        if 'htm' not in url:
            continue
        record["rawid"] = re.findall('(.*?)\.', url.split('/')[-1])[0]
        record["sub_db_id"] = '99757'
        article_json = dict()
        article_json["url"] = url
        article_json["title"] = item.xpath('div[2]/h1/a/text()').extract_first().strip()
        article_json["pub_date"] = ''
        record["article_json"] = json.dumps(article_json, ensure_ascii=False)
        article_insert.lists.append(record)
    deal.next_dicts.insert.append(article_insert)

    return deal


def policy_anshanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback; all parsing is deferred to the ETL stage."""
    return DealModel()


def policy_anshanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Anshan (Liaoning) policy article pages.

    Extracts metadata (title, dates, document number) from the fetched
    article HTML combined with the ``article_json`` saved at list time,
    queues rows for the ``policy_latest`` and ``policy_fulltext_latest``
    tables, and writes attachment info back onto the source task row.

    Raises:
        Exception: when no fulltext container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: page header first, <meta> tag next, list-page title last.
    title = ''.join(res.xpath('//div[@class="zf-centertitle"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    if not pub_date:
        # Fall back to the page's PubDate meta when the list gave no date.
        pub_date_info = ''.join(res.xpath('//meta[@name="PubDate"]/@content').extract()).strip()
        if pub_date_info:
            pub_date = clean_pubdate(pub_date_info)
            pub_year = pub_date[:4]
    pub_no = article_json.get('pub_no', '')
    written_date = article_json.get('written_date', '')

    fulltext_xpath = '//div[@class="zf-centercent"]|//div[@class="text"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Layout changed or an empty page was fetched: fail the task loudly.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99757'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "ANSHAN"
    zt_provider = "anshangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace kept from the original pipeline

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Store attachment info (or an empty dict) back on the article task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   辽宁省抚顺市
def policy_fushunlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Fushun (Liaoning) policy crawl (JSON API).

    Parses the JSON list response (20 records per page): on the first
    page (page_index == 0) it fans out follow-up list tasks for every
    remaining page, and on every page it emits one article task per
    list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        tcount = html_json['custom']['total']
        # The API returns 20 records per page.
        total_page = math.ceil(tcount / 20)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: schedule the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        base_url = 'https://www.fushun.gov.cn/dynamic/zwgk/govInfoPub.html'
        li_list = html_json['custom']['data']
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li['infourl']
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid is the URL's file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99758'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li['infodate']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_fushunlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Secondary list-page callback for Fushun (Liaoning): static HTML lists.

    Reads the record total from an inline "total: N" value (20 records
    per page); on the first page (page_index == 1) it schedules the
    remaining list pages, and on every page it emits one article task
    per <li> entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r"total: (\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 20)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: schedule the remaining list pages, each with its
            # page number stored in list_json for the fetcher.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        base_url = f'https://www.fushun.gov.cn/{callmodel.sql_model.list_rawid}/2.html'
        li_list = res.xpath('//ul[@id="list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid is the URL's file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99758'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('div/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_fushunarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for Fushun: no follow-up scheduling is needed,
    so an empty DealModel is returned as-is."""
    return DealModel()


def policy_fushunarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Fushun (Liaoning) policy article pages.

    Extracts metadata (title, issuing organ, document number, dates)
    from the fetched article HTML combined with the ``article_json``
    saved at list time, queues rows for the ``policy_latest`` and
    ``policy_fulltext_latest`` tables, and writes attachment info back
    onto the source task row.

    Raises:
        Exception: when no fulltext container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title from either article layout; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="ewb-article box-shadow"]//h3//text()|//h1[@id="ivs_title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="main-tb"]//td[contains(text(),"文号:")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="main-tb"]//td[contains(text(),"成文日期:")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="main-tb"]//td[contains(text(),"发布机构:")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # Qualify bare "市..." agency names with the city name.
        organ = '抚顺' + organ

    fulltext_xpath = '//div[@class="mian-cont"]|//div[@class="ewb-article-info"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Layout changed or an empty page was fetched: fail the task loudly.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99758'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "FUSHUN"
    zt_provider = "fushungovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace kept from the original pipeline

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Store attachment info (or an empty dict) back on the article task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   辽宁省本溪市
def policy_benxilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Benxi (Liaoning) policy crawl.

    Reads the page count from the paginator text; on the first page
    (page_index == 1) schedules the remaining list pages, and on every
    page emits one article task per list entry.  Government-document
    columns (list_rawid containing '/zfwj/') use a table layout; other
    columns use a <ul class="newsList"> layout.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'/共(\d+)页<', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: schedule the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        base_url = f'https://www.benxi.gov.cn/{callmodel.sql_model.list_rawid}'
        if '/zfwj/' in callmodel.sql_model.list_rawid:
            # Table layout; skip the header row.
            li_list = res.xpath('//table[@class="publicTable"]/tr')[1:]
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[2]/a/@href').extract_first()
                url = parse.urljoin(base_url, href)
                # rawid is the last path segment, extension included.
                rawid = url.split('/')[-1]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99759'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[2]/a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('td[5]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            li_list = res.xpath('//ul[@class="newsList"]/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                url = parse.urljoin(base_url, href)
                rawid = url.split('/')[-1]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99759'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_benxiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for Benxi: no follow-up scheduling is needed,
    so an empty DealModel is returned as-is."""
    return DealModel()


def policy_benxiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Benxi (Liaoning) policy article pages.

    Extracts the full open-government metadata table (document number,
    index number, subject, dates, validity, issuing organ) from the
    fetched article HTML combined with the ``article_json`` saved at
    list time, queues rows for the ``policy_latest`` and
    ``policy_fulltext_latest`` tables, and writes attachment info back
    onto the source task row.

    Raises:
        Exception: when no fulltext container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title from the page header; fall back to the list-page title.
    title = ''.join(res.xpath('//h2[@class="title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata lives in a labelled table inside div.ueditortable.
    pub_no = ''.join(res.xpath('//div[@class="ueditortable"]//td[contains(text(),"文 号：")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="ueditortable"]//td[contains(text(),"成文日期：")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="ueditortable"]//td[contains(text(),"索引号：")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="ueditortable"]//td[contains(text(),"主题分类：")]/following::td[1]/text()').extract()).strip()
    subject_word = ''.join(res.xpath('//div[@class="ueditortable"]//td[contains(text(),"关键词：")]/following::td[1]/text()').extract()).strip()
    invalid_date = ''.join(res.xpath('//div[@class="ueditortable"]//td[contains(text(),"废止日期：")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="ueditortable"]//td[contains(text(),"时效：")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="ueditortable"]//td[contains(text(),"发布机构：")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # Qualify bare "市..." agency names with the city name.
        organ = '本溪' + organ

    fulltext_xpath = '//div[@class="conTxt"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Layout changed or an empty page was fetched: fail the task loudly.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99759'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "BENXI"
    zt_provider = "benxigovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace kept from the original pipeline

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Store attachment info (or an empty dict) back on the article task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   辽宁省丹东市
def policy_dandonglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Dandong (Liaoning) policy crawl.

    Reads the total page count from the "第 X/Y 页" paginator text; on
    the first page (page_index == 0) schedules the remaining list pages
    (appending the page number to the stored page_info prefix), and on
    every page emits one article task per list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r"第 \d+/(\d+) 页", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f'{list_json["page_info"]}{page}'}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        base_url = f'https://www.dandong.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
        li_list = res.xpath('//ul[@class="main-list"]/li|//ul[@class="list1"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            url = parse.urljoin(base_url, href)
            # rawid is the URL's file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99760'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/@title').extract_first().strip()
            article_json["pub_date"] = li.xpath('a/span/text()|span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_dandonglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Secondary list-page callback for Dandong (Liaoning): jmesa tables.

    Reads the total page count from the "第X/Y页" paginator text; on the
    first page (page_index == 0) schedules the remaining list pages, and
    on every page emits one article task per table row, carrying the
    document number and written date straight from the row cells.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r"第\d+/(\d+)页", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        base_url = f'https://www.dandong.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
        li_list = res.xpath('//table[@id="jmesa"]/tbody/tr')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            # NOTE(review): extract_first() can return None for malformed
            # rows, which would raise here — confirm every row has a link.
            href = li.xpath('td[2]/a/@href').extract_first()
            if 'javascript:' in href:
                continue
            url = parse.urljoin(base_url, href)
            # rawid is the URL's file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99760'
            article_json["url"] = url
            article_json["title"] = li.xpath('td[2]/a/span/@title').extract_first().strip()
            article_json["pub_date"] = li.xpath('td[5]/text()').extract_first().strip()
            article_json["pub_no"] = ''.join(li.xpath('td[3]/text()').extract()).strip()
            article_json["written_date"] = ''.join(li.xpath('td[4]/text()').extract()).strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_dandongarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for Dandong: no follow-up scheduling is needed,
    so an empty DealModel is returned as-is."""
    return DealModel()


def policy_dandongarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Dandong (Liaoning) policy article pages.

    Extracts metadata (title, dates, document number) from the fetched
    article HTML combined with the ``article_json`` saved at list time,
    queues rows for the ``policy_latest`` and ``policy_fulltext_latest``
    tables, and writes attachment info back onto the source task row.

    Raises:
        Exception: when no fulltext container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: page header first, <meta> tag next, list-page title last.
    title = ''.join(res.xpath('//div[@class="center-title"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # pub_no / written_date were captured from the jmesa list table.
    pub_no = article_json.get('pub_no', '')
    written_date = article_json.get('written_date', '')

    fulltext_xpath = '//div[@class="center-info"]|//div[@class="text"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Layout changed or an empty page was fetched: fail the task loudly.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99760'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "DANDONG"
    zt_provider = "dandonggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace kept from the original pipeline

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Store attachment info (or an empty dict) back on the article task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   辽宁省锦州市
def policy_jzlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Jinzhou (Liaoning) policy crawl.

    The site paginates backwards: the "next" link on the first page
    carries the highest page number, and page N is fetched as
    ``/<total_page - N>.htm``.  On the first page (page_index == 0) the
    remaining pages are scheduled; on every page one article task is
    emitted per entry.  Document columns (list_rawid containing 'wj')
    use a table layout, the rest a <ul> list layout.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # NOTE(review): the '.' before 'htm' is unescaped and matches any
        # character — pattern kept as-is; confirm it is intended.
        max_count = re.findall(r'p_next p_fun.*?(\d+).htm', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count + 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Page numbers count down on this site.
                dic = {"page_info": f"/{total_page-page}.htm"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # The first page lives at <list_rawid>.htm, later ones in a subdir.
        if page_index == 0:
            base_url = f'http://www.jz.gov.cn/{callmodel.sql_model.list_rawid}.htm'
        else:
            base_url = f'http://www.jz.gov.cn/{callmodel.sql_model.list_rawid}/1.htm'
        if 'wj' in callmodel.sql_model.list_rawid:
            li_list = res.xpath('//div[@class="govnewslista1655"]/table/tr')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[1]/a/@href').extract_first()
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                # rawid is the URL's file name without its extension.
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99761'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[1]/a/@title').extract_first().strip()
                article_json["pub_date"] = li.xpath('td[4]/text()').extract_first().strip()
                article_json["pub_no"] = ''.join(li.xpath('td[2]/text()').extract()).strip()
                article_json["written_date"] = ''.join(li.xpath('td[3]/text()').extract()).strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            li_list = res.xpath('//ul[@class="list_lmul"]/li|//div[@class="list-box"]/ul/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99761'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/@title').extract_first().strip()
                article_json["pub_date"] = li.xpath('label/text()|span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jzarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the JZ site: no follow-up work, return an empty deal."""
    return DealModel()


def policy_jzarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for 'JZ' policy article pages (sub_db_id 99761).

    Title/pub_no/written_date were captured at list time and are read from
    ``article_json``; only the full text is extracted from the downloaded
    detail page.  Emits rows for the ``policy_latest`` and
    ``policy_fulltext_latest`` tables and writes attachment info (from the
    full text or a separate "附件" list) back onto the source record's
    ``other_dicts`` column.

    Raises:
        Exception: if the full-text container is missing from the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Fields below come from the list page; organ/index_no/subject are not
    # extracted for this site.
    pub_no = article_json.get('pub_no', '')
    written_date = article_json.get('written_date', '')

    fulltext_xpath = '//div[contains(@id,"vsb_content")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Raise with context so the failure is diagnosable (was a bare Exception).
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99761'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JZ"
    zt_provider = "jzgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments may live inside the full text or in a separate "附件" list.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, '(//li[contains(text(),"附件")]/parent::ul[1])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Yingkou, Liaoning Province
def policy_yingkoulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Yingkou city policy documents (sub_db_id 99762).

    The list endpoint returns JSON (15 entries per page, 0-based page
    numbering).  On page 0 the remaining list pages are scheduled, then one
    article task is emitted per entry; entries whose URL lacks 'htm' are
    skipped.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        tcount = html_json['custom']['total']
        total_page = math.ceil(tcount / 15)  # 15 entries per list page
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue pages 1..total_page-1 (0-based numbering).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = html_json['custom']['data']
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li['infourl']
            base_url = 'http://www.yingkou.gov.cn/dynamic/zw/publicinfo-search.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid = detail file name without its extension (raw string
            # avoids the invalid "\." escape warning on Python >= 3.12).
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99762'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li['infodate']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_yingkoulist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Secondary list-page callback for Yingkou (sub_db_id 99758).

    The total item count is scraped from an inline "total: N" script value
    (15 items per page, 1-based page numbering).  Page 1 schedules the
    remaining pages, then one article task is emitted per ``#infolist``
    entry; URLs lacking 'htm' are skipped.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r"total: (\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 15)  # 15 items per list page
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: enqueue list pages 2..total_page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@id="infolist"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.yingkou.gov.cn/{callmodel.sql_model.list_rawid}/about.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid = detail file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99758'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('.//span[@class="ewb-list-date"]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_yingkouarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Yingkou: no follow-up work, return an empty deal."""
    return DealModel()


def policy_yingkouarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Yingkou policy article pages (sub_db_id 99762).

    Extracts title, pub_no, subject, written_date and organ from the detail
    page (falling back to the list-time title), plus the full text.  Emits
    rows for ``policy_latest`` and ``policy_fulltext_latest`` and writes
    attachment info back onto the source record's ``other_dicts`` column.

    Raises:
        Exception: if the full-text container is missing from the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)
    title = ''.join(res.xpath('//h3[@id="ivs_title"]//text()|//h2[@id="qst-name"]//text()').extract()).strip()
    if not title:
        # Fall back to the title captured from the list page.
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="ewb-article-wrap"]//p[contains(text(),"发文字号：")]/text()').extract()).strip()
    pub_no = pub_no.split('：')[-1].strip()
    subject = ''.join(res.xpath('//div[@class="ewb-article-wrap"]//p[contains(text(),"主题分类：")]/text()').extract()).strip()
    subject = subject.split('：')[-1].strip()
    written_date = ''.join(res.xpath('//div[@class="ewb-article-wrap"]//p[contains(text(),"成文日期：")]/text()').extract()).strip()
    written_date = written_date.split('：')[-1].strip()
    organ = ''.join(res.xpath('//div[@class="ewb-article-wrap"]//p[contains(text(),"发布机构：")]/text()').extract()).strip()
    organ = organ.split('：')[-1].strip()
    if organ.startswith('市'):
        # Bare "市..." organ names lack the city; qualify with Yingkou.
        organ = '营口' + organ

    fulltext_xpath = '//div[@id="ivs_content"]|//div[@id="qst-detail"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Raise with context so the failure is diagnosable (was a bare Exception).
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99762'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "YINGKOU"
    zt_provider = "yingkougovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    # pub_date was already cleaned above; the previous double clean_pubdate()
    # call was redundant (sibling callbacks assign the cleaned value directly).
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Fuxin, Liaoning Province
def policy_fuxinlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Fuxin city policy documents (sub_db_id 99763).

    The page count comes from an inline "totalPage = 'N'" script value
    (1-based numbering); page 1 schedules the rest.  Each list row yields
    one article task.  The rawid is the "id=" query value when present,
    otherwise the detail file name without its extension.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r"totalPage = '(\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: enqueue list pages 2..total_page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="list1-ul-link"]')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('div[1]/a/@href').extract_first()
            base_url = f'https://www.fuxin.gov.cn/{callmodel.sql_model.list_rawid}'
            url = parse.urljoin(base_url, href)
            if 'id=' in url:
                # Dynamic detail page: use the query-string id.
                rawid = url.split('id=')[-1]
            else:
                # Static page: file name without its extension.
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99763'
            article_json["url"] = url
            article_json["title"] = li.xpath('div[1]/a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('div[3]/div[2]/text()').extract_first().strip()
            article_json["pub_no"] = ''.join(li.xpath('div[2]/text()').extract()).strip()
            article_json["written_date"] = ''.join(li.xpath('div[3]/div[1]/text()').extract()).strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_fuxinarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Fuxin: no follow-up work, return an empty deal."""
    return DealModel()


def policy_fuxinarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Fuxin policy article pages (sub_db_id 99763).

    Title comes from the detail-page heading (falling back to the list-time
    title); pub_no and written_date were captured at list time.  Emits rows
    for ``policy_latest`` and ``policy_fulltext_latest`` and writes
    attachment info back onto the source record's ``other_dicts`` column.

    Raises:
        Exception: if the full-text container is missing from the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    title = ''.join(res.xpath('//div[@class="DetailcontentTile"]//text()|//div[@id="title"]//text()').extract()).strip()
    if not title:
        # Fall back to the title captured from the list page.
        title = article_json['title'].strip()

    # organ/index_no/subject are not extracted for this site.
    pub_no = article_json.get('pub_no', '')
    written_date = article_json.get('written_date', '')

    fulltext_xpath = '//div[@id="jt_news_content"]|//div[@id="Detailcontent"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Raise with context so the failure is diagnosable (was a bare Exception).
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99763'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "FUXIN"
    zt_provider = "fuxingovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Liaoyang, Liaoning Province
def policy_liaoyanglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Liaoyang city policy documents (sub_db_id 99764).

    The total item count is scraped from an inline 'html", 0, 20, N' script
    fragment (20 items per page, 1-based numbering).  Page 1 schedules the
    remaining pages, then one article task is emitted per list item.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'html", 0, 20, (\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 20)  # 20 items per list page
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: enqueue list pages 2..total_page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[contains(@class,"news-list-items")]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.liaoyang.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            # rawid = detail file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99764'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/@title').extract_first().strip()
            article_json["pub_date"] = li.xpath('a/span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_liaoyanglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Secondary list-page callback for Liaoyang (sub_db_id 99764).

    The list endpoint returns JSON (10 entries per page, 0-based page
    numbering).  On page 0 the remaining pages are scheduled, then one
    article task is emitted per entry (URLs lacking 'htm' are skipped).
    This feed also carries pub_no/written_date/organ for each entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        tcount = html_json['custom']['total']
        total_page = math.ceil(tcount / 10)  # 10 entries per list page
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue pages 1..total_page-1 (0-based numbering).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = html_json['custom']['data']
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li['visiturl']
            base_url = 'http://www.liaoyang.gov.cn/gfxwj_inventory.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid = detail file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99764'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li['infodate']
            article_json["pub_no"] = li['documentnumber']
            article_json["written_date"] = li['handsdate']
            article_json["organ"] = li['deptname']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_liaoyangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Liaoyang: no follow-up work, return an empty deal."""
    return DealModel()


def policy_liaoyangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Liaoyang policy article pages (sub_db_id 99764).

    Title comes from the page heading (falling back to the list-time
    title); pub_no, written_date and organ were captured at list time and
    are read from ``article_json``.  Emits rows for ``policy_latest`` and
    ``policy_fulltext_latest`` and writes attachment info back onto the
    source record's ``other_dicts`` column.

    Raises:
        Exception: if the full-text container is missing from the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    title = ''.join(res.xpath('//h1[@class="title"]//text()').extract()).strip()
    if not title:
        # Fall back to the title captured from the list page.
        title = article_json['title'].strip()
    pub_no = article_json.get('pub_no', '')
    written_date = article_json.get('written_date', '')
    organ = article_json.get('organ', '')
    if organ.startswith('市'):
        # Bare "市..." organ names lack the city; qualify with Liaoyang.
        organ = '辽阳' + organ

    fulltext_xpath = '//div[@class="content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Raise with context so the failure is diagnosable (was a bare Exception).
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99764'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "LIAOYANG"
    zt_provider = "liaoyanggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Panjin, Liaoning Province
def policy_panjinlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Panjin city policy documents (sub_db_id 99765).

    The page count is scraped from an inline "pages:N" script value
    (1-based numbering); page 1 schedules the rest, encoding each follow-up
    page as ``{"page_info": "list-<page>"}``.  Columns whose list_rawid
    contains 1873/1874/1875 use a table layout; all others use a plain
    <ul> list.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'pages:(\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: enqueue list pages 2..total_page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f'list-{page}'}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        if any(key in callmodel.sql_model.list_rawid for key in ('1873', '1874', '1875')):
            # Table layout: skip the header row.
            li_list = res.xpath('//div[@class="main_right_szfwj_list"]/table/tr')[1:]
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[1]/a/@href').extract_first()
                base_url = f'https://www.panjin.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99765'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[1]/a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('td[4]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            li_list = res.xpath('//ul[@class="right_news_list"]/li|//div[@class="dlb_item"]/ul/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                base_url = f'https://www.panjin.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99765'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_panjinarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Panjin: no extra scheduling work is needed,
    so an empty DealModel is returned and all parsing happens in the ETL stage."""
    return DealModel()


def policy_panjinarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Panjin (盘锦) policy articles.

    Parses the fetched article HTML, extracts metadata (title, document
    number, dates, subject, issuing organ) and the full text, and builds
    the rows saved to policy_latest / policy_fulltext_latest.  Attachment
    info found in the full text is written back onto the task row.

    Raises:
        Exception: if the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the title rendered on the article page; fall back to the
    # title captured on the list page.
    title = ''.join(res.xpath('//p[@class="xwbt_txt"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = ''.join(res.xpath('//td[contains(text(),"发文字号：")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//td[contains(text(),"成文日期：")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//td[contains(text(),"索 引 号：")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//td[contains(text(),"主题分类：")]/following::td[1]/text()').extract()).strip()
    subject_word = ''.join(res.xpath('//td[contains(text(),"主 题 词：")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//td[contains(text(),"发文机关：")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # "市..." organ names get the city prefix for disambiguation.
        organ = '盘锦' + organ

    fulltext_xpath = '//div[@class="xwnr_txt"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context instead of a message-less Exception.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99765'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "PANJIN"
    zt_provider = "panjingovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['subject_word'] = subject_word

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (if any) back onto the originating task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   辽宁省铁岭市 (Tieling City, Liaoning Province)
def policy_tielinglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Tieling (铁岭) policy listing pages.

    On page 1 it fans out tasks for the remaining list pages; on every
    page it extracts article links and queues one article task per item.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the HTML as totalpage="N".
        max_count = re.findall(r'totalpage="(\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="list-wenjian links-xxgk-con"]/li|//ul[@class="list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            # Article tasks run under the next stage's tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('span[@class="span6"]/a/@href|a/@href').extract_first()
            if not href:
                continue
            base_url = f'http://www.tieling.gov.cn/{callmodel.sql_model.list_rawid}.html'
            url = parse.urljoin(base_url, href)
            # The article id is the second-to-last path component of the URL.
            rawid = url.split('/')[-2]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99766'
            article_json["url"] = url
            article_json["title"] = ''.join(li.xpath('span[@class="span6"]/a/text()|a/text()').extract()).strip()
            article_json["pub_date"] = li.xpath('span[@class="span4"]/text()|span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_tielingarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Tieling: nothing to schedule here,
    parsing is deferred entirely to the ETL callback."""
    return DealModel()


def policy_tielingarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Tieling (铁岭) policy articles.

    Parses the fetched article HTML, extracts metadata and the full text,
    and builds the rows saved to policy_latest / policy_fulltext_latest.
    Attachment info is written back onto the task row.

    Raises:
        Exception: if the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: page heading -> <title> tag -> list-page title, in that order.
    title = ''.join(res.xpath('//div[@class="content"]//h1//text()|//div[@class="listv_title"]//text()|//div[@class="article-tit"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//title/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = ''.join(res.xpath('//table[@class="bd1"]//b[contains(text(),"发文字号：")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="bd1"]//b[contains(text(),"成文日期：")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="bd1"]//b[contains(text(),"发文机关：")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # "市..." organ names get the city prefix for disambiguation.
        organ = '铁岭' + organ

    if 'zcjd/' in provider_url:
        # Policy-interpretation pages: the body sits in one of two containers.
        # BUGFIX: the alternatives must be unioned with "|"; plain string
        # concatenation produced one nested XPath that matched nothing.
        fulltext_xpath1 = '//div[@class="jiedu fl"]//div[@class="wenjian_content"]'
        fulltext_xpath2 = '//div[@class="tujian"]//div[@class="wenjian_content"]'
        fulltext_xpath = fulltext_xpath1 + '|' + fulltext_xpath2
    else:
        fulltext_xpath = '//div[@class="content_con"]|//div[@class="listv_con"]|//div[@class="wenjian fl"]//div[@class="wenjian_content"]|//div[@class="content-box"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99766'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "TIELING"
    zt_provider = "tielinggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (if any) back onto the originating task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   辽宁省朝阳市 (Chaoyang City, Liaoning Province)
def policy_chaoyanglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Chaoyang (朝阳) static policy listing pages.

    On page 0 it fans out tasks for the remaining list pages (URL-suffix
    pagination via page_info); on every page it extracts article links and
    queues one article task per item.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count appears in the pager text as "第 X/N 页".
        max_count = re.findall(r"第 \d+/(\d+) 页", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Subsequent pages append the page number to page_info.
                dic = {"page_info": f'{list_json["page_info"]}{page}'}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="list-wrap1 yt"]/li|//ul[@class="list1"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            # Article tasks run under the next stage's tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.chaoyang.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            # Skip non-article links (only .htm/.html pages are articles).
            if 'htm' not in url:
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99767'
            article_json["url"] = url
            article_json["title"] = ''.join(li.xpath('a/b/text()|a/text()').extract()).strip()
            article_json["pub_date"] = li.xpath('a/span/text()|span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_chaoyanglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Chaoyang's policyList.ct dynamic listing.

    Page 1 fans out the remaining list pages; every page queues one article
    task per list item.  Title, pub_date and pub_no are captured here from
    the list row because the article page does not repeat them reliably.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count appears in the pager as ">共N页<".
        max_count = re.findall(r">共(\d+)页<", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="xiug-infore"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            # Article tasks run under the next stage's tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('div[2]/h1/a/@href').extract_first()
            base_url = 'http://www.chaoyang.gov.cn/policy/policyList.ct'
            url = parse.urljoin(base_url, href)
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99767'
            article_json["url"] = url
            article_json["title"] = li.xpath('div[2]/h1/a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('div[4]/p/text()').extract_first().strip()
            article_json["pub_no"] = ''.join(li.xpath('div[3]/p/text()').extract()).strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_chaoyangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Chaoyang: no scheduling work required;
    all extraction happens in the ETL callback."""
    return DealModel()


def policy_chaoyangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Chaoyang (朝阳) policy articles.

    Parses the fetched article HTML, extracts the title and full text,
    and builds the rows saved to policy_latest / policy_fulltext_latest.
    Attachment info is written back onto the task row.

    Raises:
        Exception: if the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the title rendered on the article page; fall back to the
    # title captured on the list page.
    title = ''.join(res.xpath('//div[@class="center-title"]//text()|//div[@class="text01"]//text()|//div[@class="tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # pub_no is only available when captured on the list page
    # (policyList.ct layout); default to empty otherwise.
    pub_no = article_json.get('pub_no', '')

    fulltext_xpath = '//div[@class="text"]|//div[@class="center-info"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99767'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CHAOYANG"
    zt_provider = "chaoyanggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (if any) back onto the originating task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   辽宁省葫芦岛市 (Huludao City, Liaoning Province)
def policy_hldlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Huludao (葫芦岛) policy listing pages.

    Pagination is currently disabled (total_page pinned to 1), so only the
    first list page is processed.  Two layouts are handled: the normative
    documents table ("xxzgfxwj", with pub_no / written_date columns) and
    the generic <ul> lists.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page-count detection is deliberately disabled: only page 1 is crawled.
        total_page = 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # With total_page == 1 this range is empty; kept so pagination
            # works again if page-count detection is restored.
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f'{list_json["page_info"]}_{page}'}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        if 'xxzgfxwj' in callmodel.sql_model.list_rawid:
            # Normative-documents table: each row carries extra metadata columns.
            li_list = res.xpath('//td[@class="bt"]/parent::tr[1]')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[1]/a/@href').extract_first()
                base_url = f'http://www.hld.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99768'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[1]/a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('td[4]/text()').extract_first().strip()
                article_json["pub_no"] = ''.join(li.xpath('td[2]/text()').extract()).strip()
                article_json["written_date"] = ''.join(li.xpath('td[3]/text()').extract()).strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            li_list = res.xpath('//ul[@class="columnlist-txt"]/li|//ul[@class="list-public"]/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[2]/a/@href|a/@href').extract_first()
                base_url = f'http://www.hld.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99768'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[2]/a/text()|a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('td[3]/text()|span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_hldarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Huludao: a pass-through; extraction is
    performed by the ETL callback."""
    return DealModel()


def policy_hldarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Huludao (葫芦岛) policy articles.

    Parses the fetched article HTML, extracts the title and full text
    (pub_no / written_date come from the list page when present), and
    builds the rows saved to policy_latest / policy_fulltext_latest.
    Attachment info is written back onto the task row.

    Raises:
        Exception: if the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    title = ''.join(res.xpath('//div[@class="content-hd"]//h1//text()').extract()).strip()
    if '\n' in title:
        # Multi-line headings are unreliable; use the ArticleTitle meta instead.
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Extra metadata is only available for the normative-documents listing.
    pub_no = article_json.get('pub_no', '')
    written_date = article_json.get('written_date', '')

    fulltext_xpath = '//div[contains(@class,"content-article")]|//div[contains(@class,"wenjian-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99768'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "HLD"
    zt_provider = "hldgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (if any) back onto the originating task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result
