import datetime
import json
import time
import math
import re
import traceback
import urllib
from urllib import parse
import base64

from parsel import Selector
from re_common.baselibrary.database.mysql import json_update
from re_common.baselibrary.utils.basedict import BaseDicts
from re_common.baselibrary.utils.basetime import BaseTime
from re_common.baselibrary.utils.baseurl import BaseUrl
from re_common.vip.baseencodeid import BaseLngid

from apps.crawler_platform.core_platform.core_sql import CoreSqlValue
from apps.crawler_platform.core_platform.g_model import DealModel, CallBackModel, DealInsertModel, DealUpdateModel, \
    OperatorSqlModel, DealItemModel, \
    EtlDealModel, PolicyListModel, PolicyArticleModel

# Public callback registry: the crawler platform resolves callbacks by name,
# so every list/article/etl callback defined in this module must be listed
# here. Naming pattern: policy_<site>list*_callback (list pages),
# policy_<site>article_callback (article fetch), *_etl_callback (extraction).
__all__ = [
    # --- Jilin provincial departments and prefecture-level cities ---
    "policy_jldrcjllist1_callback",
    "policy_jldrcjllist2_callback",
    "policy_jldrcjlarticle_callback",
    "policy_jldrcjlarticle_etl_callback",
    "policy_gxtjllist_callback",
    "policy_gxtjlarticle_callback",
    "policy_gxtjlarticle_etl_callback",
    "policy_kjtjllist1_callback",
    "policy_kjtjllist2_callback",
    "policy_kjtjlarticle_callback",
    "policy_kjtjlarticle_etl_callback",
    "policy_jytjllist_callback",
    "policy_jytjlarticle_callback",
    "policy_jytjlarticle_etl_callback",
    "policy_mztjllist1_callback",
    "policy_mztjllist2_callback",
    "policy_mztjlarticle_callback",
    "policy_mztjlarticle_etl_callback",
    "policy_cztjllist1_callback",
    "policy_cztjllist2_callback",
    "policy_cztjlarticle_callback",
    "policy_cztjlarticle_etl_callback",
    "policy_hrssjllist1_callback",
    "policy_hrssjllist2_callback",
    "policy_hrssjlarticle_callback",
    "policy_hrssjlarticle_etl_callback",
    "policy_agrijllist_callback",
    "policy_agrijlarticle_callback",
    "policy_agrijlarticle_etl_callback",
    "policy_jstjllist1_callback",
    "policy_jstjllist2_callback",
    "policy_jstjlarticle_callback",
    "policy_jstjlarticle_etl_callback",
    "policy_wsjkwjllist1_callback",
    "policy_wsjkwjllist2_callback",
    "policy_wsjkwjlarticle_callback",
    "policy_wsjkwjlarticle_etl_callback",
    "policy_changchunlist1_callback",
    "policy_changchunlist2_callback",
    "policy_changchunarticle_callback",
    "policy_changchunarticle_etl_callback",
    "policy_jlcitylist1_callback",
    "policy_jlcitylist2_callback",
    "policy_jlcityarticle_callback",
    "policy_jlcityarticle_etl_callback",
    "policy_sipinglist1_callback",
    "policy_sipinglist2_callback",
    "policy_sipingarticle_callback",
    "policy_sipingarticle_etl_callback",
    "policy_tonghualist1_callback",
    "policy_tonghualist2_callback",
    "policy_tonghuaarticle_callback",
    "policy_tonghuaarticle_etl_callback",
    "policy_cbslist1_callback",
    "policy_cbslist2_callback",
    "policy_cbsarticle_callback",
    "policy_cbsarticle_etl_callback",
    "policy_jlsylist1_callback",
    "policy_jlsylist2_callback",
    "policy_jlsyarticle_callback",
    "policy_jlsyarticle_etl_callback",
    "policy_jlbclist1_callback",
    "policy_jlbclist2_callback",
    "policy_jlbcarticle_callback",
    "policy_jlbcarticle_etl_callback",
    "policy_yanbianlist1_callback",
    "policy_yanbianlist2_callback",
    "policy_yanbianlist3_callback",
    "policy_yanbianarticle_callback",
    "policy_yanbianarticle_etl_callback",
    "policy_liaoyuanlist1_callback",
    "policy_liaoyuanlist2_callback",
    "policy_liaoyuanarticle_callback",
    "policy_liaoyuanarticle_etl_callback",

    # --- National-level agencies ---
    "policy_scsgovlist_callback",
    "policy_scsgovarticle_callback",
    "policy_scsgovarticle_etl_callback",
    "policy_nppagovlist_callback",
    "policy_nppagovarticle_callback",
    "policy_nppagovarticle_etl_callback",
    "policy_ncacgovlist_callback",
    "policy_ncacgovarticle_callback",
    "policy_ncacgovarticle_etl_callback",
    "policy_chinafilmgovlist_callback",
    "policy_chinafilmgovarticle_callback",
    "policy_chinafilmgovarticle_etl_callback",
    "policy_saragovlist_callback",
    "policy_saragovarticle_callback",
    "policy_saragovarticle_etl_callback",
    "policy_gqbgovlist_callback",
    "policy_gqbgovarticle_callback",
    "policy_gqbgovarticle_etl_callback",
    "policy_idcpclist_callback",
    "policy_idcpcarticle_callback",
    "policy_idcpcarticle_etl_callback",
    "policy_cacgovlist_callback",
    "policy_cacgovarticle_callback",
    "policy_cacgovarticle_etl_callback",
    "policy_chinaminesafetylist_callback",
    "policy_chinaminesafetyarticle_callback",
    "policy_chinaminesafetyarticle_etl_callback",
    "policy_119govlist_callback",
    "policy_119govarticle_callback",
    "policy_119govarticle_etl_callback",
    "policy_ceagovlist_callback",
    "policy_ceagovarticle_callback",
    "policy_ceagovarticle_etl_callback",
    "policy_auditgovlist_callback",
    "policy_auditgovlist1_callback",
    "policy_auditgovarticle_callback",
    "policy_auditgovarticle1_callback",
    "policy_auditgovarticle_etl_callback",
    "policy_auditgovarticle1_etl_callback",
    "policy_sportgovlist_callback",
    "policy_sportgovarticle_callback",
    "policy_sportgovarticle_etl_callback",
    "policy_nrtagovlist_callback",
    "policy_nrtagovarticle_callback",
    "policy_nrtagovarticle_etl_callback",

]


def clean_pubdate(value):
    """Normalize a free-form date string to an 8-digit YYYYMMDD string.

    All non-digit characters are stripped, the remainder is truncated to
    8 characters and right-padded with '0', and implausible components
    are zeroed: a month > 12 zeroes both month and day, a day > 31 zeroes
    the day.

    Args:
        value: Raw date string (may be None or empty).

    Returns:
        An 8-digit digit string, or '' when *value* is falsy.
    """
    if not value:
        return ''
    # Raw string for the regex: '\D' as a plain literal is an invalid
    # escape sequence (DeprecationWarning on modern Python).
    value = re.sub(r'\D', '', value)[:8].ljust(8, '0')
    # An impossible month invalidates the day as well.
    if int(value[4:6]) > 12:
        value = value[:4] + '0000'
    if int(value[6:]) > 31:
        value = value[:6] + '00'
    return value


def cleaned(value):
    """Strip whitespace from a scraped value.

    Args:
        value: A string, a list of strings, or a falsy value.

    Returns:
        The stripped string; for a list, the items stripped and joined
        with single spaces; '' for any falsy input.
    """
    if not value:
        return ""
    # isinstance instead of `type(value) is list` also accepts list subclasses.
    if isinstance(value, list):
        return ' '.join(item.strip() for item in value).strip()
    return value.strip()


def judge_url(url):
    """Return True when *url* should be rejected as an attachment link.

    Filters over-long URLs, anchors/scripts/mailto links, search-engine and
    social-media domains, and plain HTML pages — anything that is not a
    downloadable document or image.

    Args:
        url: Absolute URL string.

    Returns:
        True to reject the URL, False to keep it.
    """
    if len(url) > 500:
        return True
    # A real absolute URL still contains a path separator after the
    # scheme's '//' is collapsed away.
    if '/' not in url.replace('//', ''):
        return True
    # Substrings anywhere in the URL that mark it as junk (includes
    # full-width punctuation from text scraped into hrefs).
    bad_substrings = (
        'mailt', 'data:image/', 'javascript:', '#', 'weixin.qq',
        '.baidu', '。', '@163', '.cn/）', '8080）', 'cn）',
        'cn，', 'com，', 'cn,', 'haosou.', 'www.so.', 'file://',
        'C:', 'baike.soso', 'weibo.com', 'baike.sogou', 'html）',
        'shtml）', 'phtml）', 'wx.qq.', 'bing.com',
    )
    if any(token in url for token in bad_substrings):
        return True
    # Whole-URL suffixes that indicate a directory, site root or junk file.
    if url.endswith(('/', '.net', '.asp', '.shtml', '/share', '.exe',
                     '.xml', 'pdf}', 'jpg}')):
        return True
    ends = url.split('/')[-1].lower()
    if not ends:
        return True
    # Last path component looks like an HTML page (or a bare domain),
    # not a downloadable file.
    if ends.endswith(('.htm', '.shtml', '.jhtml', '.org', 'xhtml', '.phtml',
                      '.cn', '.com', '.html', '.mht', '.html%20')):
        return True
    # Short '.jsp...' tails are dynamic pages rather than files.
    if '.jsp' in ends and len(ends.split('.', 1)[1]) < 7:
        return True

    return False


def get_file_info(data, res, xpath):
    """Collect attachment (<a href>) and embedded-resource (@src) links
    found under the fulltext node.

    Args:
        data: Record dict providing 'provider_url' (base for resolving
            relative links), 'pub_year' and 'keyid'.
        res: parsel Selector over the article HTML.
        xpath: XPath expression selecting the fulltext container.

    Returns:
        List of dicts with 'url', 'name', 'pub_year' and 'keyid' for each
        unique URL that passes the judge_url() filter, in document order.
    """
    base_url = data['provider_url']
    pub_year = data['pub_year']
    keyid = data['keyid']
    file_info = []
    seen = set()  # set instead of list membership: O(1) dedup per link

    def _accept(href):
        """Resolve *href* against the page URL; return it, or None if
        it fails to resolve, is filtered out, or was already seen."""
        try:
            absolute = parse.urljoin(base_url, href)
        except Exception:
            return None
        if judge_url(absolute) or absolute in seen:
            return None
        seen.add(absolute)
        return absolute

    for tag in res.xpath(f'{xpath}//a'):
        file_href = tag.xpath('@href').extract_first()
        if file_href and file_href.strip():
            file_url = _accept(file_href.strip())
            if file_url is None:
                continue
            # Attachment name is the link's visible text.
            name = ''.join(tag.xpath('.//text()').extract()).strip()
            file_info.append({'url': file_url, 'name': name,
                              'pub_year': pub_year, 'keyid': keyid})
    for img_href in res.xpath(f'{xpath}//*/@src').extract():
        if img_href.strip():
            img_href = img_href.strip()
            img_url = _accept(img_href)
            if img_url is None:
                continue
            # For embedded resources the raw href doubles as the name.
            file_info.append({'url': img_url, 'name': img_href,
                              'pub_year': pub_year, 'keyid': keyid})
    return file_info


def deal_sql_dict(sql_dict):
    """Strip bookkeeping columns from a task-row dict before it is
    re-inserted as a new task.

    Mutates *sql_dict* in place and returns it. Raises KeyError if any
    expected column is missing.
    """
    for column in ("id", "update_time", "create_time", "null_dicts",
                   "err_msg", "other_dicts", "state", "failcount"):
        sql_dict.pop(column)
    return sql_dict


def init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider):
    """Build the base metadata record shared by every policy article.

    Args:
        rawid: Source-site record id.
        lngid: Platform-wide unique id (also used as keyid).
        sub_db_id: Sub-database id for the source site.
        down_date_str: Download timestamp, 'YYYYMMDD_HHMMSS'.
        product: Product code for the source.
        zt_provider: Provider code for the source.

    Returns:
        Dict pre-filled with the constant POLICY/CNGOV fields; callers add
        the article-specific fields afterwards.
    """
    return {
        'rawid': rawid,
        'rawid_mysql': rawid,
        'lngid': lngid,
        'keyid': lngid,
        'product': product,
        'sub_db': 'POLICY',
        'sub_db_id': sub_db_id,
        'provider': 'CNGOV',
        'zt_provider': zt_provider,
        'source_type': '16',
        'latest_date': down_date_str[:8],  # date part of the timestamp
        'batch': down_date_str,
        'vision': '1',
        'is_deprecated': '0',
        'country': 'CN',
        'language': 'ZH',
    }


def init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year):
    """Build the fulltext record that accompanies a policy article.

    Args:
        lngid: Platform-wide unique id (also used as keyid).
        sub_db_id: Sub-database id for the source site.
        down_date_str: Download timestamp, 'YYYYMMDD_HHMMSS'.
        fulltext: Extracted article HTML.
        pub_year: Publication year string.

    Returns:
        Dict ready for the policy_fulltext_latest table.
    """
    return {
        'lngid': lngid,
        'keyid': lngid,
        'sub_db_id': sub_db_id,
        'source_type': '16',
        'latest_date': down_date_str[:8],  # date part of the timestamp
        'batch': down_date_str,
        'is_deprecated': '0',
        'filename': f"{lngid}.html",
        'fulltext_type': "html",
        'fulltext_addr': '',
        'fulltext_size': '',
        'fulltext_txt': fulltext,
        'page_cnt': "1",
        'pub_year': pub_year,
    }


# 吉林省发展和改革委员会
def policy_jldrcjllist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Jilin Development and Reform Commission
    JSONP list feed.

    On the first page it schedules fetch tasks for all remaining list
    pages (16 records per page); for every entry on the current page it
    emits an article-stage task whose article_json carries url, title and
    a normalized pub_date.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Template for emitted rows; task_tag_next is swapped in per row below.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Strip the JSONP wrapper "result(...);" to get the raw JSON payload.
        data = re.sub(".*result\((.*)\);", "\\1", para_dicts["data"]["1_1"]['html'])
        all_data = json.loads(data)
        total = all_data["num"]
        # The endpoint serves 16 records per page.
        total_page = math.ceil(int(total) / 16)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: fan out list-page tasks for pages 2..total_page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        # res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if 'xxgkzcjd' in callmodel.sql_model.list_rawid:
        li_list = all_data["data"]

        # list_json = json.loads(callmodel.sql_model.list_json)
        for li in li_list:
            temp = info_dicts.copy()
            # Promote the next-stage tag: this row targets the article stage.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li["puburl"]
            title = li["title"]
            if url is None:

                continue
            elif 'htm' not in url:
                # Skip non-HTML links (attachments, external resources).
                continue

            # url_before = "http://www.sanya.gov.cn"
            # rawid = "<parent dir>_<basename minus .shtml/.html/.htm>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            # rawid = rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99769'

            article_json["url"] = url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()

            article_json["pub_date"] = clean_pubdate(
                li["tip"]["dates"].replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jldrcjllist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for JLDRC HTML list pages paginated via a
    'countPage = N//共多少页' script snippet.

    When handling page 0 (the entry page) it fans out tasks for the
    remaining pages 1..N-1; for every <li> entry it emits an
    article-stage task carrying url, title and normalized pub_date.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Template for emitted rows; task_tag_next is swapped in per row below.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # page_info_before = res.xpath("//div[@class='pages']//a[contains(string(), '尾页')]").extract()
        # Total page count embedded in an inline script on the page.
        page_info = re.findall('countPage = (\d+)//共多少页', para_dicts["data"]["1_1"]['html'])
        if page_info:
            # max_count = re.findall('pageCount:(\d+),', page_info)
            # if not max_count:
            #     max_count = re.findall('\((\d+)\)', page_info)
            max_count = int(page_info[0]) if page_info else 1
            total_page = max_count
        else:
            total_page = 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Page 0 is being parsed right now, so the range below
            # (1..total_page-1) covers all total_page pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}", "list_rawid_alt": list_json["list_rawid_alt"]}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if 'xxgkzcjd' in callmodel.sql_model.list_rawid:
        li_list = res.xpath('//div[contains(@class, "mian-1")]/ul/li')
        # li_list = res.xpath('//div[@class="ny_right_list"]/li')
        if not li_list:
            # Fallback layout used by some sections of the site.
            li_list = res.xpath('//div[@class="zlyjq"]/ul/li')
        url_path = './a/@href'
        title_path = './a/@title'
        list_json = json.loads(callmodel.sql_model.list_json)
        for li in li_list:
            temp = info_dicts.copy()
            # Promote the next-stage tag: this row targets the article stage.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath("./a/text()").get()
            if url is None:

                continue
            elif 'htm' not in url:
                # Skip non-HTML links (attachments, external resources).
                continue

            list_rawid_alt = list_json["list_rawid_alt"]

            # rawid = "<parent dir>_<basename minus .shtml/.html/.htm>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            # rawid = rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99769'
            # Resolve relative hrefs.
            # NOTE(review): the site-absolute branch prefixes www.yunfu.gov.cn,
            # which looks copied from another spider — confirm against the
            # actual JLDRC site before relying on it.
            if url.startswith("/"):
                url_before = "http://www.yunfu.gov.cn"
            elif url.__contains__("./"):
                url_before = "http://" + list_rawid_alt
            else:
                url_before = ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./span/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./a/span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jldrcjlarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-download callback for JLDRC: no parsing at this stage,
    just acknowledge the fetch with an empty DealModel (extraction happens
    in the matching *_etl_callback)."""
    result = DealModel()
    return result


def policy_jldrcjlarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for JLDRC article pages.

    Extracts the title, metadata-table fields (document number, index
    number, keywords, subject, written date, issuing organ) and the
    fulltext HTML, builds records for the policy_latest and
    policy_fulltext_latest tables, and writes attachment links found in
    the fulltext back into the task row's other_dicts column.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)
    # Title: try several known page layouts, then the <meta> tag, then
    # fall back to the title captured at list-page time.
    title = ''.join(res.xpath('//h1[@class="title document-number"]//text()').extract()).strip()

    if not title:
        title = ''.join(res.xpath('//div[@class="three-page-title navBlock"]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="con_contitle"]/text()').extract()).strip()

    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # pub_no_before = res.xpath('//table[contains(@class, "m-detailtb")]//th[contains(text(),"文") and contains(text(),"号")]')

    # Metadata table (#effect2): each field is located by the characters of
    # its Chinese label, its value is the next <td>.
    pub_no = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"文") and contains(string(),"号")]/following-sibling::td[1]//text()').extract()).strip()

    index_no = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"索") and contains(string(),"引")]/following-sibling::td[1]//text()').extract()).strip()
    keyword = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"主") and contains(string(),"词")]/following-sibling::td[1]//text()').extract()).strip()

    subject = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"分") and contains(string(),"类")]/following-sibling::td[1]//text()').extract()).strip()

    written_date = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/following-sibling::td[1]//text()').extract()).strip()

    # NOTE(review): legal_status is extracted but never written to `data`
    # below — dead value unless intentionally reserved for later use.
    legal_status = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"时") and contains(string(),"效")]/following-sibling::td[1]//text()').extract()).strip()

    organ = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"发") and contains(string(),"机") and contains(string(),"关")]/following-sibling::td[1]//text()').extract()).strip()

    # Organ names that start with "省" ("Province ...") are prefixed with
    # "吉林" (Jilin) to make them unambiguous.
    if organ.startswith('省'):
        organ = '吉林' + organ

    # Fulltext: try each known container in order.
    fulltext_xpath = '//div[@id="zoomcon"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@class="contents_div"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@id="zoom"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # fulltext_xpath = '//div[contains(@class, "j-fontContent")]'
        fulltext_xpath = '//div[@class="zlyxwz_t2"]'
        # fulltext_xpath = '//ucapcontent'

        # fulltext_xpath2 = '//div[@class="is-downlist"]'
        # fulltext2 = res.xpath(fulltext_xpath2).extract_first()
    # Re-extract with the final xpath: this is the only extract for the
    # last fallback above (earlier hits just re-select the same node).
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # NOTE(review): bare Exception with no message — a specific error
        # would make failed extractions easier to triage.
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99769'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JLDRCJL"
    zt_provider = "jldrcjlgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # NOTE(review): debug output — consider logging instead

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # fulltext_xpath1 = '//div[contains(@class, "m-relation")]'
    # fulltext_xpath2 = '//div[contains(@class, "m-detailright")]'
    # Attachment links in the fulltext are stored on the task row so the
    # platform can download them separately.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    # file_info2 = get_file_info(data, res, f'({fulltext_xpath1})')
    # file_info3 = get_file_info(data, res, f'({fulltext_xpath2})')
    # file_info = file_info1 + file_info2 + file_info3
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    # print(result)
    return result


# 吉林省工业和信息化厅
def policy_gxtjllist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Jilin Department of Industry and
    Information Technology (gxt.jl.gov.cn), paginated via a
    'countPage = N//共多少页' script snippet.

    When handling page 0 it fans out tasks for pages 1..N-1; for every
    <li> entry it emits an article-stage task carrying url, title and a
    normalized pub_date.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Template for emitted rows; task_tag_next is swapped in per row below.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # page_info_before = res.xpath("//div[@class='pages']//a[contains(string(), '尾页')]").extract()
        # Total page count embedded in an inline script on the page.
        page_info = re.findall('countPage = (\d+)//共多少页', para_dicts["data"]["1_1"]['html'])
        if page_info:
            # max_count = re.findall('pageCount:(\d+),', page_info)
            # if not max_count:
            #     max_count = re.findall('\((\d+)\)', page_info)
            max_count = int(page_info[0]) if page_info else 1
            total_page = max_count
        else:
            total_page = 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Page 0 is being parsed right now, so the range below
            # (1..total_page-1) covers all total_page pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if 'xxgkzcjd' in callmodel.sql_model.list_rawid:
        li_list = res.xpath('//div[contains(@class, "mian-1")]/ul/li')
        # li_list = res.xpath('//div[@class="ny_right_list"]/li')
        if not li_list:
            # Fallback layout used by some sections of the site.
            li_list = res.xpath('//div[@class="glyleft2_ly"]/ul/li')
        url_path = './a/@href'
        title_path = './a/@title'
        # list_json = json.loads(callmodel.sql_model.list_json)
        for li in li_list:
            temp = info_dicts.copy()
            # Promote the next-stage tag: this row targets the article stage.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath("./a/text()").get()
            if url is None:

                continue
            elif 'htm' not in url:
                # Skip non-HTML links (attachments, external resources).
                continue

            # list_rawid_alt = list_json["list_rawid_alt"]

            # rawid = "<parent dir>_<basename minus .shtml/.html/.htm>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            # rawid = rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99770'
            # Resolve relative hrefs against the site root or section path.
            if url.startswith("/"):
                url_before = "http://gxt.jl.gov.cn"
            elif url.__contains__("./"):
                url_before = "http://gxt.jl.gov.cn/xxgk/" + callmodel.sql_model.list_rawid
            else:
                url_before = ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./a/span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_gxtjlarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-download callback for GXT Jilin: no parsing at this stage,
    just acknowledge the fetch with an empty DealModel (extraction happens
    in the matching *_etl_callback)."""
    result = DealModel()
    return result


def policy_gxtjlarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for GXT Jilin article pages.

    Extracts the title, metadata-table fields (document number, index
    number, keywords, subject, written date, issuing organ) and the
    fulltext HTML, builds records for the policy_latest and
    policy_fulltext_latest tables, and writes attachment links (from the
    fulltext and the attachment list block) back into the task row's
    other_dicts column.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)
    # Title: try several known page layouts, then fall back to the title
    # captured at list-page time.
    title = ''.join(res.xpath('//h1[@class="title document-number"]//text()').extract()).strip()

    if not title:
        title = ''.join(res.xpath('//div[@class="three-page-title navBlock"]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="news_tit_ly"]/text()').extract()).strip()

    # else:
    #     fulltext_xpath = '//div[@id="zoom"]'
    if not title:
        title = article_json['title'].strip()
    # pub_no_before = res.xpath('//table[contains(@class, "m-detailtb")]//th[contains(text(),"文") and contains(text(),"号")]')

    # Metadata table (#effect2): each field is located by the characters of
    # its Chinese label, its value is the next <td>.
    pub_no = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"文") and contains(string(),"号")]/following-sibling::td[1]//text()').extract()).strip()

    index_no = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"索") and contains(string(),"引")]/following-sibling::td[1]//text()').extract()).strip()
    keyword = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"主") and contains(string(),"词")]/following-sibling::td[1]//text()').extract()).strip()

    subject = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"分") and contains(string(),"类")]/following-sibling::td[1]//text()').extract()).strip()

    written_date = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/following-sibling::td[1]//text()').extract()).strip()

    # NOTE(review): legal_status is extracted but never written to `data`
    # below — dead value unless intentionally reserved for later use.
    legal_status = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"时") and contains(string(),"效")]/following-sibling::td[1]//text()').extract()).strip()

    organ = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"发") and contains(string(),"机") and contains(string(),"关")]/following-sibling::td[1]//text()').extract()).strip()

    # Organ names that start with "省" ("Province ...") are prefixed with
    # "吉林" (Jilin) to make them unambiguous.
    if organ.startswith('省'):
        organ = '吉林' + organ

    # Fulltext: try each known container in order.
    fulltext_xpath = '//div[@id="main"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@class="zlyxwz"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@class="TRS_Editor"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # fulltext_xpath = '//div[contains(@class, "j-fontContent")]'
        fulltext_xpath = '//div[@class="zlyxwz_t2"]'
        # fulltext_xpath = '//ucapcontent'

        # fulltext_xpath2 = '//div[@class="is-downlist"]'
        # fulltext2 = res.xpath(fulltext_xpath2).extract_first()
    # Re-extract with the final xpath: this is the only extract for the
    # last fallback above (earlier hits just re-select the same node).
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # NOTE(review): bare Exception with no message — a specific error
        # would make failed extractions easier to triage.
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99770'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "GXTJL"
    zt_provider = "gxtjlgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # NOTE(review): debug output — consider logging instead

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # Also scan the attachment-list block for downloadable files.
    fulltext_xpath1 = '//div[contains(@class, "fj1_ly")]'
    # fulltext_xpath2 = '//div[contains(@class, "m-detailright")]'
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, f'({fulltext_xpath1})')
    # file_info3 = get_file_info(data, res, f'({fulltext_xpath2})')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    # print(result)
    return result


# Jilin Provincial Department of Science and Technology (吉林省科学技术厅)
def policy_kjtjllist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a JSONP list response and fan out per-article crawl tasks.

    The "1_1" payload is a JSONP string ``result({...});`` whose JSON body
    carries ``num`` (total entries, 16 per page) and ``data`` (the list
    rows).  On the first page (page_index == 1) the remaining list pages
    are enqueued once; every page then queues one article task per row
    with url/title/pub_date packed into ``article_json``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Strip the JSONP wrapper ``result(...);`` down to the JSON body.
        # Raw strings fix the previously-invalid "\(" escape sequence.
        data = re.sub(r".*result\((.*)\);", r"\1", para_dicts["data"]["1_1"]['html'])
        all_data = json.loads(data)
        total = all_data["num"]
        total_page = math.ceil(int(total) / 16)  # 16 entries per list page
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: enqueue list pages 2 .. total_page exactly once.
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps(
                    {"page_info": f"{page_info}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for li in all_data["data"]:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            url = li["puburl"]
            title = li["title"]
            # Skip rows without a usable html detail url.
            if url is None or 'htm' not in url:
                continue
            # rawid = "<parent dir>_<file name without extension>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(
                rawid_list[-2],
                rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99771'
            article_json = {
                "url": url.replace("../", "/").replace("./", "/"),
                "title": title.strip(),
                "pub_date": clean_pubdate(
                    li["tip"]["dates"].replace('发布时间：', '').replace('[', '').replace(']', '').strip()),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_kjtjllist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Science & Technology Dept. HTML list page and fan out article tasks.

    Pagination is read from an inline script (``countPage = N//共多少页``).
    On the first page (page_index == 0) the remaining list pages are
    enqueued; each table row yields one article task with url/title/pub_date.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in an inline script; default to 1 page.
        page_info = re.findall(r'countPage = (\d+)//共多少页', para_dicts["data"]["1_1"]['html'])
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue list pages 1 .. total_page - 1 (0-indexed paging).
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            page_info_base = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps(
                    {"page_info": f"{page_info_base}_{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Two known list layouts: a 700px table, or a "zlyjq" ul list.
        li_list = res.xpath('//table[@width="700"]//tr')
        if not li_list:
            li_list = res.xpath('//div[@class="zlyjq"]/ul/li')
        url_path = './td[@class="td14biaot"]/a/@href'
        title_path = './td[@class="td14biaot"]/a/@title'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath('./td[@class="td14biaot"]/a/text()').get()
            # Skip rows without a usable html detail url.
            if url is None or 'htm' not in url:
                continue
            # rawid = "<parent dir>_<file name without extension>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(
                rawid_list[-2],
                rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99771'
            # Resolve relative urls against the site root / the list directory.
            if url.startswith("/"):
                url_before = "http://kjt.jl.gov.cn"
            elif "./" in url:
                url_before = "http://kjt.jl.gov.cn/" + callmodel.sql_model.list_rawid
            else:
                url_before = ""
            pub_date_before = li.xpath('./td[2]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./a/span/text()").get() or ""
            article_json = {
                "url": url_before + url.replace("../", "/").replace("./", "/"),
                "title": title.strip(),
                "pub_date": clean_pubdate(
                    pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip()),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_kjtjlarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback: nothing to schedule, return an empty model."""
    return DealModel()


def policy_kjtjlarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Science & Technology Dept. article pages.

    Extracts title, #effect2 metadata and fulltext from the fetched html,
    builds ``policy_latest`` / ``policy_fulltext_latest`` records, and
    writes attachment info back to the crawl row via ``other_dicts``.

    Raises:
        Exception: when no known fulltext container is found in the page.
    """
    result = EtlDealModel()
    save_data = []

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)

    def _meta(*chars):
        # Read the value cell that follows an #effect2 label cell containing
        # every character in *chars* (e.g. "文", "号" -> document number).
        cond = " and ".join(f'contains(string(),"{c}")' for c in chars)
        return ''.join(res.xpath(
            f'//div[@id="effect2"]//td[{cond}]/following-sibling::td[1]//text()').extract()).strip()

    # Title: try known page layouts in order, fall back to the list title.
    title = ''.join(res.xpath('//h1[@class="title document-number"]//text()').extract()).strip()
    if not title:
        title = _meta("标", "题")
    if not title:
        title = ''.join(res.xpath('//td[@class="font30"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = _meta("文", "号")
    index_no = _meta("索", "引")
    keyword = _meta("主", "词")
    subject = _meta("分", "类")
    written_date = _meta("成", "文", "日")
    organ = _meta("发", "机", "关")
    # Expand e.g. "省科技厅" to "吉林省科技厅".
    if organ.startswith('省'):
        organ = '吉林' + organ

    # Fulltext: try the known containers in order; keep the matching xpath
    # because attachments are harvested from the same container below.
    fulltext = None
    fulltext_xpath = None
    for fulltext_xpath in ('//td[@id="zoom"]',
                           '//div[@class="contents_div"]',
                           '//div[@class="zlyxwz_t2"]'):
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99771'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "KJTJL"
    zt_provider = "kjtjlgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment links from the fulltext container are written back to the
    # crawl row so downloads can be scheduled later.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid,
                               "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# Jilin Provincial Department of Education (吉林省教育厅)
def policy_jytjllist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse an Education Dept. HTML list page and fan out article tasks.

    Pagination is read from an inline script (``countPage = N//共多少页``).
    On the first page (page_index == 0) the remaining list pages are
    enqueued; each table row yields one article task with url/title/pub_date.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in an inline script; default to 1 page.
        page_info = re.findall(r'countPage = (\d+)//共多少页', para_dicts["data"]["1_1"]['html'])
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue list pages 1 .. total_page - 1 (0-indexed paging).
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            page_info_base = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps(
                    {"page_info": f"{page_info_base}_{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Two known list layouts: a cellspacing=8 table, or a "glyleft2_ly" ul.
        li_list = res.xpath('//table[@cellspacing="8"]//tr')
        if not li_list:
            li_list = res.xpath('//div[@class="glyleft2_ly"]/ul/li')
        url_path = './td[1]/a/@href'
        title_path = './td[1]/a/@title'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath("./td[1]/a/text()").get()
            # Skip rows without a usable html detail url.
            if url is None or 'htm' not in url:
                continue
            # rawid = "<parent dir>_<file name without extension>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(
                rawid_list[-2],
                rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99772'
            # Resolve relative urls against the site root / the list directory.
            if url.startswith("/"):
                url_before = "http://jyt.jl.gov.cn"
            elif "./" in url:
                url_before = "http://jyt.jl.gov.cn/" + callmodel.sql_model.list_rawid
            else:
                url_before = ""
            pub_date_before = li.xpath('./td[2]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./a/span/text()").get() or ""
            article_json = {
                "url": url_before + url.replace("../", "/").replace("./", "/"),
                "title": title.strip(),
                "pub_date": clean_pubdate(
                    pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip()),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jytjlarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback: nothing to schedule, return an empty model."""
    return DealModel()


def policy_jytjlarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Education Dept. article pages.

    Extracts title, #effect2 metadata and fulltext from the fetched html,
    builds ``policy_latest`` / ``policy_fulltext_latest`` records, and
    writes attachment info back to the crawl row via ``other_dicts``.

    Raises:
        Exception: when no known fulltext container is found in the page.
    """
    result = EtlDealModel()
    save_data = []

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)

    def _meta(*chars):
        # Read the value cell that follows an #effect2 label cell containing
        # every character in *chars* (e.g. "文", "号" -> document number).
        cond = " and ".join(f'contains(string(),"{c}")' for c in chars)
        return ''.join(res.xpath(
            f'//div[@id="effect2"]//td[{cond}]/following-sibling::td[1]//text()').extract()).strip()

    # Title: try known page layouts in order, fall back to the list title.
    title = ''.join(res.xpath('//h1[@class="title document-number"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="three-page-title navBlock"]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//td[@class="newstitle"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = _meta("文", "号")
    index_no = _meta("索", "引")
    keyword = _meta("主", "词")
    subject = _meta("分", "类")
    written_date = _meta("成", "文", "日")
    organ = _meta("发", "机", "关")
    # Expand e.g. "省教育厅" to "吉林省教育厅".
    if organ.startswith('省'):
        organ = '吉林' + organ

    # Fulltext: try the known containers in order; keep the matching xpath
    # because attachments are harvested from the same container below.
    fulltext = None
    fulltext_xpath = None
    for fulltext_xpath in ('//td[@id="newscon"]',
                           '//td[@class="newscon"]',
                           '//div[@class="zlyxwz_t2"]'):
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99772'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JYTJL"
    zt_provider = "jytjlgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment links from the fulltext container are written back to the
    # crawl row so downloads can be scheduled later.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid,
                               "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# Jilin Provincial Department of Civil Affairs (吉林省民政厅)
def policy_mztjllist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a JSONP list response and fan out per-article crawl tasks.

    The "1_1" payload is a JSONP string ``result({...});`` whose JSON body
    carries ``num`` (total entries, 16 per page) and ``data`` (the list
    rows).  On the first page (page_index == 1) the remaining list pages
    are enqueued once; every page then queues one article task per row
    with url/title/pub_date packed into ``article_json``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Strip the JSONP wrapper ``result(...);`` down to the JSON body.
        # Raw strings fix the previously-invalid "\(" escape sequence.
        data = re.sub(r".*result\((.*)\);", r"\1", para_dicts["data"]["1_1"]['html'])
        all_data = json.loads(data)
        total = all_data["num"]
        total_page = math.ceil(int(total) / 16)  # 16 entries per list page
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: enqueue list pages 2 .. total_page exactly once.
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps(
                    {"page_info": f"{page_info}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for li in all_data["data"]:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            url = li["puburl"]
            title = li["title"]
            # Skip rows without a usable html detail url.
            if url is None or 'htm' not in url:
                continue
            # rawid = "<parent dir>_<file name without extension>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(
                rawid_list[-2],
                rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99773'
            article_json = {
                "url": url.replace("../", "/").replace("./", "/"),
                "title": title.strip(),
                "pub_date": clean_pubdate(
                    li["tip"]["dates"].replace('发布时间：', '').replace('[', '').replace(']', '').strip()),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_mztjllist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Civil Affairs Dept. HTML list page and fan out article tasks.

    Pagination is read from an inline script (``countPage = N//共多少页``).
    On the first page (page_index == 0) the remaining list pages are
    enqueued; each list item yields one article task with url/title/pub_date.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in an inline script; default to 1 page.
        page_info = re.findall(r'countPage = (\d+)//共多少页', para_dicts["data"]["1_1"]['html'])
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue list pages 1 .. total_page - 1 (0-indexed paging).
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            page_info_base = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps(
                    {"page_info": f"{page_info_base}_{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Two known list layouts: a "zly_mzt_..." ul, or a "zlyjq" div list.
        li_list = res.xpath('//ul[@class="zly_mzt_20170310mzywul"]/li')
        if not li_list:
            li_list = res.xpath('//div[@class="zlyjq"]/li')
        url_path = './a/@href'
        title_path = './a/text()'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath('./a/@title').get()
            # Skip items without a usable html detail url.
            if url is None or 'htm' not in url:
                continue
            # rawid = "<parent dir>_<file name without extension>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(
                rawid_list[-2],
                rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99773'
            # Resolve relative urls against the site root / the list directory.
            if url.startswith("/"):
                url_before = "http://mzt.jl.gov.cn"
            elif "./" in url:
                url_before = "http://mzt.jl.gov.cn/" + callmodel.sql_model.list_rawid
            else:
                url_before = ""
            pub_date_before = li.xpath('./td[2]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./a/span/text()").get() or ""
            article_json = {
                "url": url_before + url.replace("../", "/").replace("./", "/"),
                "title": title.strip(),
                "pub_date": clean_pubdate(
                    pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip()),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_mztjlarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback: nothing to schedule, return an empty model."""
    return DealModel()


def policy_mztjlarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for article pages of the Jilin Civil Affairs Department (mzt.jl.gov.cn).

    Extracts metadata (title, document no., index no., keywords, subject,
    written date, issuing organ) and the full text from the downloaded HTML,
    queues one row each for ``policy_latest`` and ``policy_fulltext_latest``,
    and writes attachment info back into the article row's ``other_dicts``.

    Raises:
        Exception: if none of the known full-text containers is present.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)

    def _meta_field(*chars):
        # Read a value from the "#effect2" metadata table whose label cell
        # contains every character in *chars* (labels are spaced-out CJK,
        # e.g. "发 文 字 号", so per-character contains() is required).
        cond = ' and '.join(f'contains(string(),"{c}")' for c in chars)
        return ''.join(res.xpath(
            f'//div[@id="effect2"]//td[{cond}]/following-sibling::td[1]//text()').extract()).strip()

    # Title: detail-page heading, then metadata table, then legacy layouts,
    # then the <meta> tag, finally the title captured at list-crawl time.
    title = ''.join(res.xpath('//h1[@class="title document-number"]//text()').extract()).strip()
    if not title:
        title = _meta_field('标', '题')
    if not title:
        title = ''.join(res.xpath('//div[@class="zly_xq_20170316_t navBlock"]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="meng_biaoti"]/a/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = _meta_field('文', '号')
    index_no = _meta_field('索', '引')
    keyword = _meta_field('主', '词')
    subject = _meta_field('分', '类')
    written_date = _meta_field('成', '文', '日')
    legal_status = _meta_field('时', '效')  # NOTE: extracted but not persisted downstream
    organ = _meta_field('发', '机', '关')

    if organ.startswith('省'):
        # Expand e.g. "省民政厅" to "吉林省民政厅".
        organ = '吉林' + organ

    # Full text: try the known containers in order of likelihood.
    fulltext = None
    fulltext_xpath = ''
    for fulltext_xpath in ('//div[@id="zoom"]', '//div[@class="contents_div"]', '//div[@class="zlyxwz_t2"]'):
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    if not fulltext:
        raise Exception(f"fulltext container not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99773'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "MZTJL"
    zt_provider = "mztjlgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Write attachment info (if any) back onto the article row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# Jilin Provincial Department of Finance (吉林省财政厅)
def policy_cztjllist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for the Finance Department's JSONP list API.

    On the first page, schedules fetches for all remaining pages; on every
    page, extracts (url, title, pub_date) per entry and queues article tasks
    under the next task tag.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Strip the JSONP wrapper: "...result({...});" -> "{...}".
        data = re.sub(r".*result\((.*)\);", r"\1", para_dicts["data"]["1_1"]['html'])
        all_data = json.loads(data)
        total = all_data["num"]
        total_page = math.ceil(int(total) / 16)  # API serves 16 entries per page
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: enqueue the remaining list pages (1-based indices).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for li in all_data["data"]:
            temp = info_dicts.copy()
            # Article tasks run under the follow-up task tag.
            temp["task_tag"] = temp.pop("task_tag_next")
            url = li["puburl"]
            title = li["title"]
            # Skip entries without a usable html link.
            if url is None or 'htm' not in url:
                continue
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99774'
            article_json = {
                "url": url.replace("../", "/").replace("./", "/"),
                "title": title.strip(),
                "pub_date": clean_pubdate(
                    li["tip"]["dates"].replace('发布时间：', '').replace('[', '').replace(']', '').strip()),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cztjllist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for the Finance Department's HTML list pages.

    On the first page (index 0), schedules the remaining pages based on the
    ``countPage`` value embedded in the page's JavaScript; on every page,
    extracts (url, title, pub_date) per <li> and queues article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page JS: "countPage = N//共多少页".
        page_info = re.findall(r'countPage = (\d+)//共多少页', para_dicts["data"]["1_1"]['html'])
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue the remaining list pages (0-based indices).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="ul_main"]/li')
        if not li_list:
            li_list = res.xpath('//div[@class="zlyjq"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            url = li.xpath('./a/@href').get()
            title = li.xpath('./a/@title').get()
            if title is None:
                title = li.xpath('./a/text()').get()
            # Skip entries without a usable html link.
            if url is None or 'htm' not in url:
                continue
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99774'
            # Resolve site-absolute and section-relative links to full URLs.
            if url.startswith("/"):
                url_before = "http://czt.jl.gov.cn"
            elif "./" in url:
                url_before = "http://czt.jl.gov.cn/" + callmodel.sql_model.list_rawid
            else:
                url_before = ""
            pub_date_before = li.xpath('./span/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./a/span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json = {
                "url": url_before + url.replace("../", "/").replace("./", "/"),
                "title": title.strip(),
                "pub_date": clean_pubdate(
                    pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip()),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cztjlarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the Finance Department site: no extra scheduling needed."""
    return DealModel()


def policy_cztjlarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for article pages of the Jilin Finance Department (czt.jl.gov.cn).

    Extracts metadata (title, document no., index no., keywords, subject,
    written date, issuing organ) and the full text from the downloaded HTML,
    queues one row each for ``policy_latest`` and ``policy_fulltext_latest``,
    and writes attachment info back into the article row's ``other_dicts``.

    Raises:
        Exception: if none of the known full-text containers is present.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)

    def _meta_field(*chars):
        # Read a value from the "#effect2" metadata table whose label cell
        # contains every character in *chars* (labels are spaced-out CJK,
        # so per-character contains() is required).
        cond = ' and '.join(f'contains(string(),"{c}")' for c in chars)
        return ''.join(res.xpath(
            f'//div[@id="effect2"]//td[{cond}]/following-sibling::td[1]//text()').extract()).strip()

    # Title: detail-page heading, then metadata table, then legacy layout,
    # finally the title captured at list-crawl time.
    title = ''.join(res.xpath('//h1[@class="title document-number"]//text()').extract()).strip()
    if not title:
        title = _meta_field('标', '题')
    if not title:
        title = ''.join(res.xpath('//div[@class="biaoti_title"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = _meta_field('文', '号')
    index_no = _meta_field('索', '引')
    keyword = _meta_field('主', '词')
    subject = _meta_field('分', '类')
    written_date = _meta_field('成', '文', '日')
    legal_status = _meta_field('时', '效')  # NOTE: extracted but not persisted downstream
    organ = _meta_field('发', '机', '关')

    if organ.startswith('省'):
        # Expand e.g. "省财政厅" to "吉林省财政厅".
        organ = '吉林' + organ

    # Full text: try the known containers in order of likelihood.
    fulltext = None
    fulltext_xpath = ''
    for fulltext_xpath in ('//div[@id="zoom"]', '//div[@class="contents_div"]', '//div[@class="neirong_content"]'):
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    if not fulltext:
        raise Exception(f"fulltext container not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99774'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CZTJL"
    zt_provider = "cztjlgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Write attachment info (if any) back onto the article row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# Jilin Provincial Department of Human Resources and Social Security (吉林省人力资源和社会保障厅)
def policy_hrssjllist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for the HRSS Department's JSONP list API.

    On the first page, schedules fetches for all remaining pages; on every
    page, extracts (url, title, pub_date) per entry and queues article tasks
    under the next task tag.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Strip the JSONP wrapper: "...result({...});" -> "{...}".
        data = re.sub(r".*result\((.*)\);", r"\1", para_dicts["data"]["1_1"]['html'])
        all_data = json.loads(data)
        total = all_data["num"]
        total_page = math.ceil(int(total) / 16)  # API serves 16 entries per page
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: enqueue the remaining list pages (1-based indices).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for li in all_data["data"]:
            temp = info_dicts.copy()
            # Article tasks run under the follow-up task tag.
            temp["task_tag"] = temp.pop("task_tag_next")
            url = li["puburl"]
            title = li["title"]
            # Skip entries without a usable html link.
            if url is None or 'htm' not in url:
                continue
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99775'
            article_json = {
                "url": url.replace("../", "/").replace("./", "/"),
                "title": title.strip(),
                "pub_date": clean_pubdate(
                    li["tip"]["dates"].replace('发布时间：', '').replace('[', '').replace(']', '').strip()),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_hrssjllist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for the HRSS Department's HTML list pages.

    On the first page (index 0), schedules the remaining pages based on the
    ``countPage`` value embedded in the page's JavaScript; on every page,
    extracts (url, title, pub_date) per <li> and queues article tasks.
    The alternate host for relative links comes from ``list_json["list_rawid_alt"]``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page JS: "countPage = N //共多少页".
        page_info = re.findall(r'countPage = (\d+) *//共多少页', para_dicts["data"]["1_1"]['html'])
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue the remaining list pages (0-based indices).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}", "list_rawid_alt": list_json["list_rawid_alt"]}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[contains(@class, "news_list3")]/ul/li')
        if not li_list:
            li_list = res.xpath('//div[@class="zlyjq"]/ul/li')
        list_json = json.loads(callmodel.sql_model.list_json)
        # Loop-invariant: alternate host used for section-relative links.
        list_rawid_alt = list_json["list_rawid_alt"]
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            url = li.xpath('./a[2]/@href').get()
            title = li.xpath('./a[2]/@title').get()
            if title is None:
                title = li.xpath('./a[2]/text()').get()
            # Skip entries without a usable html link.
            if url is None or 'htm' not in url:
                continue
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99775'
            # Resolve site-absolute and section-relative links to full URLs.
            if url.startswith("/"):
                url_before = "http://hrss.jl.gov.cn"
            elif "./" in url:
                url_before = "http://" + list_rawid_alt
            else:
                url_before = ""
            pub_date_before = li.xpath('./a[1]/span/text()').get()
            if not pub_date_before:
                pub_date_before = ""
            article_json = {
                "url": url_before + url.replace("../", "/").replace("./", "/"),
                "title": title.strip(),
                "pub_date": clean_pubdate(
                    pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip()),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_hrssjlarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the HRSS Department site: no extra scheduling needed."""
    return DealModel()


def policy_hrssjlarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for article pages of the Jilin HRSS Department (hrss.jl.gov.cn).

    Extracts metadata (title, document no., index no., keywords, subject,
    dates, issuing organ) and the full text from the downloaded HTML,
    queues one row each for ``policy_latest`` and ``policy_fulltext_latest``,
    and writes attachment info back into the article row's ``other_dicts``.

    Raises:
        Exception: if no publication date or no known full-text container
            can be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    res = Selector(text=html)

    def _meta_field(*chars):
        # Read a value from the "#effect2" metadata table whose label cell
        # contains every character in *chars* (labels are spaced-out CJK,
        # so per-character contains() is required).
        cond = ' and '.join(f'contains(string(),"{c}")' for c in chars)
        return ''.join(res.xpath(
            f'//div[@id="effect2"]//td[{cond}]/following-sibling::td[1]//text()').extract()).strip()

    # Publication date: list-stage value, then the metadata table, then the
    # "发布时间:" line in the page body; a date is mandatory.
    if not pub_date:
        pub_date = clean_pubdate(_meta_field('发', '布', '日'))
    if not pub_date:
        pub_date_info = ''.join(res.xpath('//li[contains(text(),"发布时间:")]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
    if not pub_date:
        raise Exception(f"pub_date not found: {provider_url}")
    pub_year = pub_date[:4]

    # Title: detail-page heading, then metadata table, then legacy layout,
    # finally the title captured at list-crawl time.
    title = ''.join(res.xpath('//h1[@class="title document-number"]//text()').extract()).strip()
    if not title:
        title = _meta_field('标', '题')
    if not title:
        title = ''.join(res.xpath('//div[@class="meng_biaoti"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = _meta_field('文', '号')
    index_no = _meta_field('索', '引')
    keyword = _meta_field('主', '词')
    subject = _meta_field('分', '类')
    written_date = _meta_field('成', '文', '日')
    legal_status = _meta_field('时', '效')  # NOTE: extracted but not persisted downstream
    organ = _meta_field('发', '机', '关')

    if organ.startswith('省'):
        # Expand e.g. "省人社厅" to "吉林省人社厅".
        organ = '吉林' + organ

    # Full text: try the known containers in order of likelihood.
    fulltext = None
    fulltext_xpath = ''
    for fulltext_xpath in ('//div[@id="zoom"]', '//div[@class="contents_div"]'):
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    if not fulltext:
        raise Exception(f"fulltext container not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99775'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "HRSSJL"
    zt_provider = "hrssjlgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Write attachment info (if any) back onto the article row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 吉林省农业农村厅
def policy_agrijllist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Jilin Provincial Department of Agriculture
    and Rural Affairs (agri.jl.gov.cn).

    On the first page (page_index == 0) the total page count is extracted and
    the remaining list pages are scheduled; on every page the article links
    are extracted and one next-stage task is scheduled per article.

    Args:
        callmodel: platform callback context carrying the fetched page and
            the originating task row.

    Returns:
        DealModel with pagination inserts in ``befor_dicts`` and article
        tasks in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # Total page count is embedded in an inline JS variable, e.g.
        # "countPage = 12 //共多少页".  Raw string avoids the invalid-escape
        # warning for "\d" on Python 3.12+.
        page_info = re.findall(r'countPage = (\d+) *//共多少页', html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: schedule the remaining list pages 1 .. total_page-1
            # (page 0 is the one being processed right now).
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            page_info_str = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info_str}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        li_list = res.xpath('//ul[@class="zly_2018_lb w781  mt-15"]/li')
        if not li_list:
            # Alternate column layout.
            li_list = res.xpath('//div[@class="glyleft2_ly"]/ul/li')
        url_path = './a/@href'
        title_path = './a/@title'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            url = li.xpath(url_path).get()
            if url is None or 'htm' not in url:
                continue
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath("./a/text()").get()
            if title is None:
                # Guard: some entries expose neither @title nor link text;
                # without this, title.strip() below would raise.
                title = ""
            rawid_list = url.split('/')
            # rawid = "<parent dir>_<file name without extension>"
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99776'
            # Resolve relative links against the site root / current column.
            if url.startswith("/"):
                url_before = "http://agri.jl.gov.cn"
            elif "./" in url:
                url_before = "http://agri.jl.gov.cn/" + callmodel.sql_model.list_rawid
            else:
                url_before = ""
            article_json = dict()
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./em/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_agrijlarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch callback for agri.jl.gov.cn.

    All parsing happens in the ETL stage, so nothing is scheduled here; an
    empty DealModel simply acknowledges the fetch.
    """
    return DealModel()


def policy_agrijlarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for agri.jl.gov.cn article pages.

    Extracts the title, the metadata fields from the ``#effect2`` table and
    the full text, then emits rows for ``policy_latest`` and
    ``policy_fulltext_latest`` plus an ``other_dicts`` (attachment info)
    update for the originating task row.

    Raises:
        Exception: if no known full-text container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)
    # Title: page layouts vary, so fall back through the known containers and
    # finally to the title captured on the list page.
    title = ''.join(res.xpath('//h1[@class="title document-number"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="zly_2018_xxbt"]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//td[@class="newstitle"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata table (#effect2): each field is located by the characters of
    # its label cell and read from the adjacent value cell.
    pub_no = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"文") and contains(string(),"号")]/following-sibling::td[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"索") and contains(string(),"引")]/following-sibling::td[1]//text()').extract()).strip()
    keyword = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"主") and contains(string(),"词")]/following-sibling::td[1]//text()').extract()).strip()
    subject = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"分") and contains(string(),"类")]/following-sibling::td[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/following-sibling::td[1]//text()').extract()).strip()
    # NOTE(review): legal_status is extracted but never written into `data`
    # below — confirm whether policy_latest should carry it.
    legal_status = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"时") and contains(string(),"效")]/following-sibling::td[1]//text()').extract()).strip()
    organ = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"发") and contains(string(),"机") and contains(string(),"关")]/following-sibling::td[1]//text()').extract()).strip()
    if organ.startswith('省'):
        # Qualify bare "省..." issuer names with the province.
        organ = '吉林' + organ

    # Full text: try the known containers in order; fulltext_xpath keeps the
    # matching selector for the attachment scan below.
    fulltext = None
    fulltext_xpath = ''
    for fulltext_xpath in ('//div[@id="zoom"]',
                           '//td[@class="newscon"]',
                           '//div[@class="zlyxwz_t2"]'):
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99776'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "AGRIJL"
    zt_provider = "agrijlgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment links found inside the full-text container on the
    # originating task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 吉林省住房和城乡建设厅
def policy_jstjllist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback (JSONP API variant) for the Jilin Provincial Department
    of Housing and Urban-Rural Development (jst.jl.gov.cn).

    The response is a JSONP payload ``result({...});`` with 16 items per
    page.  Page 1 schedules the remaining pages; every page schedules one
    article task per entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Strip the JSONP wrapper "result(...);" to get the bare JSON body.
        # Raw strings avoid the invalid-escape warning for "\(" on 3.12+.
        data = re.sub(r".*result\((.*)\);", r"\1", para_dicts["data"]["1_1"]['html'])
        all_data = json.loads(data)
        total = all_data["num"]
        total_page = math.ceil(int(total) / 16)  # 16 records per page
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: schedule pages 2 .. total_page.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for li in all_data["data"]:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            url = li["puburl"]
            title = li["title"]
            if url is None or 'htm' not in url:
                continue
            rawid_list = url.split('/')
            # rawid = "<parent dir>_<file name without extension>"
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99777'
            article_json = dict()
            article_json["url"] = url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            article_json["pub_date"] = clean_pubdate(
                li["tip"]["dates"].replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jstjllist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback (HTML table variant) for jst.jl.gov.cn.

    Page 0 extracts the total page count from inline JS and schedules the
    remaining list pages; every page extracts the article rows and schedules
    one article task per row.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # Total page count lives in an inline JS variable, e.g.
        # "countPage = 12 //共多少页".
        page_info = re.findall(r'countPage = (\d+) *//共多少页', html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: schedule list pages 1 .. total_page-1.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            page_info_str = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info_str}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        li_list = res.xpath('//table[@class="tab111"]//tr[@class="tab111"]')
        if not li_list:
            # Alternate column layout.
            li_list = res.xpath('//div[@class="NewsList"]//tr//table//tr')
        url_path = './td[2]/a/@href'
        title_path = './td[2]/a/@title'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            url = li.xpath(url_path).get()
            if url is None or 'htm' not in url:
                continue
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath('./td[2]/a/text()').get()
            if title is None:
                # Guard: some rows expose neither @title nor link text;
                # without this, title.strip() below would raise.
                title = ""
            rawid_list = url.split('/')
            # rawid = "<parent dir>_<file name without extension>"
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99777'
            # Resolve relative links against the site root / current column.
            if url.startswith("/"):
                url_before = "http://jst.jl.gov.cn"
            elif "../" in url:
                url_before = "http://jst.jl.gov.cn"
            elif "./" in url:
                url_before = "http://jst.jl.gov.cn/" + callmodel.sql_model.list_rawid
            else:
                url_before = ""
            article_json = dict()
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./td[3]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./a/span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jstjlarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch callback for jst.jl.gov.cn.

    All parsing happens in the ETL stage, so nothing is scheduled here; an
    empty DealModel simply acknowledges the fetch.
    """
    return DealModel()


def policy_jstjlarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for jst.jl.gov.cn article pages.

    Extracts the title, the metadata fields from the ``#effect2`` table and
    the full text, then emits rows for ``policy_latest`` and
    ``policy_fulltext_latest`` plus an ``other_dicts`` (attachment info)
    update for the originating task row.

    Raises:
        Exception: if no known full-text container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)
    # Title: page layouts vary, so fall back through the known containers and
    # finally to the title captured on the list page.
    title = ''.join(res.xpath('//h1[@class="title document-number"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath(
            '//div[@id="effect2"]//td[contains(string(),"标") and contains(string(),"题")]/following-sibling::td[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="zly_xq_20170316_t navBlock"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata table (#effect2): each field is located by the characters of
    # its label cell and read from the adjacent value cell.
    pub_no = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"文") and contains(string(),"号")]/following-sibling::td[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"索") and contains(string(),"引")]/following-sibling::td[1]//text()').extract()).strip()
    keyword = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"主") and contains(string(),"词")]/following-sibling::td[1]//text()').extract()).strip()
    subject = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"分") and contains(string(),"类")]/following-sibling::td[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/following-sibling::td[1]//text()').extract()).strip()
    # NOTE(review): legal_status is extracted but never written into `data`
    # below — confirm whether policy_latest should carry it.
    legal_status = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"时") and contains(string(),"效")]/following-sibling::td[1]//text()').extract()).strip()
    organ = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"发") and contains(string(),"机") and contains(string(),"关")]/following-sibling::td[1]//text()').extract()).strip()
    if organ.startswith('省'):
        # Qualify bare "省..." issuer names with the province.
        organ = '吉林' + organ

    # Full text: try the known containers in order; fulltext_xpath keeps the
    # matching selector for the attachment scan below.
    fulltext = None
    fulltext_xpath = ''
    for fulltext_xpath in ('//td[@id="zoom"]',
                           '//div[@class="contents_div"]',
                           '//div[@class="zlyxwz_t2"]'):
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99777'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JSTJL"
    zt_provider = "jstjlgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment links found inside the full-text container on the
    # originating task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 吉林省卫生健康委员会 (Jilin Provincial Health Commission, wsjkw.jl.gov.cn)
def policy_wsjkwjllist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback (JSONP API variant) for the Jilin Provincial Health
    Commission (wsjkw.jl.gov.cn).

    The response is a JSONP payload ``result({...});`` with 16 items per
    page.  Page 1 schedules the remaining pages; every page schedules one
    article task per entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Strip the JSONP wrapper "result(...);" to get the bare JSON body.
        # Raw strings avoid the invalid-escape warning for "\(" on 3.12+.
        data = re.sub(r".*result\((.*)\);", r"\1", para_dicts["data"]["1_1"]['html'])
        all_data = json.loads(data)
        total = all_data["num"]
        total_page = math.ceil(int(total) / 16)  # 16 records per page
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: schedule pages 2 .. total_page.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for li in all_data["data"]:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            url = li["puburl"]
            title = li["title"]
            if url is None or 'htm' not in url:
                continue
            rawid_list = url.split('/')
            # rawid = "<parent dir>_<file name without extension>"
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99778'
            article_json = dict()
            article_json["url"] = url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            article_json["pub_date"] = clean_pubdate(
                li["tip"]["dates"].replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_wsjkwjllist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback (HTML variant) for wsjkw.jl.gov.cn.

    Page 0 extracts the total page count from inline JS and schedules the
    remaining list pages; every page extracts the article entries and
    schedules one article task per entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # Total page count lives in an inline JS variable, e.g.
        # "countPage = 12 //共多少页".
        page_info = re.findall(r'countPage = (\d+) *?//共多少页', html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: schedule list pages 1 .. total_page-1.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            page_info_str = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info_str}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        li_list = res.xpath('//div[@class="xx e_zfxxgk"]/div[@class="xxl xl"]')
        if not li_list:
            # Alternate column layout.
            li_list = res.xpath('//div[@class="NewsList"]//tr//table//tr')
        url_path = './a/@href'
        title_path = './a/@title'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            url = li.xpath(url_path).get()
            if url is None or 'htm' not in url:
                continue
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath('./a/text()').get()
            if title is None:
                # Guard: some entries expose neither @title nor link text;
                # without this, title.strip() below would raise.
                title = ""
            rawid_list = url.split('/')
            # rawid = "<parent dir>_<file name without extension>"
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99778'
            # Resolve relative links against the site root / current column.
            if url.startswith("/"):
                url_before = "http://wsjkw.jl.gov.cn"
            elif "../" in url:
                url_before = "http://wsjkw.jl.gov.cn"
            elif "./" in url:
                url_before = "http://wsjkw.jl.gov.cn/" + callmodel.sql_model.list_rawid
            else:
                url_before = ""
            article_json = dict()
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./span/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./a/span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_wsjkwjlarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch callback for wsjkw.jl.gov.cn.

    All parsing happens in the ETL stage, so nothing is scheduled here; an
    empty DealModel simply acknowledges the fetch.
    """
    return DealModel()


def policy_wsjkwjlarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Jilin Provincial Health Commission (wsjkw.jl.gov.cn) policy articles.

    Parses the downloaded article HTML, extracts metadata (title, document
    number, index number, keyword, subject, issuing organ, dates) and the
    fulltext container, then queues rows for ``policy_latest`` and
    ``policy_fulltext_latest``. Attachment info found inside the fulltext
    container is written back to the article row's ``other_dicts``.

    Raises:
        Exception: if no known fulltext container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)
    # Title fallbacks: page <h1>, the 信息公开 metadata table, the plain page
    # heading, and finally the title captured at list time.
    title = ''.join(res.xpath('//h1[@class="title document-number"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath(
            '//div[@id="effect2"]//td[contains(string(),"标") and contains(string(),"题")]/following-sibling::td[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="s_bt"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Label/value pairs from the metadata table (//div[@id="effect2"]).
    pub_no = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"文") and contains(string(),"号")]/following-sibling::td[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"索") and contains(string(),"引")]/following-sibling::td[1]//text()').extract()).strip()
    keyword = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"主") and contains(string(),"词")]/following-sibling::td[1]//text()').extract()).strip()
    subject = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"分") and contains(string(),"类")]/following-sibling::td[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/following-sibling::td[1]//text()').extract()).strip()
    # NOTE(review): the original also extracted the 时效 (legal status) cell but
    # never persisted it; that dead extraction was removed.
    organ = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"发") and contains(string(),"机") and contains(string(),"关")]/following-sibling::td[1]//text()').extract()).strip()

    # The site abbreviates the province, e.g. "省卫生健康委" -> "吉林省卫生健康委".
    if organ.startswith('省'):
        organ = '吉林' + organ

    # Fulltext containers seen on this site, tried in order; keep the matching
    # xpath so attachment scanning below reuses the same container.
    fulltext = None
    fulltext_xpath = ''
    for candidate_xpath in ('//td[@id="zoom"]',
                            '//div[@class="contents_div"]',
                            '//div[@class="s_txt neirong_content"]'):
        fulltext_xpath = candidate_xpath
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    if not fulltext:
        raise Exception(f"fulltext container not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99778'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "WSJKWJL"
    zt_provider = "wsjkwjlgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (if any) back onto the article row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 吉林省长春市
def policy_changchunlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback (stage 1) for Changchun municipal government policy lists.

    The response is JSONP of the form ``...result({...});``. On the first page
    (page_index == 1) the remaining pages are fanned out as new list tasks
    (16 items per page); one next-stage article row is emitted per entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Strip the JSONP wrapper: keep only the payload inside result(...);
        # (raw string fixes the invalid \( escape of the original pattern).
        data = re.sub(r".*result\((.*)\);", r"\1", para_dicts["data"]["1_1"]['html'])
        all_data = json.loads(data)
        total = all_data["num"]
        total_page = math.ceil(int(total) / 16)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: schedule the remaining pages as new list tasks.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for li in all_data["data"]:
            temp = info_dicts.copy()
            # Next-stage rows run under the follow-up task tag.
            temp["task_tag"] = temp.pop("task_tag_next")
            article_json = dict()
            url = li["puburl"]
            title = li["title"]
            # Skip entries without a usable .htm/.html/.shtml detail link.
            if url is None or 'htm' not in url:
                continue
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99779'
            article_json["url"] = url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            article_json["pub_date"] = clean_pubdate(
                li["tip"]["dates"].replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_changchunlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback (stage 2) for Changchun policy list pages (plain HTML lists).

    The total page count is read from the inline JS ``countPage = N //共多少页``.
    On the first page (page_index == 0) the remaining pages are fanned out as
    new list tasks; one next-stage article row is emitted per list item.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string fixes the invalid \d escape of the original pattern.
        page_info = re.findall(r'countPage = (\d+) *?//共多少页', para_dicts["data"]["1_1"]['html'])
        # Default to a single page when the marker is absent.
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: schedule the remaining pages as new list tasks.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info_base = list_json["page_info"]
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info_base}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Two list layouts observed on the site.
        li_list = res.xpath('//div[@class="left_xyhbox1"]//ul[@class="currency_ul"]/li')
        if not li_list:
            li_list = res.xpath('//div[@class="NewsList"]//tr//table//tr')
        url_path = './a/@href'
        title_path = './a/@title'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath('./a/text()').get()
            # Skip entries without a usable .htm/.html/.shtml detail link.
            if url is None or 'htm' not in url:
                continue
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99779'
            # Resolve relative links against the site root / current list path.
            if url.startswith("/"):
                url_before = "http://www.changchun.gov.cn"
            elif "../" in url:
                url_before = "http://www.changchun.gov.cn"
            elif "./" in url:
                url_before = "http://www.changchun.gov.cn/" + callmodel.sql_model.list_rawid
            else:
                url_before = ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./span/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./a/span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_changchunarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article stage for www.changchun.gov.cn needs no post-download handling; return an empty DealModel."""
    return DealModel()


def policy_changchunarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Changchun municipal government (www.changchun.gov.cn) policy articles.

    Parses the downloaded article HTML, extracts metadata (title, document
    number, index number, keyword, subject, issuing organ, dates) and the
    fulltext container, then queues rows for ``policy_latest`` and
    ``policy_fulltext_latest``. Attachment info from the fulltext container
    and the 2019 layout column is written back to ``other_dicts``.

    Raises:
        Exception: if no known fulltext container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)
    # Title fallbacks: page <h1>, the 信息公开 metadata table, the plain page
    # heading, and finally the title captured at list time.
    title = ''.join(res.xpath('//h1[@class="title document-number"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath(
            '//div[@id="effect2"]//td[contains(string(),"标") and contains(string(),"题")]/following-sibling::td[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="s_bt"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Label/value pairs from the metadata table (//div[@id="effect2"]).
    pub_no = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"文") and contains(string(),"号")]/following-sibling::td[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"索") and contains(string(),"引")]/following-sibling::td[1]//text()').extract()).strip()
    keyword = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"主") and contains(string(),"词")]/following-sibling::td[1]//text()').extract()).strip()
    subject = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"分") and contains(string(),"类")]/following-sibling::td[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/following-sibling::td[1]//text()').extract()).strip()
    # NOTE(review): the original also extracted the 时效 (legal status) cell but
    # never persisted it; that dead extraction was removed.
    organ = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"发") and contains(string(),"机") and contains(string(),"关")]/following-sibling::td[1]//text()').extract()).strip()

    # The site abbreviates the city, e.g. "市政府办公厅" -> "长春市政府办公厅".
    if organ.startswith('市'):
        organ = '长春' + organ

    # Fulltext containers seen on this site, tried in order; keep the matching
    # xpath so attachment scanning below reuses the same container.
    fulltext = None
    fulltext_xpath = ''
    for candidate_xpath in ('//td[@id="zoom"]',
                            '//div[@class="contents_div"]',
                            '//div[@id="Zoom"]',
                            '//div[@class="TRS_Editor"]'):
        fulltext_xpath = candidate_xpath
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    if not fulltext:
        raise Exception(f"fulltext container not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99779'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CHANGCHUN"
    zt_provider = "changchungovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments may sit in the fulltext container or in the 2019 layout column.
    fulltext_xpath1 = '//div[contains(@class, "left_conter_20190901")]'
    file_info = (get_file_info(data, res, f'({fulltext_xpath})')
                 + get_file_info(data, res, f'({fulltext_xpath1})'))
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 吉林省吉林市
def policy_jlcitylist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback (stage 1) for Jilin City (www.jlcity.gov.cn) policy lists.

    The response is JSONP of the form ``...result({...});``. On the first page
    (page_index == 1) the remaining pages are fanned out as new list tasks
    (16 items per page); one next-stage article row is emitted per entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Strip the JSONP wrapper: keep only the payload inside result(...);
        # (raw string fixes the invalid \( escape of the original pattern).
        data = re.sub(r".*result\((.*)\);", r"\1", para_dicts["data"]["1_1"]['html'])
        all_data = json.loads(data)
        total = all_data["num"]
        total_page = math.ceil(int(total) / 16)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: schedule the remaining pages as new list tasks.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for li in all_data["data"]:
            temp = info_dicts.copy()
            # Next-stage rows run under the follow-up task tag.
            temp["task_tag"] = temp.pop("task_tag_next")
            article_json = dict()
            url = li["puburl"]
            title = li["title"]
            # Skip entries without a usable .htm/.html/.shtml detail link.
            if url is None or 'htm' not in url:
                continue
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99780'
            article_json["url"] = url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            article_json["pub_date"] = clean_pubdate(
                li["tip"]["dates"].replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jlcitylist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback (stage 2) for Jilin City (www.jlcity.gov.cn) HTML policy lists.

    Reads the total page count from the inline JS ``countPage = N //共多少页``,
    fans out the remaining pages on page_index == 0 (carrying forward
    ``list_rawid_alt``), then emits one next-stage article row per list item.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # page_info_before = res.xpath("//div[@class='pages']//a[contains(string(), '尾页')]").extract()
        # NOTE(review): pattern is a non-raw string; \d still works but should
        # ideally be a raw string literal.
        page_info = re.findall('countPage = (\d+) *?//共多少页', para_dicts["data"]["1_1"]['html'])
        if page_info:
            # max_count = re.findall('pageCount:(\d+),', page_info)
            # if not max_count:
            #     max_count = re.findall('\((\d+)\)', page_info)
            max_count = int(page_info[0]) if page_info else 1
            total_page = max_count
        else:
            # No page-count marker found: treat as a single page.
            total_page = 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: schedule the remaining pages as new list tasks.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}", "list_rawid_alt": list_json["list_rawid_alt"]}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if 'xxgkzcjd' in callmodel.sql_model.list_rawid:
        # li_list = res.xpath('//div[contains(@class, "mian-1")]/ul/li')
        # Two list layouts observed on this site.
        li_list = res.xpath('//div[@class="zlyjq"]/ul')
        if not li_list:
            li_list = res.xpath('//ul[@class="list list_page"]/li')
        url_path = './a/@href'
        title_path = './a/span[@class="list_name"]/text()'
        list_json = json.loads(callmodel.sql_model.list_json)
        for li in li_list:
            temp = info_dicts.copy()
            # Next-stage rows run under the follow-up task tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                # Alternate layout: title/link live on the second <a> of the row.
                title = li.xpath('./li/a[2]/text()').get()
            if url is None:
                url = li.xpath("./li/a[2]/@href").get()
                if url is None:
                    continue
            elif 'htm' not in url:
                continue

            # NOTE(review): read but unused below; the lookup also validates the
            # key exists (raises KeyError when list_rawid_alt is missing).
            list_rawid_alt = list_json["list_rawid_alt"]

            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            # rawid = rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99780'
            # Resolve relative links against the site root / current list path.
            if url.startswith("/"):
                url_before = "http://www.jlcity.gov.cn"
            elif url.__contains__("../"):
                url_before = "http://www.jlcity.gov.cn"
            elif url.__contains__("./"):
                url_before = "http://www.jlcity.gov.cn/" + callmodel.sql_model.list_rawid
            else:
                url_before = ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./span[@class="list_time"]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./a/span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jlcityarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article stage for www.jlcity.gov.cn needs no post-download handling; return an empty DealModel."""
    return DealModel()


def policy_jlcityarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Jilin City government (www.jlcity.gov.cn) policy articles.

    Parses the downloaded article HTML, extracts metadata (title, document
    number, index number, keyword, subject, issuing organ, dates) and the
    fulltext container, then queues rows for ``policy_latest`` and
    ``policy_fulltext_latest``. Unlike the sibling ETL callbacks, pub_date is
    re-read from the detail page rather than trusted from the list stage.

    Raises:
        Exception: if no valid pub_date or fulltext container is found.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']

    res = Selector(text=html)
    # Title fallbacks: page <h1>, the 信息公开 metadata table, the detail page
    # heading, and finally the title captured at list time.
    title = ''.join(res.xpath('//h1[@class="title document-number"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath(
            '//div[@id="effect2"]//td[contains(string(),"标") and contains(string(),"题")]/following-sibling::td[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="detail_tittle"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # pub_date comes from the detail page; fall back to the metadata table when
    # missing or clearly invalid ('0000' placeholder).
    pub_date_info = ''.join(res.xpath('//span[@class="detail_top_time"]/text()').extract()).strip()
    pub_date = clean_pubdate(pub_date_info)
    pub_year = pub_date[:4]
    if not pub_date or '0000' in pub_date:
        pub_date_info = ''.join(res.xpath('//div[@id="effect2"]//td[contains(string(),"发布日期")]/following-sibling::td[1]//text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    if not pub_date:
        raise Exception(f"pub_date not found: {provider_url}")

    # Label/value pairs from the metadata table (//div[@id="effect2"]).
    pub_no = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"文") and contains(string(),"号")]/following-sibling::td[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"索") and contains(string(),"引")]/following-sibling::td[1]//text()').extract()).strip()
    keyword = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"主") and contains(string(),"词")]/following-sibling::td[1]//text()').extract()).strip()
    subject = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"分") and contains(string(),"类")]/following-sibling::td[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/following-sibling::td[1]//text()').extract()).strip()
    # NOTE(review): the original also extracted the 时效 (legal status) cell but
    # never persisted it; that dead extraction was removed.
    organ = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"发") and contains(string(),"机") and contains(string(),"关")]/following-sibling::td[1]//text()').extract()).strip()

    # The site abbreviates the city, e.g. "市政府办公室" -> "吉林市政府办公室".
    if organ.startswith('市'):
        organ = '吉林' + organ

    # Fulltext containers seen on this site, tried in order; keep the matching
    # xpath so attachment scanning below reuses the same container.
    fulltext = None
    fulltext_xpath = ''
    for candidate_xpath in ('//td[@id="zoom"]',
                            '//div[@class="contents_div"]',
                            '//div[@class="detail_content"]|//div[@id="zoom"]'):
        fulltext_xpath = candidate_xpath
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    if not fulltext:
        raise Exception(f"fulltext container not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99780'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JLCITY"
    zt_provider = "jlcitygovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments may sit in the fulltext container or in the 2019 layout column.
    fulltext_xpath1 = '//div[contains(@class, "left_conter_20190901")]'
    file_info = (get_file_info(data, res, f'({fulltext_xpath})')
                 + get_file_info(data, res, f'({fulltext_xpath1})'))
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 吉林省四平市
def policy_sipinglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Siping (四平) policy list page served as a JSONP payload.

    On the first page (page_index == 1), fans out crawl tasks for every
    remaining page (16 items per page); on every page, extracts each
    article's url, title and publish date and queues it for the article
    stage.

    :param callmodel: carries the fetched response (``para_dicts``), the
        current task row (``sql_model``) and redis task metadata.
    :return: DealModel with pagination inserts in ``befor_dicts`` and
        article inserts in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Strip the JSONP wrapper, e.g. "result({...});" -> "{...}".
        data = re.sub(r".*result\((.*)\);", r"\1", para_dicts["data"]["1_1"]['html'])
        all_data = json.loads(data)
        total = all_data["num"]
        total_page = math.ceil(int(total) / 16)  # 16 items per page
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for li in all_data["data"]:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li["puburl"]
            title = li["title"]
            # Skip entries without a usable html detail url.
            if url is None or 'htm' not in url:
                continue

            # rawid = "<parent-dir>_<filename-without-extension>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99781'

            article_json["url"] = url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            article_json["pub_date"] = clean_pubdate(
                li["tip"]["dates"].replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_sipinglist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Siping (四平) policy list page rendered as plain html.

    On the first page (page_index == 0), fans out crawl tasks for the
    remaining pages using the "countPage" value found in the page's
    inline javascript; on every page, extracts each article's absolute
    url, title and publish date and queues it for the article stage.

    :param callmodel: carries the fetched response (``para_dicts``), the
        current task row (``sql_model``) and redis task metadata.
    :return: DealModel with pagination inserts in ``befor_dicts`` and
        article inserts in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in inline js: "countPage = N //共多少页".
        page_info = re.findall(r'countPage = (\d+) *?//共多少页', para_dicts["data"]["1_1"]['html'])
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only the first page schedules the remaining pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}", "list_rawid_alt": list_json["list_rawid_alt"]}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[contains(@class, "ly_jhj_listul")]/li')
        if not li_list:
            # Alternate list markup used on some sections.
            li_list = res.xpath('//div[@class="zlyjq"]/ul/li')
        url_path = './a/@href'
        title_path = './a/text()'
        list_json = json.loads(callmodel.sql_model.list_json)
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath('./li/a[2]/text()').get()
            if url is None:
                # Some rows nest the link one level deeper; the 'htm'
                # filter deliberately applies only to the primary link.
                url = li.xpath("./li/a[2]/@href").get()
                if url is None:
                    continue
            elif 'htm' not in url:
                continue

            list_rawid_alt = list_json["list_rawid_alt"]

            # rawid = "<parent-dir>_<filename-without-extension>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99781'
            # Resolve relative urls; branch order matters ("../" also
            # contains "./").
            if url.startswith("/"):
                url_before = "http://www.siping.gov.cn"
            elif "../" in url:
                url_before = "http://" + list_rawid_alt.replace(callmodel.sql_model.list_rawid, "").rstrip("/")
            elif "./" in url:
                url_before = "http://" + list_rawid_alt
            else:
                url_before = ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./em/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./a/span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_sipingarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article stage for Siping: parsing happens in the ETL callback, so
    there is nothing to do here beyond returning an empty DealModel."""
    return DealModel()


def policy_sipingarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL stage for a Siping (四平) policy article page.

    Extracts metadata (title, document number, index number, keywords,
    subject classification, written date, issuing organ) from the detail
    html, builds the ``policy_latest`` and ``policy_fulltext_latest``
    rows, and records attachment info back onto the task row through
    ``other_dicts``.

    :raises Exception: when none of the known fulltext containers is
        present in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)

    def _meta(*chars):
        # Read a metadata cell from the #effect2 table whose label cell
        # contains every character in *chars* (labels are often spaced
        # out, e.g. "文  号", so match character by character).
        cond = " and ".join(f'contains(string(),"{c}")' for c in chars)
        xp = f'//div[@id="effect2"]//td[{cond}]/following-sibling::td[1]//text()'
        return ''.join(res.xpath(xp).extract()).strip()

    title = ''.join(res.xpath('//h1[@class="title document-number"]//text()').extract()).strip()
    if not title:
        title = _meta("标", "题")
    if not title:
        title = ''.join(res.xpath('//h1[@class="newstitle"]/text()').extract()).strip()
    if not title:
        # Fall back to the title captured at list-crawl time.
        title = article_json['title'].strip()

    pub_no = _meta("文", "号")
    index_no = _meta("索", "引")
    keyword = _meta("主", "词")
    subject = _meta("分", "类")
    written_date = _meta("成", "文", "日")
    organ = _meta("发", "机", "关")
    # NOTE: the page also exposes a validity ("时效") cell, but it is not
    # persisted anywhere downstream.

    if organ.startswith('市'):
        # Qualify bare "市..." organ names with the city name.
        organ = '四平' + organ

    # Try the known fulltext containers in order; keep the matching
    # xpath for the attachment scan below.
    fulltext = None
    fulltext_xpath = ''
    for fulltext_xpath in ('//div[@id="xx_conter1023"]',
                           '//div[@class="contents_div"]',
                           '//div[@class="newscontnet j-fontContent"]'):
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    if not fulltext:
        raise Exception(f"no fulltext container found for {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99781'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "SIPING"
    zt_provider = "sipinggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Collect attachment links from the matched fulltext area plus the
    # legacy left-column container, and store them on the task row.
    fulltext_xpath1 = '//div[contains(@class, "left_conter_20190901")]'
    file_info = (get_file_info(data, res, f'({fulltext_xpath})')
                 + get_file_info(data, res, f'({fulltext_xpath1})'))
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 吉林省通化市
def policy_tonghualist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Tonghua (通化) policy list page served as a JSONP payload.

    On the first page (page_index == 1), fans out crawl tasks for every
    remaining page (16 items per page); on every page, extracts each
    article's url, title and publish date and queues it for the article
    stage.

    :param callmodel: carries the fetched response (``para_dicts``), the
        current task row (``sql_model``) and redis task metadata.
    :return: DealModel with pagination inserts in ``befor_dicts`` and
        article inserts in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Strip the JSONP wrapper, e.g. "result({...});" -> "{...}".
        data = re.sub(r".*result\((.*)\);", r"\1", para_dicts["data"]["1_1"]['html'])
        all_data = json.loads(data)
        total = all_data["num"]
        total_page = math.ceil(int(total) / 16)  # 16 items per page
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for li in all_data["data"]:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li["puburl"]
            title = li["title"]
            # Skip entries without a usable html detail url.
            if url is None or 'htm' not in url:
                continue

            # rawid = "<parent-dir>_<filename-without-extension>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99783'

            article_json["url"] = url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            article_json["pub_date"] = clean_pubdate(
                li["tip"]["dates"].replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_tonghualist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Tonghua (通化) policy list page rendered as plain html.

    On the first page (page_index == 0), fans out crawl tasks for the
    remaining pages using the "countPage" value found in the page's
    inline javascript; on every page, extracts each article's absolute
    url, title and publish date and queues it for the article stage.

    Fixes a long-standing typo: the "../" branch previously used the
    malformed scheme "hhttp://", producing unreachable article urls.

    :param callmodel: carries the fetched response (``para_dicts``), the
        current task row (``sql_model``) and redis task metadata.
    :return: DealModel with pagination inserts in ``befor_dicts`` and
        article inserts in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in inline js: "countPage = N //共多少页".
        page_info = re.findall(r'countPage = (\d+) *?//共多少页', para_dicts["data"]["1_1"]['html'])
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only the first page schedules the remaining pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="cont_right_2"]/ul/div/li')
        if not li_list:
            # Alternate table-based list markup.
            li_list = res.xpath('//div[@class="NewsList"]//tr//table//tr')
        url_path = './a/@href'
        title_path = './a/@title'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath('./a/text()').get()
            # Skip entries without a usable html detail url.
            if url is None or 'htm' not in url:
                continue

            # rawid = "<parent-dir>_<filename-without-extension>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99783'
            # Resolve relative urls; branch order matters ("../" also
            # contains "./").
            if url.startswith("/"):
                url_before = "http://www.tonghua.gov.cn"
            elif "../" in url:
                # BUG FIX: scheme was previously misspelled "hhttp://".
                url_before = "http://www.tonghua.gov.cn"
            elif "./" in url:
                url_before = "http://www.tonghua.gov.cn/" + callmodel.sql_model.list_rawid
            else:
                url_before = ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./span/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./a/span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_tonghuaarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article stage for Tonghua: parsing happens in the ETL callback, so
    there is nothing to do here beyond returning an empty DealModel."""
    return DealModel()


def policy_tonghuaarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL stage for a Tonghua (通化) policy article page.

    Extracts metadata (title, document number, index number, keywords,
    subject classification, written date, issuing organ) from the detail
    html, builds the ``policy_latest`` and ``policy_fulltext_latest``
    rows, and records attachment info back onto the task row through
    ``other_dicts``.

    :raises Exception: when none of the known fulltext containers is
        present in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)

    def _meta(*chars):
        # Read a metadata cell from the #effect2 table whose label cell
        # contains every character in *chars* (labels are often spaced
        # out, e.g. "文  号", so match character by character).
        cond = " and ".join(f'contains(string(),"{c}")' for c in chars)
        xp = f'//div[@id="effect2"]//td[{cond}]/following-sibling::td[1]//text()'
        return ''.join(res.xpath(xp).extract()).strip()

    title = ''.join(res.xpath('//td[@class="wzbt text-tag"]//text()').extract()).strip()
    if not title:
        title = _meta("标", "题")
    if not title:
        title = ''.join(res.xpath('//div[@class="s_bt"]/text()').extract()).strip()
    if not title:
        # Fall back to the title captured at list-crawl time.
        title = article_json['title'].strip()

    pub_no = _meta("文", "号")
    index_no = _meta("索", "引")
    keyword = _meta("主", "词")
    subject = _meta("分", "类")
    written_date = _meta("成", "文", "日")
    organ = _meta("发", "机", "关")
    # NOTE: the page also exposes a validity ("时效") cell, but it is not
    # persisted anywhere downstream.

    if organ.startswith('市'):
        # Qualify bare "市..." organ names with the city name.
        organ = '通化' + organ

    # Try the known fulltext containers in order; keep the matching
    # xpath for the attachment scan below.
    fulltext = None
    fulltext_xpath = ''
    for fulltext_xpath in ('//td[@id="zoom"]',
                           '//div[@class="contents_div"]',
                           '//div[@class="TRS_Editor"]'):
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    if not fulltext:
        raise Exception(f"no fulltext container found for {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99783'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "TONGHUA"
    zt_provider = "tonghuagovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Collect attachment links from the matched fulltext area plus the
    # legacy left-column container, and store them on the task row.
    fulltext_xpath1 = '//div[contains(@class, "left_conter_20190901")]'
    file_info = (get_file_info(data, res, f'({fulltext_xpath})')
                 + get_file_info(data, res, f'({fulltext_xpath1})'))
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 吉林省白山市
def policy_cbslist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Baishan (白山/cbs) policy list page served as a JSONP payload.

    On the first page (page_index == 1), fans out crawl tasks for every
    remaining page (16 items per page); on every page, extracts each
    article's url, title and publish date and queues it for the article
    stage.

    :param callmodel: carries the fetched response (``para_dicts``), the
        current task row (``sql_model``) and redis task metadata.
    :return: DealModel with pagination inserts in ``befor_dicts`` and
        article inserts in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Strip the JSONP wrapper, e.g. "result({...});" -> "{...}".
        data = re.sub(r".*result\((.*)\);", r"\1", para_dicts["data"]["1_1"]['html'])
        all_data = json.loads(data)
        total = all_data["num"]
        total_page = math.ceil(int(total) / 16)  # 16 items per page
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for li in all_data["data"]:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li["puburl"]
            title = li["title"]
            # Skip entries without a usable html detail url.
            if url is None or 'htm' not in url:
                continue

            # rawid = "<parent-dir>_<filename-without-extension>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99784'

            article_json["url"] = url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            article_json["pub_date"] = clean_pubdate(
                li["tip"]["dates"].replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cbslist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Baishan (白山/cbs) policy list page rendered as plain html.

    On the first page (page_index == 0), fans out crawl tasks for the
    remaining pages using the "countPage" value found in the page's
    inline javascript; on every page, extracts each article's absolute
    url, title and publish date and queues it for the article stage.

    :param callmodel: carries the fetched response (``para_dicts``), the
        current task row (``sql_model``) and redis task metadata.
    :return: DealModel with pagination inserts in ``befor_dicts`` and
        article inserts in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in inline js: "countPage = N //共多少页".
        page_info = re.findall(r'countPage = (\d+) *?//共多少页', para_dicts["data"]["1_1"]['html'])
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only the first page schedules the remaining pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class=" ul_list"]/li')
        if not li_list:
            # Alternate list markup used on some sections.
            li_list = res.xpath('//div[@class="zlyjq"]/ul/li')
        url_path = './a[2]/@href'
        title_path = './a[2]/text()'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath('./a/text()').get()
            if url is None:
                # Some rows carry the link on the first anchor; the 'htm'
                # filter deliberately applies only to the primary link.
                url = li.xpath('./a/@href').get()
                if url is None:
                    continue
            elif 'htm' not in url:
                continue

            # rawid = "<parent-dir>_<filename-without-extension>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99784'
            # Resolve relative urls; branch order matters ("../" also
            # contains "./").
            if url.startswith("/"):
                url_before = "http://www.cbs.gov.cn"
            elif "../" in url:
                url_before = "http://www.cbs.gov.cn"
            elif "./" in url:
                url_before = "http://" + callmodel.sql_model.list_rawid
            else:
                url_before = ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/").strip()
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./span/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./a/span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cbsarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the CBS site: nothing to schedule here,
    all parsing happens in the ETL stage."""
    return DealModel()


def policy_cbsarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for CBS policy article pages.

    Parses title, document metadata and the full text out of the crawled
    detail page, builds rows for ``policy_latest`` and
    ``policy_fulltext_latest``, and writes attachment info back onto the
    crawl row through ``other_dicts``.

    :param callmodel: platform callback model; ``para_dicts`` carries the
        fetched html, ``sql_model`` carries rawid / task identifiers.
    :raises Exception: when no known full-text container is present.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)
    # Title: page headline cell first, then the metadata table, then <h1>,
    # finally the title captured on the list page.
    title = ''.join(res.xpath('//td[@class="wzbt text-tag"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath(
            '//div[@id="effect2"]//td[contains(string(),"标") and contains(string(),"题")]/following-sibling::td[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata cells in the "effect2" info table: the label <td> is matched
    # by its characters and the value sits in the following <td>.
    pub_no = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"文") and contains(string(),"号")]/following-sibling::td[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"索") and contains(string(),"引")]/following-sibling::td[1]//text()').extract()).strip()
    keyword = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"主") and contains(string(),"词")]/following-sibling::td[1]//text()').extract()).strip()
    subject = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"分") and contains(string(),"类")]/following-sibling::td[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/following-sibling::td[1]//text()').extract()).strip()
    organ = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"发") and contains(string(),"机") and contains(string(),"关")]/following-sibling::td[1]//text()').extract()).strip()

    # Bare "市..." organ names get the city prefix.
    if organ.startswith('市'):
        organ = '白山' + organ

    # Full-text container: try the known layouts in priority order; each
    # branch extracts once (the original re-queried even after a hit).
    fulltext_xpath = '//td[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@class="contents_div"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@class="TRS_Editor"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("no fulltext container found: {}".format(provider_url))

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99784'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CBS"
    zt_provider = "cbsgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment links found inside the full-text container.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 吉林省松原市
def policy_jlsylist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """JSONP list-page callback for Songyuan (Jilin): on page 1, fan out
    tasks for every remaining page, then queue one article task per list
    entry of the current page."""
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_info = {"task_name": callmodel.sql_model.task_name,
                 "task_tag": callmodel.sql_model.task_tag,
                 "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result

    # Strip the JSONP wrapper result(...); and decode the payload.
    payload = json.loads(re.sub(".*result\((.*)\);", "\\1", para_dicts["data"]["1_1"]['html']))
    total_page = math.ceil(int(payload["num"]) / 16)  # 16 entries per page
    page_index = int(callmodel.sql_model.page_index)

    if page_index == 1:
        # Only the first page schedules the remaining pages.
        page_model = DealInsertModel()
        page_model.insert_pre = CoreSqlValue.insert_ig_it
        sql_dict = deal_sql_dict(callmodel.sql_model.dict())
        page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
        for page in range(page_index + 1, total_page + 1):
            sql_dict["page"] = total_page
            sql_dict["page_index"] = page
            sql_dict["list_json"] = json.dumps({"page_info": f"{page_info}"}, ensure_ascii=False)
            page_model.lists.append(sql_dict.copy())
        result.befor_dicts.insert.append(page_model)

    article_model = DealInsertModel()
    article_model.insert_pre = CoreSqlValue.insert_ig_it
    for item in payload["data"]:
        url = item["puburl"]
        if url is None or 'htm' not in url:
            continue
        parts = url.split('/')
        rawid = "{}_{}".format(parts[-2],
                               parts[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
        task = base_info.copy()
        task["task_tag"] = task.pop("task_tag_next")
        task["rawid"] = rawid
        task["sub_db_id"] = '99785'
        article_json = {
            "url": url.replace("../", "/").replace("./", "/"),
            "title": item["title"].strip(),
            "pub_date": clean_pubdate(
                item["tip"]["dates"].replace('发布时间：', '').replace('[', '').replace(']', '').strip()),
        }
        task["article_json"] = json.dumps(article_json, ensure_ascii=False)
        article_model.lists.append(task)
    result.next_dicts.insert.append(article_model)

    return result


def policy_jlsylist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Html list-page callback for Songyuan (Jilin): the page count comes
    from the inline pager script; page 0 fans out the remaining pages and
    every <li> entry becomes one article task."""
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_info = {"task_name": callmodel.sql_model.task_name,
                 "task_tag": callmodel.sql_model.task_tag,
                 "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result

    html = para_dicts["data"]["1_1"]['html']
    # Total page count embedded in the pager javascript.
    counts = re.findall('countPage = (\d+) *?//共多少页', html)
    total_page = int(counts[0]) if counts else 1

    page_index = int(callmodel.sql_model.page_index)
    if page_index == 0:
        # Page 0 schedules pages 1 .. total_page - 1.
        page_model = DealInsertModel()
        page_model.insert_pre = CoreSqlValue.insert_ig_it
        sql_dict = deal_sql_dict(callmodel.sql_model.dict())
        page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
        for page in range(page_index + 1, total_page):
            sql_dict["page"] = total_page
            sql_dict["page_index"] = page
            sql_dict["list_json"] = json.dumps({"page_info": f"{page_info}_{page}"}, ensure_ascii=False)
            page_model.lists.append(sql_dict.copy())
        result.befor_dicts.insert.append(page_model)

    article_model = DealInsertModel()
    article_model.insert_pre = CoreSqlValue.insert_ig_it
    res = Selector(text=html)
    rows = res.xpath('//div[@class="submain_lj"]/ul/li')
    if not rows:
        rows = res.xpath('//div[@class="zlyjq"]/ul/li')
    json.loads(callmodel.sql_model.list_json)  # parity: fail fast on malformed list_json
    for row in rows:
        link = row.xpath('./a/@href').get()
        if link is None or 'htm' not in link:
            continue
        caption = row.xpath('./a/@title').get()
        if caption is None:
            caption = row.xpath('./a/text()').get()
        segs = link.split('/')
        rawid = "{}_{}".format(segs[-2],
                               segs[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
        # Resolve relative links against the site root / the current list page.
        if link.startswith("/"):
            prefix = "http://www.jlsy.gov.cn"
        elif "../" in link:
            prefix = "http://www.jlsy.gov.cn"
        elif "./" in link:
            prefix = "http://www.jlsy.gov.cn/" + callmodel.sql_model.list_rawid
        else:
            prefix = ""
        date_text = row.xpath('./span/text()').get() or row.xpath("./a/span/text()").get() or ""
        task = base_info.copy()
        task["task_tag"] = task.pop("task_tag_next")
        task["rawid"] = rawid
        task["sub_db_id"] = '99785'
        article_json = {
            "url": prefix + link.replace("../", "/").replace("./", "/"),
            "title": caption.strip(),
            "pub_date": clean_pubdate(
                date_text.replace('发布时间：', '').replace('[', '').replace(']', '').strip()),
        }
        task["article_json"] = json.dumps(article_json, ensure_ascii=False)
        article_model.lists.append(task)
    result.next_dicts.insert.append(article_model)

    return result


def policy_jlsyarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Songyuan: parsing is deferred to ETL."""
    return DealModel()


def policy_jlsyarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Songyuan (Jilin) policy article pages.

    Extracts title, document metadata and the full text from the crawled
    detail page, builds rows for ``policy_latest`` and
    ``policy_fulltext_latest``, and records attachment info back on the
    crawl row via ``other_dicts``.

    :param callmodel: platform callback model; ``para_dicts`` carries the
        fetched html, ``sql_model`` carries rawid / task identifiers.
    :raises Exception: when no known full-text container is present.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    def clear_text(text):
        # extract_first() may return None; normalise to "" for downstream use.
        return "" if text is None else text

    res = Selector(text=html)
    # Title: headline cell, metadata table (div or pcc-table layout),
    # <h3>, then the title captured on the list page.
    title = ''.join(res.xpath('//td[@class="wzbt text-tag"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@id="effect2"]//td[contains(string(),"标") and contains(string(),"题")]/following-sibling::td[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//table[contains(@class,"pcc")]//td[contains(string(),"标") and contains(string(),"题")]/following-sibling::td[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h3/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata cells: the label <td> is matched by its characters and the
    # value sits in the following <td>. All may be missing (None).
    pub_no = res.xpath(
        '//td[contains(string(),"文") and contains(string(),"号")]/following-sibling::td[1]//text()').extract_first()
    index_no = res.xpath(
        '//td[contains(string(),"索") and contains(string(),"引")]/following-sibling::td[1]//text()').extract_first()
    keyword = res.xpath(
        '//td[contains(string(),"主") and contains(string(),"词")]/following-sibling::td[1]//text()').extract_first()
    subject = res.xpath(
        '//td[contains(string(),"分") and contains(string(),"类")]/following-sibling::td[1]//text()').extract_first()
    written_date = res.xpath(
        '//td[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/following-sibling::td[1]//text()').extract_first()
    organ = res.xpath(
        '//td[contains(string(),"发") and contains(string(),"机") and contains(string(),"关")]/following-sibling::td[1]//text()').extract_first()
    organ = clear_text(organ)
    # Bare "市..." organ names get the city prefix.
    if organ.startswith('市'):
        organ = '松原' + organ

    # Full-text container: known layouts in priority order; each branch
    # extracts once (the original re-queried even after a hit).
    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@class="contents_div"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@class="TRS_Editor"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("no fulltext container found: {}".format(provider_url))

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99785'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JLSY"
    zt_provider = "jlsygovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = clear_text(pub_no)
    data['index_no'] = clear_text(index_no)
    data['subject'] = clear_text(subject)
    data['keyword'] = clear_text(keyword)
    data['organ'] = organ
    # Fix: written_date may be None (extract_first); normalise before cleaning,
    # consistent with every other extracted field.
    data['written_date'] = clean_pubdate(clear_text(written_date))

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment links found inside the full-text container.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result

# 吉林省白城市
def policy_jlbclist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """JSONP list-page callback for Baicheng (Jilin): page 1 fans out the
    remaining page tasks, then every JSON entry of the current page is
    queued as one article task."""
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_info = {"task_name": callmodel.sql_model.task_name,
                 "task_tag": callmodel.sql_model.task_tag,
                 "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result

    # Unwrap the JSONP callback result(...); into plain JSON.
    payload = json.loads(re.sub(".*result\((.*)\);", "\\1", para_dicts["data"]["1_1"]['html']))
    total_page = math.ceil(int(payload["num"]) / 16)  # 16 entries per page
    page_index = int(callmodel.sql_model.page_index)

    if page_index == 1:
        # Only page 1 schedules the remaining pages.
        pages_model = DealInsertModel()
        pages_model.insert_pre = CoreSqlValue.insert_ig_it
        row = deal_sql_dict(callmodel.sql_model.dict())
        page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
        for page_no in range(page_index + 1, total_page + 1):
            row["page"] = total_page
            row["page_index"] = page_no
            row["list_json"] = json.dumps({"page_info": f"{page_info}"}, ensure_ascii=False)
            pages_model.lists.append(row.copy())
        result.befor_dicts.insert.append(pages_model)

    detail_model = DealInsertModel()
    detail_model.insert_pre = CoreSqlValue.insert_ig_it
    for entry in payload["data"]:
        link = entry["puburl"]
        if link is None or 'htm' not in link:
            continue
        segs = link.split('/')
        rawid = "{}_{}".format(segs[-2],
                               segs[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
        task = base_info.copy()
        task["task_tag"] = task.pop("task_tag_next")
        task["rawid"] = rawid
        task["sub_db_id"] = '99786'
        article_json = {
            "url": link.replace("../", "/").replace("./", "/"),
            "title": entry["title"].strip(),
            "pub_date": clean_pubdate(
                entry["tip"]["dates"].replace('发布时间：', '').replace('[', '').replace(']', '').strip()),
        }
        task["article_json"] = json.dumps(article_json, ensure_ascii=False)
        detail_model.lists.append(task)
    result.next_dicts.insert.append(detail_model)

    return result


def policy_jlbclist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Html list-page callback for Baicheng (Jilin): the page count comes
    from the inline pager script; page 0 fans out the remaining pages and
    every <li> entry becomes one article task."""
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_info = {"task_name": callmodel.sql_model.task_name,
                 "task_tag": callmodel.sql_model.task_tag,
                 "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result

    html = para_dicts["data"]["1_1"]['html']
    # Total page count embedded in the pager javascript.
    counts = re.findall('countPage = (\d+) *?//共多少页', html)
    total_page = int(counts[0]) if counts else 1

    page_index = int(callmodel.sql_model.page_index)
    if page_index == 0:
        # Page 0 schedules pages 1 .. total_page - 1.
        page_model = DealInsertModel()
        page_model.insert_pre = CoreSqlValue.insert_ig_it
        sql_dict = deal_sql_dict(callmodel.sql_model.dict())
        page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
        for page in range(page_index + 1, total_page):
            sql_dict["page"] = total_page
            sql_dict["page_index"] = page
            sql_dict["list_json"] = json.dumps({"page_info": f"{page_info}_{page}"}, ensure_ascii=False)
            page_model.lists.append(sql_dict.copy())
        result.befor_dicts.insert.append(page_model)

    article_model = DealInsertModel()
    article_model.insert_pre = CoreSqlValue.insert_ig_it
    res = Selector(text=html)
    rows = res.xpath('//ul[@class="Generic_list"]/li')
    if not rows:
        rows = res.xpath('//div[@class="zlyjq"]/ul/li')
    if not rows:
        rows = res.xpath('//ul[@class="table"]/li')
    json.loads(callmodel.sql_model.list_json)  # parity: fail fast on malformed list_json
    for row in rows:
        link = row.xpath('./a/@href').get()
        if link is None or 'htm' not in link:
            continue
        caption = row.xpath('./a/@title').get()
        if caption is None:
            caption = row.xpath('./a/text()').get()
        segs = link.split('/')
        rawid = "{}_{}".format(segs[-2],
                               segs[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
        # Resolve relative links against the site root / the current list page.
        if link.startswith("/"):
            prefix = "http://www.jlbc.gov.cn"
        elif "../" in link:
            prefix = "http://www.jlbc.gov.cn"
        elif "./" in link:
            prefix = "http://" + callmodel.sql_model.list_rawid
        else:
            prefix = ""
        date_text = row.xpath('./span/text()').get() or row.xpath("./a/span/text()").get() or ""
        task = base_info.copy()
        task["task_tag"] = task.pop("task_tag_next")
        task["rawid"] = rawid
        task["sub_db_id"] = '99786'
        article_json = {
            "url": prefix + link.replace("../", "/").replace("./", "/"),
            "title": caption.strip(),
            "pub_date": clean_pubdate(
                date_text.replace('发布时间：', '').replace('[', '').replace(']', '').strip()),
        }
        task["article_json"] = json.dumps(article_json, ensure_ascii=False)
        article_model.lists.append(task)
    result.next_dicts.insert.append(article_model)

    return result


def policy_jlbcarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Baicheng: parsing is deferred to ETL."""
    return DealModel()


def policy_jlbcarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Baicheng (Jilin) policy article pages.

    Pulls title, document metadata and the full text out of the crawled
    detail page — the metadata table may be rendered as a <div> or a
    <table> with id "effect2", so each xpath covers both — builds rows for
    ``policy_latest`` and ``policy_fulltext_latest``, and stores attachment
    info back on the crawl row via ``other_dicts``.

    :param callmodel: platform callback model; ``para_dicts`` carries the
        fetched html, ``sql_model`` carries rawid / task identifiers.
    :raises Exception: when no known full-text container is present.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    res = Selector(text=html)
    if not pub_date:
        # Fall back to the publish-date cell of the metadata table.
        pub_date = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"发") and contains(string(),"布") and contains(string(),"日")]/following-sibling::td[1]//text()|//table[@id="effect2"]//td[contains(string(),"发") and contains(string(),"布") and contains(string(),"日")]/following-sibling::td[1]//text()').extract()).strip()
        pub_date = clean_pubdate(pub_date)
    pub_year = pub_date[:4]

    # Title: headline cell, metadata table, <h1>, then the list-page title.
    title = ''.join(res.xpath('//td[@class="wzbt text-tag"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath(
            '//div[@id="effect2"]//td[contains(string(),"标") and contains(string(),"题")]/following-sibling::td[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata cells: label <td> matched by its characters, value in the
    # following <td>.
    pub_no = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"文") and contains(string(),"号")]/following-sibling::td[1]//text()|//table[@id="effect2"]//td[contains(string(),"文") and contains(string(),"号")]/following-sibling::td[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"索") and contains(string(),"引")]/following-sibling::td[1]//text()|//table[@id="effect2"]//td[contains(string(),"索") and contains(string(),"引")]/following-sibling::td[1]//text()').extract()).strip()
    keyword = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"主") and contains(string(),"词")]/following-sibling::td[1]//text()|//table[@id="effect2"]//td[contains(string(),"主") and contains(string(),"词")]/following-sibling::td[1]//text()').extract()).strip()
    subject = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"分") and contains(string(),"类")]/following-sibling::td[1]//text()|//table[@id="effect2"]//td[contains(string(),"分") and contains(string(),"类")]/following-sibling::td[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/following-sibling::td[1]//text()|//table[@id="effect2"]//td[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/following-sibling::td[1]//text()').extract()).strip()
    organ = ''.join(res.xpath(
        '//div[@id="effect2"]//td[contains(string(),"发") and contains(string(),"机") and contains(string(),"关")]/following-sibling::td[1]//text()|//table[@id="effect2"]//td[contains(string(),"发") and contains(string(),"机") and contains(string(),"关")]/following-sibling::td[1]//text()').extract()).strip()

    # Bare "市..." organ names get the city prefix.
    if organ.startswith('市'):
        organ = '白城' + organ

    # Full-text container: known layouts in priority order; each branch
    # extracts once (the original re-queried even after a hit).
    fulltext_xpath = '//div[@id="zoom1"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@class="contents_div"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@class="TRS_Editor"]|//div[@id="xx_conter1023"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("no fulltext container found: {}".format(provider_url))

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99786'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JLBC"
    zt_provider = "jlbcgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment links found inside the full-text container.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result



# 吉林省延边朝鲜族自治州
def policy_yanbianlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """JSONP list-page callback for Yanbian (Jilin): page 1 fans out the
    remaining page tasks; every JSON entry becomes one article task."""
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_info = {"task_name": callmodel.sql_model.task_name,
                 "task_tag": callmodel.sql_model.task_tag,
                 "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result

    # Unwrap the JSONP callback result(...); into plain JSON.
    body = json.loads(re.sub(".*result\((.*)\);", "\\1", para_dicts["data"]["1_1"]['html']))
    total_page = math.ceil(int(body["num"]) / 16)  # 16 entries per page
    page_index = int(callmodel.sql_model.page_index)

    if page_index == 1:
        # Only page 1 schedules the remaining pages.
        pages_model = DealInsertModel()
        pages_model.insert_pre = CoreSqlValue.insert_ig_it
        row = deal_sql_dict(callmodel.sql_model.dict())
        page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
        for page_no in range(page_index + 1, total_page + 1):
            row["page"] = total_page
            row["page_index"] = page_no
            row["list_json"] = json.dumps({"page_info": f"{page_info}"}, ensure_ascii=False)
            pages_model.lists.append(row.copy())
        result.befor_dicts.insert.append(pages_model)

    detail_model = DealInsertModel()
    detail_model.insert_pre = CoreSqlValue.insert_ig_it
    for entry in body["data"]:
        link = entry["puburl"]
        if link is None or 'htm' not in link:
            continue
        segs = link.split('/')
        rawid = "{}_{}".format(segs[-2],
                               segs[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
        task = base_info.copy()
        task["task_tag"] = task.pop("task_tag_next")
        task["rawid"] = rawid
        task["sub_db_id"] = '99787'
        article_json = {
            "url": link.replace("../", "/").replace("./", "/"),
            "title": entry["title"].strip(),
            "pub_date": clean_pubdate(
                entry["tip"]["dates"].replace('发布时间：', '').replace('[', '').replace(']', '').strip()),
        }
        task["article_json"] = json.dumps(article_json, ensure_ascii=False)
        detail_model.lists.append(task)
    result.next_dicts.insert.append(detail_model)

    return result

def policy_yanbianlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Yanbian JSONP list response (paged API, 16 items per page).

    On the first page (``page_index == 1``) fans out insert tasks for the
    remaining pages; every item with a usable ``htm(l)`` ``puburl`` becomes
    a "next" article task whose rawid is derived from the URL path.

    :param callmodel: task context carrying the fetched page and the SQL row.
    :returns: a ``DealModel`` with before-page and next-article inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Strip the JSONP wrapper `result(...);` and parse the payload.
        data = re.sub(r".*result\((.*)\);", r"\1", para_dicts["data"]["1_1"]['html'])
        all_data = json.loads(data)
        total = all_data["num"]
        total_page = math.ceil(int(total) / 16)  # the API serves 16 items per page
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: schedule pages 2..total_page (inclusive).
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page_info}"},
                                                   ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for li in all_data["data"]:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            url = li["puburl"]
            title = li["title"]
            # Skip items without a usable htm/html detail URL.
            if url is None or 'htm' not in url:
                continue
            # rawid = "<parent dir>_<filename without .shtml/.html/.htm>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(
                rawid_list[-2],
                rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99787'
            article_json = {
                "url": url.replace("../", "/").replace("./", "/"),
                "title": title.strip(),
                "pub_date": clean_pubdate(
                    li["tip"]["dates"].replace('发布时间：', '').replace('[', '').replace(']', '').strip()),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result

def policy_yanbianlist3_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Yanbian HTML list page.

    Reads the page count out of inline JavaScript (``countPage = N //共多少页``
    or ``Pager({size:N,``), fans out the remaining pages on the first request
    (``page_index == 0``) and emits one "next" article task per list item.

    :param callmodel: task context carrying the fetched page and the SQL row.
    :returns: a ``DealModel`` with before-page and next-article inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # The page count lives in inline JS; try both known variants before
        # falling back to a single page.
        page_info = re.findall(r'countPage = (\d+) *?//共多少页', html)
        if not page_info:
            page_info = re.findall(r'Pager\(\{size:(\d+),', html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            page_info_base = json.loads(callmodel.sql_model.list_json)["page_info"]
            # NOTE: range end is exclusive — pages here are 0-based, so the
            # last generated index is total_page - 1.
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps(
                    {"page_info": f"{page_info_base}_{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        # The list markup varies between channels; probe known layouts.
        li_list = res.xpath('//ul[@class="write_list"]/li')
        if not li_list:
            li_list = res.xpath('//div[@class="zlyjq"]/ul/li')
        if not li_list:
            li_list = res.xpath('//ul[@class="table"]/li')
        url_path = './a/@href'
        title_path = './a//div[@class="write_block_name"]/text()'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath('./a/text()').get()
            # Skip items without a usable htm/html detail URL.
            if url is None or 'htm' not in url:
                continue
            # rawid = "<parent dir>_<filename without .shtml/.html/.htm>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(
                rawid_list[-2],
                rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99787'
            # Resolve relative links: root-relative and "../" links resolve
            # against the site root, "./" links against the list channel.
            if url.startswith("/") or "../" in url:
                url_before = "http://www.yanbian.gov.cn"
            elif "./" in url:
                url_before = "http://" + callmodel.sql_model.list_rawid
            else:
                url_before = ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./a/span/text()').get()
            if not pub_date_before:
                # Alternate layout splits the date across two spans; guard
                # each piece (None + None raised TypeError in the old code).
                part1 = li.xpath("./a/div[@class='write_block_left']/span[2]/text()").get()
                part2 = li.xpath("./a/div[@class='write_block_left']/span[1]/text()").get()
                pub_date_before = (part1 or "") + (part2 or "")
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result

def policy_yanbianarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article stage for Yanbian: all parsing happens in the ETL callback."""
    return DealModel()

def policy_yanbianarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL a Yanbian policy article page.

    Extracts title, dates, document numbers and issuing organ from the
    ``#effect2`` metadata table, locates the fulltext container, and emits
    rows for ``policy_latest`` / ``policy_fulltext_latest`` plus an
    attachment-info update on the task row.

    :param callmodel: task context carrying the fetched article HTML.
    :returns: an ``EtlDealModel`` with ``save_data`` and a before-update.
    :raises Exception: when no known fulltext container matches.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    res = Selector(text=html)

    def meta_field(*labels):
        """Text of the td following the #effect2 label td whose string
        contains every character in *labels* (checked char-by-char so
        markup inside the label cell does not break the match)."""
        cond = " and ".join('contains(string(),"%s")' % c for c in labels)
        xp = ('//div[@id="effect2"]//td[%s]/following-sibling::td[1]//text()'
              '|//table[@id="effect2"]//td[%s]/following-sibling::td[1]//text()'
              % (cond, cond))
        return ''.join(res.xpath(xp).extract()).strip()

    if not pub_date:
        pub_date = clean_pubdate(meta_field("发", "布", "日"))
    pub_year = pub_date[:4]

    title = ''.join(res.xpath('//div[@class="detail_tittle"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath(
            '//div[@id="effect2"]//td[contains(string(),"标") and contains(string(),"题")]'
            '/following-sibling::td[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1/text()').extract()).strip()
    if not title:
        # Last resort: reuse the title captured on the list page.
        title = article_json['title'].strip()

    pub_no = meta_field("文", "号")
    index_no = meta_field("索", "引")
    keyword = meta_field("主", "词")
    subject = meta_field("分", "类")
    written_date = meta_field("成", "文", "日")
    organ = meta_field("发", "机", "关")

    # The site abbreviates the prefecture name; restore the full prefix.
    if organ.startswith('州'):
        organ = '延边朝鲜族自治' + organ

    # Probe the known fulltext containers; fulltext_xpath is reused below
    # for attachment extraction.
    fulltext = None
    for fulltext_xpath in ('//div[@id="xx_conter1023"]',
                           '//div[@class="contents_div"]',
                           '//div[@class="TRS_Editor"]'):
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    if not fulltext:
        raise Exception("no fulltext container matched: " + provider_url)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99787'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "YANBIAN"
    zt_provider = "yanbiangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 吉林省辽源市 (Liaoyuan City, Jilin Province)
def policy_liaoyuanlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Liaoyuan JSONP list response (paged API, 16 items per page).

    On the first page (``page_index == 1``) fans out insert tasks for the
    remaining pages; every item with a usable ``htm(l)`` ``puburl`` becomes
    a "next" article task whose rawid is derived from the URL path.

    :param callmodel: task context carrying the fetched page and the SQL row.
    :returns: a ``DealModel`` with before-page and next-article inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Strip the JSONP wrapper `result(...);` and parse the payload.
        data = re.sub(r".*result\((.*)\);", r"\1", para_dicts["data"]["1_1"]['html'])
        all_data = json.loads(data)
        total = all_data["num"]
        total_page = math.ceil(int(total) / 16)  # the API serves 16 items per page
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: schedule pages 2..total_page (inclusive).
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page_info}"},
                                                   ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for li in all_data["data"]:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            url = li["puburl"]
            title = li["title"]
            # Skip items without a usable htm/html detail URL.
            if url is None or 'htm' not in url:
                continue
            # rawid = "<parent dir>_<filename without .shtml/.html/.htm>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(
                rawid_list[-2],
                rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99782'
            article_json = {
                "url": url.replace("../", "/").replace("./", "/"),
                "title": title.strip(),
                "pub_date": clean_pubdate(
                    li["tip"]["dates"].replace('发布时间：', '').replace('[', '').replace(']', '').strip()),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result

def policy_liaoyuanlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Liaoyuan HTML list page.

    Reads the page count out of inline JavaScript (``countPage = N //共多少页``
    or ``Pager({size:N,``), fans out the remaining pages on the first request
    (``page_index == 0``) and emits one "next" article task per list item.
    The list page carries no publication date; the ETL stage fills it in.

    :param callmodel: task context carrying the fetched page and the SQL row.
    :returns: a ``DealModel`` with before-page and next-article inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # The page count lives in inline JS; try both known variants before
        # falling back to a single page.
        page_info = re.findall(r'countPage = (\d+) *?//共多少页', html)
        if not page_info:
            page_info = re.findall(r'Pager\(\{size:(\d+),', html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            page_info_base = json.loads(callmodel.sql_model.list_json)["page_info"]
            # NOTE: range end is exclusive — pages here are 0-based, so the
            # last generated index is total_page - 1.
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps(
                    {"page_info": f"{page_info_base}_{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        # The list markup varies between channels; probe known layouts.
        li_list = res.xpath('//div[@class="syxx_list"]/ol/li')
        if not li_list:
            li_list = res.xpath('//div[@class="zlyjq"]/ul/li')
        if not li_list:
            li_list = res.xpath('//ul[@class="table"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            url = li.xpath('./a/@href').get()
            title = li.xpath('./a/text()').get()
            # Skip items without a usable htm/html detail URL.
            if url is None or 'htm' not in url:
                continue
            # rawid = "<parent dir>_<filename without .shtml/.html/.htm>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(
                rawid_list[-2],
                rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99782'
            # Resolve relative links: root-relative links resolve against the
            # xxgk channel, "../" against the site root, "./" against the
            # current list channel.
            if url.startswith("/"):
                url_before = "http://www.liaoyuan.gov.cn/xxgk"
            elif "../" in url:
                url_before = "http://www.liaoyuan.gov.cn"
            elif "./" in url:
                url_before = "http://www.liaoyuan.gov.cn/xxgk/" + callmodel.sql_model.list_rawid
            else:
                url_before = ""
            article_json = {
                "url": url_before + url.replace("../", "/").replace("./", "/"),
                "title": title.strip(),
                # No date on the list page; ETL extracts it from the article.
                "pub_date": clean_pubdate(""),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result

def policy_liaoyuanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article stage for Liaoyuan: all parsing happens in the ETL callback."""
    return DealModel()

def policy_liaoyuanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL a Liaoyuan policy article page.

    Extracts title, dates, document numbers and issuing organ from the
    ``#effect2`` metadata table, locates the fulltext container, and emits
    rows for ``policy_latest`` / ``policy_fulltext_latest`` plus an
    attachment-info update on the task row.

    :param callmodel: task context carrying the fetched article HTML.
    :returns: an ``EtlDealModel`` with ``save_data`` and a before-update.
    :raises Exception: when no known fulltext container matches.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    res = Selector(text=html)

    def meta_field(*labels):
        """Text of the td following the #effect2 label td whose string
        contains every character in *labels* (checked char-by-char so
        markup inside the label cell does not break the match)."""
        cond = " and ".join('contains(string(),"%s")' % c for c in labels)
        xp = ('//div[@id="effect2"]//td[%s]/following-sibling::td[1]//text()'
              '|//table[@id="effect2"]//td[%s]/following-sibling::td[1]//text()'
              % (cond, cond))
        return ''.join(res.xpath(xp).extract()).strip()

    if not pub_date:
        # extract_first may return None when the label span is absent;
        # fall back to '' so pub_date[:4] below cannot TypeError.
        pub_date = clean_pubdate(res.xpath(
            '//span[contains(string(),"发") and contains(string(),"布") and contains(string(),"间")]/text()'
        ).extract_first() or '')
    pub_year = pub_date[:4]

    title = ''.join(res.xpath('//div[@class="detail_tittle"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath(
            '//div[@id="effect2"]//td[contains(string(),"标") and contains(string(),"题")]'
            '/following-sibling::td[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h2[@class="sywzy_h2"]/text()').extract()).strip()
    if not title:
        # Last resort: reuse the title captured on the list page.
        title = article_json['title'].strip()

    pub_no = meta_field("文", "号")
    index_no = meta_field("索", "引")
    keyword = meta_field("主", "词")
    subject = meta_field("分", "类")
    written_date = meta_field("成", "文", "日")
    organ = meta_field("发", "机", "关")

    # The site abbreviates the city name; restore the full prefix.
    if organ.startswith('市'):
        organ = '辽源' + organ

    # Probe the known fulltext containers; fulltext_xpath is reused below
    # for attachment extraction.
    fulltext = None
    for fulltext_xpath in ('//div[@id="xx_conter1023"]',
                           '//div[@class="contents_div"]',
                           '//div[@class="TRS_Editor"]'):
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    if not fulltext:
        raise Exception("no fulltext container matched: " + provider_url)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99782'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "LIAOYUAN"
    zt_provider = "liaoyuangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   国家公务员局 (National Civil Service Administration, scs.gov.cn)
def policy_scsgovlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a scs.gov.cn (National Civil Service Administration) list page.

    Reads the page count from inline JS (``countPage = N``), fans out the
    remaining pages on the first request (``page_index == 0``) and emits one
    "next" article task per list item.

    :param callmodel: task context carrying the fetched page and the SQL row.
    :returns: a ``DealModel`` with before-page and next-article inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        max_count = re.findall(r"countPage = (\d+)", html)
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # NOTE: range end is exclusive — pages here are 0-based, so the
            # last generated index is total_page - 1.
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"index_{page}"},
                                                   ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        for li in res.xpath('//div[@class="gjgwy_main_er_main"]/ul/li'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li.xpath('a/@href').extract_first()
            if not href:
                continue
            # Resolve the (possibly relative) link against the list page URL.
            base_url = 'http://www.scs.gov.cn/zcfg/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid = filename without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99789'
            # extract_first may return None when the date span is missing;
            # the old .strip() on None raised AttributeError.
            pub_date = li.xpath('span/text()').extract_first()
            article_json = {
                "url": url,
                "title": ''.join(li.xpath('a/text()').extract()).strip(),
                "pub_date": (pub_date or '').strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_scsgovarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-download callback for scs.gov.cn; parsing is deferred to the
    ETL step, so this only acknowledges the task."""
    return DealModel()


def policy_scsgovarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for scs.gov.cn article pages.

    Extracts title and fulltext from the downloaded HTML, builds rows for the
    policy_latest / policy_fulltext_latest tables, and writes attachment info
    back onto the originating task row via other_dicts.

    Raises:
        Exception: when the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json.get('pub_date', ''))
    pub_year = pub_date[:4]
    # This site's markup is malformed enough to confuse the xpath extraction
    # below; run it through BeautifulSoup first so Selector sees a repaired tree.
    from bs4 import BeautifulSoup
    soup = BeautifulSoup(html, 'html.parser')
    res = Selector(text=soup.prettify())

    title = ''.join(res.xpath('//div[@class="san_main"]/h1//text()').extract()).strip()
    if not title:
        # fall back to the title captured on the list page
        title = article_json['title'].strip()
    # these fields are not published on this site, keep them empty
    pub_no = ''
    index_no = ''
    subject = ''
    legal_status = ''
    organ = ''

    fulltext_xpath = '//div[@class="san_main"]/h5/following::div[1]|//div[@class="TRS_Editor"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # fail the task explicitly so it can be retried / inspected
        raise Exception(f"scsgov etl: fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99789'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "SCSGOV"
    zt_provider = "scsgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # NOTE(review): debug output, kept to preserve observable behavior

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # store attachment info (if any) back on the task row
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   国家新闻出版总署
def policy_nppagovlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for nppa.gov.cn.

    On the first page (page_index == 0) it fans out tasks for the remaining
    list pages; for every page it emits one next-stage (article) task per
    list item found in the HTML.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # total page count is embedded in the page's javascript; default to 1
        max_count = re.findall(r"_pageCount = (\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # pages are 0-based: page N (1 <= N < total_page) is index_N.html,
            # so fan out 1..total_page-1
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"index_{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="m2nrul"]/li|//ul[@class="mesgopen2"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            # the article task runs under the next-stage tag
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('div/a/@href|a/@href').extract_first()
            if not href:
                continue
            base_url = f'https://www.nppa.gov.cn/{callmodel.sql_model.list_rawid}/index_1.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                # skip links that do not point at an html article page
                continue

            # rawid is the html file name without its extension
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99790'
            article_json["url"] = url
            article_json["title"] = ''.join(li.xpath('div/a/text()|a/text()').extract()).strip()
            # some items carry no date span; record an empty date instead of crashing
            article_json["pub_date"] = (li.xpath('span/text()').extract_first() or '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_nppagovarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-download callback for nppa.gov.cn; parsing is deferred to the
    ETL step, so this only acknowledges the task."""
    return DealModel()


def policy_nppagovarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for nppa.gov.cn article pages.

    Extracts title and fulltext from the downloaded HTML, builds rows for the
    policy_latest / policy_fulltext_latest tables, and writes attachment info
    back onto the originating task row via other_dicts.

    Raises:
        Exception: when the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json.get('pub_date', ''))
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # the page embeds its title in javascript; guard the lookup instead of
    # indexing an empty findall() result
    title_match = re.findall(' _title = "(.*?)";', html)
    title = title_match[0] if title_match else ''
    if not title:
        # fall back to the title captured on the list page
        title = article_json['title'].strip()
    # these fields are not published on this site, keep them empty
    pub_no = ''
    index_no = ''
    subject = ''
    legal_status = ''
    organ = ''

    fulltext_xpath = '//div[@class="m3pageEdit"]|//div[@class="gsj_htmlcon_bot"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # fail the task explicitly so it can be retried / inspected
        raise Exception(f"nppagov etl: fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99790'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "NPPAGOV"
    zt_provider = "nppagovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # NOTE(review): debug output, kept to preserve observable behavior

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # store attachment info (if any) back on the task row
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   国家版权局
def policy_ncacgovlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for ncac.gov.cn.

    On the first page (page_index == 1) it fans out tasks for the remaining
    list pages; for every page it emits one next-stage (article) task per
    list item found in the HTML.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # total page count is printed as "共N页" in the pager; default to 1
        max_count = re.findall(r">共(.*?)页<", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # pages are 1-based here, so fan out 2..total_page inclusive
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"_{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="m2imgList"]/li|//ul[@class="m2newsList"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            # the article task runs under the next-stage tag
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                continue
            base_url = 'https://www.ncac.gov.cn/chinacopyright/channels/520.shtml'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                # skip links that do not point at an html article page
                continue

            # rawid is the html file name without its extension
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99791'
            article_json["url"] = url
            article_json["title"] = ''.join(li.xpath('a/strong/text()|a/text()').extract()).strip()
            # some items carry no date node; record an empty date instead of crashing
            article_json["pub_date"] = (li.xpath('span/text()|a/p/text()').extract_first() or '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_ncacgovarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-download callback for ncac.gov.cn; parsing is deferred to the
    ETL step, so this only acknowledges the task."""
    return DealModel()


def policy_ncacgovarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for ncac.gov.cn article pages.

    Extracts title and fulltext from the downloaded HTML, builds rows for the
    policy_latest / policy_fulltext_latest tables, and writes attachment info
    back onto the originating task row via other_dicts.

    Raises:
        Exception: when the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json.get('pub_date', ''))
    pub_year = pub_date[:4]
    res = Selector(text=html)

    title = ''.join(res.xpath('//h2[@class="m3nt"]//text()').extract()).strip()
    if not title:
        # fall back to the title captured on the list page
        title = article_json['title'].strip()
    # these fields are not published on this site, keep them empty
    pub_no = ''
    index_no = ''
    subject = ''
    legal_status = ''
    organ = ''

    fulltext_xpath = '//div[@class="m3nEditor"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # fail the task explicitly so it can be retried / inspected
        raise Exception(f"ncacgov etl: fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99791'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "NCACGOV"
    zt_provider = "ncacgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # NOTE(review): debug output, kept to preserve observable behavior

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # store attachment info (if any) back on the task row
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   国家电影局
def policy_chinafilmgovlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for chinafilm.gov.cn.

    On the first page (page_index == 0) it fans out tasks for the remaining
    list pages; for every page it emits one next-stage (article) task per
    list item found in the HTML.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # total page count is embedded in the page's javascript; default to 1
        max_count = re.findall(r"countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # pages are 0-based: page N (1 <= N < total_page) is index_N.html
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"index_{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="tt-list"]/li|//ul[@class="m2ru1"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            # the article task runs under the next-stage tag
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('div[@class="Hotnews"]/div/a/@href|a/@href').extract_first()
            if not href:
                continue
            base_url = f'https://www.chinafilm.gov.cn/{callmodel.sql_model.list_rawid}/index_1.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                # skip links that do not point at an html article page
                continue

            # rawid is the html file name without its extension
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99792'
            article_json["url"] = url
            article_json["title"] = ''.join(li.xpath('div[@class="Hotnews"]/div/a/text()|a/text()').extract()).strip()
            article_json["pub_date"] = ''.join(li.xpath('span/text()|div[@class="tt-time"]//text()').extract()).strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_chinafilmgovarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-download callback for chinafilm.gov.cn; parsing is deferred to
    the ETL step, so this only acknowledges the task."""
    return DealModel()


def policy_chinafilmgovarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for chinafilm.gov.cn article pages.

    Extracts title and fulltext from the downloaded HTML, builds rows for the
    policy_latest / policy_fulltext_latest tables, and writes attachment info
    back onto the originating task row via other_dicts.

    Raises:
        Exception: when the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json.get('pub_date', ''))
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # prefer the javascript-embedded title; guard the lookup instead of
    # indexing an empty findall() result, then fall back to the <h2> node
    doc_title = re.findall("_doctitle = '(.*?)'", html)
    if doc_title:
        title = doc_title[0]
    else:
        title = ''.join(res.xpath('//h2[@id="title"]//text()').extract()).strip()
    if not title:
        # fall back to the title captured on the list page
        title = article_json['title'].strip()
    # these fields are not published on this site, keep them empty
    pub_no = ''
    index_no = ''
    subject = ''
    legal_status = ''
    organ = ''

    fulltext_xpath = '//div[@id="body"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # fail the task explicitly so it can be retried / inspected
        raise Exception(f"chinafilmgov etl: fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99792'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CHINAFILMGOV"
    zt_provider = "chinafilmgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # NOTE(review): debug output, kept to preserve observable behavior

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # store attachment info (if any) back on the task row
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   国家宗教事务局
def policy_saragovlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for sara.gov.cn.

    The list endpoint returns JSON rather than HTML. On the first page
    (page_index == 1) it fans out tasks for the remaining pages; for every
    page it emits one article task per entry in info.list.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # the JSON payload carries the total page count in its "pages" field
        max_count = re.findall(r'"pages":(\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # pages are 1-based here, so fan out 2..total_page inclusive
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # the request parameters in list_json are the same for every page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res_json = json.loads(para_dicts["data"]["1_1"]['html'])
        for li in res_json['info']['list']:
            temp = info_dicts.copy()
            # the article task runs under the next-stage tag
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li['contentStaticPage']
            if not href:
                continue
            base_url = 'https://www.sara.gov.cn/web/xzxk/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                # skip links that do not point at an html article page
                continue

            # rawid is the html file name without its extension
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99793'
            article_json["url"] = url
            article_json["title"] = li['contentTitle']
            # contentPublishTime is a millisecond epoch timestamp
            article_json["pub_date"] = datetime.datetime.fromtimestamp(int(li['contentPublishTime']) / 1000).strftime('%Y%m%d')
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_saragovarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-download callback for sara.gov.cn; parsing is deferred to the
    ETL step, so this only acknowledges the task."""
    return DealModel()


def policy_saragovarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for sara.gov.cn article pages.

    Extracts title and fulltext from the downloaded HTML, builds rows for the
    policy_latest / policy_fulltext_latest tables, and writes attachment info
    back onto the originating task row via other_dicts.

    Raises:
        Exception: when the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json.get('pub_date', ''))
    pub_year = pub_date[:4]
    res = Selector(text=html)

    title = ''.join(res.xpath('//div[@class="article"]/h2//text()').extract()).strip()
    if not title:
        # fall back to the title captured on the list page
        title = article_json['title'].strip()
    # these fields are not published on this site, keep them empty
    pub_no = ''
    index_no = ''
    subject = ''
    legal_status = ''
    organ = ''

    fulltext_xpath = '//div[@class="article-con"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # fail the task explicitly so it can be retried / inspected
        raise Exception(f"saragov etl: fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99793'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "SARAGOV"
    zt_provider = "saragovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # NOTE(review): debug output, kept to preserve observable behavior

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # store attachment info (if any) back on the task row
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   国务院侨务办公室 (gqb.gov.cn) — NOTE(review): previous comment duplicated "国家宗教事务局" from the section above; confirm agency name
def policy_gqbgovlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for gqb.gov.cn.

    On the first page (page_index == 0) it fans out tasks for the remaining
    list pages; for every page it emits one next-stage (article) task per
    list item found in the HTML.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # total page count is embedded in the page's javascript; default to 1
        max_count = re.findall(r"countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # pages are 0-based: fan out pages 1..total_page-1
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # the request parameters in list_json are the same for every page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="paging"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            # the article task runs under the next-stage tag
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('div[@class="left_list_hjpd_bt"]/a/@href').extract_first()
            if not href:
                continue
            base_url = 'https://www.gqb.gov.cn/gqb/ggl/index.shtml'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                # skip links that do not point at an html article page
                continue

            # rawid is the html file name without its extension
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99794'
            article_json["url"] = url
            article_json["title"] = ''.join(li.xpath('div[@class="left_list_hjpd_bt"]/a/text()').extract()).strip()
            # some items carry no date node; record an empty date instead of crashing
            article_json["pub_date"] = (li.xpath('div[@class="time_hjpd"]/text()').extract_first() or '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_gqbgovarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-download callback for gqb.gov.cn; parsing is deferred to the
    ETL step, so this only acknowledges the task."""
    return DealModel()


def policy_gqbgovarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for gqb.gov.cn article pages.

    Extracts title and fulltext from the downloaded HTML, builds rows for the
    policy_latest / policy_fulltext_latest tables, and writes attachment info
    back onto the originating task row via other_dicts.

    Raises:
        Exception: when the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json.get('pub_date', ''))
    pub_year = pub_date[:4]
    res = Selector(text=html)

    title = ''.join(res.xpath('//div[@class="caijingt_wztop"]/h1//text()').extract()).strip()
    if not title:
        # fall back to the title captured on the list page
        title = article_json['title'].strip()
    # these fields are not published on this site, keep them empty
    pub_no = ''
    index_no = ''
    subject = ''
    legal_status = ''
    organ = ''

    fulltext_xpath = '//div[@id="fontzoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # fail the task explicitly so it can be retried / inspected
        raise Exception(f"gqbgov etl: fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99794'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "GQBGOV"
    zt_provider = "gqbgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # NOTE(review): debug output, kept to preserve observable behavior

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # store attachment info (if any) back on the task row
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   中共中央对外联络部
def policy_idcpclist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for idcpc.org.cn.

    On the first page (page_index == 0) it fans out tasks for the remaining
    list pages; for every page it emits one next-stage (article) task per
    list item found in the HTML.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # total page count is embedded in the page's javascript; default to 1
        max_count = re.findall(r"countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # pages are 0-based: page N (1 <= N < total_page) is index_N.html
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"index_{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="ksly_list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            # the article task runs under the next-stage tag
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('div[1]/a/@href').extract_first()
            if not href:
                continue
            base_url = f'https://www.idcpc.org.cn/{callmodel.sql_model.list_rawid}/index_1.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                # skip links that do not point at an html article page
                continue

            # rawid is the html file name without its extension
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99795'
            article_json["url"] = url
            article_json["title"] = ''.join(li.xpath('div[1]/a/text()').extract()).strip()
            # some items carry no date node; record an empty date instead of crashing
            article_json["pub_date"] = (li.xpath('div[2]/text()').extract_first() or '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_idcpcarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article callback for IDCPC; all processing happens in the ETL step."""
    return DealModel()


def policy_idcpcarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for IDCPC article pages.

    Extracts the title and full text from the downloaded HTML, builds the
    `policy_latest` and `policy_fulltext_latest` records, and writes the
    attachment/file info back onto the originating task row.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json.get('pub_date', ''))
    pub_year = pub_date[:4]
    res = Selector(text=html)

    title = ''.join(res.xpath('//div[@class="flagsj"]//h3[@class="xl_h3"]//text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()
    pub_no = ''
    index_no = ''
    subject = ''
    legal_status = ''
    organ = ''

    fulltext_xpath = '//div[@class="content_wx_pic"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Abort the task with context so the failure can be traced to a URL.
        raise Exception(f"fulltext not found at {fulltext_xpath}: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99795'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "IDCPC"
    zt_provider = "idcpcpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment/file info (or an empty dict) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   中央网络安全和信息化委员会办公室
def policy_cacgovlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Cyberspace Administration of China (CAC).

    The list endpoint returns JSON; on page 1 the remaining list pages are
    scheduled, then one article task is queued per list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total record count embedded in the JSON body; 20 records per page.
        max_count = re.findall(r'"totalRec":"(\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 20)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out the remaining list pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res_json = json.loads(para_dicts["data"]["1_1"]['html'])
        for li in res_json['list']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li['infourl']
            if not href:
                continue
            base_url = 'https://www.cac.gov.cn/wxzw/wxfb/A093702index_1.htm'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99796'
            article_json["url"] = url
            article_json["title"] = li['topic']
            article_json["pub_date"] = li['pubtime']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cacgovarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article callback for CAC; all processing happens in the ETL step."""
    return DealModel()


def policy_cacgovarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for CAC article pages.

    Extracts the title and full text from the downloaded HTML, builds the
    `policy_latest` and `policy_fulltext_latest` records, and writes the
    attachment/file info back onto the originating task row.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json.get('pub_date', ''))
    pub_year = pub_date[:4]
    res = Selector(text=html)

    title = ''.join(res.xpath('//h1[@class="title"]//text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()
    pub_no = ''
    index_no = ''
    subject = ''
    legal_status = ''
    organ = ''

    fulltext_xpath = '//div[@id="BodyLabel"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Abort the task with context so the failure can be traced to a URL.
        raise Exception(f"fulltext not found at {fulltext_xpath}: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99796'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CACGOV"
    zt_provider = "cacgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment/file info (or an empty dict) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   国家矿山安全监察局
def policy_chinaminesafetylist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the National Mine Safety Administration.

    On page 0 the remaining list pages are scheduled (pages are 0-based, so
    index_1 .. index_{total_page-1} together with the entry page cover all
    total_page pages), then one article task is queued per list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count is embedded in an inline script: "countPage = N".
        max_count = re.findall(r"countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"index_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@id="ogi-list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                continue
            base_url = f'https://www.chinamine-safety.gov.cn/{callmodel.sql_model.list_rawid}/index_1.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99797'
            article_json["url"] = url
            article_json["title"] = ''.join(li.xpath('a/text()').extract()).strip()
            article_json["pub_date"] = li.xpath('a/span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_chinaminesafetyarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article callback for the Mine Safety Administration; work is done in the ETL step."""
    return DealModel()


def policy_chinaminesafetyarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for National Mine Safety Administration article pages.

    Extracts the title and full text from the downloaded HTML, builds the
    `policy_latest` and `policy_fulltext_latest` records, and writes the
    attachment/file info back onto the originating task row.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json.get('pub_date', ''))
    pub_year = pub_date[:4]
    res = Selector(text=html)

    title = ''.join(res.xpath('//div[@class="pages_content"]/h1//text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()
    pub_no = ''
    index_no = ''
    subject = ''
    legal_status = ''
    organ = ''

    # Two known page layouts; either container may hold the body.
    fulltext_xpath = '//div[@id="nrBox"]|//div[@class="zhenwen_neir"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Abort the task with context so the failure can be traced to a URL.
        raise Exception(f"fulltext not found at {fulltext_xpath}: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99797'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CHINAMINESAFETY"
    zt_provider = "chinaminesafetypolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment/file info (or an empty dict) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   国家消防救援局
def policy_119govlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the National Fire and Rescue Administration (119.gov.cn).

    On page 1 the remaining list pages are scheduled, then one article task is
    queued per list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # The highest index_N.shtml link on the page gives the page count.
        max_count = re.findall(r"index_(\d+)\.shtml", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[-1]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"index_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@id="recommend"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                continue
            base_url = f'https://www.119.gov.cn/{callmodel.sql_model.list_rawid}/index_1.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99798'
            article_json["url"] = url
            article_json["title"] = ''.join(li.xpath('a/div/h3/text()').extract()).strip()
            article_json["pub_date"] = li.xpath('a/div/div/span[2]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_119govarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article callback for 119.gov.cn; all processing happens in the ETL step."""
    return DealModel()


def policy_119govarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for 119.gov.cn article pages.

    Extracts the title and full text from the downloaded HTML, builds the
    `policy_latest` and `policy_fulltext_latest` records, and writes the
    attachment/file info back onto the originating task row.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json.get('pub_date', ''))
    pub_year = pub_date[:4]
    res = Selector(text=html)

    title = ''.join(res.xpath('//div[@class="t-container-title"]/h3//text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()
    pub_no = ''
    index_no = ''
    subject = ''
    legal_status = ''
    organ = ''

    fulltext_xpath = '//div[@class="b-container"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Abort the task with context so the failure can be traced to a URL.
        raise Exception(f"fulltext not found at {fulltext_xpath}: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99798'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "119GOV"
    zt_provider = "119govpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment/file info (or an empty dict) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   中国地震局
def policy_ceagovlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the China Earthquake Administration (cea.gov.cn).

    On page 1 the remaining list pages are scheduled, then one article task is
    queued per list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count is exposed via a totalpage="N" attribute in the markup.
        max_count = re.findall(r'totalpage="(\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[-1]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Two known list layouts on the site.
        li_list = res.xpath('//div[@class="listNews pagelib"]/ul/li|//div[@class="zfxxgk_list"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                continue
            base_url = 'https://www.cea.gov.cn/cea/zwgk/zcjd/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # Article URLs end in .../<rawid>/<file>.html, so the directory is the id.
            rawid = url.split('/')[-2]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99799'
            article_json["url"] = url
            article_json["title"] = ''.join(li.xpath('a/@title').extract()).strip()
            article_json["pub_date"] = li.xpath('span/text()|a/span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_ceagovarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article callback for cea.gov.cn; all processing happens in the ETL step."""
    return DealModel()


def policy_ceagovarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for China Earthquake Administration article pages.

    Extracts the title and full text from the downloaded HTML, builds the
    `policy_latest` and `policy_fulltext_latest` records, and writes the
    attachment/file info back onto the originating task row.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json.get('pub_date', ''))
    pub_year = pub_date[:4]
    res = Selector(text=html)

    title = ''.join(res.xpath('//h1[@id="title"]//text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()
    pub_no = ''
    index_no = ''
    subject = ''
    legal_status = ''
    organ = ''

    # Three known page layouts; any of these containers may hold the body.
    fulltext_xpath = '//div[@id="Zoom"]|//div[@id="news_content"]|//div[@class="vF_detail_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Abort the task with context so the failure can be traced to a URL.
        raise Exception(f"fulltext not found at {fulltext_xpath}: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99799'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CEAGOV"
    zt_provider = "ceagovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment/file info (or an empty dict) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   中华人民共和国审计署
def policy_auditgovlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the National Audit Office (audit.gov.cn), HTML lists.

    On page 0 the remaining list pages are scheduled (pages are 0-based), then
    one article task is queued per <dl> list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count is embedded in an inline script: "maxPageNum = N".
        max_count = re.findall(r'maxPageNum = (\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[-1]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # The two audited channels use different paging URL prefixes.
                if 'n4/n19' in callmodel.sql_model.list_rawid:
                    dic = {"page_info": f"index_10044770_{page}"}
                else:
                    dic = {"page_info": f"index_10067955_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//dl')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('dt/a/@href').extract_first()
            if not href:
                continue
            base_url = f'https://www.audit.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # Article URLs end in .../<rawid>/<file>.html, so the directory is the id.
            rawid = url.split('/')[-2]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99801'
            article_json["url"] = url
            article_json["title"] = ''.join(li.xpath('dt/a/@title').extract()).strip()
            article_json["pub_date"] = li.xpath('dd/a/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_auditgovlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the National Audit Office (audit.gov.cn), JSON lists.

    The endpoint returns JSONP-style content; the JSON payload is extracted
    with a regex. On page 1 the remaining pages are scheduled, then one
    article task is queued per result entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'"totalPageNum":(\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        # Strip any JSONP wrapper and parse the embedded JSON object.
        res_json = json.loads(re.findall(r'\{.*\}', para_dicts["data"]["1_1"]['html'])[0])
        for li in res_json['resultMap']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li['id']
            if not href:
                continue
            # Articles are served by a content endpoint keyed on the record id.
            url = f'https://www.audit.gov.cn/gdnps/html/content.jsp?id={href}'
            if 'htm' not in url:
                continue

            temp["rawid"] = href
            temp["sub_db_id"] = '99801'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li.get('zlrq', '')
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result



def policy_auditgovarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article callback for audit.gov.cn HTML pages; work is done in the ETL step."""
    return DealModel()

def policy_auditgovarticle1_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article callback for audit.gov.cn JSON pages; work is done in the ETL step."""
    return DealModel()


def policy_auditgovarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for National Audit Office HTML article pages.

    Unlike the sibling ETL callbacks, the publication date is taken from the
    article page itself (not the list page) and the task fails when it is
    missing. Builds the `policy_latest` and `policy_fulltext_latest` records
    and writes the attachment/file info back onto the originating task row.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    res = Selector(text=html)

    title = ''.join(res.xpath('//div[@class="con-article-title"]//text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()
    pub_date = clean_pubdate(''.join(res.xpath('//dd[@class="fb-time"]//text()').extract()).strip())
    pub_year = pub_date[:4]
    if not pub_date:
        # The date is mandatory for this source; fail loudly with context.
        raise Exception(f"pub_date not found: {provider_url}")
    pub_no = ''
    index_no = ''
    subject = ''
    legal_status = ''
    organ = ''

    fulltext_xpath = '//div[@id="textSize"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Abort the task with context so the failure can be traced to a URL.
        raise Exception(f"fulltext not found at {fulltext_xpath}: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99801'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "AUDITGOV"
    zt_provider = "auditgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment/file info (or an empty dict) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result

def policy_auditgovarticle1_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for audit.gov.cn policy articles delivered as a JSON payload.

    Parses the JSON object embedded in the downloaded page, builds one
    ``policy_latest`` metadata row and one ``policy_fulltext_latest`` row,
    and queues an update of the source row's ``other_dicts`` with any
    attachment info found by ``get_file_info``.

    :param callmodel: carries the downloaded page under
        ``para_dicts['data']['1_1']['html']`` and the task row as ``sql_model``.
    :return: populated ``EtlDealModel``.
    :raises Exception: when the payload's ``htmlContent`` full text is empty.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json.get('pub_date', ''))
    pub_year = pub_date[:4]
    # Raw string: '\{' is an invalid escape in a plain string literal and
    # raises a SyntaxWarning on newer Python versions.
    res_json = json.loads(re.findall(r'\{.*\}', html)[0])

    res = Selector(text=res_json['resultMap'][0]['htmlContent'])

    # Prefer the title from the article payload; fall back to the list page.
    title = res_json['resultMap'][0]['title']
    if not title:
        title = article_json['title'].strip()
    pub_no = res_json['resultMap'][0].get('wh', '')      # 文号 (document number)
    index_no = res_json['resultMap'][0].get('syh', '')   # 索引号 (index number)
    subject = res_json['resultMap'][0].get('ztfl', '')   # 主题分类 (subject class)
    legal_status = ''
    organ = res_json['resultMap'][0].get('fbjg', '')     # 发布机构 (issuing organ)

    fulltext = res_json['resultMap'][0]['htmlContent']
    if not fulltext:
        raise Exception("auditgov article: empty htmlContent fulltext")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99801'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "AUDITGOV"
    zt_provider = "auditgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # No fulltext xpath for the JSON variant; pass an empty selector hint
    # (the original used a pointless empty f-string here).
    file_info = get_file_info(data, res, '')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   国家体育总局
#   国家体育总局
def policy_sportgovlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for sport.gov.cn (General Administration of Sport).

    On page 1 it fans out one crawl row per remaining list page; on every
    page it queues one next-stage (article) row per entry found in the
    JSON payload embedded in the downloaded html.

    :param callmodel: callback model with the downloaded page and task row.
    :return: ``DealModel`` carrying the page fan-out and article inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw strings: '\d' / '\{' are invalid escapes in plain literals.
        max_count = re.findall(r'"totalPageNum":(\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: insert one task row for each remaining page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res_json = json.loads(re.findall(r'\{.*\}', para_dicts["data"]["1_1"]['html'])[0])
        for li in res_json['resultMap']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li['id']
            if not href:
                continue
            # Two detail-page endpoints depending on the list category.
            if 'treeValue=svobjcat' in callmodel.sql_model.list_rawid:
                url = f'https://www.sport.gov.cn/gdnps/content.jsp?id={href}'
            else:
                url = f'https://www.sport.gov.cn/gdnps/html/zhengce/contentlist.jsp?id={href}'

            temp["rawid"] = href
            temp["sub_db_id"] = '99802'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li.get('zlrq', '')
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_sportgovarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for sport.gov.cn: nothing extra to schedule,
    so an empty deal model is returned."""
    return DealModel()


def policy_sportgovarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for sport.gov.cn policy articles delivered as a JSON payload.

    Parses the JSON object embedded in the downloaded page, builds one
    ``policy_latest`` metadata row and one ``policy_fulltext_latest`` row,
    and queues an update of the source row's ``other_dicts`` with any
    attachment info found by ``get_file_info``.

    :param callmodel: carries the downloaded page under
        ``para_dicts['data']['1_1']['html']`` and the task row as ``sql_model``.
    :return: populated ``EtlDealModel``.
    :raises Exception: when the payload's ``htmlContent`` full text is empty.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']

    # Raw string: '\{' is an invalid escape in a plain string literal.
    res_json = json.loads(re.findall(r'\{.*\}', html)[0])
    res = Selector(text=res_json['resultMap'][0]['htmlContent'])

    pub_date = clean_pubdate(res_json['resultMap'][0]['scrq'])
    pub_year = pub_date[:4]

    # Prefer the title from the article payload; fall back to the list page.
    title = res_json['resultMap'][0]['title']
    if not title:
        title = article_json['title'].strip()
    pub_no = res_json['resultMap'][0].get('wh', '')      # 文号 (document number)
    index_no = res_json['resultMap'][0].get('syh', '')   # 索引号 (index number)
    subject = res_json['resultMap'][0].get('ztfl', '')   # 主题分类 (subject class)
    legal_status = ''
    organ = res_json['resultMap'][0].get('fbjg', '')     # 发布机构 (issuing organ)

    fulltext = res_json['resultMap'][0]['htmlContent']
    if not fulltext:
        raise Exception("sportgov article: empty htmlContent fulltext")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99802'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "SPORTGOV"
    zt_provider = "sportgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # No fulltext xpath for the JSON variant; pass an empty selector hint
    # (the original used a pointless empty f-string here).
    file_info = get_file_info(data, res, '')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   国家广播电视总局
#   国家广播电视总局
def policy_nrtagovlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for nrta.gov.cn (National Radio and TV Administration).

    The list is an XML-like feed of ``<record>`` nodes, 15 records per page,
    fetched three pages at a time (hence the ``step=3`` fan-out and the
    ``start``/``end`` record window written into ``list_json``).  For each
    record an article task row is queued.

    :param callmodel: callback model with the downloaded page and task row.
    :return: ``DealModel`` carrying the page fan-out and article inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string: '\d' is an invalid escape in a plain string literal.
        max_count = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 15)
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: insert one task row per 3-page window.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Strip db-managed / state columns before re-inserting the row.
            for key in ("id", "update_time", "create_time", "null_dicts",
                        "err_msg", "other_dicts", "state", "failcount"):
                sql_dict.pop(key)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 15 + 1
                end = (page + 2) * 15
                if end >= max_count:
                    end = max_count
                dic = {"start": f"{start}", "end": f"{end}", "page_info": f"{list_json['page_info']}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        for li in li_list:
            print(li.extract())
            result.befor_dicts.update.update({'page': total_page})
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href|div/a/@href').extract_first()
            base_url = f'https://www.nrta.gov.cn/col/col3730/index.html'
            url = parse.urljoin(base_url, href)
            # Only article pages (urls containing 'art_') are of interest.
            if 'art_' not in url:
                continue
            # rawid = filename without extension, e.g. '.../art_123.html' -> 'art_123'.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99803'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()|div/a/text()').extract_first()
            article_json["pub_date"] = li.xpath('div/a/span/text()|span/text()|b/text()').extract_first()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_nrtagovarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for nrta.gov.cn: nothing extra to schedule,
    so an empty deal model is returned."""
    return DealModel()


def policy_nrtagovarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for nrta.gov.cn policy article pages (plain html).

    Extracts title, publication metadata and full text from the downloaded
    html, builds one ``policy_latest`` row and one ``policy_fulltext_latest``
    row, and queues an update of the source row's ``other_dicts`` with any
    attachment info found by ``get_file_info``.

    :param callmodel: carries the downloaded page under
        ``para_dicts['data']['1_1']['html']`` and the task row as ``sql_model``.
    :return: populated ``EtlDealModel``.
    :raises Exception: when the page has no pub_date or no full text.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']

    res = Selector(text=html)
    # Prefer the on-page title; fall back to the list-page title.
    # (The original also pre-assigned title from article_json, but that value
    # was unconditionally overwritten here before any use.)
    title = cleaned(res.xpath('//div[@id="artTitPc"]//text()|//h1[@class="xxgk_gz_wztitle"]//text()').extract_first())
    if not title:
        title = article_json['title'].strip()
    pub_date = cleaned(article_json['pub_date'])
    pub_year = pub_date[:4]

    # Metadata table cells are matched by their Chinese labels.
    index_no = ''.join(res.xpath('//td[contains(text(),"索 引 号")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//td[contains(text(),"发布单位")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
    pub_no = ''.join(res.xpath('//td[contains(text(),"文　　号")]/following::td[1]/text()').extract()).strip()
    if not pub_date:
        raise Exception("nrtagov article: missing pub_date")
    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("nrtagov article: empty fulltext")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99803'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "NRTAGOV"
    zt_provider = "nrtagovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['organ'] = organ
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result
